virtio: fixes, cleanups

Some bug fixes/cleanups.
 Deprecated scsi passthrough for blk removed.
 
 Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
 -----BEGIN PGP SIGNATURE-----
 
 iQFDBAABCAAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAl49E/4PHG1zdEByZWRo
 YXQuY29tAAoJECgfDbjSjVRpyecH/AlBzCOlv9kBHKvx30h2QTgbvZlZM++SRQ18
 XAuvU/gRVTPLeSsXnJGz0hMD8hxBti6esqvxHzSzs2a6DqkqLrRdnMXsjs6QlAdX
 6NwP4VesL7RNKTAjjrtmXQMr8iADtTy8FKCw/sZM+6sqhPeKAzFbBrjfH6amINru
 orEF+eGwNXLkegK4+QVQx8f1rlIm7+/Z4lAP75FsaisYWLxklvn3VjZ7YjsCNexi
 4zMxv64W8AHCRJK8k7/+vluedwwTghY9ayubw4zeRWmcfRw568bxlCZUQBmNWspC
 lj4/ZWzGmd60UQx9fUvEd7T6QCRzaaUYVzXC0Mh8I3pLV+V5tKY=
 =uqtg
 -----END PGP SIGNATURE-----

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio updates from Michael Tsirkin:
 "Some bug fixes/cleanups.

  The deprecated scsi passthrough for virtio_blk is removed"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  virtio_balloon: Fix memory leaks on errors in virtballoon_probe()
  virtio-balloon: Fix memory leak when unloading while hinting is in progress
  virtio_balloon: prevent pfn array overflow
  virtio-blk: remove VIRTIO_BLK_F_SCSI support
  virtio-pci: check name when counting MSI-X vectors
  virtio-balloon: initialize all vq callbacks
  virtio-mmio: convert to devm_platform_ioremap_resource

Committed by Linus Torvalds on 2020-02-07 12:26:34 -08:00
commit e0f121c5cc
6 changed files with 22 additions and 145 deletions

@@ -1,5 +1,4 @@
CONFIG_VIRTIO_BLK=y
CONFIG_VIRTIO_BLK_SCSI=y
CONFIG_SCSI_VIRTIO=y
CONFIG_VIRTIO_NET=y
CONFIG_NET_FAILOVER=y

@@ -432,16 +432,6 @@ config VIRTIO_BLK
This is the virtual block driver for virtio. It can be used with
QEMU based VMMs (like KVM or Xen). Say Y or M.
config VIRTIO_BLK_SCSI
bool "SCSI passthrough request for the Virtio block driver"
depends on VIRTIO_BLK
select BLK_SCSI_REQUEST
---help---
Enable support for SCSI passthrough (e.g. the SG_IO ioctl) on
virtio-blk devices. This is only supported for the legacy
virtio protocol and not enabled by default by any hypervisor.
You probably want to use virtio-scsi instead.
config BLK_DEV_RBD
tristate "Rados block device (RBD)"
depends on INET && BLOCK
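
For context on what is being removed: the help text above refers to the SG_IO ioctl, which let userspace send raw SCSI commands through a virtio-blk disk when the legacy VIRTIO_BLK_F_SCSI feature was negotiated. Below is a minimal userspace sketch of that interface (illustration only: /dev/vda is an example path, and once the passthrough support is gone the ioctl is expected to fail with ENOTTY, so a virtio-scsi disk should be used for passthrough instead).

/* sg_inquiry.c - send a SCSI INQUIRY via SG_IO (sketch, not part of this patch) */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	/* INQUIRY, 96-byte response */
	unsigned char buf[96] = { 0 };
	unsigned char sense[32] = { 0 };
	struct sg_io_hdr io = {
		.interface_id	 = 'S',
		.dxfer_direction = SG_DXFER_FROM_DEV,
		.cmd_len	 = sizeof(cdb),
		.mx_sb_len	 = sizeof(sense),
		.dxfer_len	 = sizeof(buf),
		.dxferp		 = buf,
		.cmdp		 = cdb,
		.sbp		 = sense,
		.timeout	 = 5000,	/* milliseconds */
	};
	int fd = open("/dev/vda", O_RDONLY);	/* example device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, SG_IO, &io) < 0) {
		perror("SG_IO");	/* expect ENOTTY without VIRTIO_BLK_F_SCSI */
		close(fd);
		return 1;
	}
	printf("vendor: %.8s\n", (const char *)buf + 8);	/* INQUIRY bytes 8-15 */
	close(fd);
	return 0;
}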

@@ -11,7 +11,6 @@
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <scsi/scsi_cmnd.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-virtio.h>
@@ -56,11 +55,6 @@ struct virtio_blk {
};
struct virtblk_req {
#ifdef CONFIG_VIRTIO_BLK_SCSI
struct scsi_request sreq; /* for SCSI passthrough, must be first */
u8 sense[SCSI_SENSE_BUFFERSIZE];
struct virtio_scsi_inhdr in_hdr;
#endif
struct virtio_blk_outhdr out_hdr;
u8 status;
struct scatterlist sg[];
@@ -78,80 +72,6 @@ static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
}
}
/*
* If this is a packet command we need a couple of additional headers. Behind
* the normal outhdr we put a segment with the scsi command block, and before
* the normal inhdr we put the sense data and the inhdr with additional status
* information.
*/
#ifdef CONFIG_VIRTIO_BLK_SCSI
static int virtblk_add_req_scsi(struct virtqueue *vq, struct virtblk_req *vbr,
struct scatterlist *data_sg, bool have_data)
{
struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
unsigned int num_out = 0, num_in = 0;
sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
sgs[num_out++] = &hdr;
sg_init_one(&cmd, vbr->sreq.cmd, vbr->sreq.cmd_len);
sgs[num_out++] = &cmd;
if (have_data) {
if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
sgs[num_out++] = data_sg;
else
sgs[num_out + num_in++] = data_sg;
}
sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE);
sgs[num_out + num_in++] = &sense;
sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
sgs[num_out + num_in++] = &inhdr;
sg_init_one(&status, &vbr->status, sizeof(vbr->status));
sgs[num_out + num_in++] = &status;
return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
static inline void virtblk_scsi_request_done(struct request *req)
{
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
struct virtio_blk *vblk = req->q->queuedata;
struct scsi_request *sreq = &vbr->sreq;
sreq->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual);
sreq->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len);
sreq->result = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors);
}
static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long data)
{
struct gendisk *disk = bdev->bd_disk;
struct virtio_blk *vblk = disk->private_data;
/*
* Only allow the generic SCSI ioctls if the host can support it.
*/
if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
return -ENOTTY;
return scsi_cmd_blk_ioctl(bdev, mode, cmd,
(void __user *)data);
}
#else
static inline int virtblk_add_req_scsi(struct virtqueue *vq,
struct virtblk_req *vbr, struct scatterlist *data_sg,
bool have_data)
{
return -EIO;
}
static inline void virtblk_scsi_request_done(struct request *req)
{
}
#define virtblk_ioctl NULL
#endif /* CONFIG_VIRTIO_BLK_SCSI */
static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
struct scatterlist *data_sg, bool have_data)
{
@@ -216,13 +136,6 @@ static inline void virtblk_request_done(struct request *req)
req->special_vec.bv_offset);
}
switch (req_op(req)) {
case REQ_OP_SCSI_IN:
case REQ_OP_SCSI_OUT:
virtblk_scsi_request_done(req);
break;
}
blk_mq_end_request(req, virtblk_result(vbr));
}
@@ -299,10 +212,6 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
type = VIRTIO_BLK_T_WRITE_ZEROES;
unmap = !(req->cmd_flags & REQ_NOUNMAP);
break;
case REQ_OP_SCSI_IN:
case REQ_OP_SCSI_OUT:
type = VIRTIO_BLK_T_SCSI_CMD;
break;
case REQ_OP_DRV_IN:
type = VIRTIO_BLK_T_GET_ID;
break;
@@ -333,10 +242,7 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
}
spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
if (blk_rq_is_scsi(req))
err = virtblk_add_req_scsi(vblk->vqs[qid].vq, vbr, vbr->sg, num);
else
err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
if (err) {
virtqueue_kick(vblk->vqs[qid].vq);
blk_mq_stop_hw_queue(hctx);
@@ -404,10 +310,6 @@ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
}
static const struct block_device_operations virtblk_fops = {
.ioctl = virtblk_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = blkdev_compat_ptr_ioctl,
#endif
.owner = THIS_MODULE,
.getgeo = virtblk_getgeo,
};
@@ -686,9 +588,6 @@ static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
struct virtio_blk *vblk = set->driver_data;
struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);
#ifdef CONFIG_VIRTIO_BLK_SCSI
vbr->sreq.sense = vbr->sense;
#endif
sg_init_table(vbr->sg, vblk->sg_elems);
return 0;
}
@@ -701,23 +600,11 @@ static int virtblk_map_queues(struct blk_mq_tag_set *set)
vblk->vdev, 0);
}
#ifdef CONFIG_VIRTIO_BLK_SCSI
static void virtblk_initialize_rq(struct request *req)
{
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
scsi_req_init(&vbr->sreq);
}
#endif
static const struct blk_mq_ops virtio_mq_ops = {
.queue_rq = virtio_queue_rq,
.commit_rqs = virtio_commit_rqs,
.complete = virtblk_request_done,
.init_request = virtblk_init_request,
#ifdef CONFIG_VIRTIO_BLK_SCSI
.initialize_rq_fn = virtblk_initialize_rq,
#endif
.map_queues = virtblk_map_queues,
};
@@ -994,9 +881,6 @@ static const struct virtio_device_id id_table[] = {
static unsigned int features_legacy[] = {
VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
#ifdef CONFIG_VIRTIO_BLK_SCSI
VIRTIO_BLK_F_SCSI,
#endif
VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
}

@@ -158,6 +158,8 @@ static void set_page_pfns(struct virtio_balloon *vb,
{
unsigned int i;
BUILD_BUG_ON(VIRTIO_BALLOON_PAGES_PER_PAGE > VIRTIO_BALLOON_ARRAY_PFNS_MAX);
/*
* Set balloon pfns pointing at this page.
* Note that the first pfn points at start of the page.
@@ -475,7 +477,9 @@ static int init_vqs(struct virtio_balloon *vb)
names[VIRTIO_BALLOON_VQ_INFLATE] = "inflate";
callbacks[VIRTIO_BALLOON_VQ_DEFLATE] = balloon_ack;
names[VIRTIO_BALLOON_VQ_DEFLATE] = "deflate";
callbacks[VIRTIO_BALLOON_VQ_STATS] = NULL;
names[VIRTIO_BALLOON_VQ_STATS] = NULL;
callbacks[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
names[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
@@ -899,8 +903,7 @@ static int virtballoon_probe(struct virtio_device *vdev)
vb->vb_dev_info.inode = alloc_anon_inode(balloon_mnt->mnt_sb);
if (IS_ERR(vb->vb_dev_info.inode)) {
err = PTR_ERR(vb->vb_dev_info.inode);
kern_unmount(balloon_mnt);
goto out_del_vqs;
goto out_kern_unmount;
}
vb->vb_dev_info.inode->i_mapping->a_ops = &balloon_aops;
#endif
@@ -911,13 +914,13 @@ static int virtballoon_probe(struct virtio_device *vdev)
*/
if (virtqueue_get_vring_size(vb->free_page_vq) < 2) {
err = -ENOSPC;
goto out_del_vqs;
goto out_iput;
}
vb->balloon_wq = alloc_workqueue("balloon-wq",
WQ_FREEZABLE | WQ_CPU_INTENSIVE, 0);
if (!vb->balloon_wq) {
err = -ENOMEM;
goto out_del_vqs;
goto out_iput;
}
INIT_WORK(&vb->report_free_page_work, report_free_page_func);
vb->cmd_id_received_cache = VIRTIO_BALLOON_CMD_ID_STOP;
@@ -951,6 +954,12 @@ static int virtballoon_probe(struct virtio_device *vdev)
out_del_balloon_wq:
if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
destroy_workqueue(vb->balloon_wq);
out_iput:
#ifdef CONFIG_BALLOON_COMPACTION
iput(vb->vb_dev_info.inode);
out_kern_unmount:
kern_unmount(balloon_mnt);
#endif
out_del_vqs:
vdev->config->del_vqs(vdev);
out_free_vb:
@@ -966,6 +975,10 @@ static void remove_common(struct virtio_balloon *vb)
leak_balloon(vb, vb->num_pages);
update_balloon_size(vb);
/* There might be free pages that are being reported: release them. */
if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
return_free_pages_to_mm(vb, ULONG_MAX);
/* Now we reset the device so we can clean up the queues. */
vb->vdev->config->reset(vb->vdev);

@@ -531,18 +531,9 @@ static void virtio_mmio_release_dev(struct device *_d)
static int virtio_mmio_probe(struct platform_device *pdev)
{
struct virtio_mmio_device *vm_dev;
struct resource *mem;
unsigned long magic;
int rc;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem)
return -EINVAL;
if (!devm_request_mem_region(&pdev->dev, mem->start,
resource_size(mem), pdev->name))
return -EBUSY;
vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL);
if (!vm_dev)
return -ENOMEM;
@@ -554,9 +545,9 @@ static int virtio_mmio_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&vm_dev->virtqueues);
spin_lock_init(&vm_dev->lock);
vm_dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
if (vm_dev->base == NULL)
return -EFAULT;
vm_dev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(vm_dev->base))
return PTR_ERR(vm_dev->base);
/* Check magic value */
magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
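
Note on the conversion above: devm_platform_ioremap_resource() folds the resource lookup, the memory-region request and the ioremap into a single managed call, and it reports failure with an ERR_PTR() rather than NULL, which is why the error check switches to IS_ERR()/PTR_ERR(). Roughly what the helper does internally (simplified sketch, for illustration only):

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

/* Simplified equivalent of devm_platform_ioremap_resource(pdev, 0):
 * look up the first MEM resource, then request and map it through the
 * managed devres helper, which returns an ERR_PTR() on failure. */
static void __iomem *sketch_ioremap_first_mem(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	return devm_ioremap_resource(&pdev->dev, res);
}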

@@ -294,7 +294,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
/* Best option: one for change interrupt, one per vq. */
nvectors = 1;
for (i = 0; i < nvqs; ++i)
if (callbacks[i])
if (names[i] && callbacks[i])
++nvectors;
} else {
/* Second best: one for change, shared for all vqs. */
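
This check pairs with the virtio-balloon "initialize all vq callbacks" change above: a driver may pass find_vqs() array slots whose name is NULL for queues it is not going to use, and such slots never get a virtqueue or an interrupt, so they must not consume an MSI-X vector. A hypothetical caller illustrating the case (the demo_* names are made up for this sketch):

#include <linux/virtio.h>
#include <linux/virtio_config.h>

static void demo_rx_done(struct virtqueue *vq) { }
static void demo_tx_done(struct virtqueue *vq) { }
static void demo_ctrl_done(struct virtqueue *vq) { }

/* Four possible queues, only the first two in use.  With the fix,
 * vp_find_vqs_msix() asks for 1 (config change) + 2 vectors; before it,
 * the third slot's non-NULL callback was counted even though its NULL
 * name means no queue is ever created for it. */
static struct virtqueue *demo_vqs[4];
static vq_callback_t *demo_callbacks[4] = {
	demo_rx_done, demo_tx_done, demo_ctrl_done, NULL,
};
static const char * const demo_names[4] = { "rx", "tx", NULL, NULL };

static int demo_setup_vqs(struct virtio_device *vdev)
{
	return virtio_find_vqs(vdev, 4, demo_vqs, demo_callbacks,
			       demo_names, NULL);
}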