On Fri, Nov 13, 2020 at 02:47:12PM +0100, Stefano Garzarella wrote:
> The previous implementation wrote only the status of each request.
> This patch implements a more accurate block device simulator,
> providing a ramdisk-like behavior.
> 
> Also handle VIRTIO_BLK_T_GET_ID request, always answering the
> "vdpa_blk_sim" string.

Maybe an ioctl to specify the id makes more sense.

> Signed-off-by: Stefano Garzarella <sgarzare@xxxxxxxxxx>
> ---
>  drivers/vdpa/vdpa_sim/vdpa_sim_blk.c | 151 +++++++++++++++++++++++----
>  1 file changed, 133 insertions(+), 18 deletions(-)
> 
> diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
> index 8e41b3ab98d5..68e74383322f 100644
> --- a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
> +++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
> @@ -7,6 +7,7 @@
>   */
>  
>  #include <linux/module.h>
> +#include <linux/blkdev.h>
>  #include <uapi/linux/virtio_blk.h>
>  
>  #include "vdpa_sim.h"
> @@ -24,10 +25,137 @@
>  
>  static struct vdpasim *vdpasim_blk_dev;
>  
> +static int vdpasim_blk_handle_req(struct vdpasim *vdpasim,
> +				  struct vdpasim_virtqueue *vq)
> +{
> +	size_t wrote = 0, to_read = 0, to_write = 0;
> +	struct virtio_blk_outhdr hdr;
> +	uint8_t status;
> +	uint32_t type;
> +	ssize_t bytes;
> +	loff_t offset;
> +	int i, ret;
> +
> +	vringh_kiov_cleanup(&vq->riov);
> +	vringh_kiov_cleanup(&vq->wiov);
> +
> +	ret = vringh_getdesc_iotlb(&vq->vring, &vq->riov, &vq->wiov,
> +				   &vq->head, GFP_ATOMIC);
> +	if (ret != 1)
> +		return ret;
> +
> +	for (i = 0; i < vq->wiov.used; i++)
> +		to_write += vq->wiov.iov[i].iov_len;
> +	to_write -= 1; /* last byte is the status */
> +
> +	for (i = 0; i < vq->riov.used; i++)
> +		to_read += vq->riov.iov[i].iov_len;
> +
> +	bytes = vringh_iov_pull_iotlb(&vq->vring, &vq->riov, &hdr, sizeof(hdr));
> +	if (bytes != sizeof(hdr))
> +		return 0;
> +
> +	to_read -= bytes;
> +
> +	type = le32_to_cpu(hdr.type);
> +	offset = le64_to_cpu(hdr.sector) << SECTOR_SHIFT;
> +	status = VIRTIO_BLK_S_OK;
> +
> +	switch (type) {
> +	case VIRTIO_BLK_T_IN:
> +		if (offset + to_write > VDPASIM_BLK_CAPACITY << SECTOR_SHIFT) {
> +			dev_err(&vdpasim->vdpa.dev,
> +				"reading over the capacity - offset: 0x%llx len: 0x%lx\n",
> +				offset, to_write);
> +			status = VIRTIO_BLK_S_IOERR;
> +			break;
> +		}
> +
> +		bytes = vringh_iov_push_iotlb(&vq->vring, &vq->wiov,
> +					      vdpasim->buffer + offset,
> +					      to_write);
> +		if (bytes < 0) {
> +			dev_err(&vdpasim->vdpa.dev,
> +				"vringh_iov_push_iotlb() error: %ld offset: 0x%llx len: 0x%lx\n",
> +				bytes, offset, to_write);
> +			status = VIRTIO_BLK_S_IOERR;
> +			break;
> +		}
> +
> +		wrote += bytes;
> +		break;
> +
> +	case VIRTIO_BLK_T_OUT:
> +		if (offset + to_read > VDPASIM_BLK_CAPACITY << SECTOR_SHIFT) {
> +			dev_err(&vdpasim->vdpa.dev,
> +				"writing over the capacity - offset: 0x%llx len: 0x%lx\n",
> +				offset, to_read);
> +			status = VIRTIO_BLK_S_IOERR;
> +			break;
> +		}
> +
> +		bytes = vringh_iov_pull_iotlb(&vq->vring, &vq->riov,
> +					      vdpasim->buffer + offset,
> +					      to_read);
> +		if (bytes < 0) {
> +			dev_err(&vdpasim->vdpa.dev,
> +				"vringh_iov_pull_iotlb() error: %ld offset: 0x%llx len: 0x%lx\n",
> +				bytes, offset, to_read);
> +			status = VIRTIO_BLK_S_IOERR;
> +			break;
> +		}
> +		break;
> +
> +	case VIRTIO_BLK_T_GET_ID: {
> +		char id[VIRTIO_BLK_ID_BYTES] = "vdpa_blk_sim";
> +
> +		bytes = vringh_iov_push_iotlb(&vq->vring,
> +					      &vq->wiov, id,
> +					      VIRTIO_BLK_ID_BYTES);
> +		if (bytes < 0) {
> +			dev_err(&vdpasim->vdpa.dev,
> +				"vringh_iov_push_iotlb() error: %ld\n", bytes);
> +			status = VIRTIO_BLK_S_IOERR;
> +			break;
> +		}
> +
> +		wrote += bytes;
> +		break;
> +	}
> +
> +	default:
> +		dev_warn(&vdpasim->vdpa.dev,
> +			 "Unsupported request type %d\n", type);
> +		status = VIRTIO_BLK_S_IOERR;
> +		break;
> +	}
> +
> +	/* if VIRTIO_BLK_T_IN or VIRTIO_BLK_T_GET_ID fail, we need to skip
> +	 * the remaining bytes to put the status in the last byte
> +	 */
> +	if (to_write - wrote > 0) {
> +		vringh_iov_push_iotlb(&vq->vring, &vq->wiov, NULL,
> +				      to_write - wrote);
> +	}
> +
> +	/* last byte is the status */
> +	bytes = vringh_iov_push_iotlb(&vq->vring, &vq->wiov, &status, 1);
> +	if (bytes != 1)
> +		return 0;
> +
> +	wrote += bytes;
> +
> +	/* Make sure data is wrote before advancing index */
> +	smp_wmb();
> +
> +	vringh_complete_iotlb(&vq->vring, vq->head, wrote);
> +
> +	return ret;
> +}
> +
>  static void vdpasim_blk_work(struct work_struct *work)
>  {
>  	struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
> -	u8 status = VIRTIO_BLK_S_OK;
>  	int i;
>  
>  	spin_lock(&vdpasim->lock);
> @@ -41,21 +169,7 @@ static void vdpasim_blk_work(struct work_struct *work)
>  		if (!vq->ready)
>  			continue;
>  
> -		while (vringh_getdesc_iotlb(&vq->vring, &vq->riov, &vq->wiov,
> -					    &vq->head, GFP_ATOMIC) > 0) {
> -
> -			int write;
> -
> -			vq->wiov.i = vq->wiov.used - 1;
> -			write = vringh_iov_push_iotlb(&vq->vring, &vq->wiov, &status, 1);
> -			if (write <= 0)
> -				break;
> -
> -			/* Make sure data is wrote before advancing index */
> -			smp_wmb();
> -
> -			vringh_complete_iotlb(&vq->vring, vq->head, write);
> -
> +		while (vdpasim_blk_handle_req(vdpasim, vq) > 0) {
>  			/* Make sure used is visible before rasing the interrupt. */
>  			smp_wmb();
>  
> @@ -67,6 +181,7 @@ static void vdpasim_blk_work(struct work_struct *work)
>  				vq->cb(vq->private);
>  			local_bh_enable();
>  		}
> +
>  	}
>  out:
>  	spin_unlock(&vdpasim->lock);
> @@ -84,7 +199,7 @@ static void vdpasim_blk_update_config(struct vdpasim *vdpasim)
>  	config->num_queues = cpu_to_vdpasim16(vdpasim, VDPASIM_BLK_VQ_NUM);
>  	config->min_io_size = cpu_to_vdpasim16(vdpasim, 1);
>  	config->opt_io_size = cpu_to_vdpasim32(vdpasim, 1);
> -	config->blk_size = cpu_to_vdpasim32(vdpasim, 512);
> +	config->blk_size = cpu_to_vdpasim32(vdpasim, SECTOR_SIZE);
>  }
>  
>  static int __init vdpasim_blk_init(void)
> @@ -100,7 +215,7 @@ static int __init vdpasim_blk_init(void)
>  	attr.device.update_config = vdpasim_blk_update_config;
>  
>  	attr.work_fn = vdpasim_blk_work;
> -	attr.buffer_size = PAGE_SIZE;
> +	attr.buffer_size = VDPASIM_BLK_CAPACITY << SECTOR_SHIFT;
>  
>  	vdpasim_blk_dev = vdpasim_create(&attr);
>  	if (IS_ERR(vdpasim_blk_dev)) {
> -- 
> 2.26.2