> -static int nvme_map_user_request(struct request *req, void __user *ubuffer,
> +static int nvme_map_user_request(struct request *req, u64 ubuffer,

The changes to pass ubuffer as an integer trip me up every time.  They
are obviously fine as we do the pointer conversion less often, but
maybe they'd be easier to follow if split into a prep patch.

> +	bool fixedbufs = ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED);
> 
> -	if (!vec)
> -		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
> -			GFP_KERNEL);
> -	else {
> +	if (vec) {

If we check IORING_URING_CMD_FIXED first this becomes a bit simpler,
and also works better with the block helper suggested earlier:

diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 1a45246f0d7a8..f46142dcb8f1e 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -94,34 +94,33 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
 	struct bio *bio = NULL;
 	void *meta = NULL;
 	int ret;
-	bool fixedbufs = ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED);
 
-	if (vec) {
-		struct iovec fast_iov[UIO_FASTIOV];
-		struct iovec *iov = fast_iov;
+	if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
 		struct iov_iter iter;
 
 		/* fixedbufs is only for non-vectored io */
-		WARN_ON_ONCE(fixedbufs);
-		ret = import_iovec(rq_data_dir(req), nvme_to_user_ptr(ubuffer),
-			bufflen, UIO_FASTIOV, &iov, &iter);
+		if (WARN_ON_ONCE(vec))
+			return -EINVAL;
+		ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
+				rq_data_dir(req), &iter, ioucmd);
 		if (ret < 0)
 			goto out;
-		ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
-		kfree(iov);
-	} else if (fixedbufs) {
+	} else if (vec) {
+		struct iovec fast_iov[UIO_FASTIOV];
+		struct iovec *iov = fast_iov;
 		struct iov_iter iter;
 
-		ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
-				rq_data_dir(req), &iter, ioucmd);
+		ret = import_iovec(rq_data_dir(req), nvme_to_user_ptr(ubuffer),
+			bufflen, UIO_FASTIOV, &iov, &iter);
 		if (ret < 0)
 			goto out;
 		ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
-	} else
+		kfree(iov);
+	} else {
 		ret = blk_rq_map_user(q, req, NULL,
-				nvme_to_user_ptr(ubuffer), bufflen,
-				GFP_KERNEL);
+				nvme_to_user_ptr(ubuffer), bufflen, GFP_KERNEL);
+	}
 	if (ret)
 		goto out;
 	bio = req->bio;
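To illustrate the prep patch idea above: a rough, untested sketch of
what the first patch could look like, just switching ubuffer to u64 and
applying nvme_to_user_ptr() right at the calls that actually need a
__user pointer, with no fixed buffer bits yet (parameter list and error
handling trimmed, so treat it as a sketch rather than the real patch):

	/*
	 * Sketch only: nvme_map_user_request() already takes a u64 here,
	 * and the pointer conversion sits next to the map/import calls.
	 */
	if (!vec) {
		ret = blk_rq_map_user(q, req, NULL,
				nvme_to_user_ptr(ubuffer), bufflen, GFP_KERNEL);
	} else {
		struct iovec fast_iov[UIO_FASTIOV];
		struct iovec *iov = fast_iov;
		struct iov_iter iter;

		ret = import_iovec(rq_data_dir(req), nvme_to_user_ptr(ubuffer),
				bufflen, UIO_FASTIOV, &iov, &iter);
		if (ret < 0)
			goto out;
		ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
		kfree(iov);
	}

That would keep the integer-vs-pointer churn isolated, so the fixedbufs
patch on top only has to add the io_uring_cmd_import_fixed() branch.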