There is no point in sleeping for the expected I/O completion timeout
in the io_uring async polling model as we never poll for a specific
I/O.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
Tested-by: Mark Wunderlich <mark.wunderlich@xxxxxxxxx>
---
 block/blk-mq.c         | 3 ++-
 fs/io_uring.c          | 2 +-
 include/linux/blkdev.h | 2 ++
 3 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index d6e0beffaa6f3..b23dc0be01889 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4072,7 +4072,8 @@ int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags)
 	if (current->plug)
 		blk_flush_plug_list(current->plug, false);
 
-	if (q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
+	if (!(flags & BLK_POLL_NOSLEEP) &&
+	    q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
 		if (blk_mq_poll_hybrid(q, cookie))
 			return 1;
 	}
diff --git a/fs/io_uring.c b/fs/io_uring.c
index dc6af04d778f3..ba02d7d43334a 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2461,7 +2461,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
 			long min)
 {
-	unsigned int poll_flags = 0;
+	unsigned int poll_flags = BLK_POLL_NOSLEEP;
 	struct io_kiocb *req, *tmp;
 	LIST_HEAD(done);
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e177346bc0208..2b80c98fc373e 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -566,6 +566,8 @@ blk_status_t errno_to_blk_status(int errno);
 
 /* only poll the hardware once, don't continue until a completion was found */
 #define BLK_POLL_ONESHOT		(1 << 0)
+/* do not sleep to wait for the expected completion time */
+#define BLK_POLL_NOSLEEP		(1 << 1)
 int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags);
 
 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
-- 
2.30.2
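
For reference, not part of the patch: a minimal userspace sketch of the
IORING_SETUP_IOPOLL path whose kernel-side completion loop is io_do_iopoll().
It assumes liburing is installed and the backing device driver supports polled
I/O (e.g. NVMe with poll queues configured); the device path and sizes are
purely illustrative.

/* Illustrative only; build with: gcc -o iopoll iopoll.c -luring */
#include <liburing.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	void *buf;
	int fd, ret;

	/* O_DIRECT is required for reads/writes on an IOPOLL ring */
	fd = open("/dev/nvme0n1", O_RDONLY | O_DIRECT);	/* hypothetical device */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* An IOPOLL ring completes I/O by polling the hardware queue instead
	 * of waiting for an interrupt; the kernel loop is io_do_iopoll(). */
	ret = io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL);
	if (ret < 0) {
		fprintf(stderr, "queue_init: %d\n", ret);
		return 1;
	}

	if (posix_memalign(&buf, 4096, 4096))
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fd, buf, 4096, 0);
	io_uring_submit(&ring);

	/* With IOPOLL this call polls for completions rather than sleeping on
	 * an interrupt, which is the path BLK_POLL_NOSLEEP now covers. */
	ret = io_uring_wait_cqe(&ring, &cqe);
	if (ret == 0) {
		printf("read returned %d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	free(buf);
	close(fd);
	return 0;
}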