6.1-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Christoph Hellwig <hch@xxxxxx>

[ Upstream commit 5db755fbb1a0de4a4cfd5d5edfaa19853b9c56e6 ]

Instead of a separate handler function that leaves no work in the
interrupt handler itself, split out a per-request end I/O helper and
clean up the coding style and variable naming while we're at it.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
Reviewed-by: Martin K. Petersen <martin.petersen@xxxxxxxxxx>
Acked-By: Anton Ivanov <anton.ivanov@xxxxxxxxxxxxxxxxxx>
Link: https://lore.kernel.org/r/20240531074837.1648501-2-hch@xxxxxx
Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
Stable-dep-of: 31ade7d4fdcf ("ubd: untagle discard vs write zeroes not support handling")
Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>
---
 arch/um/drivers/ubd_kern.c | 49 ++++++++++++++------------------------
 1 file changed, 18 insertions(+), 31 deletions(-)

diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 13a22a4613051..2670f951732c1 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -456,43 +456,30 @@ static int bulk_req_safe_read(
 	return n;
 }
 
-/* Called without dev->lock held, and only in interrupt context. */
-static void ubd_handler(void)
+static void ubd_end_request(struct io_thread_req *io_req)
 {
-	int n;
-	int count;
-
-	while(1){
-		n = bulk_req_safe_read(
-			thread_fd,
-			irq_req_buffer,
-			&irq_remainder,
-			&irq_remainder_size,
-			UBD_REQ_BUFFER_SIZE
-		);
-		if (n < 0) {
-			if(n == -EAGAIN)
-				break;
-			printk(KERN_ERR "spurious interrupt in ubd_handler, "
-			       "err = %d\n", -n);
-			return;
-		}
-		for (count = 0; count < n/sizeof(struct io_thread_req *); count++) {
-			struct io_thread_req *io_req = (*irq_req_buffer)[count];
-
-			if ((io_req->error == BLK_STS_NOTSUPP) && (req_op(io_req->req) == REQ_OP_DISCARD)) {
-				blk_queue_max_discard_sectors(io_req->req->q, 0);
-				blk_queue_max_write_zeroes_sectors(io_req->req->q, 0);
-			}
-			blk_mq_end_request(io_req->req, io_req->error);
-			kfree(io_req);
-		}
+	if (io_req->error == BLK_STS_NOTSUPP &&
+	    req_op(io_req->req) == REQ_OP_DISCARD) {
+		blk_queue_max_discard_sectors(io_req->req->q, 0);
+		blk_queue_max_write_zeroes_sectors(io_req->req->q, 0);
 	}
+	blk_mq_end_request(io_req->req, io_req->error);
+	kfree(io_req);
 }
 
 static irqreturn_t ubd_intr(int irq, void *dev)
 {
-	ubd_handler();
+	int len, i;
+
+	while ((len = bulk_req_safe_read(thread_fd, irq_req_buffer,
+			&irq_remainder, &irq_remainder_size,
+			UBD_REQ_BUFFER_SIZE)) >= 0) {
+		for (i = 0; i < len / sizeof(struct io_thread_req *); i++)
+			ubd_end_request((*irq_req_buffer)[i]);
+	}
+
+	if (len < 0 && len != -EAGAIN)
+		pr_err("spurious interrupt in %s, err = %d\n", __func__, len);
 	return IRQ_HANDLED;
 }
 
-- 
2.43.0