On 10/25/24 13:22, Ming Lei wrote:
...
> diff --git a/io_uring/rw.c b/io_uring/rw.c
> index 4bc0d762627d..5a2025d48804 100644
> --- a/io_uring/rw.c
> +++ b/io_uring/rw.c
> @@ -245,7 +245,8 @@ static int io_prep_rw_setup(struct io_kiocb *req, int ddir, bool do_import)
>  	if (io_rw_alloc_async(req))
>  		return -ENOMEM;
>  
> -	if (!do_import || io_do_buffer_select(req))
> +	if (!do_import || io_do_buffer_select(req) ||
> +	    io_use_leased_grp_kbuf(req))
>  		return 0;
>  
>  	rw = req->async_data;
> @@ -489,6 +490,11 @@ static bool __io_complete_rw_common(struct io_kiocb *req, long res)
>  	}
>  	req_set_fail(req);
>  	req->cqe.res = res;
> +	if (io_use_leased_grp_kbuf(req)) {

That's what I'm talking about, we're pushing more and more
into the generic paths (or patching every single hot opcode
there is). You said it's fine for ublk the way it was, i.e.
without tracking, so let's then pretend it's a ublk-specific
feature, kill that addition, and settle on that if that's the
way to go. There's a rough sketch of the ublk-side alternative
below, after the quoted hunk.

> +		struct io_async_rw *io = req->async_data;
> +
> +		io_req_zero_remained(req, &io->iter);
> +	}
>  	return false;
>  }
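
To sketch what I mean (untested, the helper name is made up,
and it assumes the lessor still has the ITER_DEST iter the
borrower was filling):

	/*
	 * ublk side: when a leased group buffer comes back only
	 * partially filled, zero the unfilled tail here instead of
	 * teaching the generic rw completion path about leased
	 * buffers.
	 */
	static void ublk_zero_unfilled(struct iov_iter *iter,
				       size_t filled, size_t total)
	{
		if (filled < total) {
			iov_iter_advance(iter, filled);
			iov_iter_zero(total - filled, iter);
		}
	}

That way the cost stays on the ublk side only, and the hot rw
path is left untouched.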
--
Pavel Begunkov