There are two remaining uses of the old async data allocator that do
not rely on the alloc cache. I don't want to make them use the new
allocator helper because that would require an if (cache) check, which
would result in dead code for the cached case (for callers passing a
cache, gcc can't prove the cache isn't NULL and will therefore preserve
the check).

Since this is an inline function and just a few lines long, keep a
second helper to deal with cases where we don't have an async data
cache.

No functional change intended here. This is just moving the helper
around and making it inline.

Signed-off-by: Gabriel Krisman Bertazi <krisman@xxxxxxx>
---
 io_uring/io_uring.c | 13 -------------
 io_uring/io_uring.h | 12 ++++++++++++
 io_uring/timeout.c  |  5 ++---
 io_uring/waitid.c   |  4 ++--
 4 files changed, 16 insertions(+), 18 deletions(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index bd71782057de..08c42a0e3e74 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1616,19 +1616,6 @@ io_req_flags_t io_file_get_flags(struct file *file)
 	return res;
 }
 
-bool io_alloc_async_data(struct io_kiocb *req)
-{
-	const struct io_issue_def *def = &io_issue_defs[req->opcode];
-
-	WARN_ON_ONCE(!def->async_size);
-	req->async_data = kmalloc(def->async_size, GFP_KERNEL);
-	if (req->async_data) {
-		req->flags |= REQ_F_ASYNC_DATA;
-		return false;
-	}
-	return true;
-}
-
 static u32 io_get_sequence(struct io_kiocb *req)
 {
 	u32 seq = req->ctx->cached_sq_head;
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 91414a4da1b2..cbd2c0358bb1 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -12,6 +12,7 @@
 #include "io-wq.h"
 #include "slist.h"
 #include "filetable.h"
+#include "opdef.h"
 
 #ifndef CREATE_TRACE_POINTS
 #include <trace/events/io_uring.h>
@@ -233,6 +234,17 @@ static inline void *io_uring_alloc_async_data(struct io_alloc_cache *cache,
 	return req->async_data;
 }
 
+static inline void *io_uring_alloc_async_data_nocache(struct io_kiocb *req)
+{
+	const struct io_issue_def *def = &io_issue_defs[req->opcode];
+
+	WARN_ON_ONCE(!def->async_size);
+	req->async_data = kmalloc(def->async_size, GFP_KERNEL);
+	if (req->async_data)
+		req->flags |= REQ_F_ASYNC_DATA;
+	return req->async_data;
+}
+
 static inline bool req_has_async_data(struct io_kiocb *req)
 {
 	return req->flags & REQ_F_ASYNC_DATA;
diff --git a/io_uring/timeout.c b/io_uring/timeout.c
index 5b12bd6a804c..078dfd6fcf71 100644
--- a/io_uring/timeout.c
+++ b/io_uring/timeout.c
@@ -526,10 +526,9 @@ static int __io_timeout_prep(struct io_kiocb *req,
 
 	if (WARN_ON_ONCE(req_has_async_data(req)))
 		return -EFAULT;
-	if (io_alloc_async_data(req))
+	data = io_uring_alloc_async_data_nocache(req);
+	if (!data)
 		return -ENOMEM;
-
-	data = req->async_data;
 	data->req = req;
 	data->flags = flags;
 
diff --git a/io_uring/waitid.c b/io_uring/waitid.c
index daef5dd644f0..6778c0ee76c4 100644
--- a/io_uring/waitid.c
+++ b/io_uring/waitid.c
@@ -303,10 +303,10 @@ int io_waitid(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_waitid_async *iwa;
 	int ret;
 
-	if (io_alloc_async_data(req))
+	iwa = io_uring_alloc_async_data_nocache(req);
+	if (!iwa)
 		return -ENOMEM;
 
-	iwa = req->async_data;
 	iwa->req = req;
 
 	ret = kernel_waitid_prepare(&iwa->wo, iw->which, iw->upid, &iw->info,
-- 
2.47.0
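
For illustration only (not part of the patch): a rough sketch of the single
merged helper the message above argues against. The name
io_uring_alloc_async_data_merged is made up, and the cached branch is
simplified to a bare io_alloc_cache_get() with no cache-miss fallback; the
only point it shows is the if (cache) test that callers which always pass a
cache would still carry, since gcc cannot prove the pointer is non-NULL here.

/*
 * Hypothetical merged helper, for illustration only. The cached path is
 * simplified (no fallback allocation when the cache is empty); the point
 * is the if (cache) branch below.
 */
static inline void *io_uring_alloc_async_data_merged(struct io_alloc_cache *cache,
						      struct io_kiocb *req)
{
	const struct io_issue_def *def = &io_issue_defs[req->opcode];

	WARN_ON_ONCE(!def->async_size);

	if (cache)
		/* always true for cached callers, but gcc keeps the check */
		req->async_data = io_alloc_cache_get(cache);
	else
		/* the two remaining nocache users would land here */
		req->async_data = kmalloc(def->async_size, GFP_KERNEL);

	if (req->async_data)
		req->flags |= REQ_F_ASYNC_DATA;
	return req->async_data;
}

Keeping the separate _nocache() helper instead means neither class of caller
pays for the other's branch, which is the trade-off the commit message makes.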