The recently introduced __xchg() helper simplifies the code: it replaces the open-coded fetch-then-replace pointer sequences in io_req_find_next() (load req->link, clear it, return the old value) and wq_stack_extract() (pop the top node off the stack). No functional change. Signed-off-by: Andrzej Hajda <andrzej.hajda@xxxxxxxxx> --- io_uring/io_uring.c | 7 ++----- io_uring/slist.h | 6 ++---- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index 2ac1cd8d23ea62..2b46a692d69022 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -54,6 +54,7 @@ #include <linux/fdtable.h> #include <linux/mm.h> #include <linux/mman.h> +#include <linux/non-atomic/xchg.h> #include <linux/percpu.h> #include <linux/slab.h> #include <linux/bvec.h> @@ -1095,8 +1096,6 @@ static void __io_req_find_next_prep(struct io_kiocb *req) static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req) { - struct io_kiocb *nxt; - /* * If LINK is set, we have dependent requests in this chain. If we * didn't fail this request, queue the first one up, moving any other @@ -1105,9 +1104,7 @@ static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req) */ if (unlikely(req->flags & IO_DISARM_MASK)) __io_req_find_next_prep(req); - nxt = req->link; - req->link = NULL; - return nxt; + return __xchg(&req->link, NULL); } static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked) diff --git a/io_uring/slist.h b/io_uring/slist.h index f27601fa46607b..d3a743a1adc626 100644 --- a/io_uring/slist.h +++ b/io_uring/slist.h @@ -2,6 +2,7 @@ #define INTERNAL_IO_SLIST_H #include <linux/io_uring_types.h> +#include <linux/non-atomic/xchg.h> #define wq_list_for_each(pos, prv, head) \ for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next) @@ -121,10 +122,7 @@ static inline void wq_list_del(struct io_wq_work_list *list, static inline struct io_wq_work_node *wq_stack_extract(struct io_wq_work_node *stack) { - struct io_wq_work_node *node = stack->next; - - stack->next = node->next; - return node; + return __xchg(&stack->next, stack->next->next); } static inline struct io_wq_work *wq_next_work(struct io_wq_work *work) -- 2.34.1