[PATCH for-next 04/12] io_uring: reschedule retargeting at shutdown of ring

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



When the ring shuts down, instead of waiting for the work to release its
reference, just reschedule it to run immediately and get the reference back that way.

Signed-off-by: Dylan Yudaken <dylany@xxxxxxxx>
---
 io_uring/io_uring.c |  1 +
 io_uring/rsrc.c     | 26 +++++++++++++++++++++-----
 io_uring/rsrc.h     |  1 +
 3 files changed, 23 insertions(+), 5 deletions(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index ea2260359c56..32eb305c4ce7 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2751,6 +2751,7 @@ static __cold void io_ring_exit_work(struct work_struct *work)
 		}
 
 		io_req_caches_free(ctx);
+		io_rsrc_retarget_exiting(ctx);
 
 		if (WARN_ON_ONCE(time_after(jiffies, timeout))) {
 			/* there is little hope left, don't run it too often */
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 8d0d40713a63..40b37899e943 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -248,12 +248,20 @@ static unsigned int io_rsrc_retarget_table(struct io_ring_ctx *ctx,
 	return refs;
 }
 
-static void io_rsrc_retarget_schedule(struct io_ring_ctx *ctx)
+static void io_rsrc_retarget_schedule(struct io_ring_ctx *ctx, bool delay)
 	__must_hold(&ctx->uring_lock)
 {
-	percpu_ref_get(&ctx->refs);
-	mod_delayed_work(system_wq, &ctx->rsrc_retarget_work, 60 * HZ);
-	ctx->rsrc_retarget_scheduled = true;
+	unsigned long del;
+
+	if (delay)
+		del = 60 * HZ;
+	else
+		del = 0;
+
+	if (likely(!mod_delayed_work(system_wq, &ctx->rsrc_retarget_work, del))) {
+		percpu_ref_get(&ctx->refs);
+		ctx->rsrc_retarget_scheduled = true;
+	}
 }
 
 static void io_retarget_rsrc_wq_cb(struct io_wq_work *work, void *data)
@@ -332,6 +340,14 @@ void io_rsrc_retarget_work(struct work_struct *work)
 	percpu_ref_put(&ctx->refs);
 }
 
+void io_rsrc_retarget_exiting(struct io_ring_ctx *ctx)
+{
+	mutex_lock(&ctx->uring_lock);
+	if (ctx->rsrc_retarget_scheduled)
+		io_rsrc_retarget_schedule(ctx, false);
+	mutex_unlock(&ctx->uring_lock);
+}
+
 void io_wait_rsrc_data(struct io_rsrc_data *data)
 {
 	if (data && !atomic_dec_and_test(&data->refs))
@@ -414,7 +430,7 @@ void io_rsrc_node_switch(struct io_ring_ctx *ctx,
 		percpu_ref_kill(&rsrc_node->refs);
 		ctx->rsrc_node = NULL;
 		if (!ctx->rsrc_retarget_scheduled)
-			io_rsrc_retarget_schedule(ctx);
+			io_rsrc_retarget_schedule(ctx, true);
 	}
 
 	if (!ctx->rsrc_node) {
diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
index 2b94df8fd9e8..93c66475796e 100644
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -55,6 +55,7 @@ struct io_mapped_ubuf {
 
 void io_rsrc_put_work(struct work_struct *work);
 void io_rsrc_retarget_work(struct work_struct *work);
+void io_rsrc_retarget_exiting(struct io_ring_ctx *ctx);
 void io_rsrc_refs_refill(struct io_ring_ctx *ctx);
 void io_wait_rsrc_data(struct io_rsrc_data *data);
 void io_rsrc_node_destroy(struct io_rsrc_node *ref_node);
-- 
2.30.2





[Index of Archives]     [Linux Samsung SoC]     [Linux Rockchip SoC]     [Linux Actions SoC]     [Linux for Synopsys ARC Processors]     [Linux NFS]     [Linux NILFS]     [Linux USB Devel]     [Video for Linux]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]


  Powered by Linux