[PATCH 3/3] io_uring: adjust the code logic when an error occurs

When an error occurs, reclaim memory and resources immediately
instead of going through the longer teardown path.

When creating the uring fails, no requests have been submitted
yet, so there is no need to reap events or remove poll entries;
it is more efficient to free the memory directly.

It also makes the code easier to understand.

Signed-off-by: Jackie Liu <liuyun01@xxxxxxxxxx>
---
 fs/io_uring.c | 96 +++++++++++++++++++++++++++++++++++++++--------------------
 1 file changed, 63 insertions(+), 33 deletions(-)
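
Not part of the patch itself, but for reviewers: below is a minimal,
compilable sketch of the staged-cleanup goto ladder that the reworked
io_uring_create() error path follows. The setup_rings(), setup_offload()
and get_fd() helpers are hypothetical stand-ins that only malloc/free,
not the real io_uring functions; the labels mirror the err/err_off/err_scq
ordering in the diff, where each label unwinds only the steps that had
already succeeded.

/*
 * Sketch of the staged-cleanup "goto ladder".  All helpers here are
 * illustrative stand-ins, not kernel code.
 */
#include <stdlib.h>

struct ctx {
	void *rings;
	void *offload;
};

static int setup_rings(struct ctx *c)
{
	c->rings = malloc(64);
	return c->rings ? 0 : -1;
}

static int setup_offload(struct ctx *c)
{
	c->offload = malloc(64);
	return c->offload ? 0 : -1;
}

static int get_fd(struct ctx *c)
{
	(void)c;
	return -1;		/* force a failure to exercise the ladder */
}

static int create(struct ctx *c)
{
	int ret;

	ret = setup_rings(c);
	if (ret)
		goto err_rings;		/* nothing allocated yet */

	ret = setup_offload(c);
	if (ret)
		goto err_offload;	/* undo the rings only */

	ret = get_fd(c);
	if (ret < 0)
		goto err;		/* undo everything */

	return ret;

err:
	free(c->offload);	/* plays the role of io_sq_offload_end() */
err_offload:
	free(c->rings);		/* plays the role of io_free_scq_urings() */
err_rings:
	return ret;
}

int main(void)
{
	struct ctx c = { NULL, NULL };

	return create(&c) ? EXIT_FAILURE : EXIT_SUCCESS;
}

The point of falling through the labels is that a failure at step N runs
exactly the cleanups for steps 1..N-1, which is what lets the error path
skip the reap/poll teardown that io_ring_ctx_wait_and_kill() still
performs on the normal exit path.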

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 3bbd202..035d729 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -361,6 +361,7 @@ struct io_submit_state {
 };
 
 static void io_sq_wq_submit_work(struct work_struct *work);
+static void io_free_scq_urings(struct io_ring_ctx *ctx);
 
 static struct kmem_cache *req_cachep;
 
@@ -417,6 +418,12 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	return ctx;
 }
 
+static void io_ring_ctx_free(struct io_ring_ctx *ctx)
+{
+	percpu_ref_exit(&ctx->refs);
+	kfree(ctx);
+}
+
 static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
 				     struct io_kiocb *req)
 {
@@ -2254,16 +2261,20 @@ static void io_sq_thread_stop(struct io_ring_ctx *ctx)
 	}
 }
 
-static void io_finish_async(struct io_ring_ctx *ctx)
+static void io_sq_wq_destroy(struct io_ring_ctx *ctx)
 {
-	io_sq_thread_stop(ctx);
-
 	if (ctx->sqo_wq) {
 		destroy_workqueue(ctx->sqo_wq);
 		ctx->sqo_wq = NULL;
 	}
 }
 
+static void io_finish_async(struct io_ring_ctx *ctx)
+{
+	io_sq_thread_stop(ctx);
+	io_sq_wq_destroy(ctx);
+}
+
 #if defined(CONFIG_UNIX)
 static void io_destruct_skb(struct sk_buff *skb)
 {
@@ -2483,6 +2494,18 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
 	return ret;
 }
 
+static void io_sq_offload_end(struct io_ring_ctx *ctx)
+{
+	io_sq_thread_stop(ctx);
+
+	if (ctx->sqo_mm) {
+		mmdrop(ctx->sqo_mm);
+		ctx->sqo_mm = NULL;
+	}
+
+	io_sq_wq_destroy(ctx);
+}
+
 static void io_unaccount_mem(struct user_struct *user, unsigned long nr_pages)
 {
 	atomic_long_sub(nr_pages, &user->locked_vm);
@@ -2765,33 +2788,6 @@ static int io_eventfd_unregister(struct io_ring_ctx *ctx)
 	return -ENXIO;
 }
 
-static void io_ring_ctx_free(struct io_ring_ctx *ctx)
-{
-	io_finish_async(ctx);
-	if (ctx->sqo_mm)
-		mmdrop(ctx->sqo_mm);
-
-	io_sqe_buffer_unregister(ctx);
-	io_sqe_files_unregister(ctx);
-	io_eventfd_unregister(ctx);
-
-#if defined(CONFIG_UNIX)
-	if (ctx->ring_sock)
-		sock_release(ctx->ring_sock);
-#endif
-
-	io_mem_free(ctx->sq_ring);
-	io_mem_free(ctx->sq_sqes);
-	io_mem_free(ctx->cq_ring);
-
-	percpu_ref_exit(&ctx->refs);
-	if (ctx->account_mem)
-		io_unaccount_mem(ctx->user,
-				ring_pages(ctx->sq_entries, ctx->cq_entries));
-	free_uid(ctx->user);
-	kfree(ctx);
-}
-
 static __poll_t io_uring_poll(struct file *file, poll_table *wait)
 {
 	struct io_ring_ctx *ctx = file->private_data;
@@ -2825,9 +2821,27 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 	percpu_ref_kill(&ctx->refs);
 	mutex_unlock(&ctx->uring_lock);
 
+	/* poll all events and reap */
 	io_poll_remove_all(ctx);
 	io_iopoll_reap_events(ctx);
 	wait_for_completion(&ctx->ctx_done);
+
+	io_sq_offload_end(ctx);
+
+	/* unregister fixed buffer and files */
+	io_sqe_buffer_unregister(ctx);
+	io_sqe_files_unregister(ctx);
+	io_eventfd_unregister(ctx);
+
+#if defined(CONFIG_UNIX)
+	if (ctx->ring_sock)
+		sock_release(ctx->ring_sock);
+#endif
+	io_free_scq_urings(ctx);
+	if (ctx->account_mem)
+		io_unaccount_mem(ctx->user,
+				 ring_pages(ctx->sq_entries, ctx->cq_entries));
+	free_uid(ctx->user);
 	io_ring_ctx_free(ctx);
 }
 
@@ -2994,6 +3008,13 @@ static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
 	return ret;
 }
 
+static void io_free_scq_urings(struct io_ring_ctx *ctx)
+{
+	io_mem_free(ctx->sq_ring);
+	io_mem_free(ctx->sq_sqes);
+	io_mem_free(ctx->cq_ring);
+}
+
 /*
  * Allocate an anonymous fd, this is what constitutes the application
  * visible backing of an io_uring instance. The application mmaps this
@@ -3083,11 +3104,11 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
 
 	ret = io_allocate_scq_urings(ctx, p);
 	if (ret)
-		goto err;
+		goto err_scq;
 
 	ret = io_sq_offload_start(ctx, p);
 	if (ret)
-		goto err;
+		goto err_off;
 
 	ret = io_uring_get_fd(ctx);
 	if (ret < 0)
@@ -3110,8 +3131,17 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
 	p->cq_off.overflow = offsetof(struct io_cq_ring, overflow);
 	p->cq_off.cqes = offsetof(struct io_cq_ring, cqes);
 	return ret;
+
 err:
-	io_ring_ctx_wait_and_kill(ctx);
+	io_sq_offload_end(ctx);
+err_off:
+	io_free_scq_urings(ctx);
+err_scq:
+	free_uid(user);
+	if (account_mem)
+		io_unaccount_mem(user, ring_pages(p->sq_entries,
+							p->cq_entries));
+	io_ring_ctx_free(ctx);
 	return ret;
 }
 
-- 
2.7.4
