Re: "Cannot allocate memory" on ring creation (not RLIMIT_MEMLOCK)

On 12/19/20 2:54 PM, Jens Axboe wrote:
> On 12/19/20 1:51 PM, Josef wrote:
>>> And even more so, it's IOSQE_ASYNC on the IORING_OP_READ on an eventfd
>>> file descriptor. You probably don't want/mean to do that as it's
>>> pollable, I guess it's done because you just set it on all reads for the
>>> test?
>>
>> Yes, exactly. The eventfd fd is blocking, so it actually makes no
>> sense to use IOSQE_ASYNC.
> 
> Right, and it's pollable too.
> 
>> I just tested eventfd without the IOSQE_ASYNC flag and it seems to
>> work in my tests, thanks a lot :)
>>
>>> In any case, it should of course work. This is the leftover trace when
>>> we should be exiting, but an io-wq worker is still trying to get data
>>> from the eventfd:
>>
>> Interesting. BTW, what kind of tool do you use for kernel debugging?
> 
> Just poking at it and thinking about it, no hidden magic I'm afraid...
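
For anyone following along, a minimal liburing sketch of the variant
discussed above (IORING_OP_READ on an eventfd with no IOSQE_ASYNC,
letting io_uring's internal poll handling do the waiting, since eventfd
is pollable) might look like the below. The setup details (queue depth,
flags, bumping the counter up front so the read completes immediately)
are just illustrative assumptions on my part, not taken from Josef's
test code:

#include <liburing.h>
#include <sys/eventfd.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	uint64_t val = 1;
	int efd;

	efd = eventfd(0, 0);
	if (efd < 0 || io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* bump the counter first so the read below completes immediately */
	if (write(efd, &val, sizeof(val)) != sizeof(val))
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, efd, &val, sizeof(val), 0);
	/* note: no sqe->flags |= IOSQE_ASYNC here; eventfd is pollable,
	 * so there is no need to punt the read to an io-wq worker */
	io_uring_submit(&ring);

	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		printf("res=%d, counter=%llu\n", cqe->res,
		       (unsigned long long) val);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	close(efd);
	return 0;
}

With IOSQE_ASYNC set on that read, the request gets punted to io-wq up
front instead, which is the path the leftover trace above was stuck in.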

Josef, can you try with this added? It looks bigger than it is; most of
it is just moving one function below another.


diff --git a/fs/io_uring.c b/fs/io_uring.c
index f3690dfdd564..96f6445ab827 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -8735,10 +8735,43 @@ static void io_cancel_defer_files(struct io_ring_ctx *ctx,
 	}
 }
 
+static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
+					    struct task_struct *task)
+{
+	while (1) {
+		struct io_task_cancel cancel = { .task = task, .files = NULL, };
+		enum io_wq_cancel cret;
+		bool ret = false;
+
+		cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true);
+		if (cret != IO_WQ_CANCEL_NOTFOUND)
+			ret = true;
+
+		/* SQPOLL thread does its own polling */
+		if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
+			while (!list_empty_careful(&ctx->iopoll_list)) {
+				io_iopoll_try_reap_events(ctx);
+				ret = true;
+			}
+		}
+
+		ret |= io_poll_remove_all(ctx, task, NULL);
+		ret |= io_kill_timeouts(ctx, task, NULL);
+		if (!ret)
+			break;
+		io_run_task_work();
+		cond_resched();
+	}
+}
+
 static void io_uring_cancel_files(struct io_ring_ctx *ctx,
 				  struct task_struct *task,
 				  struct files_struct *files)
 {
+	/* files == NULL, task is exiting. Cancel all that match task */
+	if (!files)
+		__io_uring_cancel_task_requests(ctx, task);
+
 	while (!list_empty_careful(&ctx->inflight_list)) {
 		struct io_task_cancel cancel = { .task = task, .files = files };
 		struct io_kiocb *req;
@@ -8772,35 +8805,6 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
 	}
 }
 
-static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
-					    struct task_struct *task)
-{
-	while (1) {
-		struct io_task_cancel cancel = { .task = task, .files = NULL, };
-		enum io_wq_cancel cret;
-		bool ret = false;
-
-		cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true);
-		if (cret != IO_WQ_CANCEL_NOTFOUND)
-			ret = true;
-
-		/* SQPOLL thread does its own polling */
-		if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
-			while (!list_empty_careful(&ctx->iopoll_list)) {
-				io_iopoll_try_reap_events(ctx);
-				ret = true;
-			}
-		}
-
-		ret |= io_poll_remove_all(ctx, task, NULL);
-		ret |= io_kill_timeouts(ctx, task, NULL);
-		if (!ret)
-			break;
-		io_run_task_work();
-		cond_resched();
-	}
-}
-
 /*
  * We need to iteratively cancel requests, in case a request has dependent
  * hard links. These persist even for failure of cancelations, hence keep

-- 
Jens Axboe



