On 1/6/2021 12:39 PM, Bijan Mottahedeh wrote:
Create common routines to be used for both file and buffer registration.
Signed-off-by: Bijan Mottahedeh <bijan.mottahedeh@xxxxxxxxxx>
---
fs/io_uring.c | 47 ++++++++++++++++++++++++-----------------------
1 file changed, 24 insertions(+), 23 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 91be618..fbff8480 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -7303,15 +7303,12 @@ static void io_sqe_rsrc_set_node(struct io_ring_ctx *ctx,
percpu_ref_get(&rsrc_data->refs);
}
-static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
+static int io_rsrc_ref_quiesce(struct fixed_rsrc_data *data,
+ struct io_ring_ctx *ctx)
{
- struct fixed_rsrc_data *data = ctx->file_data;
struct fixed_rsrc_ref_node *backup_node, *ref_node = NULL;
- unsigned nr_tables, i;
int ret;
- if (!data)
- return -ENXIO;
backup_node = alloc_fixed_file_ref_node(ctx);
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
I introduced a bug here: the now-shared quiesce path calls the file-specific
allocator, alloc_fixed_file_ref_node(), directly, so the buffer unregistration
path would end up with a file ref node. I've fixed it by passing in the proper
allocator, and will send the fix with the next patch set.
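
Roughly what I have in mind, as a sketch only (the function-pointer shape and
the buffer-side allocator name below are placeholders, not necessarily what
the next version will look like):

    static int io_rsrc_ref_quiesce(struct fixed_rsrc_data *data,
                                   struct io_ring_ctx *ctx,
                                   struct fixed_rsrc_ref_node *(*alloc)(struct io_ring_ctx *))
    {
            struct fixed_rsrc_ref_node *backup_node, *ref_node = NULL;
            int ret;

            /* allocate via the caller-supplied allocator, not the file one */
            backup_node = alloc(ctx);
            if (!backup_node)
                    return -ENOMEM;
            ...
    }

with each caller passing the allocator that matches its resource type:

    /* files */
    ret = io_rsrc_ref_quiesce(data, ctx, alloc_fixed_file_ref_node);

    /* buffers -- placeholder name for the buffer ref node allocator */
    ret = io_rsrc_ref_quiesce(data, ctx, alloc_fixed_buf_ref_node);
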
if (!backup_node)
return -ENOMEM;
@@ -7339,6 +7336,23 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
}
} while (1);
+ destroy_fixed_rsrc_ref_node(backup_node);
+ return 0;
+}
+
+static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
+{
+ struct fixed_rsrc_data *data = ctx->file_data;
+ unsigned int nr_tables, i;
+ int ret;
+
+ if (!data)
+ return -ENXIO;
+
+ ret = io_rsrc_ref_quiesce(data, ctx);
+ if (ret)
+ return ret;
+
__io_sqe_files_unregister(ctx);
nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
for (i = 0; i < nr_tables; i++)
@@ -7348,7 +7362,6 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
kfree(data);
ctx->file_data = NULL;
ctx->nr_user_files = 0;
- destroy_fixed_rsrc_ref_node(backup_node);
return 0;
}
@@ -8384,22 +8397,14 @@ static void io_buffers_map_free(struct io_ring_ctx *ctx)
static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
struct fixed_rsrc_data *data = ctx->buf_data;
- struct fixed_rsrc_ref_node *ref_node = NULL;
+ int ret;
if (!data)
return -ENXIO;
- io_rsrc_ref_lock(ctx);
- ref_node = data->node;
- io_rsrc_ref_unlock(ctx);
- if (ref_node)
- percpu_ref_kill(&ref_node->refs);
-
- percpu_ref_kill(&data->refs);
-
- /* wait for all refs nodes to complete */
- flush_delayed_work(&ctx->rsrc_put_work);
- wait_for_completion(&data->done);
+ ret = io_rsrc_ref_quiesce(data, ctx);
+ if (ret)
+ return ret;
io_buffers_unmap(ctx);
io_buffers_map_free(ctx);
@@ -8751,11 +8756,7 @@ static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
return PTR_ERR(ref_node);
}
- buf_data->node = ref_node;
- io_rsrc_ref_lock(ctx);
- list_add(&ref_node->node, &ctx->rsrc_ref_list);
- io_rsrc_ref_unlock(ctx);
- percpu_ref_get(&buf_data->refs);
+ io_sqe_rsrc_set_node(ctx, buf_data, ref_node);
return 0;
}