Uplevel ref_list and make it common to all resources.

This allows one common ref_list to be used for both files and buffers
in upcoming patches.

Signed-off-by: Bijan Mottahedeh <bijan.mottahedeh@xxxxxxxxxx>
---
 fs/io_uring.c | 77 ++++++++++++++++++++++++++++-------------------------------
 1 file changed, 36 insertions(+), 41 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 33b2ff6..1ed63bc 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -231,8 +231,6 @@ struct fixed_rsrc_data {
 	struct fixed_rsrc_ref_node	*node;
 	struct percpu_ref		refs;
 	struct completion		done;
-	struct list_head		ref_list;
-	spinlock_t			lock;
 };
 
 struct io_buffer {
@@ -398,8 +396,10 @@ struct io_ring_ctx {
 		struct list_head	inflight_list;
 	} ____cacheline_aligned_in_smp;
 
-	struct delayed_work		file_put_work;
-	struct llist_head		file_put_llist;
+	struct delayed_work		rsrc_put_work;
+	struct llist_head		rsrc_put_llist;
+	struct list_head		rsrc_ref_list;
+	spinlock_t			rsrc_ref_lock;
 
 	struct work_struct		exit_work;
 	struct io_restriction		restrictions;
@@ -1024,7 +1024,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
 static struct file *io_file_get(struct io_submit_state *state,
 				struct io_kiocb *req, int fd, bool fixed);
 static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs);
-static void io_file_put_work(struct work_struct *work);
+static void io_rsrc_put_work(struct work_struct *work);
 
 static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
 			       struct iovec **iovec, struct iov_iter *iter,
@@ -1325,8 +1325,10 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	INIT_LIST_HEAD(&ctx->timeout_list);
 	spin_lock_init(&ctx->inflight_lock);
 	INIT_LIST_HEAD(&ctx->inflight_list);
-	INIT_DELAYED_WORK(&ctx->file_put_work, io_file_put_work);
-	init_llist_head(&ctx->file_put_llist);
+	spin_lock_init(&ctx->rsrc_ref_lock);
+	INIT_LIST_HEAD(&ctx->rsrc_ref_list);
+	INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
+	init_llist_head(&ctx->rsrc_put_llist);
 	return ctx;
 err:
 	if (ctx->fallback_req)
@@ -7267,16 +7269,16 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
 	if (!data)
 		return -ENXIO;
 
-	spin_lock_bh(&data->lock);
+	spin_lock_bh(&ctx->rsrc_ref_lock);
 	ref_node = data->node;
-	spin_unlock_bh(&data->lock);
+	spin_unlock_bh(&ctx->rsrc_ref_lock);
 	if (ref_node)
 		percpu_ref_kill(&ref_node->refs);
 
 	percpu_ref_kill(&data->refs);
 
 	/* wait for all refs nodes to complete */
-	flush_delayed_work(&ctx->file_put_work);
+	flush_delayed_work(&ctx->rsrc_put_work);
 	wait_for_completion(&data->done);
 
 	__io_sqe_files_unregister(ctx);
@@ -7617,30 +7619,25 @@ static void __io_rsrc_put_work(struct fixed_rsrc_ref_node *ref_node)
 	percpu_ref_put(&rsrc_data->refs);
 }
 
-static void io_rsrc_put_work(struct llist_node *node)
+static void io_rsrc_put_work(struct work_struct *work)
 {
-	struct fixed_rsrc_ref_node *ref_node;
-	struct llist_node *next;
+	struct io_ring_ctx *ctx;
+	struct llist_node *node;
+
+	ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
+	node = llist_del_all(&ctx->rsrc_put_llist);
 
 	while (node) {
-		next = node->next;
+		struct fixed_rsrc_ref_node *ref_node;
+		struct llist_node *next = node->next;
+
 		ref_node = llist_entry(node, struct fixed_rsrc_ref_node, llist);
 		__io_rsrc_put_work(ref_node);
 		node = next;
 	}
 }
 
-static void io_file_put_work(struct work_struct *work)
-{
-	struct io_ring_ctx *ctx;
-	struct llist_node *node;
-
-	ctx = container_of(work, struct io_ring_ctx, file_put_work.work);
-	node = llist_del_all(&ctx->file_put_llist);
-	io_rsrc_put_work(node);
-}
-
-static void io_file_data_ref_zero(struct percpu_ref *ref)
+static void io_rsrc_data_ref_zero(struct percpu_ref *ref)
 {
 	struct fixed_rsrc_ref_node *ref_node;
 	struct fixed_rsrc_data *data;
@@ -7652,27 +7649,27 @@ static void io_file_data_ref_zero(struct percpu_ref *ref)
 	data = ref_node->rsrc_data;
 	ctx = data->ctx;
 
-	spin_lock_bh(&data->lock);
+	spin_lock_bh(&ctx->rsrc_ref_lock);
 	ref_node->done = true;
 
-	while (!list_empty(&data->ref_list)) {
-		ref_node = list_first_entry(&data->ref_list,
+	while (!list_empty(&ctx->rsrc_ref_list)) {
+		ref_node = list_first_entry(&ctx->rsrc_ref_list,
 					struct fixed_rsrc_ref_node, node);
 		/* recycle ref nodes in order */
 		if (!ref_node->done)
 			break;
 		list_del(&ref_node->node);
-		first_add |= llist_add(&ref_node->llist, &ctx->file_put_llist);
+		first_add |= llist_add(&ref_node->llist, &ctx->rsrc_put_llist);
 	}
-	spin_unlock_bh(&data->lock);
+	spin_unlock_bh(&ctx->rsrc_ref_lock);
 
 	if (percpu_ref_is_dying(&data->refs))
 		delay = 0;
 
 	if (!delay)
-		mod_delayed_work(system_wq, &ctx->file_put_work, 0);
+		mod_delayed_work(system_wq, &ctx->rsrc_put_work, 0);
 	else if (first_add)
-		queue_delayed_work(system_wq, &ctx->file_put_work, delay);
+		queue_delayed_work(system_wq, &ctx->rsrc_put_work, delay);
 }
 
 static struct fixed_rsrc_ref_node *alloc_fixed_file_ref_node(
@@ -7684,7 +7681,7 @@ static struct fixed_rsrc_ref_node *alloc_fixed_file_ref_node(
 	if (!ref_node)
 		return ERR_PTR(-ENOMEM);
 
-	if (percpu_ref_init(&ref_node->refs, io_file_data_ref_zero,
+	if (percpu_ref_init(&ref_node->refs, io_rsrc_data_ref_zero,
 			    0, GFP_KERNEL)) {
 		kfree(ref_node);
 		return ERR_PTR(-ENOMEM);
@@ -7725,8 +7722,6 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
 		return -ENOMEM;
 	file_data->ctx = ctx;
 	init_completion(&file_data->done);
-	INIT_LIST_HEAD(&file_data->ref_list);
-	spin_lock_init(&file_data->lock);
 
 	nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
 	file_data->table = kcalloc(nr_tables, sizeof(*file_data->table),
@@ -7788,9 +7783,9 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
 	}
 
 	file_data->node = ref_node;
-	spin_lock_bh(&file_data->lock);
-	list_add_tail(&ref_node->node, &file_data->ref_list);
-	spin_unlock_bh(&file_data->lock);
+	spin_lock_bh(&ctx->rsrc_ref_lock);
+	list_add_tail(&ref_node->node, &ctx->rsrc_ref_list);
+	spin_unlock_bh(&ctx->rsrc_ref_lock);
 	percpu_ref_get(&file_data->refs);
 	return ret;
 out_fput:
@@ -7952,10 +7947,10 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
 
 	if (needs_switch) {
 		percpu_ref_kill(&data->node->refs);
-		spin_lock_bh(&data->lock);
-		list_add_tail(&ref_node->node, &data->ref_list);
+		spin_lock_bh(&ctx->rsrc_ref_lock);
+		list_add_tail(&ref_node->node, &ctx->rsrc_ref_list);
 		data->node = ref_node;
-		spin_unlock_bh(&data->lock);
+		spin_unlock_bh(&ctx->rsrc_ref_lock);
 		percpu_ref_get(&ctx->file_data->refs);
 	} else
 		destroy_fixed_file_ref_node(ref_node);
-- 
1.8.3.1
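
As an illustration of the direction, here is a minimal self-contained
userspace sketch (not kernel code) of the pattern this patch enables: one
context-wide ref list and lock shared by file and buffer resource nodes,
instead of a list and lock per resource type. All names below (struct ctx,
struct rsrc_node, rsrc_node_queue(), rsrc_put_work()) are invented for the
sketch and are not the kernel implementation.

/* Userspace model only; build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

enum rsrc_type { RSRC_FILE, RSRC_BUFFER };

struct rsrc_node {
	enum rsrc_type type;
	int id;
	struct rsrc_node *next;
};

struct ctx {
	/* common to all resource types, like ctx->rsrc_ref_list/_lock */
	pthread_mutex_t rsrc_ref_lock;
	struct rsrc_node *rsrc_ref_list;	/* simple LIFO for the sketch */
};

static void ctx_init(struct ctx *ctx)
{
	pthread_mutex_init(&ctx->rsrc_ref_lock, NULL);
	ctx->rsrc_ref_list = NULL;
}

/* queue a ref node on the shared list, whatever resource it belongs to */
static void rsrc_node_queue(struct ctx *ctx, enum rsrc_type type, int id)
{
	struct rsrc_node *node = malloc(sizeof(*node));

	if (!node)
		abort();
	node->type = type;
	node->id = id;
	pthread_mutex_lock(&ctx->rsrc_ref_lock);
	node->next = ctx->rsrc_ref_list;
	ctx->rsrc_ref_list = node;
	pthread_mutex_unlock(&ctx->rsrc_ref_lock);
}

/* detach and drain the shared list, like the rsrc put work would */
static void rsrc_put_work(struct ctx *ctx)
{
	struct rsrc_node *node;

	pthread_mutex_lock(&ctx->rsrc_ref_lock);
	node = ctx->rsrc_ref_list;
	ctx->rsrc_ref_list = NULL;
	pthread_mutex_unlock(&ctx->rsrc_ref_lock);

	while (node) {
		struct rsrc_node *next = node->next;

		printf("put %s node %d\n",
		       node->type == RSRC_FILE ? "file" : "buffer", node->id);
		free(node);
		node = next;
	}
}

int main(void)
{
	struct ctx ctx;

	ctx_init(&ctx);
	rsrc_node_queue(&ctx, RSRC_FILE, 1);	/* file and buffer nodes ... */
	rsrc_node_queue(&ctx, RSRC_BUFFER, 2);	/* ... share one list */
	rsrc_put_work(&ctx);
	return 0;
}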