This just converts the ioctx refcount to the new generic dynamic
percpu refcount code.

Signed-off-by: Kent Overstreet <koverstreet@xxxxxxxxxx>
Cc: Zach Brown <zab@xxxxxxxxxx>
Cc: Felipe Balbi <balbi@xxxxxx>
Cc: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
Cc: Mark Fasheh <mfasheh@xxxxxxxx>
Cc: Joel Becker <jlbec@xxxxxxxxxxxx>
Cc: Rusty Russell <rusty@xxxxxxxxxxxxxxx>
Cc: Jens Axboe <axboe@xxxxxxxxx>
Cc: Asai Thambi S P <asamymuthupa@xxxxxxxxxx>
Cc: Selvan Mani <smani@xxxxxxxxxx>
Cc: Sam Bradshaw <sbradshaw@xxxxxxxxxx>
Cc: Jeff Moyer <jmoyer@xxxxxxxxxx>
Cc: Al Viro <viro@xxxxxxxxxxxxxxxxxx>
Cc: Benjamin LaHaise <bcrl@xxxxxxxxx>
Reviewed-by: "Theodore Ts'o" <tytso@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---
 fs/aio.c | 33 +++++++++++++++++----------------
 1 file changed, 17 insertions(+), 16 deletions(-)
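[Note for reviewers unfamiliar with the new API: below is a minimal
sketch of the object lifecycle this patch relies on.  The struct and
function names (my_obj, my_obj_init, etc.) are made up for
illustration and do not exist in fs/aio.c; the percpu_ref calls and
their return conventions are the ones the diff below uses, i.e. the
API as introduced earlier in this series, which may differ from
whatever eventually lands in mainline.]

#include <linux/percpu-refcount.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_obj {
	struct percpu_ref	ref;	/* replaces atomic_t users + atomic_t dead */
};

static int my_obj_init(struct my_obj *obj)
{
	/* Allocates the percpu counters; the refcount starts out at 1,
	 * covering the "the list owns a reference" half of the old
	 * atomic_set(&ctx->users, 2). */
	if (percpu_ref_init(&obj->ref))
		return -ENOMEM;

	/* The second reference, handed back to the caller, is taken
	 * under rcu_read_lock(), mirroring ioctx_alloc() below. */
	rcu_read_lock();
	percpu_ref_get(&obj->ref);
	rcu_read_unlock();
	return 0;
}

static void my_obj_put(struct my_obj *obj)
{
	/* percpu_ref_put() returns true only on the final put, once the
	 * ref has been killed and the percpu counts collapsed, so it
	 * stands in for unlikely(atomic_dec_and_test(&ctx->users)). */
	if (percpu_ref_put(&obj->ref))
		kfree(obj);
}

static void my_obj_kill(struct my_obj *obj)
{
	/* percpu_ref_kill() returns true exactly once, replacing the
	 * !atomic_xchg(&ctx->dead, 1) "did we win the race to kill it"
	 * idiom; the winner drops the initial reference. */
	if (percpu_ref_kill(&obj->ref))
		my_obj_put(obj);
}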
diff --git a/fs/aio.c b/fs/aio.c
index c341cee..93383b0 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -36,6 +36,7 @@
 #include <linux/eventfd.h>
 #include <linux/blkdev.h>
 #include <linux/compat.h>
+#include <linux/percpu-refcount.h>
 
 #include <asm/kmap_types.h>
 #include <asm/uaccess.h>
@@ -65,8 +66,7 @@ struct kioctx_cpu {
 };
 
 struct kioctx {
-	atomic_t		users;
-	atomic_t		dead;
+	struct percpu_ref	users;
 
 	/* This needs improving */
 	unsigned long		user_id;
@@ -370,7 +370,7 @@ static void free_ioctx(struct kioctx *ctx)
 
 static void put_ioctx(struct kioctx *ctx)
 {
-	if (unlikely(atomic_dec_and_test(&ctx->users)))
+	if (percpu_ref_put(&ctx->users))
 		free_ioctx(ctx);
 }
 
@@ -411,8 +411,13 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 
 	ctx->max_reqs = nr_events;
 
-	atomic_set(&ctx->users, 2);
-	atomic_set(&ctx->dead, 0);
+	if (percpu_ref_init(&ctx->users))
+		goto out_freectx;
+
+	rcu_read_lock();
+	percpu_ref_get(&ctx->users);
+	rcu_read_unlock();
+
 	spin_lock_init(&ctx->ctx_lock);
 	spin_lock_init(&ctx->completion_lock);
 	mutex_init(&ctx->ring_lock);
@@ -422,7 +427,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 
 	ctx->cpu = alloc_percpu(struct kioctx_cpu);
 	if (!ctx->cpu)
-		goto out_freectx;
+		goto out_freeref;
 
 	if (aio_setup_ring(ctx) < 0)
 		goto out_freepcpu;
@@ -455,6 +460,8 @@ out_cleanup:
 	aio_free_ring(ctx);
 out_freepcpu:
 	free_percpu(ctx->cpu);
+out_freeref:
+	free_percpu(ctx->users.pcpu_count);
 out_freectx:
 	kmem_cache_free(kioctx_cachep, ctx);
 	pr_debug("error allocating ioctx %d\n", err);
@@ -484,7 +491,7 @@ static void kill_ioctx_rcu(struct rcu_head *head)
  */
 static void kill_ioctx(struct kioctx *ctx)
 {
-	if (!atomic_xchg(&ctx->dead, 1)) {
+	if (percpu_ref_kill(&ctx->users)) {
 		hlist_del_rcu(&ctx->list);
 		/* Between hlist_del_rcu() and dropping the initial ref */
 		synchronize_rcu();
@@ -530,12 +537,6 @@ void exit_aio(struct mm_struct *mm)
 	struct hlist_node *n;
 
 	hlist_for_each_entry_safe(ctx, n, &mm->ioctx_list, list) {
-		if (1 != atomic_read(&ctx->users))
-			printk(KERN_DEBUG
-				"exit_aio:ioctx still alive: %d %d %d\n",
-				atomic_read(&ctx->users),
-				atomic_read(&ctx->dead),
-				atomic_read(&ctx->reqs_available));
 		/*
 		 * We don't need to bother with munmap() here -
 		 * exit_mmap(mm) is coming and it'll unmap everything.
@@ -546,7 +547,7 @@ void exit_aio(struct mm_struct *mm)
 		 */
 		ctx->mmap_size = 0;
 
-		if (!atomic_xchg(&ctx->dead, 1)) {
+		if (percpu_ref_kill(&ctx->users)) {
 			hlist_del_rcu(&ctx->list);
 			call_rcu(&ctx->rcu_head, kill_ioctx_rcu);
 		}
@@ -657,7 +658,7 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
 
 	hlist_for_each_entry_rcu(ctx, &mm->ioctx_list, list) {
 		if (ctx->user_id == ctx_id) {
-			atomic_inc(&ctx->users);
+			percpu_ref_get(&ctx->users);
 			ret = ctx;
 			break;
 		}
@@ -870,7 +871,7 @@ static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr,
 	if (ret > 0)
 		*i += ret;
 
-	if (unlikely(atomic_read(&ctx->dead)))
+	if (unlikely(percpu_ref_dead(&ctx->users)))
 		ret = -EINVAL;
 
 	if (!*i)
-- 
1.8.2.1