Replace the lglock with percpu spinlocks. That allows us to iterate in the
seq_file ops without taking all of the underlying spinlocks with
lg_global_lock().

Signed-off-by: Daniel Wagner <daniel.wagner@xxxxxxxxxxxx>
Cc: Jeff Layton <jlayton@xxxxxxxxxxxxxxx>
Cc: "J. Bruce Fields" <bfields@xxxxxxxxxxxx>
Cc: Alexander Viro <viro@xxxxxxxxxxxxxxxxxx>
---
 fs/locks.c | 30 ++++++++++++++++--------------
 1 file changed, 16 insertions(+), 14 deletions(-)

diff --git a/fs/locks.c b/fs/locks.c
index 142e4fd..20ed00a 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -128,7 +128,6 @@
 #include <linux/pid_namespace.h>
 #include <linux/hashtable.h>
 #include <linux/percpu.h>
-#include <linux/lglock.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/filelock.h>
@@ -160,10 +159,10 @@ int lease_break_time = 45;
 /*
  * The global file_lock_list is only used for displaying /proc/locks, so we
  * keep a list on each CPU, with each list protected by its own spinlock via
- * the file_lock_lglock. Note that alterations to the list also require that
+ * the file_lock_lock. Note that alterations to the list also require that
  * the relevant flc_lock is held.
  */
-DEFINE_STATIC_LGLOCK(file_lock_lglock);
+static DEFINE_PER_CPU(spinlock_t, file_lock_lock);
 static DEFINE_PER_CPU(struct hlist_head, file_lock_list);
 
 /*
@@ -561,10 +560,10 @@ static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
 /* Must be called with the flc_lock held! */
 static void locks_insert_global_locks(struct file_lock *fl)
 {
-	lg_local_lock(&file_lock_lglock);
+	spin_lock(this_cpu_ptr(&file_lock_lock));
 	fl->fl_link_cpu = smp_processor_id();
 	hlist_add_head(&fl->fl_link, this_cpu_ptr(&file_lock_list));
-	lg_local_unlock(&file_lock_lglock);
+	spin_unlock(this_cpu_ptr(&file_lock_lock));
 }
 
 /* Must be called with the flc_lock held! */
@@ -577,9 +576,9 @@ static void locks_delete_global_locks(struct file_lock *fl)
 	 */
 	if (hlist_unhashed(&fl->fl_link))
 		return;
-	lg_local_lock_cpu(&file_lock_lglock, fl->fl_link_cpu);
+	spin_lock(per_cpu_ptr(&file_lock_lock, fl->fl_link_cpu));
 	hlist_del_init(&fl->fl_link);
-	lg_local_unlock_cpu(&file_lock_lglock, fl->fl_link_cpu);
+	spin_unlock(per_cpu_ptr(&file_lock_lock, fl->fl_link_cpu));
 }
 
 static unsigned long
@@ -2628,9 +2627,9 @@ static void *locks_start(struct seq_file *f, loff_t *pos)
 	struct locks_iterator *iter = f->private;
 
 	iter->li_pos = *pos + 1;
-	lg_global_lock(&file_lock_lglock);
 	spin_lock(&blocked_lock_lock);
-	return seq_hlist_start_percpu(&file_lock_list, &iter->li_cpu, *pos);
+	return seq_hlist_start_percpu_locked(&file_lock_list, &file_lock_lock,
+					&iter->li_cpu, *pos);
 }
 
 static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
@@ -2638,14 +2637,17 @@ static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
 	struct locks_iterator *iter = f->private;
 
 	++iter->li_pos;
-	return seq_hlist_next_percpu(v, &file_lock_list, &iter->li_cpu, pos);
+	return seq_hlist_next_percpu_locked(v, &file_lock_list, &file_lock_lock,
+					&iter->li_cpu, pos);
 }
 
 static void locks_stop(struct seq_file *f, void *v)
 	__releases(&blocked_lock_lock)
 {
+	struct locks_iterator *iter = f->private;
+
+	seq_hlist_stop_percpu_locked(v, &file_lock_lock, &iter->li_cpu);
 	spin_unlock(&blocked_lock_lock);
-	lg_global_unlock(&file_lock_lglock);
 }
 
 static const struct seq_operations locks_seq_operations = {
@@ -2686,10 +2688,10 @@ static int __init filelock_init(void)
 	filelock_cache = kmem_cache_create("file_lock_cache",
 			sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
 
-	lg_lock_init(&file_lock_lglock, "file_lock_lglock");
-
-	for_each_possible_cpu(i)
+	for_each_possible_cpu(i) {
 		INIT_HLIST_HEAD(per_cpu_ptr(&file_lock_list, i));
+		spin_lock_init(per_cpu_ptr(&file_lock_lock, i));
+	}
 
 	return 0;
 }
-- 
2.1.0
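
[Editor's note] The seq_hlist_start_percpu_locked(), seq_hlist_next_percpu_locked()
and seq_hlist_stop_percpu_locked() helpers called in the seq_file ops above are
not defined in this patch; presumably they are introduced by a companion
seq_file patch elsewhere in the series. The sketch below is only a guess at
their shape, modelled on the existing seq_hlist_start_percpu() and
seq_hlist_next_percpu() in fs/seq_file.c: the walk holds exactly one per-CPU
spinlock at a time (the one for the CPU whose list is currently being
traversed, tracked via the caller's iter->li_cpu), which is what allows
locks_start() to drop the lg_global_lock() call.

/*
 * Hypothetical sketch -- not part of this patch. One plausible shape for the
 * "_locked" per-CPU seq_file hlist iterators: only the spinlock of the CPU
 * whose list is currently being walked is held across the iteration.
 */
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>

struct hlist_node *
seq_hlist_start_percpu_locked(struct hlist_head __percpu *head,
			      spinlock_t __percpu *lock, int *cpu, loff_t pos)
{
	struct hlist_node *node;

	for_each_possible_cpu(*cpu) {
		spin_lock(per_cpu_ptr(lock, *cpu));
		hlist_for_each(node, per_cpu_ptr(head, *cpu)) {
			if (pos-- == 0)
				return node;	/* lock for *cpu stays held */
		}
		spin_unlock(per_cpu_ptr(lock, *cpu));
	}
	return NULL;	/* nothing found, no per-CPU lock held */
}

struct hlist_node *
seq_hlist_next_percpu_locked(void *v, struct hlist_head __percpu *head,
			     spinlock_t __percpu *lock, int *cpu, loff_t *pos)
{
	struct hlist_node *node = v;

	++*pos;

	if (node->next)
		return node->next;	/* same CPU, keep its lock */

	/* Done with this CPU's list: drop its lock and move to the next one. */
	spin_unlock(per_cpu_ptr(lock, *cpu));

	for (*cpu = cpumask_next(*cpu, cpu_possible_mask); *cpu < nr_cpu_ids;
	     *cpu = cpumask_next(*cpu, cpu_possible_mask)) {
		struct hlist_head *bucket;

		spin_lock(per_cpu_ptr(lock, *cpu));
		bucket = per_cpu_ptr(head, *cpu);
		if (!hlist_empty(bucket))
			return bucket->first;	/* lock for *cpu stays held */
		spin_unlock(per_cpu_ptr(lock, *cpu));
	}
	return NULL;	/* walked off the end, no per-CPU lock held */
}

void seq_hlist_stop_percpu_locked(void *v, spinlock_t __percpu *lock, int *cpu)
{
	/* Only drop a lock if the walk stopped on a valid element. */
	if (v)
		spin_unlock(per_cpu_ptr(lock, *cpu));
}

With helpers of this shape, locks_stop() has to know which CPU's lock is still
held, which is why it now pulls the iterator out of f->private and passes
&iter->li_cpu; if the walk already ran off the end (v == NULL), no per-CPU lock
is held and only blocked_lock_lock needs to be released.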