Commit-ID:  02e525b2aff1d665f6466e1d123ee4cb69f1d4b0
Gitweb:     https://git.kernel.org/tip/02e525b2aff1d665f6466e1d123ee4cb69f1d4b0
Author:     Peter Zijlstra <peterz@xxxxxxxxxxxxx>
AuthorDate: Thu, 21 Feb 2019 15:38:40 +0100
Committer:  Ingo Molnar <mingo@xxxxxxxxxx>
CommitDate: Thu, 28 Feb 2019 07:55:37 +0100

locking/percpu-rwsem: Remove preempt_disable variants

Effective revert commit:

  87709e28dc7c ("fs/locks: Use percpu_down_read_preempt_disable()")

This is causing major pain for PREEMPT_RT.

Sebastian did a lot of lockperf runs on 2 and 4 node machines with all
preemption modes (PREEMPT=n should be an obvious NOP for this patch and
thus serves as a good control) and no results showed significance over
2-sigma (the PREEMPT=n results were almost empty at 1-sigma).

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@xxxxxxxxxxxxx>
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Paul E. McKenney <paulmck@xxxxxxxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Will Deacon <will.deacon@xxxxxxx>
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
---
 fs/locks.c                   | 32 ++++++++++++++++----------------
 include/linux/percpu-rwsem.h | 24 ++++--------------------
 2 files changed, 20 insertions(+), 36 deletions(-)

diff --git a/fs/locks.c b/fs/locks.c
index ff6af2c32601..eaa1cfaf73b0 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1058,7 +1058,7 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request)
 			return -ENOMEM;
 	}
 
-	percpu_down_read_preempt_disable(&file_rwsem);
+	percpu_down_read(&file_rwsem);
 	spin_lock(&ctx->flc_lock);
 	if (request->fl_flags & FL_ACCESS)
 		goto find_conflict;
@@ -1100,7 +1100,7 @@ find_conflict:
 
 out:
 	spin_unlock(&ctx->flc_lock);
-	percpu_up_read_preempt_enable(&file_rwsem);
+	percpu_up_read(&file_rwsem);
 	if (new_fl)
 		locks_free_lock(new_fl);
 	locks_dispose_list(&dispose);
@@ -1138,7 +1138,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
 		new_fl2 = locks_alloc_lock();
 	}
 
-	percpu_down_read_preempt_disable(&file_rwsem);
+	percpu_down_read(&file_rwsem);
 	spin_lock(&ctx->flc_lock);
 	/*
 	 * New lock request. Walk all POSIX locks and look for conflicts. If
@@ -1312,7 +1312,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
 	}
 out:
 	spin_unlock(&ctx->flc_lock);
-	percpu_up_read_preempt_enable(&file_rwsem);
+	percpu_up_read(&file_rwsem);
 	/*
 	 * Free any unused locks.
 	 */
@@ -1584,7 +1584,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
 		return error;
 	}
 
-	percpu_down_read_preempt_disable(&file_rwsem);
+	percpu_down_read(&file_rwsem);
 	spin_lock(&ctx->flc_lock);
 
 	time_out_leases(inode, &dispose);
@@ -1636,13 +1636,13 @@ restart:
 	locks_insert_block(fl, new_fl, leases_conflict);
 	trace_break_lease_block(inode, new_fl);
 	spin_unlock(&ctx->flc_lock);
-	percpu_up_read_preempt_enable(&file_rwsem);
+	percpu_up_read(&file_rwsem);
 
 	locks_dispose_list(&dispose);
 	error = wait_event_interruptible_timeout(new_fl->fl_wait,
 						!new_fl->fl_blocker, break_time);
 
-	percpu_down_read_preempt_disable(&file_rwsem);
+	percpu_down_read(&file_rwsem);
 	spin_lock(&ctx->flc_lock);
 	trace_break_lease_unblock(inode, new_fl);
 	locks_delete_block(new_fl);
@@ -1659,7 +1659,7 @@ restart:
 	}
 out:
 	spin_unlock(&ctx->flc_lock);
-	percpu_up_read_preempt_enable(&file_rwsem);
+	percpu_up_read(&file_rwsem);
 	locks_dispose_list(&dispose);
 	locks_free_lock(new_fl);
 	return error;
@@ -1729,7 +1729,7 @@ int fcntl_getlease(struct file *filp)
 
 	ctx = smp_load_acquire(&inode->i_flctx);
 	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
-		percpu_down_read_preempt_disable(&file_rwsem);
+		percpu_down_read(&file_rwsem);
 		spin_lock(&ctx->flc_lock);
 		time_out_leases(inode, &dispose);
 		list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
@@ -1739,7 +1739,7 @@ int fcntl_getlease(struct file *filp)
 			break;
 		}
 		spin_unlock(&ctx->flc_lock);
-		percpu_up_read_preempt_enable(&file_rwsem);
+		percpu_up_read(&file_rwsem);
 
 		locks_dispose_list(&dispose);
 	}
@@ -1813,7 +1813,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
 		return -EINVAL;
 	}
 
-	percpu_down_read_preempt_disable(&file_rwsem);
+	percpu_down_read(&file_rwsem);
 	spin_lock(&ctx->flc_lock);
 	time_out_leases(inode, &dispose);
 	error = check_conflicting_open(dentry, arg, lease->fl_flags);
@@ -1884,7 +1884,7 @@ out_setup:
 		lease->fl_lmops->lm_setup(lease, priv);
 out:
 	spin_unlock(&ctx->flc_lock);
-	percpu_up_read_preempt_enable(&file_rwsem);
+	percpu_up_read(&file_rwsem);
 	locks_dispose_list(&dispose);
 	if (is_deleg)
 		inode_unlock(inode);
@@ -1907,7 +1907,7 @@ static int generic_delete_lease(struct file *filp, void *owner)
 		return error;
 	}
 
-	percpu_down_read_preempt_disable(&file_rwsem);
+	percpu_down_read(&file_rwsem);
 	spin_lock(&ctx->flc_lock);
 	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
 		if (fl->fl_file == filp &&
@@ -1920,7 +1920,7 @@ static int generic_delete_lease(struct file *filp, void *owner)
 	if (victim)
 		error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
 	spin_unlock(&ctx->flc_lock);
-	percpu_up_read_preempt_enable(&file_rwsem);
+	percpu_up_read(&file_rwsem);
 	locks_dispose_list(&dispose);
 	return error;
 }
@@ -2643,13 +2643,13 @@ locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
 	if (list_empty(&ctx->flc_lease))
 		return;
 
-	percpu_down_read_preempt_disable(&file_rwsem);
+	percpu_down_read(&file_rwsem);
 	spin_lock(&ctx->flc_lock);
 	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
 		if (filp == fl->fl_file)
 			lease_modify(fl, F_UNLCK, &dispose);
 	spin_unlock(&ctx->flc_lock);
-	percpu_up_read_preempt_enable(&file_rwsem);
+	percpu_up_read(&file_rwsem);
 
 	locks_dispose_list(&dispose);
 }
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index 71b75643c432..03cb4b6f842e 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -29,7 +29,7 @@ static struct percpu_rw_semaphore name = {		\
 extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
 extern void __percpu_up_read(struct percpu_rw_semaphore *);
 
-static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *sem)
+static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
 {
 	might_sleep();
 
@@ -47,16 +47,10 @@ static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *
 	__this_cpu_inc(*sem->read_count);
 	if (unlikely(!rcu_sync_is_idle(&sem->rss)))
 		__percpu_down_read(sem, false); /* Unconditional memory barrier */
-	barrier();
 	/*
-	 * The barrier() prevents the compiler from
+	 * The preempt_enable() prevents the compiler from
 	 * bleeding the critical section out.
 	 */
-}
-
-static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
-{
-	percpu_down_read_preempt_disable(sem);
 	preempt_enable();
 }
 
@@ -83,13 +77,9 @@ static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
 	return ret;
 }
 
-static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem)
+static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
 {
-	/*
-	 * The barrier() prevents the compiler from
-	 * bleeding the critical section out.
-	 */
-	barrier();
+	preempt_disable();
 	/*
 	 * Same as in percpu_down_read().
 	 */
@@ -102,12 +92,6 @@ static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem
 	rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
 }
 
-static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
-{
-	preempt_disable();
-	percpu_up_read_preempt_enable(sem);
-}
-
 extern void percpu_down_write(struct percpu_rw_semaphore *);
 extern void percpu_up_write(struct percpu_rw_semaphore *);
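
For context, the caller-visible pattern after this patch is sketched below.
This is an illustrative sketch only, not part of the diff: example_reader()
is a made-up function, while percpu_down_read()/percpu_up_read(),
spin_lock()/spin_unlock(), file_rwsem and the flc_lock field are the real
identifiers used in the hunks above.

/*
 * Illustrative sketch (not part of the patch): reader-side pattern in
 * fs/locks.c after this change.  percpu_down_read() no longer returns
 * with preemption disabled, so the non-preemptible region is confined
 * to what the spinlock itself implies; on PREEMPT_RT, where spinlocks
 * are sleeping locks, nothing is left that forbids preemption here.
 */
static void example_reader(struct file_lock_context *ctx)
{
	percpu_down_read(&file_rwsem);		/* may sleep */
	spin_lock(&ctx->flc_lock);

	/* ... walk or modify ctx->flc_flock / ctx->flc_lease ... */

	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
}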