Add API for nested write locks and convert the few call sites doing that.

Signed-off-by: Michel Lespinasse <walken@xxxxxxxxxx>
Reviewed-by: Daniel Jordan <daniel.m.jordan@xxxxxxxxxx>
---
 arch/um/include/asm/mmu_context.h | 3 ++-
 include/linux/mmap_lock.h         | 5 +++++
 kernel/fork.c                     | 2 +-
 3 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
index 62262c5c7785..17ddd4edf875 100644
--- a/arch/um/include/asm/mmu_context.h
+++ b/arch/um/include/asm/mmu_context.h
@@ -8,6 +8,7 @@
 
 #include <linux/sched.h>
 #include <linux/mm_types.h>
+#include <linux/mmap_lock.h>
 
 #include <asm/mmu.h>
 
@@ -47,7 +48,7 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
 	 * when the new ->mm is used for the first time.
 	 */
 	__switch_mm(&new->context.id);
-	down_write_nested(&new->mmap_sem, 1);
+	mmap_write_lock_nested(new, SINGLE_DEPTH_NESTING);
 	uml_setup_stubs(new);
 	mmap_write_unlock(new);
 }
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index 97ac53b66052..a757cb30ae77 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -11,6 +11,11 @@ static inline void mmap_write_lock(struct mm_struct *mm)
 	down_write(&mm->mmap_sem);
 }
 
+static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
+{
+	down_write_nested(&mm->mmap_sem, subclass);
+}
+
 static inline int mmap_write_lock_killable(struct mm_struct *mm)
 {
 	return down_write_killable(&mm->mmap_sem);
diff --git a/kernel/fork.c b/kernel/fork.c
index 41d3f45c058e..a5d1d20ccba7 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -499,7 +499,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
 	/*
 	 * Not linked in yet - no deadlock potential:
 	 */
-	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
+	mmap_write_lock_nested(mm, SINGLE_DEPTH_NESTING);
 
 	/* No ordering required: file already has been exposed. */
 	RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
-- 
2.26.1.301.g55bc3eb7cb9-goog
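
[Not part of the patch: a minimal usage sketch for reviewers. It relies only on the wrappers visible in the context above (mmap_write_lock(), mmap_write_unlock(), and the new mmap_write_lock_nested()); the helper name copy_under_both_locks() is invented for illustration. The pattern mirrors dup_mmap(): the second mm is not yet reachable by other tasks, so its mmap lock is taken with SINGLE_DEPTH_NESTING to tell lockdep the nesting is intentional.]

#include <linux/mm_types.h>
#include <linux/mmap_lock.h>

/* Hypothetical helper, for illustration only. */
static void copy_under_both_locks(struct mm_struct *oldmm,
				  struct mm_struct *mm)
{
	/* Take the already-visible mm first ... */
	mmap_write_lock(oldmm);
	/*
	 * ... then the not-yet-visible mm.  SINGLE_DEPTH_NESTING marks
	 * this second mmap lock as intentionally nested inside the
	 * first, the same annotation dup_mmap() uses.
	 */
	mmap_write_lock_nested(mm, SINGLE_DEPTH_NESTING);

	/* ... copy state from oldmm into mm ... */

	mmap_write_unlock(mm);
	mmap_write_unlock(oldmm);
}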