On Fri, Aug 14, 2020 at 07:14:25AM -0700, Paul E. McKenney wrote:
> On Fri, Aug 14, 2020 at 10:30:37AM +0200, Peter Zijlstra wrote:
> > On Fri, Aug 14, 2020 at 01:59:04AM +0200, Thomas Gleixner wrote:

[ . . . ]

> > > > 3.	Reusing existing GFP_ flags/values/whatever to communicate
> > > > 	the raw-context information that was to be communicated by
> > > > 	the new GFP_ flag.
> > > >
> > > > 4.	Making lockdep forgive acquiring spinlocks while holding
> > > > 	raw spinlocks, but only in CONFIG_PREEMPT_NONE=y kernels.
> >
> > Uhh, !CONFIG_PREEMPT_RT, the rest is 'fine'.
>
> I would be OK with either.  In CONFIG_PREEMPT_NONE=n kernels, the
> kfree_rcu() code could use preemptible() to determine whether it was
> safe to invoke the allocator.  The code in kfree_rcu() might look like
> this:
>
> 	mem = NULL;
> 	if (IS_ENABLED(CONFIG_PREEMPT_NONE) || preemptible())
> 		mem = __get_free_page(...);
>
> Is your point that the usual mistakes would then be caught by the
> usual testing on CONFIG_PREEMPT_NONE=n kernels?

Just to make sure we are talking about the same thing, please see below
for an untested patch that illustrates how I was interpreting your words.
Was this what you had in mind?

							Thanx, Paul

------------------------------------------------------------------------

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 62a382d..42d0ff1 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -579,7 +579,7 @@ do {									\
 # define lockdep_assert_preemption_disabled() do { } while (0)
 #endif
 
-#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
+#ifdef CONFIG_PROVE_RAW_LOCK_NESTING_EFFECTIVE
 
 # define lockdep_assert_RT_in_threaded_ctx() do {			\
 		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h
index bb35b44..70867d58 100644
--- a/include/linux/lockdep_types.h
+++ b/include/linux/lockdep_types.h
@@ -20,7 +20,7 @@ enum lockdep_wait_type {
 	LD_WAIT_FREE,		/* wait free, rcu etc.. */
 	LD_WAIT_SPIN,		/* spin loops, raw_spinlock_t etc.. */
 
-#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
+#ifdef CONFIG_PROVE_RAW_LOCK_NESTING_EFFECTIVE
 	LD_WAIT_CONFIG,		/* CONFIG_PREEMPT_LOCK, spinlock_t etc.. */
 #else
 	LD_WAIT_CONFIG = LD_WAIT_SPIN,
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e068c3c..e02de40 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1215,6 +1215,9 @@ config PROVE_RAW_LOCK_NESTING
 
 	 If unsure, select N.
 
+config PROVE_RAW_LOCK_NESTING_EFFECTIVE
+	def_bool PROVE_RAW_LOCK_NESTING && !PREEMPTION
+
 config LOCK_STAT
 	bool "Lock usage statistics"
 	depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
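
For illustration only, here is a minimal sketch of how the
preemptible()-based guard quoted above might be packaged up.  The helper
name kfree_rcu_try_alloc_page() and the GFP_NOWAIT | __GFP_NOWARN flags
are assumptions made for this sketch, not something taken from the patch:

	#include <linux/gfp.h>		/* __get_free_page(), GFP_NOWAIT, __GFP_NOWARN */
	#include <linux/kconfig.h>	/* IS_ENABLED() */
	#include <linux/preempt.h>	/* preemptible() */

	/* Return a page for kfree_rcu()'s use, or 0 if none can be had safely. */
	static unsigned long kfree_rcu_try_alloc_page(void)
	{
		/*
		 * CONFIG_PREEMPT_NONE=y kernels hard-wire preemptible() to
		 * zero, so the IS_ENABLED() check keeps the allocation
		 * attempt alive there; all other kernels allocate only from
		 * preemptible context, where acquiring the allocator's
		 * spinlock_t locks is safe.
		 */
		if (!IS_ENABLED(CONFIG_PREEMPT_NONE) && !preemptible())
			return 0;

		/* Never sleep and never warn; callers must tolerate failure. */
		return __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
	}

The caller would fall back to its existing out-of-memory path whenever
this helper returns zero, just as it would if __get_free_page() itself
had failed.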