> On Fri, 1 Apr 2022 21:46:49 +0800 Zqiang <qiang1.zhang@xxxxxxxxx> wrote:
>
> > When kmem_cache_shrink() is called, an IPI is triggered and
> > ___cache_free() runs in IPI interrupt context, where a local_lock or
> > spinlock is acquired.  On a PREEMPT_RT kernel these locks are
> > replaced with sleepable rt-spinlocks, so the above problem is
> > triggered.  Fix it by moving qlist_free_all() from IPI interrupt
> > context to task context when PREEMPT_RT is enabled.
>
> This patch is rather ifdeffy so I propose the below cleanup.  Please
> review and test?

Thanks, it looks clearer.

> Note that it incorporates the changes from your
> https://lkml.kernel.org/r/20220414025925.2423818-1-qiang1.zhang@xxxxxxxxx
>
> btw, how are we supposed to test PREEMPT_RT builds?  I had to patch
> arch/Kconfig.

The attachment is the config file I used.  I tested against the
linux-5.17.y-rt branch
(https://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git),
and the patch passed my testing.

Thanks
Zqiang

> --- a/mm/kasan/quarantine.c~kasan-fix-sleeping-function-called-from-invalid-context-on-rt-kernel-fix
> +++ a/mm/kasan/quarantine.c
> @@ -319,28 +319,37 @@ static void qlist_move_cache(struct qlis
>  	}
>  }
>  
> -static void per_cpu_remove_cache(void *arg)
> +#ifndef CONFIG_PREEMPT_RT
> +static void __per_cpu_remove_cache(struct qlist_head *q, void *arg)
>  {
>  	struct kmem_cache *cache = arg;
> -	struct qlist_head *q;
> -#ifndef CONFIG_PREEMPT_RT
>  	struct qlist_head to_free = QLIST_INIT;
> -#else
> -	unsigned long flags;
> -	struct cpu_shrink_qlist *sq;
> -#endif
> -	q = this_cpu_ptr(&cpu_quarantine);
> -#ifndef CONFIG_PREEMPT_RT
> -	if (READ_ONCE(q->offline))
> -		return;
> +
>  	qlist_move_cache(q, &to_free, cache);
>  	qlist_free_all(&to_free, cache);
> +}
>  #else
> +static void __per_cpu_remove_cache(struct qlist_head *q, void *arg)
> +{
> +	struct kmem_cache *cache = arg;
> +	unsigned long flags;
> +	struct cpu_shrink_qlist *sq;
> +
>  	sq = this_cpu_ptr(&shrink_qlist);
>  	raw_spin_lock_irqsave(&sq->lock, flags);
>  	qlist_move_cache(q, &sq->qlist, cache);
>  	raw_spin_unlock_irqrestore(&sq->lock, flags);
> +}
>  #endif
> +
> +static void per_cpu_remove_cache(void *arg)
> +{
> +	struct qlist_head *q;
> +
> +	q = this_cpu_ptr(&cpu_quarantine);
> +	if (READ_ONCE(q->offline))
> +		return;
> +	__per_cpu_remove_cache(q, arg);
>  }
>  
>  /* Free all quarantined objects belonging to cache. */
> @@ -348,10 +357,6 @@ void kasan_quarantine_remove_cache(struc
>  {
>  	unsigned long flags, i;
>  	struct qlist_head to_free = QLIST_INIT;
> -#ifdef CONFIG_PREEMPT_RT
> -	int cpu;
> -	struct cpu_shrink_qlist *sq;
> -#endif
>  
>  	/*
>  	 * Must be careful to not miss any objects that are being moved from
> @@ -363,13 +368,18 @@ void kasan_quarantine_remove_cache(struc
>  	on_each_cpu(per_cpu_remove_cache, cache, 1);
>  
>  #ifdef CONFIG_PREEMPT_RT
> -	for_each_online_cpu(cpu) {
> -		sq = per_cpu_ptr(&shrink_qlist, cpu);
> -		raw_spin_lock_irqsave(&sq->lock, flags);
> -		qlist_move_cache(&sq->qlist, &to_free, cache);
> -		raw_spin_unlock_irqrestore(&sq->lock, flags);
> +	{
> +		int cpu;
> +		struct cpu_shrink_qlist *sq;
> +
> +		for_each_online_cpu(cpu) {
> +			sq = per_cpu_ptr(&shrink_qlist, cpu);
> +			raw_spin_lock_irqsave(&sq->lock, flags);
> +			qlist_move_cache(&sq->qlist, &to_free, cache);
> +			raw_spin_unlock_irqrestore(&sq->lock, flags);
> +		}
> +		qlist_free_all(&to_free, cache);
>  	}
> -	qlist_free_all(&to_free, cache);
>  #endif
>  
>  	raw_spin_lock_irqsave(&quarantine_lock, flags);
> _
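A side note for readers: collapsed to just the PREEMPT_RT=y case, the
logic the cleaned-up code implements looks roughly like the sketch
below.  It is hand-assembled from the hunks above and not compiled;
cpu_quarantine, QLIST_INIT and the qlist_* helpers are the file-local
ones in mm/kasan/quarantine.c, and the real
kasan_quarantine_remove_cache() does additional quarantine_lock
bookkeeping that is omitted here.  The shape is the point: on RT an
IPI handler may only take a raw_spinlock_t, so it merely parks objects
on a per-CPU list; the task-context caller drains every CPU's list and
does the freeing, where sleeping locks are legal.  Because
on_each_cpu(..., 1) waits for every handler to complete, nothing is in
flight when the lists are drained.

/* PREEMPT_RT=y flow with the ifdefs collapsed -- sketch, not the literal file. */
struct cpu_shrink_qlist {
	raw_spinlock_t lock;		/* raw lock: safe in IPI context on RT */
	struct qlist_head qlist;	/* objects parked by the IPI handler */
};

static DEFINE_PER_CPU(struct cpu_shrink_qlist, shrink_qlist) = {
	.lock = __RAW_SPIN_LOCK_UNLOCKED(shrink_qlist.lock),
};

/* IPI context on each CPU: park the objects, do not free them. */
static void per_cpu_remove_cache(void *arg)
{
	struct kmem_cache *cache = arg;
	struct qlist_head *q = this_cpu_ptr(&cpu_quarantine);
	struct cpu_shrink_qlist *sq = this_cpu_ptr(&shrink_qlist);
	unsigned long flags;

	if (READ_ONCE(q->offline))
		return;
	raw_spin_lock_irqsave(&sq->lock, flags);
	qlist_move_cache(q, &sq->qlist, cache);
	raw_spin_unlock_irqrestore(&sq->lock, flags);
}

/* Task context: gather every CPU's parked objects, then free them. */
void kasan_quarantine_remove_cache(struct kmem_cache *cache)
{
	struct qlist_head to_free = QLIST_INIT;
	unsigned long flags;
	int cpu;

	on_each_cpu(per_cpu_remove_cache, cache, 1);	/* waits for all IPIs */
	for_each_online_cpu(cpu) {
		struct cpu_shrink_qlist *sq = per_cpu_ptr(&shrink_qlist, cpu);

		raw_spin_lock_irqsave(&sq->lock, flags);
		qlist_move_cache(&sq->qlist, &to_free, cache);
		raw_spin_unlock_irqrestore(&sq->lock, flags);
	}
	qlist_free_all(&to_free, cache);	/* may sleep on RT; fine in task context */
}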
Attachment: rt-config
Description: rt-config
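On the "how to test PREEMPT_RT builds" question: in a v5.17-era
mainline tree, CONFIG_PREEMPT_RT sits behind "depends on EXPERT &&
ARCH_SUPPORTS_RT" in kernel/Kconfig.preempt, and as far as I can tell
no mainline architecture selected ARCH_SUPPORTS_RT at that point; the
rt-devel branch used above carries the arch enablement.  A guess at
the kind of local arch/Kconfig hack being described, for testing only
(hypothetical, untested):

config ARCH_SUPPORTS_RT
	def_bool y

i.e. forcing the opt-in symbol on unconditionally so that the "Fully
Preemptible Kernel (Real-Time)" choice appears under the preemption
model menu.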