On Tue, Sep 17, 2013 at 10:43:25AM +0100, Markos Chandras wrote:
> The cache flushing code uses the current_cpu_data macro which
> may cause problems in preemptive kernels because it relies on
> smp_processor_id() to get the current cpu number. Per cpu-data
> needs to be protected so we disable preemption around the flush
> caching code. We enable it back when we are about to return.
>
> Fixes the following problem:
>
> BUG: using smp_processor_id() in preemptible [00000000] code: kjournald/1761
> caller is blast_dcache32+0x30/0x254

Just what I feared - these messages popping out from all over the tree.

I'd prefer if we change the caller; otherwise, depending on the platform,
a single cache flush might involve several preempt_disable/-enable
invocations.  Something like below.  It also keeps the header file more
usable outside the core kernel, which makes Florian's recent zboot work a
little easier.

However, maybe we'd be even better off just switching to boot_cpu_data.
That should be fine since r4k_dma_cache_* are only being used on
uniprocessor systems anyway.

  Ralf

 arch/mips/mm/c-r4k.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 3ff2f74..73ca8c5 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -12,6 +12,7 @@
 #include <linux/highmem.h>
 #include <linux/kernel.h>
 #include <linux/linkage.h>
+#include <linux/preempt.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
 #include <linux/mm.h>
@@ -602,6 +603,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
 	/* Catch bad driver code */
 	BUG_ON(size == 0);
 
+	preempt_disable();
 	if (cpu_has_inclusive_pcaches) {
 		if (size >= scache_size)
 			r4k_blast_scache();
@@ -622,6 +624,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
 		R4600_HIT_CACHEOP_WAR_IMPL;
 		blast_dcache_range(addr, addr + size);
 	}
+	preempt_enable();
 
 	bc_wback_inv(addr, size);
 	__sync();
@@ -632,6 +635,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
 	/* Catch bad driver code */
 	BUG_ON(size == 0);
 
+	preempt_disable();
 	if (cpu_has_inclusive_pcaches) {
 		if (size >= scache_size)
 			r4k_blast_scache();
@@ -656,6 +660,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
 		R4600_HIT_CACHEOP_WAR_IMPL;
 		blast_inv_dcache_range(addr, addr + size);
 	}
+	preempt_enable();
 
 	bc_inv(addr, size);
 	__sync();
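
As a footnote to the boot_cpu_data suggestion above: boot_cpu_data is
cpu_data[0], so it can be read without calling smp_processor_id() and is
therefore safe to touch from preemptible context. A minimal sketch of a
blast-style loop that takes the D-cache geometry from boot_cpu_data rather
than current_cpu_data could look like the following; the function name is
made up for illustration, and the cache_desc fields are assumed to match
what the existing blast_*cache macros in r4kcache.h use.

/*
 * Sketch only, not part of the patch above: index-walk the D-cache
 * using the boot CPU's cache geometry.  No smp_processor_id() call is
 * needed, so the loop may run with preemption enabled.
 */
static inline void blast_dcache_boot(void)
{
	unsigned long lsize = cpu_dcache_line_size();
	unsigned long start = INDEX_BASE;
	unsigned long end = start + boot_cpu_data.dcache.waysize;
	unsigned long ws_inc = 1UL << boot_cpu_data.dcache.waybit;
	unsigned long ws_end = boot_cpu_data.dcache.ways <<
			       boot_cpu_data.dcache.waybit;
	unsigned long ws, addr;

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += lsize)
			cache_op(Index_Writeback_Inv_D, addr + ws);
}

Since r4k_dma_cache_* is only used on uniprocessor systems, the boot CPU's
cache geometry is by definition the current CPU's, so nothing would be lost
by reading cpu_data[0].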