This patch provides functions to lock and unlock access to the "core-other"
register region of the CPC. Without appropriate locking, code using this
region may be preempted, or may race with code on another VPE within the
same core, with one changing the core which the "core-other" region is
acting upon at an inopportune time for the other.

Signed-off-by: Paul Burton <paul.burton@xxxxxxxxxx>
---
 arch/mips/include/asm/mips-cpc.h | 27 +++++++++++++++++++++++++++
 arch/mips/kernel/mips-cpc.c      | 26 ++++++++++++++++++++++++++
 2 files changed, 53 insertions(+)

diff --git a/arch/mips/include/asm/mips-cpc.h b/arch/mips/include/asm/mips-cpc.h
index c5bb609..e139a53 100644
--- a/arch/mips/include/asm/mips-cpc.h
+++ b/arch/mips/include/asm/mips-cpc.h
@@ -152,4 +152,31 @@ BUILD_CPC_Cx_RW(other, 0x10)
 #define CPC_Cx_OTHER_CORENUM_SHF	16
 #define CPC_Cx_OTHER_CORENUM_MSK	(_ULCAST_(0xff) << 16)
 
+#ifdef CONFIG_MIPS_CPC
+
+/**
+ * mips_cpc_lock_other - lock access to another core
+ * core: the other core to be accessed
+ *
+ * Call before operating upon a core via the 'other' register region in
+ * order to prevent the region being moved during access. Must be followed
+ * by a call to mips_cpc_unlock_other.
+ */
+extern void mips_cpc_lock_other(unsigned int core);
+
+/**
+ * mips_cpc_unlock_other - unlock access to another core
+ *
+ * Call after operating upon another core via the 'other' register region.
+ * Must be called after mips_cpc_lock_other.
+ */
+extern void mips_cpc_unlock_other(void);
+
+#else /* !CONFIG_MIPS_CPC */
+
+static inline void mips_cpc_lock_other(unsigned int core) { }
+static inline void mips_cpc_unlock_other(void) { }
+
+#endif /* !CONFIG_MIPS_CPC */
+
 #endif /* __MIPS_ASM_MIPS_CPC_H__ */
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
index c9dc674..2368fc5 100644
--- a/arch/mips/kernel/mips-cpc.c
+++ b/arch/mips/kernel/mips-cpc.c
@@ -15,6 +15,10 @@
 
 void __iomem *mips_cpc_base;
 
+static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);
+
+static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
+
 phys_t __weak mips_cpc_phys_base(void)
 {
 	u32 cpc_base;
@@ -39,6 +43,10 @@ phys_t __weak mips_cpc_phys_base(void)
 int mips_cpc_probe(void)
 {
 	phys_t addr;
+	unsigned cpu;
+
+	for_each_possible_cpu(cpu)
+		spin_lock_init(&per_cpu(cpc_core_lock, cpu));
 
 	addr = mips_cpc_phys_base();
 	if (!addr)
@@ -50,3 +58,21 @@ int mips_cpc_probe(void)
 
 	return 0;
 }
+
+void mips_cpc_lock_other(unsigned int core)
+{
+	unsigned curr_core;
+	preempt_disable();
+	curr_core = current_cpu_data.core;
+	spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
+			  per_cpu(cpc_core_lock_flags, curr_core));
+	write_cpc_cl_other(core << CPC_Cx_OTHER_CORENUM_SHF);
+}
+
+void mips_cpc_unlock_other(void)
+{
+	unsigned curr_core = current_cpu_data.core;
+	spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),
+			       per_cpu(cpc_core_lock_flags, curr_core));
+	preempt_enable();
+}
-- 
1.8.5.3
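
For illustration only (not part of the patch): a minimal sketch of how a
caller might wrap a core-other access with the new helpers. The wrapper
name is hypothetical, and the write_cpc_co_cmd() accessor and the
CPC_Cx_CMD_PWRUP command value are assumed to be provided by
asm/mips-cpc.h.

#include <asm/mips-cpc.h>

/*
 * Hypothetical example: power up another core through the "core-other"
 * region, keeping the region pointed at 'core' for the duration of the
 * access.
 */
static void example_power_up_core(unsigned int core)
{
	/* Disable preemption, take this core's lock, redirect "core-other" */
	mips_cpc_lock_other(core);

	/* Core-other accesses now operate on 'core' */
	write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);

	/* Drop the lock and re-enable preemption */
	mips_cpc_unlock_other();
}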