On Mon, Oct 31, 2016 at 09:37:00AM +0800, Wanpeng Li wrote:
> | RCU used illegally from idle CPU!
> | rcu_scheduler_active = 1, debug_locks = 0
> | RCU used illegally from extended quiescent state!
> | no locks held by swapper/1/0.
> |
> |  [<ffffffff9d492b95>] do_trace_write_msr+0x135/0x140
> |  [<ffffffff9d06f860>] native_write_msr+0x20/0x30
> |  [<ffffffff9d065fad>] native_apic_msr_eoi_write+0x1d/0x30
> |  [<ffffffff9d05bd1d>] smp_reschedule_interrupt+0x1d/0x30
> |  [<ffffffff9d8daec6>] reschedule_interrupt+0x96/0xa0

On top of the two:

---
From: Borislav Petkov <bp@xxxxxxx>
Date: Wed, 2 Nov 2016 19:35:22 +0100
Subject: [PATCH] x86/MSR: Cleanup/streamline MSR helpers

Make the MSR argument an unsigned int, both low and high u32, and put
"notrace" last in the function signature. Reflow function signatures
for better readability.

Clean up white space.

Signed-off-by: Borislav Petkov <bp@xxxxxxx>
---
 arch/x86/include/asm/msr.h | 40 +++++++++++++++++++++-------------------
 arch/x86/lib/msr.c         |  4 ++--
 2 files changed, 23 insertions(+), 21 deletions(-)

diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 9b0a23236836..db0b90c3b03e 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -70,14 +70,14 @@ extern struct tracepoint __tracepoint_read_msr;
 extern struct tracepoint __tracepoint_write_msr;
 extern struct tracepoint __tracepoint_rdpmc;
 #define msr_tracepoint_active(t) static_key_false(&(t).key)
-extern void do_trace_write_msr(unsigned msr, u64 val, int failed);
-extern void do_trace_read_msr(unsigned msr, u64 val, int failed);
-extern void do_trace_rdpmc(unsigned msr, u64 val, int failed);
+extern void do_trace_write_msr(unsigned int msr, u64 val, int failed);
+extern void do_trace_read_msr(unsigned int msr, u64 val, int failed);
+extern void do_trace_rdpmc(unsigned int msr, u64 val, int failed);
 #else
 #define msr_tracepoint_active(t) false
-static inline void do_trace_write_msr(unsigned msr, u64 val, int failed) {}
-static inline void do_trace_read_msr(unsigned msr, u64 val, int failed) {}
-static inline void do_trace_rdpmc(unsigned msr, u64 val, int failed) {}
+static inline void do_trace_write_msr(unsigned int msr, u64 val, int failed) {}
+static inline void do_trace_read_msr(unsigned int msr, u64 val, int failed) {}
+static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
 #endif
 
 static inline unsigned long long native_read_msr(unsigned int msr)
@@ -115,8 +115,8 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
 }
 
 /* Can be uninlined because referenced by paravirt */
-static notrace inline void __native_write_msr_notrace(unsigned int msr,
-						       unsigned low, unsigned high)
+static inline void notrace
+__native_write_msr_notrace(unsigned int msr, u32 low, u32 high)
 {
 	asm volatile("1: wrmsr\n"
 		     "2:\n"
@@ -125,24 +125,26 @@ static notrace inline void __native_write_msr_notrace(unsigned int msr,
 }
 
 /* Can be uninlined because referenced by paravirt */
-static notrace inline void native_write_msr(unsigned int msr,
-					    unsigned low, unsigned high)
+static inline void notrace
+native_write_msr(unsigned int msr, u32 low, u32 high)
 {
 	__native_write_msr_notrace(msr, low, high);
 	if (msr_tracepoint_active(__tracepoint_write_msr))
 		do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
 }
 
-static inline void wrmsr_notrace(unsigned msr, unsigned low, unsigned high)
+static inline void
+wrmsr_notrace(unsigned int msr, u32 low, u32 high)
 {
 	__native_write_msr_notrace(msr, low, high);
 }
 
 /* Can be uninlined because referenced by paravirt */
-notrace static inline int native_write_msr_safe(unsigned int msr,
-					unsigned low, unsigned high)
+static inline int notrace
+native_write_msr_safe(unsigned int msr, u32 low, u32 high)
 {
 	int err;
+
 	asm volatile("2: wrmsr ; xor %[err],%[err]\n"
 		     "1:\n\t"
 		     ".section .fixup,\"ax\"\n\t"
@@ -235,7 +237,7 @@ do {							\
 	(void)((high) = (u32)(__val >> 32));		\
 } while (0)
 
-static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
+static inline void wrmsr(unsigned int msr, u32 low, u32 high)
 {
 	native_write_msr(msr, low, high);
 }
@@ -243,13 +245,13 @@ static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
 #define rdmsrl(msr, val)			\
 	((val) = native_read_msr((msr)))
 
-static inline void wrmsrl(unsigned msr, u64 val)
+static inline void wrmsrl(unsigned int msr, u64 val)
 {
 	native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
 }
 
 /* wrmsr with exception handling */
-static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
+static inline int wrmsr_safe(unsigned int msr, u32 low, u32 high)
 {
 	return native_write_msr_safe(msr, low, high);
 }
@@ -264,7 +266,7 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
 	__err;							\
 })
 
-static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
+static inline int rdmsrl_safe(unsigned int msr, unsigned long long *p)
 {
 	int err;
 
@@ -337,12 +339,12 @@ static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
 static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
 				struct msr *msrs)
 {
-       rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
+	rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
 }
 static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
 				struct msr *msrs)
 {
-       wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
+	wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
 }
 static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c
index d1dee753b949..07764255b611 100644
--- a/arch/x86/lib/msr.c
+++ b/arch/x86/lib/msr.c
@@ -113,14 +113,14 @@ int msr_clear_bit(u32 msr, u8 bit)
 }
 
 #ifdef CONFIG_TRACEPOINTS
-void do_trace_write_msr(unsigned msr, u64 val, int failed)
+void do_trace_write_msr(unsigned int msr, u64 val, int failed)
 {
 	trace_write_msr(msr, val, failed);
 }
 EXPORT_SYMBOL(do_trace_write_msr);
 EXPORT_TRACEPOINT_SYMBOL(write_msr);
 
-void do_trace_read_msr(unsigned msr, u64 val, int failed)
+void do_trace_read_msr(unsigned int msr, u64 val, int failed)
 {
 	trace_read_msr(msr, val, failed);
 }
-- 
2.10.0

-- 
Regards/Gruss,
    Boris.

ECO tip #101: Trim your mails when you reply.
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html
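
P.S.: For context, a minimal sketch (not part of the patch above) of the
kind of caller the earlier two patches are aimed at: the APIC EOI write in
the quoted backtrace can use the wrmsr_notrace() helper so that no
write_msr tracepoint (and hence no RCU usage) is reached from an extended
quiescent state. The function name below is made up for illustration; the
APIC constants are the usual ones from <asm/apicdef.h>, and the exact
caller change is not shown in this mail.

/* Illustrative sketch only -- not taken from the patch above. */
#include <asm/apic.h>
#include <asm/msr.h>

static void example_x2apic_eoi_write(u32 reg, u32 v)
{
	/*
	 * A plain wrmsr() goes through native_write_msr(), which calls
	 * do_trace_write_msr() and thus uses RCU -- the source of the
	 * "RCU used illegally from extended quiescent state" splat when
	 * the EOI is written from idle. wrmsr_notrace() skips the
	 * tracepoint entirely.
	 */
	wrmsr_notrace(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0);
}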