Idle code is very much like entry code in that RCU isn't available. As
such, add a little validation: move .cpuidle.text inside the noinstr
text range, turn __cpuidle into a noinstr-style section annotation, and
have objtool validate .cpuidle.text the same way it already validates
.noinstr.text and .entry.text.

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Acked-by: Geert Uytterhoeven <geert@xxxxxxxxxxxxxx>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@xxxxxxxxx>
Acked-by: Frederic Weisbecker <frederic@xxxxxxxxxx>
Tested-by: Tony Lindgren <tony@xxxxxxxxxxx>
Tested-by: Ulf Hansson <ulf.hansson@xxxxxxxxxx>
---
 arch/alpha/kernel/vmlinux.lds.S      |  1 -
 arch/arc/kernel/vmlinux.lds.S        |  1 -
 arch/arm/include/asm/vmlinux.lds.h   |  1 -
 arch/arm64/kernel/vmlinux.lds.S      |  1 -
 arch/csky/kernel/vmlinux.lds.S       |  1 -
 arch/hexagon/kernel/vmlinux.lds.S    |  1 -
 arch/ia64/kernel/vmlinux.lds.S       |  1 -
 arch/loongarch/kernel/vmlinux.lds.S  |  1 -
 arch/m68k/kernel/vmlinux-nommu.lds   |  1 -
 arch/m68k/kernel/vmlinux-std.lds     |  1 -
 arch/m68k/kernel/vmlinux-sun3.lds    |  1 -
 arch/microblaze/kernel/vmlinux.lds.S |  1 -
 arch/mips/kernel/vmlinux.lds.S       |  1 -
 arch/nios2/kernel/vmlinux.lds.S      |  1 -
 arch/openrisc/kernel/vmlinux.lds.S   |  1 -
 arch/parisc/kernel/vmlinux.lds.S     |  1 -
 arch/powerpc/kernel/vmlinux.lds.S    |  1 -
 arch/riscv/kernel/vmlinux-xip.lds.S  |  1 -
 arch/riscv/kernel/vmlinux.lds.S      |  1 -
 arch/s390/kernel/vmlinux.lds.S       |  1 -
 arch/sh/kernel/vmlinux.lds.S         |  1 -
 arch/sparc/kernel/vmlinux.lds.S      |  1 -
 arch/um/kernel/dyn.lds.S             |  1 -
 arch/um/kernel/uml.lds.S             |  1 -
 arch/x86/include/asm/irqflags.h      | 11 ++++-------
 arch/x86/include/asm/mwait.h         |  2 +-
 arch/x86/kernel/vmlinux.lds.S        |  1 -
 arch/xtensa/kernel/vmlinux.lds.S     |  1 -
 include/asm-generic/vmlinux.lds.h    |  9 +++------
 include/linux/compiler_types.h       |  8 ++++++--
 include/linux/cpu.h                  |  3 ---
 tools/objtool/check.c                | 13 +++++++++++++
 32 files changed, 27 insertions(+), 45 deletions(-)

--- a/arch/alpha/kernel/vmlinux.lds.S
+++ b/arch/alpha/kernel/vmlinux.lds.S
@@ -27,7 +27,6 @@ SECTIONS
 	HEAD_TEXT
 	TEXT_TEXT
 	SCHED_TEXT
-	CPUIDLE_TEXT
 	LOCK_TEXT
 	*(.fixup)
 	*(.gnu.warning)
--- a/arch/arc/kernel/vmlinux.lds.S
+++ b/arch/arc/kernel/vmlinux.lds.S
@@ -85,7 +85,6 @@ SECTIONS
 		_stext = .;
 		TEXT_TEXT
 		SCHED_TEXT
-		CPUIDLE_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
 		IRQENTRY_TEXT
--- a/arch/arm/include/asm/vmlinux.lds.h
+++ b/arch/arm/include/asm/vmlinux.lds.h
@@ -96,7 +96,6 @@
 		SOFTIRQENTRY_TEXT					\
 		TEXT_TEXT						\
 		SCHED_TEXT						\
-		CPUIDLE_TEXT						\
 		LOCK_TEXT						\
 		KPROBES_TEXT						\
 		ARM_STUBS_TEXT						\
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -175,7 +175,6 @@ SECTIONS
 			ENTRY_TEXT
 			TEXT_TEXT
 			SCHED_TEXT
-			CPUIDLE_TEXT
 			LOCK_TEXT
 			KPROBES_TEXT
 			HYPERVISOR_TEXT
--- a/arch/csky/kernel/vmlinux.lds.S
+++ b/arch/csky/kernel/vmlinux.lds.S
@@ -34,7 +34,6 @@ SECTIONS
 		SOFTIRQENTRY_TEXT
 		TEXT_TEXT
 		SCHED_TEXT
-		CPUIDLE_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
 		*(.fixup)
--- a/arch/hexagon/kernel/vmlinux.lds.S
+++ b/arch/hexagon/kernel/vmlinux.lds.S
@@ -41,7 +41,6 @@ SECTIONS
 		IRQENTRY_TEXT
 		SOFTIRQENTRY_TEXT
 		SCHED_TEXT
-		CPUIDLE_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
 		*(.fixup)
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -51,7 +51,6 @@ SECTIONS {
 		__end_ivt_text = .;
 		TEXT_TEXT
 		SCHED_TEXT
-		CPUIDLE_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
 		IRQENTRY_TEXT
--- a/arch/loongarch/kernel/vmlinux.lds.S
+++ b/arch/loongarch/kernel/vmlinux.lds.S
@@ -42,7 +42,6 @@ SECTIONS
 	.text : {
 		TEXT_TEXT
 		SCHED_TEXT
-		CPUIDLE_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
 		IRQENTRY_TEXT
--- a/arch/m68k/kernel/vmlinux-nommu.lds
+++ b/arch/m68k/kernel/vmlinux-nommu.lds
@@ -48,7 +48,6 @@ SECTIONS {
 		IRQENTRY_TEXT
 		SOFTIRQENTRY_TEXT
 		SCHED_TEXT
-		CPUIDLE_TEXT
 		LOCK_TEXT
 		*(.fixup)
 		. = ALIGN(16);
--- a/arch/m68k/kernel/vmlinux-std.lds
+++ b/arch/m68k/kernel/vmlinux-std.lds
@@ -19,7 +19,6 @@ SECTIONS
 	IRQENTRY_TEXT
 	SOFTIRQENTRY_TEXT
 	SCHED_TEXT
-	CPUIDLE_TEXT
 	LOCK_TEXT
 	*(.fixup)
 	*(.gnu.warning)
--- a/arch/m68k/kernel/vmlinux-sun3.lds
+++ b/arch/m68k/kernel/vmlinux-sun3.lds
@@ -19,7 +19,6 @@ SECTIONS
 	IRQENTRY_TEXT
 	SOFTIRQENTRY_TEXT
 	SCHED_TEXT
-	CPUIDLE_TEXT
 	LOCK_TEXT
 	*(.fixup)
 	*(.gnu.warning)
--- a/arch/microblaze/kernel/vmlinux.lds.S
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -36,7 +36,6 @@ SECTIONS {
 		EXIT_TEXT
 		EXIT_CALL
 		SCHED_TEXT
-		CPUIDLE_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
 		IRQENTRY_TEXT
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -61,7 +61,6 @@ SECTIONS
 	.text : {
 		TEXT_TEXT
 		SCHED_TEXT
-		CPUIDLE_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
 		IRQENTRY_TEXT
--- a/arch/nios2/kernel/vmlinux.lds.S
+++ b/arch/nios2/kernel/vmlinux.lds.S
@@ -24,7 +24,6 @@ SECTIONS
 	.text : {
 		TEXT_TEXT
 		SCHED_TEXT
-		CPUIDLE_TEXT
 		LOCK_TEXT
 		IRQENTRY_TEXT
 		SOFTIRQENTRY_TEXT
--- a/arch/openrisc/kernel/vmlinux.lds.S
+++ b/arch/openrisc/kernel/vmlinux.lds.S
@@ -52,7 +52,6 @@ SECTIONS
 		_stext = .;
 		TEXT_TEXT
 		SCHED_TEXT
-		CPUIDLE_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
 		IRQENTRY_TEXT
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -86,7 +86,6 @@ SECTIONS
 		TEXT_TEXT
 		LOCK_TEXT
 		SCHED_TEXT
-		CPUIDLE_TEXT
 		KPROBES_TEXT
 		IRQENTRY_TEXT
 		SOFTIRQENTRY_TEXT
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -111,7 +111,6 @@ SECTIONS
 #endif
 		NOINSTR_TEXT
 		SCHED_TEXT
-		CPUIDLE_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
 		IRQENTRY_TEXT
--- a/arch/riscv/kernel/vmlinux-xip.lds.S
+++ b/arch/riscv/kernel/vmlinux-xip.lds.S
@@ -39,7 +39,6 @@ SECTIONS
 		_stext = .;
 		TEXT_TEXT
 		SCHED_TEXT
-		CPUIDLE_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
 		ENTRY_TEXT
--- a/arch/riscv/kernel/vmlinux.lds.S
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -42,7 +42,6 @@ SECTIONS
 		_stext = .;
 		TEXT_TEXT
 		SCHED_TEXT
-		CPUIDLE_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
 		ENTRY_TEXT
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -42,7 +42,6 @@ SECTIONS
 		HEAD_TEXT
 		TEXT_TEXT
 		SCHED_TEXT
-		CPUIDLE_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
 		IRQENTRY_TEXT
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -29,7 +29,6 @@ SECTIONS
 		HEAD_TEXT
 		TEXT_TEXT
 		SCHED_TEXT
-		CPUIDLE_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
 		IRQENTRY_TEXT
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -50,7 +50,6 @@ SECTIONS
 		HEAD_TEXT
 		TEXT_TEXT
 		SCHED_TEXT
-		CPUIDLE_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
 		IRQENTRY_TEXT
--- a/arch/um/kernel/dyn.lds.S
+++ b/arch/um/kernel/dyn.lds.S
@@ -74,7 +74,6 @@ SECTIONS
 		_stext = .;
 		TEXT_TEXT
 		SCHED_TEXT
-		CPUIDLE_TEXT
 		LOCK_TEXT
 		IRQENTRY_TEXT
 		SOFTIRQENTRY_TEXT
--- a/arch/um/kernel/uml.lds.S
+++ b/arch/um/kernel/uml.lds.S
@@ -35,7 +35,6 @@ SECTIONS
 		_stext = .;
 		TEXT_TEXT
 		SCHED_TEXT
-		CPUIDLE_TEXT
 		LOCK_TEXT
 		IRQENTRY_TEXT
 		SOFTIRQENTRY_TEXT
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -8,9 +8,6 @@
 
 #include <asm/nospec-branch.h>
 
-/* Provide __cpuidle; we can't safely include <linux/cpu.h> */
-#define __cpuidle __section(".cpuidle.text")
-
 /*
  * Interrupt control:
  */
@@ -45,13 +42,13 @@ static __always_inline void native_irq_e
 	asm volatile("sti": : :"memory");
 }
 
-static inline __cpuidle void native_safe_halt(void)
+static __always_inline void native_safe_halt(void)
 {
 	mds_idle_clear_cpu_buffers();
 	asm volatile("sti; hlt": : :"memory");
 }
 
-static inline __cpuidle void native_halt(void)
+static __always_inline void native_halt(void)
 {
 	mds_idle_clear_cpu_buffers();
 	asm volatile("hlt": : :"memory");
 }
@@ -84,7 +81,7 @@ static __always_inline void arch_local_i
  * Used in the idle loop; sti takes one instruction cycle
  * to complete:
  */
-static inline __cpuidle void arch_safe_halt(void)
+static __always_inline void arch_safe_halt(void)
 {
 	native_safe_halt();
 }
@@ -93,7 +90,7 @@ static inline __cpuidle void arch_safe_h
  * Used when interrupts are already enabled or to
  * shutdown the processor:
  */
-static inline __cpuidle void halt(void)
+static __always_inline void halt(void)
 {
 	native_halt();
 }
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -105,7 +105,7 @@ static inline void __sti_mwait(unsigned
  * New with Core Duo processors, MWAIT can take some hints based on CPU
  * capability.
  */
-static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
+static __always_inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
 {
 	if (static_cpu_has_bug(X86_BUG_MONITOR) || !current_set_polling_and_test()) {
 		if (static_cpu_has_bug(X86_BUG_CLFLUSH_MONITOR)) {
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -129,7 +129,6 @@ SECTIONS
 		HEAD_TEXT
 		TEXT_TEXT
 		SCHED_TEXT
-		CPUIDLE_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
 		SOFTIRQENTRY_TEXT
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -125,7 +125,6 @@ SECTIONS
 		ENTRY_TEXT
 		TEXT_TEXT
 		SCHED_TEXT
-		CPUIDLE_TEXT
 		LOCK_TEXT
 		*(.fixup)
 	}
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -580,6 +580,9 @@
 		ALIGN_FUNCTION();					\
 		__noinstr_text_start = .;				\
 		*(.noinstr.text)					\
+		__cpuidle_text_start = .;				\
+		*(.cpuidle.text)					\
+		__cpuidle_text_end = .;					\
 		__noinstr_text_end = .;
 
 /*
@@ -620,12 +623,6 @@
 		*(.spinlock.text)					\
 		__lock_text_end = .;
 
-#define CPUIDLE_TEXT							\
-		ALIGN_FUNCTION();					\
-		__cpuidle_text_start = .;				\
-		*(.cpuidle.text)					\
-		__cpuidle_text_end = .;
-
 #define KPROBES_TEXT							\
 		ALIGN_FUNCTION();					\
 		__kprobes_text_start = .;				\
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -232,11 +232,15 @@ struct ftrace_likely_data {
 #endif
 
 /* Section for code which can't be instrumented at all */
-#define noinstr								\
-	noinline notrace __attribute((__section__(".noinstr.text")))	\
+#define __noinstr_section(section)					\
+	noinline notrace __attribute((__section__(section)))		\
 	__no_kcsan __no_sanitize_address __no_profile __no_sanitize_coverage \
 	__no_sanitize_memory
 
+#define noinstr __noinstr_section(".noinstr.text")
+
+#define __cpuidle __noinstr_section(".cpuidle.text")
+
 #endif /* __KERNEL__ */
 
 #endif /* __ASSEMBLY__ */
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -176,9 +176,6 @@ void __noreturn cpu_startup_entry(enum c
 
 void cpu_idle_poll_ctrl(bool enable);
 
-/* Attach to any functions which should be considered cpuidle. */
-#define __cpuidle __section(".cpuidle.text")
-
 bool cpu_in_idle(unsigned long pc);
 
 void arch_cpu_idle(void);
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -376,6 +376,7 @@ static int decode_instructions(struct ob
 
 		if (!strcmp(sec->name, ".noinstr.text") ||
 		    !strcmp(sec->name, ".entry.text") ||
+		    !strcmp(sec->name, ".cpuidle.text") ||
 		    !strncmp(sec->name, ".text.__x86.", 12))
 			sec->noinstr = true;
 
@@ -3361,6 +3362,12 @@ static inline bool noinstr_call_dest(str
 		return true;
 
 	/*
+	 * If the symbol is a static_call trampoline, we can't tell.
+	 */
+	if (func->static_call_tramp)
+		return true;
+
+	/*
 	 * The __ubsan_handle_*() calls are like WARN(), they only happen when
 	 * something 'BAD' happened. At the risk of taking the machine down,
 	 * let them proceed to get the message out.
@@ -4155,6 +4162,12 @@ static int validate_noinstr_sections(str
 	if (sec) {
 		warnings += validate_section(file, sec);
 		warnings += validate_unwind_hints(file, sec);
+	}
+
+	sec = find_section_by_name(file->elf, ".cpuidle.text");
+	if (sec) {
+		warnings += validate_section(file, sec);
+		warnings += validate_unwind_hints(file, sec);
 	}
 
 	return warnings;
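
For readers of the patch, a minimal sketch (not part of the patch) of what
the new __cpuidle means for idle code: it now carries the full noinstr
attribute set, only emitted into .cpuidle.text, which sits inside the
noinstr text range and is checked by objtool like .noinstr.text and
.entry.text. The function name below is made up for illustration:

	/*
	 * Hypothetical idle helper. __cpuidle now expands to
	 * __noinstr_section(".cpuidle.text"): noinline, notrace, no
	 * sanitizer/profile instrumentation, placed in .cpuidle.text.
	 */
	static __cpuidle void example_wait_for_interrupt(void)
	{
		/*
		 * RCU isn't available here, so no tracepoints and no
		 * calls into instrumented code; objtool now warns about
		 * such calls, exactly as it does for noinstr code.
		 */
		arch_safe_halt();	/* the x86 sti;hlt helper made __always_inline above */
	}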