This is a note to let you know that I've just added the patch titled

    x86/boot: Annotate local functions

to the 5.4-stable tree which can be found at:

    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     x86-boot-annotate-local-functions.patch
and it can be found in the queue-5.4 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.



commit 6e1dd0156493e0c9c62d07759f14944698875170
Author: Jiri Slaby <jirislaby@xxxxxxxxxx>
Date:   Fri Oct 11 13:50:47 2019 +0200

    x86/boot: Annotate local functions

    [ Upstream commit deff8a24e1021fb39dddf5f6bc5832e0e3a632ea ]

    .Lrelocated, .Lpaging_enabled, .Lno_longmode, and .Lin_pm32 are
    self-standing local functions, annotate them as such and preserve "no
    alignment".

    The annotations do not generate anything yet.

    Signed-off-by: Jiri Slaby <jslaby@xxxxxxx>
    Signed-off-by: Borislav Petkov <bp@xxxxxxx>
    Cc: Cao jin <caoj.fnst@xxxxxxxxxxxxxx>
    Cc: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
    Cc: "H. Peter Anvin" <hpa@xxxxxxxxx>
    Cc: Ingo Molnar <mingo@xxxxxxxxxx>
    Cc: Kate Stewart <kstewart@xxxxxxxxxxxxxxxxxxx>
    Cc: "Kirill A. Shutemov" <kirill.shutemov@xxxxxxxxxxxxxxx>
    Cc: linux-arch@xxxxxxxxxxxxxxx
    Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
    Cc: Wei Huang <wei@xxxxxxxxxx>
    Cc: x86-ml <x86@xxxxxxxxxx>
    Cc: Xiaoyao Li <xiaoyao.li@xxxxxxxxxxxxxxx>
    Link: https://lkml.kernel.org/r/20191011115108.12392-8-jslaby@xxxxxxx
    Stable-dep-of: 264b82fdb498 ("x86/decompressor: Don't rely on upper 32 bits of GPRs being preserved")
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>

diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index d7c0fcc1dbf9e..b788b986f3351 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -210,7 +210,7 @@ ENDPROC(efi32_stub_entry)
 #endif
 
 	.text
-.Lrelocated:
+SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
 
 /*
  * Clear BSS (stack is currently empty)
@@ -261,6 +261,7 @@ ENDPROC(efi32_stub_entry)
  */
 	xorl	%ebx, %ebx
 	jmp	*%eax
+SYM_FUNC_END(.Lrelocated)
 
 #ifdef CONFIG_EFI_STUB
 	.data
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 50c9eeb36f0d8..95ee795d97964 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -517,7 +517,7 @@ ENDPROC(efi64_stub_entry)
 #endif
 
 	.text
-.Lrelocated:
+SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
 
 /*
  * Clear BSS (stack is currently empty)
@@ -546,6 +546,7 @@ ENDPROC(efi64_stub_entry)
  * Jump to the decompressed kernel.
  */
 	jmp	*%rax
+SYM_FUNC_END(.Lrelocated)
 
 /*
  * Adjust the global offset table
@@ -641,9 +642,10 @@ ENTRY(trampoline_32bit_src)
 	lret
 
 	.code64
-.Lpaging_enabled:
+SYM_FUNC_START_LOCAL_NOALIGN(.Lpaging_enabled)
 	/* Return from the trampoline */
 	jmp	*%rdi
+SYM_FUNC_END(.Lpaging_enabled)
 
 	/*
 	 * The trampoline code has a size limit.
@@ -653,11 +655,12 @@ ENTRY(trampoline_32bit_src)
 	.org	trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_SIZE
 
 	.code32
-.Lno_longmode:
+SYM_FUNC_START_LOCAL_NOALIGN(.Lno_longmode)
 	/* This isn't an x86-64 CPU, so hang intentionally, we cannot continue */
 1:
 	hlt
 	jmp	1b
+SYM_FUNC_END(.Lno_longmode)
 
 #include "../../kernel/verify_cpu.S"
 
diff --git a/arch/x86/boot/pmjump.S b/arch/x86/boot/pmjump.S
index ea88d52eeac70..81658fe353808 100644
--- a/arch/x86/boot/pmjump.S
+++ b/arch/x86/boot/pmjump.S
@@ -46,7 +46,7 @@ ENDPROC(protected_mode_jump)
 
 	.code32
 	.section ".text32","ax"
-.Lin_pm32:
+SYM_FUNC_START_LOCAL_NOALIGN(.Lin_pm32)
 	# Set up data segments for flat 32-bit mode
 	movl	%ecx, %ds
 	movl	%ecx, %es
@@ -72,4 +72,4 @@ ENDPROC(protected_mode_jump)
 	lldt	%cx
 	jmpl	*%eax			# Jump to the 32-bit entrypoint
 
-ENDPROC(.Lin_pm32)
+SYM_FUNC_END(.Lin_pm32)
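For reviewers who have not followed the asm annotation series: the backport
replaces bare local labels (and one ENDPROC) with explicit start/end markers
for local, unaligned assembly routines. Below is a minimal sketch of that
pattern, assuming the SYM_FUNC_START_LOCAL_NOALIGN/SYM_FUNC_END macros from
<linux/linkage.h> are available in the queue-5.4 tree; .Lexample_helper is a
made-up routine used only for illustration, not taken from the patch:

    /* Illustrative sketch only -- not part of the patch above. */
    #include <linux/linkage.h>

    	.text
    /* Previously this would have been a bare ".Lexample_helper:" label,
     * with no marker for where the routine ends. */
    SYM_FUNC_START_LOCAL_NOALIGN(.Lexample_helper)
    	xorl	%eax, %eax	/* body of the local helper */
    	ret
    SYM_FUNC_END(.Lexample_helper)

As the commit message notes, the annotations do not generate anything yet;
they mark where each local function starts and ends, and this patch was
queued as a dependency of 264b82fdb498.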