On a randomly chosen distro kernel build for arm64, vmlinux.o shows the
following sections, containing jump label entries, and the associated
RELA relocation records, respectively:

  ...
  [38088] __jump_table      PROGBITS         0000000000000000  00e19f30
          000000000002ea10  0000000000000000  WA       0     0     8
  [38089] .rela__jump_table RELA             0000000000000000  01fd8bb0
          000000000008be30  0000000000000018   I      38178  38088     8
  ...

In other words, we have 190 KB worth of 'struct jump_entry' instances,
and 573 KB worth of RELA entries to relocate each entry's code, target
and key members. This means the RELA section occupies 10% of the .init
segment, and the two sections combined represent 5% of vmlinux's entire
memory footprint.

So let's switch from 64-bit absolute references to 32-bit relative
references for the 'code' and 'target' fields, and a 64-bit relative
reference for the 'key' field (which may reside in another module or in
the core kernel, and may therefore be more than 4 GB away on arm64 when
running with KASLR enabled): this reduces the size of the __jump_table
section by 33%, and gets rid of the RELA section entirely.

Acked-by: Will Deacon <will.deacon@xxxxxxx>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@xxxxxxxxxx>
---
 arch/arm64/Kconfig                  |  1 +
 arch/arm64/include/asm/jump_label.h | 36 +++++++++----------
 arch/arm64/kernel/jump_label.c      |  6 ++--
 3 files changed, 20 insertions(+), 23 deletions(-)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 1940c6405d04..d17aa9614e69 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -91,6 +91,7 @@ config ARM64
 	select HAVE_ARCH_BITREVERSE
 	select HAVE_ARCH_HUGE_VMAP
 	select HAVE_ARCH_JUMP_LABEL
+	select HAVE_ARCH_JUMP_LABEL_RELATIVE
 	select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_MMAP_RND_BITS
diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
index 1b5e0e843c3a..bb6d15b34668 100644
--- a/arch/arm64/include/asm/jump_label.h
+++ b/arch/arm64/include/asm/jump_label.h
@@ -26,13 +26,15 @@
 
 #define JUMP_LABEL_NOP_SIZE		AARCH64_INSN_SIZE
 
-static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+static __always_inline bool arch_static_branch(struct static_key *key,
+					       bool branch)
 {
-	asm goto("1: nop\n\t"
-		 ".pushsection __jump_table, \"aw\"\n\t"
-		 ".align 3\n\t"
-		 ".quad 1b, %l[l_yes], %c0\n\t"
-		 ".popsection\n\t"
+	asm goto("1:	nop					\n\t"
+		 "	.pushsection	__jump_table, \"aw\"	\n\t"
+		 "	.align		3			\n\t"
+		 "	.long		1b - ., %l[l_yes] - .	\n\t"
+		 "	.quad		%c0 - .			\n\t"
+		 "	.popsection				\n\t"
 		 :  :  "i"(&((char *)key)[branch]) :  : l_yes);
 
 	return false;
@@ -40,13 +42,15 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
 	return true;
 }
 
-static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+static __always_inline bool arch_static_branch_jump(struct static_key *key,
+						    bool branch)
 {
-	asm goto("1: b %l[l_yes]\n\t"
-		 ".pushsection __jump_table, \"aw\"\n\t"
-		 ".align 3\n\t"
-		 ".quad 1b, %l[l_yes], %c0\n\t"
-		 ".popsection\n\t"
+	asm goto("1:	b		%l[l_yes]		\n\t"
+		 "	.pushsection	__jump_table, \"aw\"	\n\t"
+		 "	.align		3			\n\t"
+		 "	.long		1b - ., %l[l_yes] - .	\n\t"
+		 "	.quad		%c0 - .			\n\t"
+		 "	.popsection				\n\t"
 		 :  :  "i"(&((char *)key)[branch]) :  : l_yes);
 
 	return false;
@@ -54,13 +58,5 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool
 	return true;
 }
 
-typedef u64 jump_label_t;
-
-struct jump_entry {
-	jump_label_t code;
-	jump_label_t target;
-	jump_label_t key;
-};
-
 #endif	/* __ASSEMBLY__ */
 #endif	/* __ASM_JUMP_LABEL_H */
diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c
index c2dd1ad3e648..903d17023d77 100644
--- a/arch/arm64/kernel/jump_label.c
+++ b/arch/arm64/kernel/jump_label.c
@@ -25,12 +25,12 @@
 void arch_jump_label_transform(struct jump_entry *entry,
 			       enum jump_label_type type)
 {
-	void *addr = (void *)entry->code;
+	void *addr = (void *)jump_entry_code(entry);
 	u32 insn;
 
 	if (type == JUMP_LABEL_JMP) {
-		insn = aarch64_insn_gen_branch_imm(entry->code,
-						   entry->target,
+		insn = aarch64_insn_gen_branch_imm(jump_entry_code(entry),
+						   jump_entry_target(entry),
 						   AARCH64_INSN_BRANCH_NOLINK);
 	} else {
 		insn = aarch64_insn_gen_nop();
-- 
2.17.1
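
As background: the patch works because selecting
HAVE_ARCH_JUMP_LABEL_RELATIVE switches the kernel to the generic
relative 'struct jump_entry' layout, which also provides the
jump_entry_code() and jump_entry_target() accessors used in the
jump_label.c hunk above. A minimal sketch of those generic definitions,
assuming the upstream include/linux/jump_label.h implementation
(simplified here, so treat the exact flag masking in jump_entry_key()
as illustrative):

  #include <linux/types.h>

  struct static_key;

  struct jump_entry {
  	s32 code;	/* patched instruction, relative to &entry->code */
  	s32 target;	/* branch target, relative to &entry->target */
  	long key;	/* static_key, relative to &entry->key; kept
  			 * 64-bit because the key may live in another
  			 * module or in the core kernel, possibly more
  			 * than 4 GB away under KASLR */
  };

  /* Turn a place-relative offset back into an absolute address. */
  static inline unsigned long jump_entry_code(const struct jump_entry *entry)
  {
  	return (unsigned long)&entry->code + entry->code;
  }

  static inline unsigned long jump_entry_target(const struct jump_entry *entry)
  {
  	return (unsigned long)&entry->target + entry->target;
  }

  static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
  {
  	long offset = entry->key & ~3L;	/* low bits carry type flags */

  	return (struct static_key *)((unsigned long)&entry->key + offset);
  }

Since each entry shrinks from 24 bytes to 16, the ~190 KB table above
drops to ~127 KB, and because every field is now place-relative, the
linker no longer has to emit any RELA records for the section.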