From: Hugh Dickins <hughd@xxxxxxxxxx>

Now that we're playing the ALTERNATIVE game, use that more efficient
method: instead of user-mapping an extra page, and reading an extra
cacheline each time for x86_cr3_pcid_noflush.

Neel has found that __stringify(bts $X86_CR3_PCID_NOFLUSH_BIT, %rax)
is a working substitute for the "bts $63, %rax" in these ALTERNATIVEs;
but the one line with $63 in looks clearer, so let's stick with that.

Worried about what happens with an ALTERNATIVE between the jump and
jump label in another ALTERNATIVE?  I was, but have checked the
combinations in SWITCH_KERNEL_CR3_NO_STACK at entry_SYSCALL_64, and it
does a good job.

Signed-off-by: Hugh Dickins <hughd@xxxxxxxxxx>
Acked-by: Jiri Kosina <jkosina@xxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
(cherry picked from commit 2dff99eb0335f9e0817410696a180dba25ca7371)
Signed-off-by: Pavel Tatashin <pasha.tatashin@xxxxxxxxxx>

Conflicts:
	arch/x86/entry/entry_64.S (not in this tree)
	arch/x86/kernel/entry_64.S (patched instead of that)
---
 arch/x86/include/asm/kaiser.h |  6 +++---
 arch/x86/kernel/entry_64.S    |  7 ++++---
 arch/x86/mm/kaiser.c          | 11 +----------
 3 files changed, 8 insertions(+), 16 deletions(-)

diff --git a/arch/x86/include/asm/kaiser.h b/arch/x86/include/asm/kaiser.h
index 96643a9c194c..906150d6094e 100644
--- a/arch/x86/include/asm/kaiser.h
+++ b/arch/x86/include/asm/kaiser.h
@@ -25,7 +25,8 @@
 .macro _SWITCH_TO_KERNEL_CR3 reg
 movq %cr3, \reg
 andq $(~(X86_CR3_PCID_ASID_MASK | KAISER_SHADOW_PGD_OFFSET)), \reg
-orq x86_cr3_pcid_noflush, \reg
+/* If PCID enabled, set X86_CR3_PCID_NOFLUSH_BIT */
+ALTERNATIVE "", "bts $63, \reg", X86_FEATURE_PCID
 movq \reg, %cr3
 .endm
 
@@ -39,7 +40,7 @@ movq \reg, %cr3
 movq %cr3, \reg
 orq PER_CPU_VAR(x86_cr3_pcid_user), \reg
 js 9f
-/* FLUSH this time, reset to NOFLUSH for next time (if PCID enabled) */
+/* If PCID enabled, FLUSH this time, reset to NOFLUSH for next time */
 movb \regb, PER_CPU_VAR(x86_cr3_pcid_user+7)
 9:
 movq \reg, %cr3
@@ -90,7 +91,6 @@ movq PER_CPU_VAR(unsafe_stack_register_backup), %rax
 */
 DECLARE_PER_CPU_USER_MAPPED(unsigned long, unsafe_stack_register_backup);
 
-extern unsigned long x86_cr3_pcid_noflush;
 DECLARE_PER_CPU(unsigned long, x86_cr3_pcid_user);
 
 extern char __per_cpu_user_mapped_start[], __per_cpu_user_mapped_end[];
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 71189e3d1073..f681e3bc6c22 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1332,7 +1332,8 @@ ENTRY(paranoid_entry)
 	jz	2f
 	orl	$2, %ebx
 	andq	$(~(X86_CR3_PCID_ASID_MASK | KAISER_SHADOW_PGD_OFFSET)), %rax
-	orq	x86_cr3_pcid_noflush, %rax
+	/* If PCID enabled, set X86_CR3_PCID_NOFLUSH_BIT */
+	ALTERNATIVE "", "bts $63, %rax", X86_FEATURE_PCID
 	movq	%rax, %cr3
 2:
 #endif
@@ -1568,7 +1569,7 @@ ENTRY(nmi)
 	/* %rax is saved above, so OK to clobber here */
 	ALTERNATIVE "jmp 2f", "movq %cr3, %rax", X86_FEATURE_KAISER
 	/* If PCID enabled, NOFLUSH now and NOFLUSH on return */
-	orq	x86_cr3_pcid_noflush, %rax
+	ALTERNATIVE "", "bts $63, %rax", X86_FEATURE_PCID
 	pushq	%rax
 	/* mask off "user" bit of pgd address and 12 PCID bits: */
 	andq	$(~(X86_CR3_PCID_ASID_MASK | KAISER_SHADOW_PGD_OFFSET)), %rax
@@ -1807,7 +1808,7 @@ end_repeat_nmi:
 	/* %rax is saved above, so OK to clobber here */
 	ALTERNATIVE "jmp 2f", "movq %cr3, %rax", X86_FEATURE_KAISER
 	/* If PCID enabled, NOFLUSH now and NOFLUSH on return */
-	orq	x86_cr3_pcid_noflush, %rax
+	ALTERNATIVE "", "bts $63, %rax", X86_FEATURE_PCID
 	pushq	%rax
 	/* mask off "user" bit of pgd address and 12 PCID bits: */
 	andq	$(~(X86_CR3_PCID_ASID_MASK | KAISER_SHADOW_PGD_OFFSET)), %rax
diff --git a/arch/x86/mm/kaiser.c b/arch/x86/mm/kaiser.c
index 88b4526d57a5..a8ce2e4737e0 100644
--- a/arch/x86/mm/kaiser.c
+++ b/arch/x86/mm/kaiser.c
@@ -32,7 +32,6 @@ DEFINE_PER_CPU_USER_MAPPED(unsigned long, unsafe_stack_register_backup);
  * This is also handy because systems that do not support PCIDs
  * just end up or'ing a 0 into their CR3, which does no harm.
  */
-unsigned long x86_cr3_pcid_noflush __read_mostly;
 DEFINE_PER_CPU(unsigned long, x86_cr3_pcid_user);
 
 /*
@@ -357,10 +356,6 @@ void __init kaiser_init(void)
 	kaiser_add_user_map_early(&debug_idt_table,
 				  sizeof(gate_desc) * NR_VECTORS,
 				  __PAGE_KERNEL);
-
-	kaiser_add_user_map_early(&x86_cr3_pcid_noflush,
-				  sizeof(x86_cr3_pcid_noflush),
-				  __PAGE_KERNEL);
 }
 
 /* Add a mapping to the shadow mapping, and synchronize the mappings */
@@ -434,18 +429,14 @@ pgd_t kaiser_set_shadow_pgd(pgd_t *pgdp, pgd_t pgd)
 
 void kaiser_setup_pcid(void)
 {
-	unsigned long kern_cr3 = 0;
 	unsigned long user_cr3 = KAISER_SHADOW_PGD_OFFSET;
 
-	if (this_cpu_has(X86_FEATURE_PCID)) {
-		kern_cr3 |= X86_CR3_PCID_KERN_NOFLUSH;
+	if (this_cpu_has(X86_FEATURE_PCID))
 		user_cr3 |= X86_CR3_PCID_USER_NOFLUSH;
-	}
 
 	/*
 	 * These variables are used by the entry/exit
 	 * code to change PCID and pgd and TLB flushing.
 	 */
-	x86_cr3_pcid_noflush = kern_cr3;
 	this_cpu_write(x86_cr3_pcid_user, user_cr3);
 }
-- 
2.16.2
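
(Note following the patch, not part of it: X86_CR3_PCID_NOFLUSH_BIT is bit 63 of
the value written to CR3, so "bts $63, %rax" sets exactly the bit that the old
x86_cr3_pcid_noflush variable carried, and ALTERNATIVE "", "bts $63, %rax",
X86_FEATURE_PCID lets boot-time patching emit that single instruction only on
CPUs with PCID.  The user-space C below is a rough, illustrative sketch of the
before/after logic under those assumptions; the names mirror the kernel's, but
it is not kernel code.)

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Bit 63 of CR3: suppress TLB flush for this PCID (X86_CR3_PCID_NOFLUSH_BIT). */
#define X86_CR3_PCID_KERN_NOFLUSH (1ULL << 63)

/* Old scheme: a user-mapped variable, loaded on every kernel-CR3 switch. */
static uint64_t x86_cr3_pcid_noflush;   /* 0, or X86_CR3_PCID_KERN_NOFLUSH */

static uint64_t old_kernel_cr3(uint64_t cr3)
{
	return cr3 | x86_cr3_pcid_noflush;   /* extra cacheline read each time */
}

/* New scheme: the effect of ALTERNATIVE "", "bts $63, %rax", X86_FEATURE_PCID */
static uint64_t new_kernel_cr3(uint64_t cr3, bool cpu_has_pcid)
{
	if (cpu_has_pcid)                    /* decided once, at patch time */
		cr3 |= X86_CR3_PCID_KERN_NOFLUSH;
	return cr3;
}

int main(void)
{
	uint64_t kernel_pgd = 0x1234000;     /* made-up pgd address for the demo */
	bool cpu_has_pcid = true;

	x86_cr3_pcid_noflush = cpu_has_pcid ? X86_CR3_PCID_KERN_NOFLUSH : 0;
	assert(old_kernel_cr3(kernel_pgd) == new_kernel_cr3(kernel_pgd, cpu_has_pcid));
	printf("both schemes yield %#llx\n",
	       (unsigned long long)new_kernel_cr3(kernel_pgd, cpu_has_pcid));
	return 0;
}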