Use alternatives to reduce untagged_addr() overhead.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx>
---
 arch/x86/include/asm/disabled-features.h |  8 ++++-
 arch/x86/include/asm/uaccess.h           | 41 +++++++++++++++++-------
 2 files changed, 37 insertions(+), 12 deletions(-)

diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index c44b56f7ffba..3f0c31044f02 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -75,6 +75,12 @@
 # define DISABLE_CALL_DEPTH_TRACKING	(1 << (X86_FEATURE_CALL_DEPTH & 31))
 #endif
 
+#ifdef CONFIG_ADDRESS_MASKING
+# define DISABLE_LAM		0
+#else
+# define DISABLE_LAM		(1 << (X86_FEATURE_LAM & 31))
+#endif
+
 #ifdef CONFIG_INTEL_IOMMU_SVM
 # define DISABLE_ENQCMD	0
 #else
@@ -115,7 +121,7 @@
 #define DISABLED_MASK10	0
 #define DISABLED_MASK11	(DISABLE_RETPOLINE|DISABLE_RETHUNK|DISABLE_UNRET| \
 			 DISABLE_CALL_DEPTH_TRACKING)
-#define DISABLED_MASK12	0
+#define DISABLED_MASK12	(DISABLE_LAM)
 #define DISABLED_MASK13	0
 #define DISABLED_MASK14	0
 #define DISABLED_MASK15	0
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index fd9182951084..6450a2723bcd 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -9,6 +9,7 @@
 #include <linux/kasan-checks.h>
 #include <linux/mm_types.h>
 #include <linux/string.h>
+#include <linux/mmap_lock.h>
 #include <asm/asm.h>
 #include <asm/page.h>
 #include <asm/smap.h>
@@ -30,26 +31,44 @@ static inline bool pagefault_disabled(void);
  * Magic with the 'sign' allows to untag userspace pointer without any branches
  * while leaving kernel addresses intact.
  */
-static inline unsigned long __untagged_addr(unsigned long addr,
-					    unsigned long mask)
+static inline unsigned long __untagged_addr(unsigned long addr)
 {
-	long sign = addr >> 63;
+	long sign;
+
+	/*
+	 * Refer to tlbstate_untag_mask directly to avoid a RIP-relative
+	 * relocation in alternative instructions. The relocation goes wrong
+	 * when it gets copied to the target place.
+	 */
+	asm (ALTERNATIVE("",
+			 "sar $63, %[sign]\n\t" /* user_ptr ? 0 : -1UL */
+			 "or %%gs:tlbstate_untag_mask, %[sign]\n\t"
+			 "and %[sign], %[addr]\n\t", X86_FEATURE_LAM)
+	     : [addr] "+r" (addr), [sign] "=r" (sign)
+	     : "m" (tlbstate_untag_mask), "[sign]" (addr));
 
-	addr &= mask | sign;
 	return addr;
 }
 
 #define untagged_addr(addr)	({					\
-	u64 __addr = (__force u64)(addr);				\
-	__addr = __untagged_addr(__addr, current_untag_mask());	\
-	(__force __typeof__(addr))__addr;				\
+	unsigned long __addr = (__force unsigned long)(addr);		\
+	(__force __typeof__(addr))__untagged_addr(__addr);		\
 })
 
+static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
+						   unsigned long addr)
+{
+	long sign = addr >> 63;
+
+	mmap_assert_locked(mm);
+	addr &= (mm)->context.untag_mask | sign;
+
+	return addr;
+}
+
 #define untagged_addr_remote(mm, addr)	({				\
-	u64 __addr = (__force u64)(addr);				\
-	mmap_assert_locked(mm);						\
-	__addr = __untagged_addr(__addr, (mm)->context.untag_mask);	\
-	(__force __typeof__(addr))__addr;				\
+	unsigned long __addr = (__force unsigned long)(addr);		\
+	(__force __typeof__(addr))__untagged_addr_remote(mm, __addr);	\
 })
 
 #else
-- 
2.39.1
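
For readers following along, the sketch below shows the same branchless
untagging trick in stand-alone C, outside the kernel. The UNTAG_MASK value
(a LAM_U57-style mask clearing bits 62:57) and the example pointers are made
up for illustration only; they are not taken from the patch, and the sketch
assumes an LP64 target such as x86-64.

/* untag_sketch.c: branchless untagging, as in __untagged_addr_remote() */
#include <stdio.h>

/* Hypothetical LAM_U57-style untag mask: clear bits 62:57, keep bit 63. */
#define UNTAG_MASK	0x81ffffffffffffffUL

static unsigned long untag(unsigned long addr)
{
	/* 0 for user pointers (bit 63 clear), -1UL for kernel pointers. */
	long sign = (long)addr >> 63;

	/*
	 * User pointer: sign is 0, so the mask clears the tag bits.
	 * Kernel pointer: sign is all-ones, so the address stays intact.
	 */
	return addr & (UNTAG_MASK | sign);
}

int main(void)
{
	unsigned long tagged_user = 0x3e00001234567000UL;	/* tag in 62:57 */
	unsigned long kernel_ptr  = 0xffff888012345000UL;

	printf("user:   %#lx -> %#lx\n", tagged_user, untag(tagged_user));
	printf("kernel: %#lx -> %#lx\n", kernel_ptr, untag(kernel_ptr));
	return 0;
}

With these made-up values, the tagged user pointer comes back as
0x1234567000 while the kernel pointer is returned unchanged, which is the
behaviour the ALTERNATIVE sequence in the patch implements when
X86_FEATURE_LAM is set.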