This is a note to let you know that I've just added the patch titled

    x86: Share definition of __is_canonical_address()

to the 5.10-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     x86-share-definition-of-__is_canonical_address.patch
and it can be found in the queue-5.10 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.


commit 24e5ea69289774ab392564ccf47dab38fc48f865
Author: Adrian Hunter <adrian.hunter@xxxxxxxxx>
Date:   Mon Jan 31 09:24:50 2022 +0200

    x86: Share definition of __is_canonical_address()

    [ Upstream commit 1fb85d06ad6754796cd1b920639ca9d8840abefd ]

    Reduce code duplication by moving canonical address code to a common
    header file.

    Signed-off-by: Adrian Hunter <adrian.hunter@xxxxxxxxx>
    Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
    Link: https://lore.kernel.org/r/20220131072453.2839535-3-adrian.hunter@xxxxxxxxx
    Stable-dep-of: f79936545fb1 ("x86/sev-es: Allow copy_from_kernel_nofault() in earlier boot")
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>

diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index d87421acddc39..5667b8b994e34 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -1360,20 +1360,10 @@ static void pt_addr_filters_fini(struct perf_event *event)
 }
 
 #ifdef CONFIG_X86_64
-static u64 canonical_address(u64 vaddr, u8 vaddr_bits)
-{
-	return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
-}
-
-static u64 is_canonical_address(u64 vaddr, u8 vaddr_bits)
-{
-	return canonical_address(vaddr, vaddr_bits) == vaddr;
-}
-
 /* Clamp to a canonical address greater-than-or-equal-to the address given */
 static u64 clamp_to_ge_canonical_addr(u64 vaddr, u8 vaddr_bits)
 {
-	return is_canonical_address(vaddr, vaddr_bits) ?
+	return __is_canonical_address(vaddr, vaddr_bits) ?
 	       vaddr :
 	       -BIT_ULL(vaddr_bits - 1);
 }
@@ -1381,7 +1371,7 @@ static u64 clamp_to_ge_canonical_addr(u64 vaddr, u8 vaddr_bits)
 /* Clamp to a canonical address less-than-or-equal-to the address given */
 static u64 clamp_to_le_canonical_addr(u64 vaddr, u8 vaddr_bits)
 {
-	return is_canonical_address(vaddr, vaddr_bits) ?
+	return __is_canonical_address(vaddr, vaddr_bits) ?
 	       vaddr :
 	       BIT_ULL(vaddr_bits - 1) - 1;
 }
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index 7555b48803a8c..ffae5ea9fd4e1 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -71,6 +71,16 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
 extern bool __virt_addr_valid(unsigned long kaddr);
 #define virt_addr_valid(kaddr)	__virt_addr_valid((unsigned long) (kaddr))
 
+static __always_inline u64 __canonical_address(u64 vaddr, u8 vaddr_bits)
+{
+	return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
+}
+
+static __always_inline u64 __is_canonical_address(u64 vaddr, u8 vaddr_bits)
+{
+	return __canonical_address(vaddr, vaddr_bits) == vaddr;
+}
+
 #endif	/* __ASSEMBLY__ */
 
 #include <asm-generic/memory_model.h>
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 63efccc8f4292..56750febf4604 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -688,7 +688,7 @@ static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
 static inline bool emul_is_noncanonical_address(u64 la,
 						struct x86_emulate_ctxt *ctxt)
 {
-	return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
+	return !__is_canonical_address(la, ctxt_virt_addr_bits(ctxt));
 }
 
 /*
@@ -738,7 +738,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 	case X86EMUL_MODE_PROT64:
 		*linear = la;
 		va_bits = ctxt_virt_addr_bits(ctxt);
-		if (get_canonical(la, va_bits) != la)
+		if (!__is_canonical_address(la, va_bits))
 			goto bad;
 
 		*max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9d3015863e581..c2899ff31a068 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1640,7 +1640,7 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
 		 * value, and that something deterministic happens if the guest
 		 * invokes 64-bit SYSENTER.
 		 */
-		data = get_canonical(data, vcpu_virt_addr_bits(vcpu));
+		data = __canonical_address(data, vcpu_virt_addr_bits(vcpu));
 	}
 
 	msr.data = data;
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 2bff44f1efec8..4037b3cc704e8 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -156,14 +156,9 @@ static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
 	return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
 }
 
-static inline u64 get_canonical(u64 la, u8 vaddr_bits)
-{
-	return ((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits);
-}
-
 static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
 {
-	return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la;
+	return !__is_canonical_address(la, vcpu_virt_addr_bits(vcpu));
 }
 
 static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/mm/maccess.c b/arch/x86/mm/maccess.c
index 92ec176a72937..5a53c2cc169cc 100644
--- a/arch/x86/mm/maccess.c
+++ b/arch/x86/mm/maccess.c
@@ -4,11 +4,6 @@
 #include <linux/kernel.h>
 
 #ifdef CONFIG_X86_64
-static __always_inline u64 canonical_address(u64 vaddr, u8 vaddr_bits)
-{
-	return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
-}
-
 bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
 {
 	unsigned long vaddr = (unsigned long)unsafe_src;
@@ -19,7 +14,7 @@ bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
 	 * we also need to include the userspace guard page.
 	 */
 	return vaddr >= TASK_SIZE_MAX + PAGE_SIZE &&
-	       canonical_address(vaddr, boot_cpu_data.x86_virt_bits) == vaddr;
+	       __is_canonical_address(vaddr, boot_cpu_data.x86_virt_bits);
 }
 #else
 bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
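For readers unfamiliar with the sign-extension trick used by __canonical_address() in the patch above, the following standalone C sketch (an illustration only, not part of the patch) shows the idea: shift the address up so that bit (vaddr_bits - 1) becomes the sign bit, then shift back arithmetically, which replicates that bit into all upper bits; an address is canonical exactly when this round trip leaves it unchanged. The 48-bit width and the sample addresses below are assumptions chosen for the example, and the left shift here is done on an unsigned value to stay within standard C, whereas the kernel helper shifts an s64 under the kernel's build flags.

/* Illustration only -- not part of the patch above. */
#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the __canonical_address() idea: sign-extend from bit
 * (vaddr_bits - 1). The right shift of a negative int64_t relies on the
 * compiler using an arithmetic shift, as gcc and clang do (and as the
 * kernel code itself assumes).
 */
static uint64_t canonical(uint64_t vaddr, uint8_t vaddr_bits)
{
	return (uint64_t)((int64_t)(vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits));
}

int main(void)
{
	uint64_t user = 0x00007ffffffff000ULL; /* canonical: bit 47 clear, upper bits clear */
	uint64_t kern = 0xffff800000000000ULL; /* canonical: bit 47 set, upper bits set */
	uint64_t bad  = 0x0000800000000000ULL; /* non-canonical: bit 47 set, upper bits clear */

	printf("user: %d, kern: %d, bad: %d\n",
	       canonical(user, 48) == user,  /* 1 */
	       canonical(kern, 48) == kern,  /* 1 */
	       canonical(bad, 48) == bad);   /* 0 */
	return 0;
}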