From: Lai Jiangshan <jiangshan.ljs@xxxxxxxxxxxx>

The SMAP check, and therefore rflags, is only needed in
permission_fault() when the access is a supervisor access and SMAP is
enabled.

This information is already encoded in the combination of
mmu->permissions[] and the index, so use the encoded information to
decide whether the SMAP check is needed instead of reading rflags
unconditionally.

Signed-off-by: Lai Jiangshan <jiangshan.ljs@xxxxxxxxxxxx>
---
 arch/x86/kvm/mmu.h | 28 +++++++++++++++++++++++-----
 1 file changed, 23 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 4cb7a39ecd51..ceac1e9e21e9 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -218,13 +218,12 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 {
 	/* strip nested paging fault error codes */
 	unsigned int pfec = access;
-	unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);
 
 	/*
 	 * For explicit supervisor accesses, SMAP is disabled if EFLAGS.AC = 1.
 	 * For implicit supervisor accesses, SMAP cannot be overridden.
 	 *
-	 * SMAP works on supervisor accesses only, and not_smap can
+	 * SMAP works on supervisor accesses only, and the SMAP checking bit can
 	 * be set or not set when user access with neither has any bearing
 	 * on the result.
 	 *
@@ -233,11 +232,30 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 	 * if SMAP checks are being disabled.
 	 */
 	bool explicit_access = !(access & PFERR_IMPLICIT_ACCESS);
-	bool not_smap = (rflags & X86_EFLAGS_AC) && explicit_access;
-	int index = (pfec + (!!not_smap << PFERR_RSVD_BIT)) >> 1;
-	bool fault = (mmu->permissions[index] >> pte_access) & 1;
+	bool fault = (mmu->permissions[pfec >> 1] >> pte_access) & 1;
+	int index = (pfec + PFERR_RSVD_MASK) >> 1;
+	bool fault_not_smap = (mmu->permissions[index] >> pte_access) & 1;
 	u32 errcode = PFERR_PRESENT_MASK;
 
+	/*
+	 * The value of fault already includes the SMAP check when it is a
+	 * supervisor access and SMAP is enabled, as encoded in mmu->permissions.
+	 *
+	 *   fault	fault_not_smap
+	 *   0		0		no fault due to UWX or SMAP
+	 *   0		1		impossible combination
+	 *   1		1		fault due to UWX
+	 *   1		0		fault due to SMAP, need to check
+	 *				whether the SMAP check is suppressed
+	 *
+	 * The SMAP check is suppressed only when X86_EFLAGS_AC is set on an
+	 * explicit supervisor access.
+	 */
+	if (unlikely(fault && !fault_not_smap && explicit_access)) {
+		unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);
+		fault = !(rflags & X86_EFLAGS_AC);
+	}
+
 	WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
 	if (unlikely(mmu->pkru_mask)) {
 		u32 pkru_bits, offset;
-- 
2.19.1.6.gb485710b
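
For readers outside the KVM MMU code, the sketch below is a minimal
user-space toy model (not kernel code) of the double permissions[]
lookup this patch relies on. The PFERR_* values mirror the x86
page-fault error-code bit layout, but toy_permissions[] is hand-filled
for one illustrative case (a supervisor read of a user page) rather
than built by update_permission_bitmask(), each entry is a single bool
instead of the kernel's per-pte_access bitmap, and toy_permission_fault()
only stands in for the real permission_fault().

/*
 * Toy model of the double permissions[] lookup (user-space sketch, not
 * kernel code).  The bit layout mirrors PFERR_*: WRITE = bit 1, USER =
 * bit 2, RSVD = bit 3; permissions[] is indexed by pfec >> 1, and the
 * slot at the RSVD position is reused to mean "SMAP check disabled".
 */
#include <stdbool.h>
#include <stdio.h>

#define PFERR_WRITE_MASK	(1u << 1)
#define PFERR_USER_MASK		(1u << 2)
#define PFERR_RSVD_MASK		(1u << 3)

/*
 * Hand-filled toy table (the real one comes from
 * update_permission_bitmask()): a supervisor read of a user page,
 * pfec == 0, faults only because of SMAP, so the entry with the
 * RSVD/not_smap slot clear reports a fault and the entry with it set
 * does not.
 */
static const bool toy_permissions[16] = {
	[0]			= true,		/* SMAP checked */
	[PFERR_RSVD_MASK >> 1]	= false,	/* SMAP check disabled */
};

static bool toy_permission_fault(unsigned int pfec, bool eflags_ac,
				 bool explicit_access)
{
	/* First lookup: the SMAP check is already folded into the result. */
	bool fault = toy_permissions[pfec >> 1];
	/* Second lookup: same pfec, but with the not_smap slot selected. */
	bool fault_not_smap = toy_permissions[(pfec + PFERR_RSVD_MASK) >> 1];

	/*
	 * fault && !fault_not_smap means the fault is due to SMAP alone;
	 * only then does EFLAGS.AC need to be consulted.
	 */
	if (fault && !fault_not_smap && explicit_access)
		fault = !eflags_ac;

	return fault;
}

int main(void)
{
	/* Supervisor read of a user page: faults unless EFLAGS.AC = 1. */
	printf("AC=0: fault=%d\n", toy_permission_fault(0, false, true));
	printf("AC=1: fault=%d\n", toy_permission_fault(0, true, true));
	return 0;
}

In this toy, reading the AC flag only happens in the SMAP-only case
(fault set, fault_not_smap clear), which is the behaviour the patch
gives the real permission_fault() by dropping the unconditional
kvm_x86_get_rflags() call.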