On 21/03/2016 11:05, Huaitong Han wrote:
>  static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
> -                                  unsigned pte_access, unsigned pfec)
> +                                  unsigned pte_access, unsigned pte_pkey,
> +                                  unsigned pfec)
>  {
>          int cpl = kvm_x86_ops->get_cpl(vcpu);
>          unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
> +        unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
> +        int index = (pfec >> 1) +
> +                    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
> +
> +        if (unlikely(mmu->pkru_mask)) {
> +                u32 pkru_bits, offset;
> +
> +                WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
> +
> +                /*
> +                 * PKRU defines 32 bits, there are 16 domains and 2
> +                 * attribute bits per domain in pkru, pkey is the
> +                 * index to a defined domain, so the value of
> +                 * pkey * 2 is offset of a defined domain.
> +                 */
> +                pkru_bits = (kvm_read_pkru(vcpu) >> (pte_pkey * 2)) & 3;
> +                /* replace PFEC.RSVD with ACC_USER_MASK. */
> +                offset = pfec | ((pte_access & PT_USER_MASK) <<
> +                                 (PFERR_RSVD_BIT - PT_USER_SHIFT));
> +
> +                pkru_bits &= mmu->pkru_mask >> (offset & ~1);
> +                pfec |= -pkru_bits & PFERR_PK_MASK;
> +        }
>
>          /*
>           * If CPL < 3, SMAP prevention are disabled if EFLAGS.AC = 1.
> @@ -167,14 +192,12 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
>           * but it will be one in index if SMAP checks are being overridden.
>           * It is important to keep this branchless.
>           */
> -        unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
> -        int index = (pfec >> 1) +
> -                    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
>
>          WARN_ON(pfec & PFERR_RSVD_MASK);
>
>          pfec |= PFERR_PRESENT_MASK;
> -        return -((mmu->permissions[index] >> pte_access) & 1) & pfec;
> +        return -(((pfec >> PFERR_PK_BIT) |
> +                  (mmu->permissions[index] >> pte_access)) & 1) & pfec;
>  }
>
>  void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
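
As an aside for readers without the SDM at hand: PKRU holds two bits per
protection key, AD (access disable) at bit 2*key and WD (write disable)
at bit 2*key + 1, which is why "(pkru >> (pkey * 2)) & 3" isolates the
pair for one key.  A minimal user-space sketch of just that lookup (an
illustration only, not part of either patch; the PKRU value is made up):

    #include <stdint.h>
    #include <stdio.h>

    /* Same shift-and-mask as pkru_bits in the patch: 2 bits per key. */
    static uint32_t pkru_bits_for_key(uint32_t pkru, unsigned pkey)
    {
            return (pkru >> (pkey * 2)) & 3;
    }

    int main(void)
    {
            uint32_t pkru = 0x0000000c; /* hypothetical: key 1 has AD and WD set */

            printf("key 0: %u\n", (unsigned)pkru_bits_for_key(pkru, 0)); /* prints 0 */
            printf("key 1: %u\n", (unsigned)pkru_bits_for_key(pkru, 1)); /* prints 3 */
            return 0;
    }

A nonzero pair, once masked against mmu->pkru_mask, is what ultimately
sets PFERR_PK_MASK in pfec.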
Slightly cleaner:

1) keep the WARN_ON checks together

2) keep the SMAP comment close to the smap variable

3) build the expression for the final return one piece at a time

Does it look good?

Thanks,

Paolo

@@ -149,7 +150,8 @@ static inline bool is_write_protection(struct kvm_vcpu *vcpu)
  * if the access faults.
  */
 static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-                                  unsigned pte_access, unsigned pfec)
+                                  unsigned pte_access, unsigned pte_pkey,
+                                  unsigned pfec)
 {
         int cpl = kvm_x86_ops->get_cpl(vcpu);
         unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
@@ -170,11 +172,32 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
         unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
         int index = (pfec >> 1) +
                     (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
+        bool fault = (mmu->permissions[index] >> pte_access) & 1;

-        WARN_ON(pfec & PFERR_RSVD_MASK);
-
+        WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
         pfec |= PFERR_PRESENT_MASK;
-        return -((mmu->permissions[index] >> pte_access) & 1) & pfec;
+
+        if (unlikely(mmu->pkru_mask)) {
+                u32 pkru_bits, offset;
+
+                /*
+                 * PKRU defines 32 bits, there are 16 domains and 2
+                 * attribute bits per domain in pkru, pkey is the
+                 * index to a defined domain, so the value of
+                 * pkey * 2 is offset of a defined domain.
+                 */
+                pkru_bits = (kvm_read_pkru(vcpu) >> (pte_pkey * 2)) & 3;
+
+                /* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
+                offset = pfec - 1 +
+                         ((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));
+
+                pkru_bits &= mmu->pkru_mask >> offset;
+                pfec |= -pkru_bits & PFERR_PK_MASK;
+                fault |= (pkru_bits != 0);
+        }
+
+        return -(uint32_t)fault & pfec;
 }

 void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
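
Two branchless idioms in this version may be worth spelling out: since
"pfec |= PFERR_PRESENT_MASK" has just guaranteed that bit 0 is set,
"pfec - 1" clears exactly the present bit; and "-(uint32_t)fault & pfec"
evaluates to pfec when fault is true and to 0 when it is false.  A
minimal user-space sketch of both tricks (illustration only; it reuses
the kernel's bit position for PFERR_PRESENT_MASK but nothing else):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define PFERR_PRESENT_MASK (1u << 0)    /* same bit position as the kernel's */

    /* -(uint32_t)fault is all-ones when fault is true, zero otherwise. */
    static uint32_t select_pfec(bool fault, uint32_t pfec)
    {
            return -(uint32_t)fault & pfec;
    }

    int main(void)
    {
            uint32_t pfec = 0x6;            /* say, write|user fault bits */

            pfec |= PFERR_PRESENT_MASK;     /* bit 0 now known to be set */
            assert(pfec - 1 == 0x6);        /* subtracting 1 clears only bit 0 */

            assert(select_pfec(true, pfec) == pfec);
            assert(select_pfec(false, pfec) == 0);
            return 0;
    }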