To support execute-only mappings on behalf of L1 hypervisors, teach
set_spte() to honor L1's valid XWR bits. This is done only when the host
supports EPT execute-only mappings. Use ACC_USER_MASK to signify whether
the L1 hypervisor has the present (readable) bit set.

Signed-off-by: Bandan Das <bsd@xxxxxxxxxx>
---
 arch/x86/kvm/mmu.c         | 11 ++++++++---
 arch/x86/kvm/paging_tmpl.h |  2 +-
 2 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 57d8696..3ca1a99 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2528,7 +2528,8 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
 		return 0;
 
-	spte = PT_PRESENT_MASK;
+	if (!shadow_xonly_valid)
+		spte = PT_PRESENT_MASK;
 	if (!speculative)
 		spte |= shadow_accessed_mask;
 
@@ -2537,8 +2538,12 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	else
 		spte |= shadow_nx_mask;
 
-	if (pte_access & ACC_USER_MASK)
-		spte |= shadow_user_mask;
+	if (pte_access & ACC_USER_MASK) {
+		if (shadow_xonly_valid)
+			spte |= PT_PRESENT_MASK;
+		else
+			spte |= shadow_user_mask;
+	}
 
 	if (level > PT_PAGE_TABLE_LEVEL)
 		spte |= PT_PAGE_SIZE_MASK;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 9f5bd06..5366a55 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -192,7 +192,7 @@ static inline unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, u64 gpte)
 #if PTTYPE == PTTYPE_EPT
 	access = ((gpte & VMX_EPT_WRITABLE_MASK) ? ACC_WRITE_MASK : 0) |
 		((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK : 0) |
-		ACC_USER_MASK;
+		((gpte & VMX_EPT_READABLE_MASK) ? ACC_USER_MASK : 0);
 #else
 	BUILD_BUG_ON(ACC_EXEC_MASK != PT_PRESENT_MASK);
 	BUILD_BUG_ON(ACC_EXEC_MASK != 1);
-- 
2.5.5
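
For reference, the combined effect of the two hunks can be modeled outside the
kernel as a small standalone program. The ACC_*, VMX_EPT_* and PT_PRESENT_MASK
values below mirror the kernel's definitions; ept_gpte_access() and
spte_perm_bits() are hypothetical reductions of FNAME(gpte_access) and the
set_spte() change, written only to illustrate the permission mapping, not
actual kernel code.

/*
 * Illustrative sketch (not part of the patch): how L1's EPT R/W/X bits
 * become ACC_* flags and then present/user bits in the shadow PTE,
 * with and without host execute-only support.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VMX_EPT_READABLE_MASK	0x1ull
#define VMX_EPT_WRITABLE_MASK	0x2ull
#define VMX_EPT_EXECUTABLE_MASK	0x4ull

#define ACC_EXEC_MASK	1u
#define ACC_WRITE_MASK	2u
#define ACC_USER_MASK	4u

#define PT_PRESENT_MASK	0x1ull

/* After the patch, ACC_USER_MASK tracks L1's readable bit instead of
 * being set unconditionally for EPT guest PTEs. */
static unsigned ept_gpte_access(uint64_t gpte)
{
	return ((gpte & VMX_EPT_WRITABLE_MASK) ? ACC_WRITE_MASK : 0) |
	       ((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK : 0) |
	       ((gpte & VMX_EPT_READABLE_MASK) ? ACC_USER_MASK : 0);
}

/* Reduced model of the set_spte() hunks: when the host supports EPT
 * execute-only (shadow_xonly_valid), the present/readable bit is set
 * only if L1 marked the page readable (ACC_USER_MASK).  The execute
 * permission itself (shadow_x_mask) is applied elsewhere in set_spte()
 * and is not modeled here. */
static uint64_t spte_perm_bits(unsigned pte_access, bool shadow_xonly_valid,
			       uint64_t shadow_user_mask)
{
	uint64_t spte = 0;

	if (!shadow_xonly_valid)
		spte = PT_PRESENT_MASK;

	if (pte_access & ACC_USER_MASK) {
		if (shadow_xonly_valid)
			spte |= PT_PRESENT_MASK;
		else
			spte |= shadow_user_mask;
	}
	return spte;
}

int main(void)
{
	/* L1 installs an execute-only EPT mapping: X set, R and W clear. */
	unsigned access = ept_gpte_access(VMX_EPT_EXECUTABLE_MASK);

	printf("exec-only host: spte bits = %#llx\n",
	       (unsigned long long)spte_perm_bits(access, true, 0x4));
	printf("legacy host:    spte bits = %#llx\n",
	       (unsigned long long)spte_perm_bits(access, false, 0x4));
	return 0;
}

With execute-only support the readable bit stays clear for such a mapping,
whereas on a host without it the page would previously have been made present
(and user-accessible) regardless of L1's intent.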