> +static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
> +{
> +        struct vcpu_vmx *vmx = to_vmx(vcpu);
> +        u64 mask = address & 0x7;
> +        int maxphyaddr = cpuid_maxphyaddr(vcpu);
> +
> +        /* Check for memory type validity */
> +        switch (mask) {
> +        case 0:
> +                if (!(vmx->nested.nested_vmx_ept_caps & VMX_EPTP_UC_BIT))
> +                        return false;
> +                break;
> +        case 6:
> +                if (!(vmx->nested.nested_vmx_ept_caps & VMX_EPTP_WB_BIT))
> +                        return false;
> +                break;
> +        default:
> +                return false;
> +        }
> +
> +        /* Bits 5:3 must be 3 */
> +        if (((address >> VMX_EPT_GAW_EPTP_SHIFT) & 0x7) != VMX_EPT_DEFAULT_GAW)
> +                return false;
> +
> +        /* Reserved bits should not be set */
> +        if (address >> maxphyaddr || ((address >> 7) & 0x1f))
> +                return false;
> +
> +        /* AD, if set, should be supported */
> +        if ((address & VMX_EPT_AD_ENABLE_BIT)) {
> +                if (!enable_ept_ad_bits)
> +                        return false;

In theory (I guess) we would also have to check here that AD is
actually supported, e.g.

        if (!(vmx->nested.nested_vmx_ept_caps & VMX_EPT_AD_BIT))
                return false;

But I am no expert on this.

> +        }
> +
> +        return true;
> +}
> +
> +static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
> +                                     struct vmcs12 *vmcs12)
> +{
> +        u32 index = vcpu->arch.regs[VCPU_REGS_RCX];
> +        u64 *l1_eptp_list, address;
> +        struct page *page;
> +        bool accessed_dirty;
> +        struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
> +
> +        if (!nested_cpu_has_eptp_switching(vmcs12) ||
> +            !nested_cpu_has_ept(vmcs12))
> +                return 1;
> +
> +        if (index >= VMFUNC_EPTP_ENTRIES)
> +                return 1;
> +
> +        page = nested_get_page(vcpu, vmcs12->eptp_list_address);
> +        if (!page)
> +                return 1;
> +
> +        l1_eptp_list = kmap(page);
> +        address = l1_eptp_list[index];
> +        accessed_dirty = !!(address & VMX_EPT_AD_ENABLE_BIT);

Minor nit: Can't you directly do

        kunmap(page);
        nested_release_page_clean(page);

at this point? We can fix this up later.

We could even later factor this out into sth. like
"nested_vmx_read_guest" (rough sketch below).

-- 

Thanks,

David
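
P.S.: To sketch what such a "nested_vmx_read_guest" could look like
(the name is from above; the signature, return convention and error
handling are purely made up by me):

        /*
         * Hypothetical helper: map the guest page backing @gpa, copy
         * @len bytes into @data and drop the page reference again, so
         * callers don't have to pair kmap()/kunmap() by hand.  Assumes
         * the read does not cross a page boundary.
         */
        static int nested_vmx_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa,
                                         void *data, unsigned int len)
        {
                struct page *page;
                void *kaddr;

                page = nested_get_page(vcpu, gpa);
                if (!page)
                        return -EINVAL;

                kaddr = kmap(page);
                memcpy(data, kaddr + offset_in_page(gpa), len);
                kunmap(page);
                nested_release_page_clean(page);

                return 0;
        }

nested_vmx_eptp_switching() would then only have to do sth. like

        if (nested_vmx_read_guest(vcpu,
                                  vmcs12->eptp_list_address +
                                  index * sizeof(address),
                                  &address, sizeof(address)))
                return 1;

and all the map/unmap handling lives in one place.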