Read the data directly from guest memory instead of the map->read->unmap
sequence. This also avoids using kvm_vcpu_gpa_to_page() and kmap(), which
assume that there is a "struct page" for guest memory.

Suggested-by: Jim Mattson <jmattson@xxxxxxxxxx>
Signed-off-by: KarimAllah Ahmed <karahmed@xxxxxxxxx>
Reviewed-by: Jim Mattson <jmattson@xxxxxxxxxx>
Reviewed-by: David Hildenbrand <david@xxxxxxxxxx>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
---
v1 -> v2:
- Massage commit message a bit.
---
 arch/x86/kvm/vmx/nested.c | 14 +++-----------
 1 file changed, 3 insertions(+), 11 deletions(-)

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 8ff2052..11b44a9 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4192,7 +4192,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 {
 	int ret;
 	gpa_t vmptr;
-	struct page *page;
+	uint32_t revision;
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
 		| FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
@@ -4241,18 +4241,10 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 	if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
 		return nested_vmx_failInvalid(vcpu);
 
-	page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
-	if (is_error_page(page))
+	if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) ||
+	    revision != VMCS12_REVISION)
 		return nested_vmx_failInvalid(vcpu);
 
-	if (*(u32 *)kmap(page) != VMCS12_REVISION) {
-		kunmap(page);
-		kvm_release_page_clean(page);
-		return nested_vmx_failInvalid(vcpu);
-	}
-	kunmap(page);
-	kvm_release_page_clean(page);
-
 	vmx->nested.vmxon_ptr = vmptr;
 	ret = enter_vmx_operation(vcpu);
 	if (ret)
-- 
2.7.4
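
For reviewers who don't have kvm_host.h open: the new path relies entirely on
the generic kvm_read_guest() helper, which resolves the guest physical address
through the memslots and copies through the host virtual address, so it works
whether or not the backing memory has a "struct page". Below is a minimal
sketch of the relevant contract and the resulting check; the prototype is the
one declared in include/linux/kvm_host.h, but the comment wording is an
editorial gloss rather than text from the patch:

	/*
	 * Copy @len bytes starting at guest physical address @gpa into @data.
	 * Returns 0 on success and a negative error if the address is not
	 * covered by a memslot or the copy from the host mapping faults.
	 */
	int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data,
			   unsigned long len);

	uint32_t revision;

	/*
	 * Read the 4-byte VMCS revision identifier at the VMXON region and
	 * fail VMXON if the read fails or the id does not match the revision
	 * KVM exposes for vmcs12.
	 */
	if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) ||
	    revision != VMCS12_REVISION)
		return nested_vmx_failInvalid(vcpu);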