When checking segment limits for VMX instructions in protected mode, the
different sizes of their memory operands must be taken into account. For
VMREAD and VMWRITE, the memory operand is 32 or 64 bits wide depending on
CPU mode. For VMXON, VMCLEAR, VMPTRST and VMPTRLD, the memory operand is
64 bits wide. For INVEPT, it is 128 bits wide.

Signed-off-by: Eugene Korenevsky <ekorenevsky@xxxxxxxxx>
---
 arch/x86/kvm/vmx.c | 21 +++++++++++++--------
 1 file changed, 13 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4a4d677..f39e24f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6399,7 +6399,8 @@ static int vmx_protmode_seg_check(struct kvm_vcpu *vcpu,
  */
 static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
 				unsigned long exit_qualification,
-				u32 vmx_instruction_info, bool wr, gva_t *ret)
+				u32 vmx_instruction_info,
+				bool wr, int mem_op_size, gva_t *ret)
 {
 	gva_t off;
 	struct kvm_segment s;
@@ -6466,7 +6467,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
 	int maxphyaddr = cpuid_maxphyaddr(vcpu);
 
 	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
-			vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
+			vmcs_read32(VMX_INSTRUCTION_INFO), false, 8, &gva))
 		return 1;
 
 	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
@@ -6971,6 +6972,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
 	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
 	gva_t gva = 0;
+	int mem_op_size;
 
 	if (!nested_vmx_check_permission(vcpu) ||
 	    !nested_vmx_check_vmcs12(vcpu))
@@ -6993,12 +6995,13 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 		kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
 			field_value);
 	} else {
+		mem_op_size = is_long_mode(vcpu) ? 8 : 4;
 		if (get_vmx_mem_address(vcpu, exit_qualification,
-				vmx_instruction_info, true, &gva))
+				vmx_instruction_info, true, mem_op_size, &gva))
 			return 1;
 		/* _system ok, as nested_vmx_check_permission verified cpl=0 */
 		kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
-			     &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL);
+			     &field_value, mem_op_size, NULL);
 	}
 
 	nested_vmx_succeed(vcpu);
@@ -7021,6 +7024,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 	 */
 	u64 field_value = 0;
 	struct x86_exception e;
+	int mem_op_size;
 
 	if (!nested_vmx_check_permission(vcpu) ||
 	    !nested_vmx_check_vmcs12(vcpu))
@@ -7030,11 +7034,12 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 		field_value = kvm_register_readl(vcpu,
 			(((vmx_instruction_info) >> 3) & 0xf));
 	else {
+		mem_op_size = is_64_bit_mode(vcpu) ? 8 : 4;
 		if (get_vmx_mem_address(vcpu, exit_qualification,
-				vmx_instruction_info, false, &gva))
+				vmx_instruction_info, false, mem_op_size, &gva))
 			return 1;
 		if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
-			   &field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
+			   &field_value, mem_op_size, &e)) {
 			kvm_inject_page_fault(vcpu, &e);
 			return 1;
 		}
@@ -7123,7 +7128,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
 		return 1;
 
 	if (get_vmx_mem_address(vcpu, exit_qualification,
-			vmx_instruction_info, true, &vmcs_gva))
+			vmx_instruction_info, true, 8, &vmcs_gva))
 		return 1;
 	/* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */
 	if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
@@ -7179,7 +7184,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
 	 * operand is read even if it isn't needed (e.g., for type==global)
 	 */
 	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
-			vmx_instruction_info, false, &gva))
+			vmx_instruction_info, false, 16, &gva))
 		return 1;
 	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
			sizeof(operand), &e)) {
-- 
2.1.4
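
For illustration only (this is not the kvm code; the helper name seg_limit_ok
and all values below are made up), here is a minimal user-space sketch of the
kind of expand-up segment-limit check that needs the operand size: the last
byte touched, off + mem_op_size - 1, must not pass the segment limit, so a
4-byte VMREAD store can be legal at an offset where a 16-byte INVEPT
descriptor is not. The real get_vmx_mem_address() additionally has to handle
expand-down segments, segment attribute checks and 64-bit canonical-address
checks.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative sketch only -- not the kvm implementation.  For an
 * expand-up segment, an access of mem_op_size bytes at offset off is
 * within limits only if its last byte, off + mem_op_size - 1, does not
 * exceed the segment limit (and the offset arithmetic does not wrap).
 */
static bool seg_limit_ok(uint32_t limit, uint32_t off, uint32_t mem_op_size)
{
	uint32_t last = off + mem_op_size - 1;

	if (last < off)		/* offset wrapped around */
		return false;
	return last <= limit;
}

int main(void)
{
	uint32_t limit = 0xffff;	/* e.g. a 64 KiB expand-up data segment */

	/* A 4-byte VMREAD store ending exactly at the limit is fine... */
	printf("%d\n", seg_limit_ok(limit, 0xfffc, 4));		/* prints 1 */
	/* ...but a 16-byte INVEPT descriptor at the same offset is not. */
	printf("%d\n", seg_limit_ok(limit, 0xfffc, 16));	/* prints 0 */
	return 0;
}

Compiled with gcc -Wall, this prints 1 then 0, showing why the same offset
can pass or fail the limit check purely depending on the operand size.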