I vaguely recall that at some time in the distant past, field 440EH of
the VMCS was referred to as the "VMX instruction-information field."
However, this field now provides instruction information for string PIO
VM-exits, descriptor-table VM-exits, RDRAND VM-exits, and RDSEED
VM-exits. The SDM now refers to it as the "VM-exit
instruction-information field." Since this field is not yet exposed as
part of a userspace API, let's rename it to match the SDM.

Signed-off-by: Jim Mattson <jmattson@xxxxxxxxxx>
Reviewed-by: Peter Shier <pshier@xxxxxxxxxx>
---
 arch/x86/include/asm/hyperv-tlfs.h |  2 +-
 arch/x86/include/asm/vmx.h         |  2 +-
 arch/x86/kvm/vmx.c                 | 96 ++++++++++++++++--------
 arch/x86/kvm/vmx_evmcs.h           |  2 +-
 4 files changed, 54 insertions(+), 48 deletions(-)

diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h
index 00e01d215f74..4211b9796e4f 100644
--- a/arch/x86/include/asm/hyperv-tlfs.h
+++ b/arch/x86/include/asm/hyperv-tlfs.h
@@ -658,7 +658,7 @@ struct hv_enlightened_vmcs {
         u32 idt_vectoring_info_field;
         u32 idt_vectoring_error_code;
         u32 vm_exit_instruction_len;
-        u32 vmx_instruction_info;
+        u32 vm_exit_instruction_info;
         u64 exit_qualification;
         u64 exit_io_instruction_ecx;
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index ade0f153947d..678daa8d1c70 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -271,7 +271,7 @@ enum vmcs_field {
         IDT_VECTORING_INFO_FIELD        = 0x00004408,
         IDT_VECTORING_ERROR_CODE        = 0x0000440a,
         VM_EXIT_INSTRUCTION_LEN         = 0x0000440c,
-        VMX_INSTRUCTION_INFO            = 0x0000440e,
+        VM_EXIT_INSTRUCTION_INFO        = 0x0000440e,
         GUEST_ES_LIMIT                  = 0x00004800,
         GUEST_CS_LIMIT                  = 0x00004802,
         GUEST_SS_LIMIT                  = 0x00004804,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ccc6a01eb4f4..53d5bc08698e 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -553,7 +553,7 @@ struct __packed vmcs12 {
         u32 idt_vectoring_info_field;
         u32 idt_vectoring_error_code;
         u32 vm_exit_instruction_len;
-        u32 vmx_instruction_info;
+        u32 vm_exit_instruction_info;
         u32 guest_es_limit;
         u32 guest_cs_limit;
         u32 guest_ss_limit;
@@ -710,7 +710,7 @@ static inline void vmx_check_vmcs12_offsets(void) {
         CHECK_OFFSET(idt_vectoring_info_field, 824);
         CHECK_OFFSET(idt_vectoring_error_code, 828);
         CHECK_OFFSET(vm_exit_instruction_len, 832);
-        CHECK_OFFSET(vmx_instruction_info, 836);
+        CHECK_OFFSET(vm_exit_instruction_info, 836);
         CHECK_OFFSET(guest_es_limit, 840);
         CHECK_OFFSET(guest_cs_limit, 844);
         CHECK_OFFSET(guest_ss_limit, 848);
@@ -1192,7 +1192,7 @@ static const unsigned short vmcs_field_to_offset_table[] = {
         FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field),
         FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code),
         FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len),
-        FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info),
+        FIELD(VM_EXIT_INSTRUCTION_INFO, vm_exit_instruction_info),
         FIELD(GUEST_ES_LIMIT, guest_es_limit),
         FIELD(GUEST_CS_LIMIT, guest_cs_limit),
         FIELD(GUEST_SS_LIMIT, guest_ss_limit),
@@ -8157,8 +8157,9 @@ static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
  * #UD or #GP.
  */
 static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
-                               unsigned long exit_qualification,
-                               u32 vmx_instruction_info, bool wr, gva_t *ret)
+                               unsigned long exit_qualification,
+                               u32 vm_exit_instruction_info, bool wr,
+                               gva_t *ret)
 {
         gva_t off;
         bool exn;
@@ -8166,20 +8167,20 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
         /*
          * According to Vol. 3B, "Information for VM Exits Due to Instruction
3B, "Information for VM Exits Due to Instruction - * Execution", on an exit, vmx_instruction_info holds most of the + * Execution", on an exit, vm_exit_instruction_info holds most of the * addressing components of the operand. Only the displacement part * is put in exit_qualification (see 3B, "Basic VM-Exit Information"). * For how an actual address is calculated from all these components, * refer to Vol. 1, "Operand Addressing". */ - int scaling = vmx_instruction_info & 3; - int addr_size = (vmx_instruction_info >> 7) & 7; - bool is_reg = vmx_instruction_info & (1u << 10); - int seg_reg = (vmx_instruction_info >> 15) & 7; - int index_reg = (vmx_instruction_info >> 18) & 0xf; - bool index_is_valid = !(vmx_instruction_info & (1u << 22)); - int base_reg = (vmx_instruction_info >> 23) & 0xf; - bool base_is_valid = !(vmx_instruction_info & (1u << 27)); + int scaling = vm_exit_instruction_info & 3; + int addr_size = (vm_exit_instruction_info >> 7) & 7; + bool is_reg = vm_exit_instruction_info & (1u << 10); + int seg_reg = (vm_exit_instruction_info >> 15) & 7; + int index_reg = (vm_exit_instruction_info >> 18) & 0xf; + bool index_is_valid = !(vm_exit_instruction_info & (1u << 22)); + int base_reg = (vm_exit_instruction_info >> 23) & 0xf; + bool base_is_valid = !(vm_exit_instruction_info & (1u << 27)); if (is_reg) { kvm_queue_exception(vcpu, UD_VECTOR); @@ -8253,7 +8254,7 @@ static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer) struct x86_exception e; if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), - vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva)) + vmcs_read32(VM_EXIT_INSTRUCTION_INFO), false, &gva)) return 1; if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) { @@ -8863,7 +8864,7 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx) * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field; * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code; * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len; - * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info; + * vmcs12->vm_exit_instruction_info = evmcs->vm_exit_instruction_info; * vmcs12->exit_qualification = evmcs->exit_qualification; * vmcs12->guest_linear_address = evmcs->guest_linear_address; * @@ -9020,7 +9021,7 @@ static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx) evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field; evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code; evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len; - evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info; + evmcs->vm_exit_instruction_info = vmcs12->vm_exit_instruction_info; evmcs->exit_qualification = vmcs12->exit_qualification; @@ -9122,7 +9123,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu) unsigned long field; u64 field_value; unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); - u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); + u32 vm_exit_instruction_info = vmcs_read32(VM_EXIT_INSTRUCTION_INFO); gva_t gva = 0; struct vmcs12 *vmcs12; @@ -9145,7 +9146,8 @@ static int handle_vmread(struct kvm_vcpu *vcpu) } /* Decode instruction info and find the field to read */ - field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); + field = kvm_register_readl(vcpu, + (((vm_exit_instruction_info) >> 28) & 0xf)); /* Read the field, zero-extended to a u64 field_value */ if (vmcs12_read_any(vmcs12, field, &field_value) < 0) return nested_vmx_failValid(vcpu, @@ -9156,12 
+9158,13 @@ static int handle_vmread(struct kvm_vcpu *vcpu) * Note that the number of bits actually copied is 32 or 64 depending * on the guest's mode (32 or 64 bit), not on the given field's length. */ - if (vmx_instruction_info & (1u << 10)) { - kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf), - field_value); + if (vm_exit_instruction_info & (1u << 10)) { + kvm_register_writel(vcpu, + (((vm_exit_instruction_info) >> 3) & 0xf), + field_value); } else { if (get_vmx_mem_address(vcpu, exit_qualification, - vmx_instruction_info, true, &gva)) + vm_exit_instruction_info, true, &gva)) return 1; /* _system ok, nested_vmx_check_permission has verified cpl=0 */ kvm_write_guest_virt_system(vcpu, gva, &field_value, @@ -9178,7 +9181,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) gva_t gva; struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); - u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); + u32 vm_exit_instruction_info = vmcs_read32(VM_EXIT_INSTRUCTION_INFO); /* The value to write might be 32 or 64 bits, depending on L1's long * mode, and eventually we need to write that into a field of several @@ -9196,12 +9199,12 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) if (vmx->nested.current_vmptr == -1ull) return nested_vmx_failInvalid(vcpu); - if (vmx_instruction_info & (1u << 10)) + if (vm_exit_instruction_info & (1u << 10)) field_value = kvm_register_readl(vcpu, - (((vmx_instruction_info) >> 3) & 0xf)); + (((vm_exit_instruction_info) >> 3) & 0xf)); else { if (get_vmx_mem_address(vcpu, exit_qualification, - vmx_instruction_info, false, &gva)) + vm_exit_instruction_info, false, &gva)) return 1; if (kvm_read_guest_virt(vcpu, gva, &field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) { @@ -9211,7 +9214,8 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) } - field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); + field = kvm_register_readl(vcpu, + (((vm_exit_instruction_info) >> 28) & 0xf)); /* * If the vCPU supports "VMWRITE to any supported field in the * VMCS," then the "read-only" fields are actually read/write. 
@@ -9398,7 +9402,7 @@ static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu, static int handle_vmptrst(struct kvm_vcpu *vcpu) { unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION); - u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO); + u32 instr_info = vmcs_read32(VM_EXIT_INSTRUCTION_INFO); gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr; struct x86_exception e; gva_t gva; @@ -9424,7 +9428,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu) static int handle_invept(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); - u32 vmx_instruction_info, types; + u32 vm_exit_instruction_info, types; unsigned long type; gva_t gva; struct x86_exception e; @@ -9442,8 +9446,8 @@ static int handle_invept(struct kvm_vcpu *vcpu) if (!nested_vmx_check_permission(vcpu)) return 1; - vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); - type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); + vm_exit_instruction_info = vmcs_read32(VM_EXIT_INSTRUCTION_INFO); + type = kvm_register_readl(vcpu, (vm_exit_instruction_info >> 28) & 0xf); types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; @@ -9455,7 +9459,7 @@ static int handle_invept(struct kvm_vcpu *vcpu) * operand is read even if it isn't needed (e.g., for type==global) */ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), - vmx_instruction_info, false, &gva)) + vm_exit_instruction_info, false, &gva)) return 1; if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) { kvm_inject_page_fault(vcpu, &e); @@ -9490,7 +9494,7 @@ static u16 nested_get_vpid02(struct kvm_vcpu *vcpu) static int handle_invvpid(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); - u32 vmx_instruction_info; + u32 vm_exit_instruction_info; unsigned long type, types; gva_t gva; struct x86_exception e; @@ -9510,8 +9514,8 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) if (!nested_vmx_check_permission(vcpu)) return 1; - vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); - type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); + vm_exit_instruction_info = vmcs_read32(VM_EXIT_INSTRUCTION_INFO); + type = kvm_register_readl(vcpu, (vm_exit_instruction_info >> 28) & 0xf); types = (vmx->nested.msrs.vpid_caps & VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8; @@ -9524,7 +9528,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) * operand is read even if it isn't needed (e.g., for type==global) */ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), - vmx_instruction_info, false, &gva)) + vm_exit_instruction_info, false, &gva)) return 1; if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) { kvm_inject_page_fault(vcpu, &e); @@ -9567,7 +9571,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) static int handle_invpcid(struct kvm_vcpu *vcpu) { - u32 vmx_instruction_info; + u32 vm_exit_instruction_info; unsigned long type; bool pcid_enabled; gva_t gva; @@ -9584,8 +9588,8 @@ static int handle_invpcid(struct kvm_vcpu *vcpu) return 1; } - vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); - type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); + vm_exit_instruction_info = vmcs_read32(VM_EXIT_INSTRUCTION_INFO); + type = kvm_register_readl(vcpu, (vm_exit_instruction_info >> 28) & 0xf); if (type > 3) { kvm_inject_gp(vcpu, 0); @@ -9596,7 +9600,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu) * is read even if it isn't needed (e.g., for type==all) */ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), - vmx_instruction_info, false, 
&gva)) + vm_exit_instruction_info, false, &gva)) return 1; if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) { @@ -10056,7 +10060,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, gpa_t bitmap) { - u32 vmx_instruction_info; + u32 vm_exit_instruction_info; unsigned long field; u8 b; @@ -10064,8 +10068,9 @@ static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu, return true; /* Decode instruction info and find the field to access */ - vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); - field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); + vm_exit_instruction_info = vmcs_read32(VM_EXIT_INSTRUCTION_INFO); + field = kvm_register_read(vcpu, + (((vm_exit_instruction_info) >> 28) & 0xf)); /* Out-of-range fields always cause a VM exit from L2 to L1 */ if (field >> 15) @@ -13809,7 +13814,8 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, vmcs12->idt_vectoring_info_field = 0; vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); - vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); + vmcs12->vm_exit_instruction_info = + vmcs_read32(VM_EXIT_INSTRUCTION_INFO); if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) { vmcs12->launch_state = 1; diff --git a/arch/x86/kvm/vmx_evmcs.h b/arch/x86/kvm/vmx_evmcs.h index 210a884090ad..cf5d8cb22df4 100644 --- a/arch/x86/kvm/vmx_evmcs.h +++ b/arch/x86/kvm/vmx_evmcs.h @@ -247,7 +247,7 @@ static const struct evmcs_field vmcs_field_to_evmcs_1[] = { HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE), EVMCS1_FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len, HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE), - EVMCS1_FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info, + EVMCS1_FIELD(VM_EXIT_INSTRUCTION_INFO, vm_exit_instruction_info, HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE), /* No mask defined in the spec (not used) */ -- 2.19.1.1215.g8438c0b245-goog
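For readers without the SDM at hand, the decoding that get_vmx_mem_address()
performs on this field can be summarized with the standalone sketch below.
It is illustrative only: the struct and function names are hypothetical (they
are not part of this patch or of KVM), and the shifts and masks simply mirror
the ones in the get_vmx_mem_address() hunk above.

#include <stdbool.h>
#include <stdint.h>

/*
 * Illustrative decode of the VM-exit instruction-information field
 * (VMCS encoding 440EH) for an instruction with a memory operand.
 * The struct and function names are hypothetical; the bit positions
 * mirror the masks used in get_vmx_mem_address() above.
 */
struct exit_instr_info {
        int scaling;            /* bits 1:0, scale factor is 2^scaling */
        int addr_size;          /* bits 9:7, 0 = 16-bit, 1 = 32-bit, 2 = 64-bit */
        bool is_reg;            /* bit 10, operand is a register, not memory */
        int seg_reg;            /* bits 17:15, segment register */
        int index_reg;          /* bits 21:18, index register */
        bool index_is_valid;    /* bit 22 clear means the index register is valid */
        int base_reg;           /* bits 26:23, base register */
        bool base_is_valid;     /* bit 27 clear means the base register is valid */
        int reg2;               /* bits 31:28, second register operand */
};

static struct exit_instr_info decode_exit_instr_info(uint32_t info)
{
        struct exit_instr_info d = {
                .scaling        = info & 3,
                .addr_size      = (info >> 7) & 7,
                .is_reg         = info & (1u << 10),
                .seg_reg        = (info >> 15) & 7,
                .index_reg      = (info >> 18) & 0xf,
                .index_is_valid = !(info & (1u << 22)),
                .base_reg       = (info >> 23) & 0xf,
                .base_is_valid  = !(info & (1u << 27)),
                .reg2           = (info >> 28) & 0xf,
        };

        return d;
}

Bit 10 is what handle_vmread() and handle_vmwrite() test to decide between a
register and a memory operand, and bits 31:28 name the second register
operand, which is why every handler above extracts (info >> 28) & 0xf to find
the VMCS field encoding (VMREAD/VMWRITE) or the operation type
(INVEPT/INVVPID/INVPCID).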