.. to improve readability and maintainability, and to align the code as per
the layout of the checks in chapter "VM Entries" in Intel SDM vol 3C.

Signed-off-by: Krish Sadhukhan <krish.sadhukhan@xxxxxxxxxx>
Reviewed-by: Mihai Carabas <mihai.carabas@xxxxxxxxxx>
Reviewed-by: Mark Kanda <mark.kanda@xxxxxxxxxx>
---
 arch/x86/kvm/vmx.c | 43 +++++++++++++++++++++++++++++++++----------
 1 file changed, 33 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 6fa48b7..2284b8f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -12267,20 +12267,28 @@ static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
-						struct vmcs12 *vmcs12)
+static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
+						      struct vmcs12 *vmcs12)
 {
 	if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_load_count,
 					vmcs12->vm_exit_msr_load_addr) ||
 	    nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_store_count,
-					vmcs12->vm_exit_msr_store_addr) ||
-	    nested_vmx_check_msr_switch(vcpu, vmcs12->vm_entry_msr_load_count,
-					vmcs12->vm_entry_msr_load_addr))
+					vmcs12->vm_exit_msr_store_addr))
 		return -EINVAL;
 
 	return 0;
 }
 
+static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
+						struct vmcs12 *vmcs12)
+{
+	if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_entry_msr_load_count,
+					vmcs12->vm_entry_msr_load_addr))
+		return -EINVAL;
+
+	return 0;
+}
+
 static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
 					 struct vmcs12 *vmcs12)
 {
@@ -13046,6 +13054,23 @@ static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+/*
+ * Checks related to VM-Exit Control Fields
+ */
+static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu,
+					 struct vmcs12 *vmcs12)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	if (!vmx_control_verify(vmcs12->vm_exit_controls,
+				vmx->nested.msrs.exit_ctls_low,
+				vmx->nested.msrs.exit_ctls_high) ||
+	    nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12))
+		return -EINVAL;
+
+	return 0;
+}
+
 static int nested_vmx_check_vmentry_prereqs(struct kvm_vcpu *vcpu,
 					    struct vmcs12 *vmcs12)
 {
@@ -13055,7 +13080,8 @@ static int nested_vmx_check_vmentry_prereqs(struct kvm_vcpu *vcpu,
 	    vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)
 		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
 
-	if (nested_check_vm_execution_controls(vcpu, vmcs12))
+	if (nested_check_vm_execution_controls(vcpu, vmcs12) ||
+	    nested_check_vm_exit_controls(vcpu, vmcs12))
 		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
 
 	if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12))
@@ -13066,10 +13092,7 @@ static int nested_vmx_check_vmentry_prereqs(struct kvm_vcpu *vcpu,
 	    !nested_cr3_valid(vcpu, vmcs12->host_cr3))
 		return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
 
-	if (!vmx_control_verify(vmcs12->vm_exit_controls,
-				vmx->nested.msrs.exit_ctls_low,
-				vmx->nested.msrs.exit_ctls_high) ||
-	    !vmx_control_verify(vmcs12->vm_entry_controls,
+	if (!vmx_control_verify(vmcs12->vm_entry_controls,
 				vmx->nested.msrs.entry_ctls_low,
 				vmx->nested.msrs.entry_ctls_high))
 		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-- 
2.9.5
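
A note for readers not familiar with the helper used in the new nested_check_vm_exit_controls(): vmx_control_verify() compares a 32-bit VMCS control field against the allowed-0/allowed-1 settings that KVM exposes through the nested VMX capability MSRs. The sketch below is not part of the patch; it is a standalone illustration of that check, assuming the usual semantics (every bit set in the "low" mask must be set in the control, and the control must not set any bit that is clear in the "high" mask). The function name control_verify() and the mask values are made up for the example.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Allowed-0/allowed-1 check: bits required by 'low' must be present in
 * 'control', and 'control' must not contain bits outside 'high'.
 */
static bool control_verify(uint32_t control, uint32_t low, uint32_t high)
{
	return ((control & high) | low) == control;
}

int main(void)
{
	/* Hypothetical masks: bits 0-7 required, only bits 0-15 allowed. */
	uint32_t low = 0x000000ff, high = 0x0000ffff;

	printf("%d\n", control_verify(0x000000ff, low, high)); /* 1: exactly the required bits */
	printf("%d\n", control_verify(0x0000000f, low, high)); /* 0: a required bit is clear */
	printf("%d\n", control_verify(0x000100ff, low, high)); /* 0: a disallowed bit is set */
	return 0;
}

With that in mind, the refactoring above only moves the vm_exit_controls instance of this check (plus the exit MSR-switch address/count checks) into its own function; the failing return value seen by L1, VMXERR_ENTRY_INVALID_CONTROL_FIELD, is unchanged.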