...so that every nested vmentry is not slowed down by those checks. Signed-off-by: Krish Sadhukhan <krish.sadhukhan@xxxxxxxxxx> --- arch/x86/kvm/vmx/nested.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 056eba497730..ffeeeb5ff520 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -2534,6 +2534,15 @@ static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu, return 0; } +static int nested_check_vm_entry_controls_full(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12)) + return -EINVAL; + + return 0; +} + /* * Checks related to VM-Entry Control Fields */ @@ -2603,7 +2612,8 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu, } } - if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12)) + if ((vmx->nested.dirty_vmcs12) && + nested_check_vm_entry_controls_full(vcpu, vmcs12)) return -EINVAL; return 0; -- 2.20.1