From: Maxim Levitsky <mlevitsk@xxxxxxxxxx>

commit f9697df251438b0798780900e8b43bdb12a56d64 upstream.

add kvm_leave_nested which wraps a call to nested_ops->leave_nested
into a function.

Cc: stable@xxxxxxxxxxxxxxx
Signed-off-by: Maxim Levitsky <mlevitsk@xxxxxxxxxx>
Message-Id: <20221103141351.50662-4-mlevitsk@xxxxxxxxxx>
Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 arch/x86/kvm/svm/nested.c |    3 ---
 arch/x86/kvm/vmx/nested.c |    3 ---
 arch/x86/kvm/x86.c        |    8 +++++++-
 3 files changed, 7 insertions(+), 7 deletions(-)

--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -1164,9 +1164,6 @@ void svm_free_nested(struct vcpu_svm *sv
 	svm->nested.initialized = false;
 }
 
-/*
- * Forcibly leave nested mode in order to be able to reset the VCPU later on.
- */
 void svm_leave_nested(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -6294,9 +6294,6 @@ out:
 	return kvm_state.size;
 }
 
-/*
- * Forcibly leave nested mode in order to be able to reset the VCPU later on.
- */
 void vmx_leave_nested(struct kvm_vcpu *vcpu)
 {
 	if (is_guest_mode(vcpu)) {
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -611,6 +611,12 @@ void kvm_deliver_exception_payload(struc
 }
 EXPORT_SYMBOL_GPL(kvm_deliver_exception_payload);
 
+/* Forcibly leave the nested mode in cases like a vCPU reset */
+static void kvm_leave_nested(struct kvm_vcpu *vcpu)
+{
+	kvm_x86_ops.nested_ops->leave_nested(vcpu);
+}
+
 static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
 		unsigned nr, bool has_error, u32 error_code,
 		bool has_payload, unsigned long payload, bool reinject)
@@ -5154,7 +5160,7 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_e
 
 	if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
 		if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
-			kvm_x86_ops.nested_ops->leave_nested(vcpu);
+			kvm_leave_nested(vcpu);
 			kvm_smm_changed(vcpu, events->smi.smm);
 		}
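
[editor's note] For readers outside the kernel tree, the change above is an instance of a common refactoring: give a named wrapper to an ops-table callback so every call site (and any later fix that has to touch all of them) goes through one function. The sketch below is a stand-alone, hedged illustration of that pattern only; it is not kernel code, and all names in it (demo_vcpu, demo_nested_ops, demo_kvm_leave_nested, etc.) are invented for the example.

```c
#include <stdio.h>

/* Invented stand-ins for the kernel types involved; illustrative only. */
struct demo_vcpu {
	int guest_mode;
};

struct demo_nested_ops {
	void (*leave_nested)(struct demo_vcpu *vcpu);
};

/* A concrete callback, analogous to the vendor-specific leave_nested hook. */
static void demo_leave_nested_cb(struct demo_vcpu *vcpu)
{
	vcpu->guest_mode = 0;
	printf("left nested mode\n");
}

static struct demo_nested_ops demo_ops = {
	.leave_nested = demo_leave_nested_cb,
};

/*
 * Analogue of kvm_leave_nested(): a single named wrapper around the
 * ops-table callback, so "forcibly leave nested mode" call sites all
 * funnel through one place instead of dereferencing the table directly.
 */
static void demo_kvm_leave_nested(struct demo_vcpu *vcpu)
{
	demo_ops.leave_nested(vcpu);
}

int main(void)
{
	struct demo_vcpu vcpu = { .guest_mode = 1 };

	demo_kvm_leave_nested(&vcpu);	/* call site uses the wrapper, not the ops table */
	return 0;
}
```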