Use local variables to dereference svm->vcpu and svm->vmcb, as they make
the code tidier.

Suggested-by: Sean Christopherson <seanjc@xxxxxxxxxx>
Signed-off-by: Krish Sadhukhan <krish.sadhukhan@xxxxxxxxxx>
---
 arch/x86/kvm/svm/nested.c | 33 +++++++++++++++++----------------
 1 file changed, 17 insertions(+), 16 deletions(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 9d8d80f04400..2ae542d1befc 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -503,33 +503,34 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
 {
 	int ret;
 	struct vmcb *vmcb12;
+	struct kvm_vcpu *vcpu = &svm->vcpu;
 	struct vmcb *hsave = svm->nested.hsave;
 	struct vmcb *vmcb = svm->vmcb;
 	struct kvm_host_map map;
 	u64 vmcb12_gpa;
 
-	if (is_smm(&svm->vcpu)) {
-		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
+	if (is_smm(vcpu)) {
+		kvm_queue_exception(vcpu, UD_VECTOR);
 		return 1;
 	}
 
-	vmcb12_gpa = svm->vmcb->save.rax;
-	ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb12_gpa), &map);
+	vmcb12_gpa = vmcb->save.rax;
+	ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map);
 	if (ret == -EINVAL) {
-		kvm_inject_gp(&svm->vcpu, 0);
+		kvm_inject_gp(vcpu, 0);
 		return 1;
 	} else if (ret) {
-		return kvm_skip_emulated_instruction(&svm->vcpu);
+		return kvm_skip_emulated_instruction(vcpu);
 	}
 
-	ret = kvm_skip_emulated_instruction(&svm->vcpu);
+	ret = kvm_skip_emulated_instruction(vcpu);
 
 	vmcb12 = map.hva;
 
 	if (WARN_ON_ONCE(!svm->nested.initialized))
 		return -EINVAL;
 
-	if (!nested_vmcb_checks(&svm->vcpu, vmcb12)) {
+	if (!nested_vmcb_checks(vcpu, vmcb12)) {
 		vmcb12->control.exit_code    = SVM_EXIT_ERR;
 		vmcb12->control.exit_code_hi = 0;
 		vmcb12->control.exit_info_1  = 0;
@@ -539,8 +540,8 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
 
 	/* Clear internal status */
-	kvm_clear_exception_queue(&svm->vcpu);
-	kvm_clear_interrupt_queue(&svm->vcpu);
+	kvm_clear_exception_queue(vcpu);
+	kvm_clear_interrupt_queue(vcpu);
 
 	/*
 	 * Save the old vmcb, so we don't need to pick what we save, but can
@@ -552,17 +553,17 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
 	hsave->save.ds = vmcb->save.ds;
 	hsave->save.gdtr = vmcb->save.gdtr;
 	hsave->save.idtr = vmcb->save.idtr;
-	hsave->save.efer = svm->vcpu.arch.efer;
-	hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
+	hsave->save.efer = vcpu->arch.efer;
+	hsave->save.cr0 = kvm_read_cr0(vcpu);
 	hsave->save.cr4 = svm->vcpu.arch.cr4;
-	hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
-	hsave->save.rip = kvm_rip_read(&svm->vcpu);
+	hsave->save.rflags = kvm_get_rflags(vcpu);
+	hsave->save.rip = kvm_rip_read(vcpu);
 	hsave->save.rsp = vmcb->save.rsp;
 	hsave->save.rax = vmcb->save.rax;
 	if (npt_enabled)
 		hsave->save.cr3 = vmcb->save.cr3;
 	else
-		hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);
+		hsave->save.cr3 = kvm_read_cr3(vcpu);
 
 	copy_vmcb_control_area(&hsave->control, &vmcb->control);
@@ -585,7 +586,7 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
 	nested_svm_vmexit(svm);
 
 out:
-	kvm_vcpu_unmap(&svm->vcpu, &map, true);
+	kvm_vcpu_unmap(vcpu, &map, true);
 
 	return ret;
 }
-- 
2.27.0
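
For readers skimming the idea without going through the whole diff, here is a
minimal standalone sketch of the pattern the patch applies (illustration only;
the struct and function names below are made up, none of this is KVM code):
cache a pointer that would otherwise be dereferenced over and over in a local
variable, so every later use stays short and uniform.

/*
 * Illustration only -- hypothetical types and helpers, not KVM code.
 * Shows the refactor: take the address once, store it in a local, and
 * use the local everywhere instead of re-spelling the long expression.
 */
#include <stdio.h>

struct cpu { int id; };
struct ctx { struct cpu cpu; };

/* Before: every statement spells out &c->cpu. */
static void report_verbose(struct ctx *c)
{
	printf("cpu %d\n", (&c->cpu)->id);
	printf("cpu %d again\n", (&c->cpu)->id);
}

/* After: one local variable keeps each use short. */
static void report_tidy(struct ctx *c)
{
	struct cpu *cpu = &c->cpu;

	printf("cpu %d\n", cpu->id);
	printf("cpu %d again\n", cpu->id);
}

int main(void)
{
	struct ctx c = { .cpu = { .id = 0 } };

	report_verbose(&c);
	report_tidy(&c);
	return 0;
}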