Signed-off-by: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
---
 arch/x86/kvm/x86.c | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5c520bb2f649..ea0775842c9c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7627,7 +7627,7 @@ static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
 	return X86EMUL_CONTINUE;
 }
 
-static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
+static int rsm_enter_protected_mode(struct kvm_vcpu *vcpu,
 				    u64 cr0, u64 cr3, u64 cr4)
 {
 	int bad;
@@ -7640,7 +7640,7 @@ static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
 		cr3 &= ~0xfff;
 	}
 
-	bad = ctxt->ops->set_cr(ctxt, 3, cr3);
+	bad = kvm_set_cr3(vcpu, cr3);
 	if (bad)
 		return X86EMUL_UNHANDLEABLE;
 
@@ -7649,20 +7649,20 @@ static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
 	 * Then enable protected mode.  However, PCID cannot be enabled
 	 * if EFER.LMA=0, so set it separately.
 	 */
-	bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
+	bad = kvm_set_cr4(vcpu, cr4 & ~X86_CR4_PCIDE);
 	if (bad)
 		return X86EMUL_UNHANDLEABLE;
 
-	bad = ctxt->ops->set_cr(ctxt, 0, cr0);
+	bad = kvm_set_cr0(vcpu, cr0);
 	if (bad)
 		return X86EMUL_UNHANDLEABLE;
 
 	if (cr4 & X86_CR4_PCIDE) {
-		bad = ctxt->ops->set_cr(ctxt, 4, cr4);
+		bad = kvm_set_cr4(vcpu, cr4);
 		if (bad)
 			return X86EMUL_UNHANDLEABLE;
 		if (pcid) {
-			bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
+			bad = kvm_set_cr3(vcpu, cr3 | pcid);
 			if (bad)
 				return X86EMUL_UNHANDLEABLE;
 		}
@@ -7731,7 +7731,7 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
 
 	vcpu->arch.smbase = GET_SMSTATE(u32, smbase, 0x7ef8);
 
-	return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
+	return rsm_enter_protected_mode(vcpu, cr0, cr3, cr4);
 }
 
 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
@@ -7784,7 +7784,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 	dt.address = GET_SMSTATE(u64, smbase, 0x7e68);
 	ctxt->ops->set_gdt(ctxt, &dt);
 
-	r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
+	r = rsm_enter_protected_mode(vcpu, cr0, cr3, cr4);
 	if (r != X86EMUL_CONTINUE)
 		return r;
 
@@ -7809,13 +7809,13 @@ static int leave_smm(struct kvm_vcpu *vcpu)
 	 * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
 	 * supports long mode.
 	 */
-	cr4 = ctxt->ops->get_cr(ctxt, 4);
+	cr4 = kvm_read_cr4(vcpu);
 	if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
 		struct desc_struct cs_desc;
 
 		/* Zero CR4.PCIDE before CR0.PG. */
 		if (cr4 & X86_CR4_PCIDE) {
-			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
+			kvm_set_cr4(vcpu, cr4 & ~X86_CR4_PCIDE);
 			cr4 &= ~X86_CR4_PCIDE;
 		}
 
@@ -7827,13 +7827,13 @@ static int leave_smm(struct kvm_vcpu *vcpu)
 	}
 
 	/* For the 64-bit case, this will clear EFER.LMA. */
-	cr0 = ctxt->ops->get_cr(ctxt, 0);
+	cr0 = kvm_read_cr0(vcpu);
 	if (cr0 & X86_CR0_PE)
-		ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
+		kvm_set_cr0(vcpu, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
 
 	/* Now clear CR4.PAE (which must be done before clearing EFER.LME). */
 	if (cr4 & X86_CR4_PAE)
-		ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
+		kvm_set_cr4(vcpu, cr4 & ~X86_CR4_PAE);
 
 	/* And finally go back to 32-bit mode. */
 	efer = 0;
-- 
2.21.0
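
[A condensed sketch, not part of the patch above: it restates the CR
restore ordering that rsm_enter_protected_mode() relies on, using the
kvm_set_cr0/3/4() helpers this patch switches to.  The helper name
restore_crs_on_rsm() is hypothetical; the X86_CR*/X86EMUL_* symbols are
the real kernel definitions.]

	/* Hypothetical condensation of rsm_enter_protected_mode(). */
	static int restore_crs_on_rsm(struct kvm_vcpu *vcpu,
				      u64 cr0, u64 cr3, u64 cr4)
	{
		u64 pcid = 0;

		/* CR3.PCID is only legal once CR4.PCIDE is set; defer it. */
		if (cr4 & X86_CR4_PCIDE) {
			pcid = cr3 & 0xfff;
			cr3 &= ~0xfff;
		}

		/*
		 * CR4.PCIDE requires EFER.LMA=1, which is only set once
		 * CR0.PG is written below, so write CR4 without PCIDE first.
		 */
		if (kvm_set_cr3(vcpu, cr3) ||
		    kvm_set_cr4(vcpu, cr4 & ~X86_CR4_PCIDE) ||
		    kvm_set_cr0(vcpu, cr0))
			return X86EMUL_UNHANDLEABLE;

		/* EFER.LMA is now 1: PCIDE, and then the PCID, can go in. */
		if ((cr4 & X86_CR4_PCIDE) &&
		    (kvm_set_cr4(vcpu, cr4) ||
		     (pcid && kvm_set_cr3(vcpu, cr3 | pcid))))
			return X86EMUL_UNHANDLEABLE;

		return X86EMUL_CONTINUE;
	}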
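[Likewise a hedged sketch, not from the patch: leave_smm() performs the
mirror-image teardown before loading the SMRAM image, with the ordering
constraints running in reverse.  drop_to_32bit_mode() is a hypothetical
name; the body condenses the leave_smm() hunks above.]

	/* Hypothetical condensation of the leave_smm() CR teardown. */
	static void drop_to_32bit_mode(struct kvm_vcpu *vcpu)
	{
		unsigned long cr0, cr4;

		/* CR4.PCIDE must be zeroed before CR0.PG is cleared. */
		cr4 = kvm_read_cr4(vcpu);
		if (cr4 & X86_CR4_PCIDE) {
			kvm_set_cr4(vcpu, cr4 & ~X86_CR4_PCIDE);
			cr4 &= ~X86_CR4_PCIDE;
		}

		/* Clearing CR0.PG also clears EFER.LMA in the 64-bit case. */
		cr0 = kvm_read_cr0(vcpu);
		if (cr0 & X86_CR0_PE)
			kvm_set_cr0(vcpu, cr0 & ~(X86_CR0_PG | X86_CR0_PE));

		/* CR4.PAE must be cleared before EFER.LME is cleared. */
		if (cr4 & X86_CR4_PAE)
			kvm_set_cr4(vcpu, cr4 & ~X86_CR4_PAE);
	}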