On Thu, Sep 29, 2022, Paolo Bonzini wrote:
> @@ -520,14 +505,14 @@ int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
>  	u64 smbase;
>  	int ret;
>
> -	smbase = ctxt->ops->get_smbase(ctxt);
> +	smbase = vcpu->arch.smbase;
>
> -	ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf));
> -	if (ret != X86EMUL_CONTINUE)
> +	ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfe00, buf, sizeof(buf));
> +	if (ret < 0)
>  		return X86EMUL_UNHANDLEABLE;
>
> -	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
> -		ctxt->ops->set_nmi_mask(ctxt, false);
> +	if ((vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK) == 0)
> +		static_call(kvm_x86_set_nmi_mask)(vcpu, false);
>
>  	kvm_smm_changed(vcpu, false);
>
> @@ -535,41 +520,41 @@ int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
>  	 * Get back to real mode, to prepare a safe state in which to load
>  	 * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
>  	 * supports long mode.
> -	 *
> -	 * The ctxt->ops callbacks will handle all side effects when writing
> -	 * writing MSRs and CRs, e.g. MMU context resets, CPUID
> -	 * runtime updates, etc.
>  	 */
> -	if (emulator_has_longmode(ctxt)) {
> -		struct desc_struct cs_desc;
> +#ifdef CONFIG_X86_64
> +	if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {

To fix the "unused 'efer'" issue and avoid multiple guest_cpuid_has() calls,
how about this as a fixup?  It's not like we care about the code footprint for
32-bit KVM if the compiler isn't clever enough to optimize away the dead code.

---
 arch/x86/kvm/smm.c | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kvm/smm.c b/arch/x86/kvm/smm.c
index 41ca128478fc..740fca1cf3a3 100644
--- a/arch/x86/kvm/smm.c
+++ b/arch/x86/kvm/smm.c
@@ -500,6 +500,8 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
 int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
 {
 	struct kvm_vcpu *vcpu = ctxt->vcpu;
+	bool is_64bit_vcpu = IS_ENABLED(CONFIG_X86_64) &&
+			     guest_cpuid_has(vcpu, X86_FEATURE_LM);
 	unsigned long cr0, cr4, efer;
 	char buf[512];
 	u64 smbase;
@@ -521,8 +523,7 @@ int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
 	 * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
 	 * supports long mode.
 	 */
-#ifdef CONFIG_X86_64
-	if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
+	if (is_64bit_vcpu) {
 		struct kvm_segment cs_desc;

 		/* Zero CR4.PCIDE before CR0.PG. */
@@ -536,15 +537,13 @@ int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
 		cs_desc.s = cs_desc.g = cs_desc.present = 1;
 		kvm_set_segment(vcpu, &cs_desc, VCPU_SREG_CS);
 	}
-#endif

 	/* For the 64-bit case, this will clear EFER.LMA. */
 	cr0 = kvm_read_cr0(vcpu);
 	if (cr0 & X86_CR0_PE)
 		kvm_set_cr0(vcpu, cr0 & ~(X86_CR0_PG | X86_CR0_PE));

-#ifdef CONFIG_X86_64
-	if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
+	if (is_64bit_vcpu) {
 		/* Clear CR4.PAE before clearing EFER.LME. */
 		cr4 = kvm_read_cr4(vcpu);
 		if (cr4 & X86_CR4_PAE)
@@ -554,7 +553,6 @@ int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
 		efer = 0;
 		kvm_set_msr(vcpu, MSR_EFER, efer);
 	}
-#endif

 	/*
 	 * Give leave_smm() a chance to make ISA-specific changes to the vCPU
@@ -565,7 +563,7 @@ int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
 		return X86EMUL_UNHANDLEABLE;

 #ifdef CONFIG_X86_64
-	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
+	if (is_64bit_vcpu)
 		return rsm_load_state_64(ctxt, buf);
 	else
 #endif

base-commit: 8b86d27cc60a150252b04989de818ad4ec85f899
--
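
For anyone unfamiliar with the idiom, here is a minimal, standalone userspace
sketch (not KVM code; HAS_64BIT_SUPPORT and guest_has_long_mode() are
hypothetical stand-ins for IS_ENABLED(CONFIG_X86_64) and guest_cpuid_has())
of why computing the predicate once with a compile-time-constant term lets the
64-bit-only blocks lose their #ifdefs: the constant folds to 0 on 32-bit
builds, the guarded code is discarded as dead code, yet it still gets
compile-checked.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for IS_ENABLED(CONFIG_X86_64): a compile-time 0/1. */
#define HAS_64BIT_SUPPORT 0

/* Hypothetical stand-in for guest_cpuid_has(vcpu, X86_FEATURE_LM). */
static bool guest_has_long_mode(void)
{
	return true;
}

int main(void)
{
	/*
	 * Compute the predicate once.  When HAS_64BIT_SUPPORT is the
	 * constant 0, "0 && ..." short-circuits, so the helper is never
	 * called and every block guarded by the flag is dead code the
	 * compiler can drop, while that code is still type-checked.
	 */
	bool is_64bit_vcpu = HAS_64BIT_SUPPORT && guest_has_long_mode();

	if (is_64bit_vcpu)
		printf("64-bit path (dropped when HAS_64BIT_SUPPORT is 0)\n");
	else
		printf("32-bit path\n");

	return 0;
}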