On Wed, Aug 03, 2022, Maxim Levitsky wrote: > Switch from using a raw array to 'union kvm_smram'. > > Signed-off-by: Maxim Levitsky <mlevitsk@xxxxxxxxxx> > --- > arch/x86/include/asm/kvm_host.h | 5 +++-- > arch/x86/kvm/emulate.c | 12 +++++++----- > arch/x86/kvm/kvm_emulate.h | 3 ++- > arch/x86/kvm/svm/svm.c | 8 ++++++-- > arch/x86/kvm/vmx/vmx.c | 4 ++-- > arch/x86/kvm/x86.c | 16 ++++++++-------- > 6 files changed, 28 insertions(+), 20 deletions(-) > > diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h > index e8281d64a4315a..d752fabde94ad2 100644 > --- a/arch/x86/include/asm/kvm_host.h > +++ b/arch/x86/include/asm/kvm_host.h > @@ -204,6 +204,7 @@ typedef enum exit_fastpath_completion fastpath_t; > > struct x86_emulate_ctxt; > struct x86_exception; > +union kvm_smram; > enum x86_intercept; > enum x86_intercept_stage; > > @@ -1600,8 +1601,8 @@ struct kvm_x86_ops { > void (*setup_mce)(struct kvm_vcpu *vcpu); > > int (*smi_allowed)(struct kvm_vcpu *vcpu, bool for_injection); > - int (*enter_smm)(struct kvm_vcpu *vcpu, char *smstate); > - int (*leave_smm)(struct kvm_vcpu *vcpu, const char *smstate); > + int (*enter_smm)(struct kvm_vcpu *vcpu, union kvm_smram *smram); > + int (*leave_smm)(struct kvm_vcpu *vcpu, const union kvm_smram *smram); > void (*enable_smi_window)(struct kvm_vcpu *vcpu); > > int (*mem_enc_ioctl)(struct kvm *kvm, void __user *argp); > diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c > index 55d9328e6074a2..610978d00b52b0 100644 > --- a/arch/x86/kvm/emulate.c > +++ b/arch/x86/kvm/emulate.c > @@ -2594,16 +2594,18 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, > static int em_rsm(struct x86_emulate_ctxt *ctxt) > { > unsigned long cr0, cr4, efer; > - char buf[512]; > + const union kvm_smram smram; This is blatantly wrong, ctxt->ops->read_phys() writes to the buffer. 
I assume you did this to make it more difficult to modify the buffer after reading from guest memory, but IMO that's not worth misleading readers. > u64 smbase; > int ret; > > + BUILD_BUG_ON(sizeof(smram) != 512); > + > if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0) > return emulate_ud(ctxt); > > smbase = ctxt->ops->get_smbase(ctxt); > > - ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf)); > + ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, (void *)&smram, sizeof(smram)); The point of the union + bytes is so that KVM doesn't have to cast. kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, smram.bytes, sizeof(smram)); > if (ret != X86EMUL_CONTINUE) > return X86EMUL_UNHANDLEABLE; > > @@ -2653,15 +2655,15 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt) > * state (e.g. enter guest mode) before loading state from the SMM > * state-save area. > */ > - if (ctxt->ops->leave_smm(ctxt, buf)) > + if (ctxt->ops->leave_smm(ctxt, &smram)) > goto emulate_shutdown; > > #ifdef CONFIG_X86_64 > if (emulator_has_longmode(ctxt)) > - ret = rsm_load_state_64(ctxt, buf); > + ret = rsm_load_state_64(ctxt, (const char *)&smram); > else > #endif > - ret = rsm_load_state_32(ctxt, buf); > + ret = rsm_load_state_32(ctxt, (const char *)&smram); Same thing here, though this is temporary. And it's kinda silly, but I think it makes sense to avoid the cast here by tweaking the rsm_load_state_*() helpers to take "u8 *" instead of "char *". 
> if (ret != X86EMUL_CONTINUE) > goto emulate_shutdown; > diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c > index 38f873cb6f2c14..688315d1dfabd1 100644 > --- a/arch/x86/kvm/svm/svm.c > +++ b/arch/x86/kvm/svm/svm.c > @@ -4433,12 +4433,14 @@ static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection) > return 1; > } > > -static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate) > +static int svm_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram) > { > struct vcpu_svm *svm = to_svm(vcpu); > struct kvm_host_map map_save; > int ret; > > + char *smstate = (char *)smram; Again temporary, but since this is new code, just make it u8 *smstate = smram->bytes; > + > if (!is_guest_mode(vcpu)) > return 0; > > @@ -4480,7 +4482,7 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate) > return 0; > } > > -static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) > +static int svm_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram) > { > struct vcpu_svm *svm = to_svm(vcpu); > struct kvm_host_map map, map_save; > @@ -4488,6 +4490,8 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) > struct vmcb *vmcb12; > int ret; > > + const char *smstate = (const char *)smram; > + And here. > if (!guest_cpuid_has(vcpu, X86_FEATURE_LM)) > return 0; > E.g. 
this compiles cleanly on top --- arch/x86/kvm/emulate.c | 17 +++++++++-------- arch/x86/kvm/svm/svm.c | 4 ++-- arch/x86/kvm/x86.c | 7 ++++--- 3 files changed, 15 insertions(+), 13 deletions(-) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index dd0a08af1dd9..b2ef63cf6cff 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -2357,7 +2357,7 @@ static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags) desc->type = (flags >> 8) & 15; } -static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate, +static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const u8 *smstate, int n) { struct desc_struct desc; @@ -2379,7 +2379,7 @@ static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate, } #ifdef CONFIG_X86_64 -static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate, +static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const u8 *smstate, int n) { struct desc_struct desc; @@ -2446,7 +2446,7 @@ static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt, } static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, - const char *smstate) + const u8 *smstate) { struct desc_struct desc; struct desc_ptr dt; @@ -2507,7 +2507,7 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, #ifdef CONFIG_X86_64 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, - const char *smstate) + const u8 *smstate) { struct desc_struct desc; struct desc_ptr dt; @@ -2580,7 +2580,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, static int em_rsm(struct x86_emulate_ctxt *ctxt) { unsigned long cr0, cr4, efer; - const union kvm_smram smram; + union kvm_smram smram; u64 smbase; int ret; @@ -2591,7 +2591,8 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt) smbase = ctxt->ops->get_smbase(ctxt); - ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, (void *)&smram, sizeof(smram)); + ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, + smram.bytes, sizeof(smram)); if 
(ret != X86EMUL_CONTINUE) return X86EMUL_UNHANDLEABLE; @@ -2646,10 +2647,10 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt) #ifdef CONFIG_X86_64 if (emulator_has_longmode(ctxt)) - ret = rsm_load_state_64(ctxt, (const char *)&smram); + ret = rsm_load_state_64(ctxt, smram.bytes); else #endif - ret = rsm_load_state_32(ctxt, (const char *)&smram); + ret = rsm_load_state_32(ctxt, smram.bytes); if (ret != X86EMUL_CONTINUE) goto emulate_shutdown; diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 5d748b10c5be..ecf11c8a052e 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4439,7 +4439,7 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram) struct kvm_host_map map_save; int ret; - char *smstate = (char *)smram; + u8 *smstate = smram->bytes; if (!is_guest_mode(vcpu)) return 0; @@ -4490,7 +4490,7 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram) struct vmcb *vmcb12; int ret; - const char *smstate = (const char *)smram; + const u8 *smstate = smram->bytes; if (!guest_cpuid_has(vcpu, X86_FEATURE_LM)) return 0; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ca558674b07b..09268c2335a8 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -9985,10 +9985,10 @@ static void enter_smm(struct kvm_vcpu *vcpu) memset(smram.bytes, 0, sizeof(smram.bytes)); #ifdef CONFIG_X86_64 if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) - enter_smm_save_state_64(vcpu, (char *)&smram); + enter_smm_save_state_64(vcpu, smram.bytes); else #endif - enter_smm_save_state_32(vcpu, (char *)&smram); + enter_smm_save_state_32(vcpu, smram.bytes); /* * Give enter_smm() a chance to make ISA-specific changes to the vCPU @@ -9998,7 +9998,8 @@ static void enter_smm(struct kvm_vcpu *vcpu) static_call(kvm_x86_enter_smm)(vcpu, &smram); kvm_smm_changed(vcpu, true); - kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, &smram, sizeof(smram)); + kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, + smram.bytes,
sizeof(smram)); if (static_call(kvm_x86_get_nmi_mask)(vcpu)) vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; base-commit: 0708faef18ff51a2b2dba546961d843223331138 --