On Fri, Jul 22, 2022 at 09:38:47PM +0000, Sean Christopherson wrote: > On Fri, Jul 22, 2022, Nathan Chancellor wrote: > > On Fri, Jul 22, 2022 at 06:33:27PM +0000, Sean Christopherson wrote: > > > On Thu, Jul 14, 2022, Vitaly Kuznetsov wrote: > > > > diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h > > > > index 286c88e285ea..89eaab3495a6 100644 > > > > --- a/arch/x86/kvm/vmx/vmx.h > > > > +++ b/arch/x86/kvm/vmx/vmx.h > > > > @@ -467,6 +467,113 @@ static inline u8 vmx_get_rvi(void) > > > > return vmcs_read16(GUEST_INTR_STATUS) & 0xff; > > > > } > > > > > > > > +#define __KVM_REQ_VMX_VM_ENTRY_CONTROLS \ > > > > + (VM_ENTRY_LOAD_DEBUG_CONTROLS) > > > > +#ifdef CONFIG_X86_64 > > > > + #define KVM_REQ_VMX_VM_ENTRY_CONTROLS \ > > > > + (__KVM_REQ_VMX_VM_ENTRY_CONTROLS | \ > > > > + VM_ENTRY_IA32E_MODE) > > > > > > This breaks 32-bit builds, but at least we know the assert works! > > > > > > vmx_set_efer() toggles VM_ENTRY_IA32E_MODE without a CONFIG_X86_64 guard. That > > > should be easy enough to fix since KVM should never allow EFER_LMA. Compile > > > tested patch at the bottom. > > > > > > More problematic is that clang-13 doesn't like the new asserts, and even worse gives > > > a very cryptic error. I don't have bandwidth to look into this at the moment, and > > > probably won't next week either. > > > > > > ERROR: modpost: "__compiletime_assert_533" [arch/x86/kvm/kvm-intel.ko] undefined! > > > ERROR: modpost: "__compiletime_assert_531" [arch/x86/kvm/kvm-intel.ko] undefined! > > > ERROR: modpost: "__compiletime_assert_532" [arch/x86/kvm/kvm-intel.ko] undefined! > > > ERROR: modpost: "__compiletime_assert_530" [arch/x86/kvm/kvm-intel.ko] undefined! > > > make[2]: *** [scripts/Makefile.modpost:128: modules-only.symvers] Error 1 > > > make[1]: *** [Makefile:1753: modules] Error 2 > > > make[1]: *** Waiting for unfinished jobs.... > > > > clang-14 added support for the error and warning attributes, which makes > > the BUILD_BUG_ON failures look like GCC. 
With allmodconfig, this > > becomes: > > ... > > > As you mentioned in the other comment on this patch, the 'inline' > > keyword should be '__always_inline' in the BUILD_CONTROLS_SHADOW macro > > and a couple of other functions need it for BUILD_BUG_ON to see the > > value all the way through the call chain. The following diff resolves > > those errors for me, hopefully it is useful! > > Thanks a ton! Y'all are like a benevolent Beetlejuice, one needs only to mention > "clang" and you show up and solve the problem :-) Praise be to the mighty lei and its filters :) FWIW, if you ever have a question about clang's behavior or any errors, please feel free to cc llvm@xxxxxxxxxxxxxxx, we're always happy to look into things so that clang stays well supported upstream (and thank you for verifying KVM changes with it!). Cheers, Nathan > > diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c > > index 4ce7ed835e06..b97ed63ece56 100644 > > --- a/arch/x86/kvm/vmx/vmx.c > > +++ b/arch/x86/kvm/vmx/vmx.c > > @@ -790,7 +790,7 @@ static bool msr_write_intercepted(struct vcpu_vmx *vmx, u32 msr) > > MSR_IA32_SPEC_CTRL); > > } > > > > -static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx, > > +static __always_inline void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx, > > unsigned long entry, unsigned long exit) > > { > > vm_entry_controls_clearbit(vmx, entry); > > @@ -848,7 +848,7 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr) > > vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr); > > } > > > > -static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx, > > +static __always_inline void add_atomic_switch_msr_special(struct vcpu_vmx *vmx, > > unsigned long entry, unsigned long exit, > > unsigned long guest_val_vmcs, unsigned long host_val_vmcs, > > u64 guest_val, u64 host_val) > > diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h > > index 758f80c41beb..acefa5b5e1b9 100644 > > --- a/arch/x86/kvm/vmx/vmx.h > > +++ 
b/arch/x86/kvm/vmx/vmx.h > > @@ -597,12 +597,12 @@ static inline u##bits lname##_controls_get(struct vcpu_vmx *vmx) \ > > { \ > > return __##lname##_controls_get(vmx->loaded_vmcs); \ > > } \ > > -static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u##bits val) \ > > +static __always_inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u##bits val) \ > > { \ > > BUILD_BUG_ON(!(val & (KVM_REQ_VMX_##uname | KVM_OPT_VMX_##uname))); \ > > lname##_controls_set(vmx, lname##_controls_get(vmx) | val); \ > > } \ > > -static inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u##bits val) \ > > +static __always_inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u##bits val) \ > > { \ > > BUILD_BUG_ON(!(val & (KVM_REQ_VMX_##uname | KVM_OPT_VMX_##uname))); \ > > lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val); \ > > > > > > +#else > > > > + #define KVM_REQ_VMX_VM_ENTRY_CONTROLS \ > > > > + __KVM_REQ_VMX_VM_ENTRY_CONTROLS > > > > +#endif