On Wed, 2022-12-28 at 11:37 +0800, Binbin Wu wrote:
> On 12/9/2022 12:45 PM, Robert Hoo wrote:
> > kvm_vcpu_arch::cr4_guest_owned_bits and
> > kvm_vcpu_arch::cr4_guest_rsvd_bits
> > look confusing. Rename the latter to cr4_host_rsvd_bits, because it
> > in fact describes the effective host-reserved CR4 bits from the
> > vCPU's perspective.
>
> IMO, the current name cr4_guest_rsvd_bits is OK because it shows that
> these bits are reserved bits from the point of view of the guest.

Actually, it is cr4_guest_owned_bits that is from the perspective of the
guest.

cr4_guest_owned_bits and cr4_guest_rsvd_bits together look quite
confusing. The first reads as "the guest owns these CR4 bits"; the
latter reads as "the guest reserved these bits". My first reaction on
seeing them was: hey, what's the difference between "guest owns" and
"guest reserves"?

Then, looking at their calculations, we find that cr4_guest_owned_bits
is derived from cr4_guest_rsvd_bits, roughly by a "~" (complement)
relationship:

	vcpu->arch.cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS &
					  ~vcpu->arch.cr4_guest_rsvd_bits;

The bits set in cr4_guest_owned_bits are owned by the guest; the bits
set in cr4_guest_rsvd_bits are owned by the HOST. That naturally
explains the "~" in the calculation.

The fuller hierarchical relationships among these macros/structure
members surrounding CR4 virtualization can be found below.

(P.S. A bit more background: this isn't the first time I've dug into
this code. I worked out the relationship long ago, but coming back to it
this time, the names confused me again. So I'd rather cook this patch
now and spare myself the next time ;-))

> Changing to *host* is OK, but it doesn't seem easier to understand.

Perhaps "host_reserved" isn't the best name; other suggestions are
welcome. But guest_owned vs. guest_rsvd is really confusing, or even
misleading.

> > Meanwhile, rename other related variables/macros to be more
> > descriptive:
> > * CR4_RESERVED_BITS --> CR4_HOST_RESERVED_BITS, which describes the
> >   host bare-metal CR4 reserved bits.
> >
> > * cr4_reserved_bits --> cr4_kvm_reserved_bits, which describes
> >   CR4_HOST_RESERVED_BITS + !kvm_cap_has() = the KVM-level CR4
> >   reserved bits.
> >
> > * __cr4_reserved_bits() --> __cr4_calc_reserved_bits(), which
> >   calculates the effective CR4 reserved bits at the KVM or VM level,
> >   according to the x_cpu_has() predicate passed in.
> >
> > Thus, with these renames, the hierarchical relations among those
> > reserved CR4 bits are clearer.
> >
> > Just renames, no functional changes intended.
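To make the hierarchy described above concrete, here is a minimal
standalone sketch of the mask relationship (illustration only, not part
of the patch: the mask values below are toy placeholders; only the
derivation mirrors the kernel code quoted above):

	#include <stdio.h>
	#include <stdint.h>

	/* Toy stand-ins for the real masks in kvm_host.h. */
	#define CR4_HOST_RESERVED_BITS		0xffffffff00000000ULL
	#define KVM_POSSIBLE_CR4_GUEST_BITS	0x00000000000000f0ULL

	int main(void)
	{
		/* Per-vCPU reserved bits: the host-reserved mask plus a
		 * bit for a feature this guest's CPUID doesn't expose. */
		uint64_t cr4_host_rsvd_bits =
			CR4_HOST_RESERVED_BITS | (1ULL << 4);

		/* The guest owns whatever could be passed through and
		 * is not reserved: the "~" relationship. */
		uint64_t cr4_guest_owned_bits =
			KVM_POSSIBLE_CR4_GUEST_BITS & ~cr4_host_rsvd_bits;

		/* Prints 0xe0: bit 4 was reclaimed by the host. */
		printf("guest-owned CR4 bits: %#llx\n",
		       (unsigned long long)cr4_guest_owned_bits);
		return 0;
	}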
> >
> > Signed-off-by: Robert Hoo <robert.hu@xxxxxxxxxxxxxxx>
> > ---
> >  arch/x86/include/asm/kvm_host.h |  4 ++--
> >  arch/x86/kvm/cpuid.c            |  4 ++--
> >  arch/x86/kvm/vmx/vmx.c          |  2 +-
> >  arch/x86/kvm/x86.c              | 12 ++++++------
> >  arch/x86/kvm/x86.h              |  4 ++--
> >  5 files changed, 13 insertions(+), 13 deletions(-)
> >
> > diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> > index f05ebaa26f0f..3c736e00b6b1 100644
> > --- a/arch/x86/include/asm/kvm_host.h
> > +++ b/arch/x86/include/asm/kvm_host.h
> > @@ -114,7 +114,7 @@
> >  			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
> >  			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
> >
> > -#define CR4_RESERVED_BITS                                               \
> > +#define CR4_HOST_RESERVED_BITS                                          \
> >  	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
> >  			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
> >  			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
> > @@ -671,7 +671,7 @@ struct kvm_vcpu_arch {
> >  	unsigned long cr3;
> >  	unsigned long cr4;
> >  	unsigned long cr4_guest_owned_bits;
> > -	unsigned long cr4_guest_rsvd_bits;
> > +	unsigned long cr4_host_rsvd_bits;
> >  	unsigned long cr8;
> >  	u32 host_pkru;
> >  	u32 pkru;
> > diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
> > index c92c49a0b35b..01e2b93ef563 100644
> > --- a/arch/x86/kvm/cpuid.c
> > +++ b/arch/x86/kvm/cpuid.c
> > @@ -352,8 +352,8 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
> >  	vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
> >
> >  	kvm_pmu_refresh(vcpu);
> > -	vcpu->arch.cr4_guest_rsvd_bits =
> > -	    __cr4_reserved_bits(guest_cpuid_has, vcpu);
> > +	vcpu->arch.cr4_host_rsvd_bits =
> > +	    __cr4_calc_reserved_bits(guest_cpuid_has, vcpu);
> >
> >  	kvm_hv_set_cpuid(vcpu, kvm_cpuid_has_hyperv(vcpu->arch.cpuid_entries,
> >  						    vcpu->arch.cpuid_nent));
> > diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> > index 63247c57c72c..cfa06c7c062e 100644
> > --- a/arch/x86/kvm/vmx/vmx.c
> > +++ b/arch/x86/kvm/vmx/vmx.c
> > @@ -4250,7 +4250,7 @@ void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
> >  	struct kvm_vcpu *vcpu = &vmx->vcpu;
> >
> >  	vcpu->arch.cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS &
> > -					  ~vcpu->arch.cr4_guest_rsvd_bits;
> > +					  ~vcpu->arch.cr4_host_rsvd_bits;
> >  	if (!enable_ept) {
> >  		vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_TLBFLUSH_BITS;
> >  		vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_PDPTR_BITS;
> > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> > index 69227f77b201..eb1f2c20e19e 100644
> > --- a/arch/x86/kvm/x86.c
> > +++ b/arch/x86/kvm/x86.c
> > @@ -108,7 +108,7 @@ u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
> >  static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
> >  #endif
> >
> > -static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;
> > +static u64 __read_mostly cr4_kvm_reserved_bits = CR4_HOST_RESERVED_BITS;
> >
> >  #define KVM_EXIT_HYPERCALL_VALID_MASK (1 << KVM_HC_MAP_GPA_RANGE)
> >
> > @@ -1102,10 +1102,10 @@ EXPORT_SYMBOL_GPL(kvm_emulate_xsetbv);
> >
> >  bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
> >  {
> > -	if (cr4 & cr4_reserved_bits)
> > +	if (cr4 & cr4_kvm_reserved_bits)
> >  		return false;
> >
> > -	if (cr4 & vcpu->arch.cr4_guest_rsvd_bits)
> > +	if (cr4 & vcpu->arch.cr4_host_rsvd_bits)
> >  		return false;
> >
> >  	return true;
> > @@ -12290,7 +12290,7 @@ int kvm_arch_hardware_setup(void *opaque)
> >  		kvm_caps.supported_xss = 0;
> >
> >  #define __kvm_cpu_cap_has(UNUSED_, f) kvm_cpu_cap_has(f)
> > -	cr4_reserved_bits = __cr4_reserved_bits(__kvm_cpu_cap_has, UNUSED_);
> > +	cr4_kvm_reserved_bits = __cr4_calc_reserved_bits(__kvm_cpu_cap_has, UNUSED_);
> >  #undef __kvm_cpu_cap_has
> >
> >  	if (kvm_caps.has_tsc_control) {
> > @@ -12323,8 +12323,8 @@ int kvm_arch_check_processor_compat(void *opaque)
> >
> >  	WARN_ON(!irqs_disabled());
> >
> > -	if (__cr4_reserved_bits(cpu_has, c) !=
> > -	    __cr4_reserved_bits(cpu_has, &boot_cpu_data))
> > +	if (__cr4_calc_reserved_bits(cpu_has, c) !=
> > +	    __cr4_calc_reserved_bits(cpu_has, &boot_cpu_data))
> >  		return -EIO;
> >
> >  	return ops->check_processor_compatibility();
> > diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
> > index 829d3134c1eb..d92e580768e5 100644
> > --- a/arch/x86/kvm/x86.h
> > +++ b/arch/x86/kvm/x86.h
> > @@ -452,9 +452,9 @@ bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);
> >  #define  KVM_MSR_RET_INVALID	2	/* in-kernel MSR emulation #GP condition */
> >  #define  KVM_MSR_RET_FILTERED	3	/* #GP due to userspace MSR filter */
> >
> > -#define __cr4_reserved_bits(__cpu_has, __c)             \
> > +#define __cr4_calc_reserved_bits(__cpu_has, __c)        \
> >  ({                                                      \
> > -	u64 __reserved_bits = CR4_RESERVED_BITS;        \
> > +	u64 __reserved_bits = CR4_HOST_RESERVED_BITS;   \
> >  							\
> >  	if (!__cpu_has(__c, X86_FEATURE_XSAVE))         \
> >  		__reserved_bits |= X86_CR4_OSXSAVE;     \
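A side note on how the levels compose: the same
__cr4_calc_reserved_bits() body is instantiated with a different
__cpu_has predicate at each level -- __kvm_cpu_cap_has() for the
KVM-wide mask, guest_cpuid_has() for the per-vCPU mask, and cpu_has()
for the processor-compatibility check. A minimal standalone sketch of
that pattern (toy values, hypothetical predicate names, GCC statement
expression as in the kernel macro; not from the patch):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define X86_CR4_OSXSAVE   (1ULL << 18)	/* real CR4 bit position */
	#define X86_FEATURE_XSAVE 1		/* toy feature index */

	/* Same shape as __cr4_calc_reserved_bits(): start from a
	 * host-reserved base, then also reserve the CR4 bit of every
	 * feature the supplied predicate reports as absent. */
	#define calc_reserved_bits(__has, __c)				\
	({								\
		uint64_t __rsvd = 0xffffffff00000000ULL; /* toy base */ \
		if (!__has(__c, X86_FEATURE_XSAVE))			\
			__rsvd |= X86_CR4_OSXSAVE;			\
		__rsvd;							\
	})

	/* Hypothetical predicates standing in for kvm_cpu_cap_has()/
	 * guest_cpuid_has(): KVM supports XSAVE, this guest does not. */
	static bool kvm_has(void *c, int f)   { (void)c; (void)f; return true; }
	static bool guest_has(void *c, int f) { (void)c; (void)f; return false; }

	int main(void)
	{
		/* KVM level keeps OSXSAVE usable; the vCPU level reserves it. */
		printf("kvm-level rsvd:  %#llx\n",
		       (unsigned long long)calc_reserved_bits(kvm_has, NULL));
		printf("vcpu-level rsvd: %#llx\n",
		       (unsigned long long)calc_reserved_bits(guest_has, NULL));
		return 0;
	}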