The level and direct fields of the CPU role can act as a marker for
validity instead: exactly one of them is guaranteed to be nonzero, so
a zero value for both means that the role is invalid and the MMU
properties will be computed again.

Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
---
 arch/x86/include/asm/kvm_host.h | 4 +---
 arch/x86/kvm/mmu/mmu.c          | 8 +++-----
 2 files changed, 4 insertions(+), 8 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 52ceeadbed28..1356959a2fe1 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -347,8 +347,7 @@ union kvm_mmu_page_role {
  * kvm_mmu_extended_role complements kvm_mmu_page_role, tracking properties
  * relevant to the current MMU configuration. When loading CR0, CR4, or EFER,
  * including on nested transitions, if nothing in the full role changes then
- * MMU re-configuration can be skipped. @valid bit is set on first usage so we
- * don't treat all-zero structure as valid data.
+ * MMU re-configuration can be skipped.
  *
  * The properties that are tracked in the extended role but not the page role
  * are for things that either (a) do not affect the validity of the shadow page
@@ -365,7 +364,6 @@ union kvm_mmu_page_role {
 union kvm_mmu_extended_role {
 	u32 word;
 	struct {
-		unsigned int valid:1;
 		unsigned int execonly:1;
 		unsigned int cr4_pse:1;
 		unsigned int cr4_pke:1;
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index cf8a41675a79..33827d1e3d5a 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4699,7 +4699,6 @@ kvm_calc_cpu_role(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *regs)
 	role.base.access = ACC_ALL;
 	role.base.smm = is_smm(vcpu);
 	role.base.guest_mode = is_guest_mode(vcpu);
-	role.ext.valid = 1;
 
 	if (!____is_cr0_pg(regs)) {
 		role.base.direct = 1;
@@ -4909,7 +4908,6 @@ kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
 
 	role.ext.word = 0;
 	role.ext.execonly = execonly;
-	role.ext.valid = 1;
 
 	return role;
 }
@@ -5030,9 +5028,9 @@ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
 	vcpu->arch.root_mmu.root_role.word = 0;
 	vcpu->arch.guest_mmu.root_role.word = 0;
 	vcpu->arch.nested_mmu.root_role.word = 0;
-	vcpu->arch.root_mmu.cpu_role.ext.valid = 0;
-	vcpu->arch.guest_mmu.cpu_role.ext.valid = 0;
-	vcpu->arch.nested_mmu.cpu_role.ext.valid = 0;
+	vcpu->arch.root_mmu.cpu_role.as_u64 = 0;
+	vcpu->arch.guest_mmu.cpu_role.as_u64 = 0;
+	vcpu->arch.nested_mmu.cpu_role.as_u64 = 0;
 	kvm_mmu_reset_context(vcpu);
 
 	/*
-- 
2.31.1
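
[Not part of the patch] For readers unfamiliar with the role-word trick, below
is a small standalone sketch of the invariant the commit message relies on. All
names here (toy_cpu_role, toy_calc_cpu_role) are made up for illustration and do
not exist in the kernel: a computed role always sets either level (paging on) or
direct (paging off), so the all-zero value written after a CPUID update can never
compare equal to a freshly computed role, and the MMU gets reconfigured without
needing a dedicated valid bit.

/* Standalone sketch with hypothetical types; only the invariant matters:
 * a computed role always has level != 0 or direct != 0, so the all-zero
 * "invalidated" value never matches a computed role.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

union toy_cpu_role {
	uint64_t as_u64;
	struct {
		unsigned int level:4;	/* nonzero when guest paging is on  */
		unsigned int direct:1;	/* nonzero when guest paging is off */
	} base;
};

static union toy_cpu_role toy_calc_cpu_role(bool cr0_pg)
{
	union toy_cpu_role role = { .as_u64 = 0 };

	if (cr0_pg)
		role.base.level = 4;	/* e.g. 4-level paging */
	else
		role.base.direct = 1;
	return role;
}

int main(void)
{
	union toy_cpu_role invalidated = { .as_u64 = 0 };	/* after CPUID update */
	union toy_cpu_role fresh = toy_calc_cpu_role(true);

	/* The zeroed role never compares equal, forcing reconfiguration. */
	printf("reconfigure needed: %d\n", invalidated.as_u64 != fresh.as_u64);
	return 0;
}

With that invariant, zeroing cpu_role.as_u64 (as the last hunk does) is enough to
guarantee a mismatch the next time the role is computed and compared.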