After reverting and restoring the fast TLB invalidation patch series, the mmio_cached field was not removed. Hence an unused field is left in struct kvm_mmu_page. Cc: Sean Christopherson <seanjc@xxxxxxxxxx> Signed-off-by: Jia He <justin.he@xxxxxxx> --- arch/x86/kvm/mmu/mmu_internal.h | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index 35567293c1fd..3e6f21c1871a 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -37,7 +37,6 @@ struct kvm_mmu_page { bool unsync; u8 mmu_valid_gen; - bool mmio_cached; bool lpage_disallowed; /* Can't be replaced by an equiv large page */ /* -- 2.17.1