From: Lai Jiangshan <jiangshan.ljs@xxxxxxxxxxxx>

Check the mmu->sync_page pointer before calling it to catch any possible
mistake.

Signed-off-by: Lai Jiangshan <jiangshan.ljs@xxxxxxxxxxxx>
---
 arch/x86/kvm/mmu/mmu.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index ee2837ea18d4..69ab0d1bb0ec 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1940,7 +1940,7 @@ static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 	 * differs then the memslot lookup (SMM vs. non-SMM) will be bogus, the
 	 * reserved bits checks will be wrong, etc...
 	 */
-	if (WARN_ON_ONCE(sp->role.direct ||
+	if (WARN_ON_ONCE(sp->role.direct || !vcpu->arch.mmu->sync_page ||
 			 (sp->role.word ^ root_role.word) & ~sync_role_ign.word))
 		return false;
 
-- 
2.19.1.6.gb485710b
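
[Editor's note, not part of the patch: a minimal user-space sketch of the
pattern the change hardens. The names fake_mmu, try_sync and demo_sync_page
are invented for illustration; the point is only that the optional function
pointer is validated in the same guard that rejects other bad states, so the
indirect call that follows can never dereference NULL.]

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-in for the per-vCPU MMU; the real struct kvm_mmu is far larger. */
	struct fake_mmu {
		/* NULL when this MMU never syncs shadow pages. */
		bool (*sync_page)(int page_id);
	};

	static bool demo_sync_page(int page_id)
	{
		printf("synced page %d\n", page_id);
		return true;
	}

	/* Mirrors the patched check: refuse before the indirect call happens. */
	static bool try_sync(const struct fake_mmu *mmu, bool direct, int page_id)
	{
		if (direct || !mmu->sync_page) {
			fprintf(stderr, "refusing to sync page %d\n", page_id);
			return false;
		}
		return mmu->sync_page(page_id);
	}

	int main(void)
	{
		struct fake_mmu with_hook = { .sync_page = demo_sync_page };
		struct fake_mmu without_hook = { .sync_page = NULL };

		try_sync(&with_hook, false, 1);     /* calls the hook */
		try_sync(&without_hook, false, 2);  /* caught by the pointer check */
		return 0;
	}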