On 2024-08-15 06:50:04, kernel test robot wrote:
> sparse warnings: (new ones prefixed by >>)
> >> arch/x86/kvm/mmu/tdp_mmu.c:847:21: sparse: sparse: incompatible types in comparison expression (different address spaces):
>    arch/x86/kvm/mmu/tdp_mmu.c:847:21: sparse:    unsigned long long [usertype] *
>    arch/x86/kvm/mmu/tdp_mmu.c:847:21: sparse:    unsigned long long [noderef] [usertype] __rcu *
>    arch/x86/kvm/mmu/tdp_mmu.c: note: in included file (through include/linux/rbtree.h, include/linux/mm_types.h, include/linux/mmzone.h, ...):
>    include/linux/rcupdate.h:812:25: sparse: sparse: context imbalance in '__tdp_mmu_zap_root' - unexpected unlock
>    arch/x86/kvm/mmu/tdp_mmu.c:1447:33: sparse: sparse: context imbalance in 'tdp_mmu_split_huge_pages_root' - unexpected unlock
>
> vim +847 arch/x86/kvm/mmu/tdp_mmu.c
>
>    819	
>    820	static bool tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
>    821	{
>    822		struct tdp_iter iter = {};
>    823	
>    824		lockdep_assert_held_read(&kvm->mmu_lock);
>    825	
>    826		/*
>    827		 * This helper intentionally doesn't allow zapping a root shadow page,
>    828		 * which doesn't have a parent page table and thus no associated entry.
>    829		 */
>    830		if (WARN_ON_ONCE(!sp->ptep))
>    831			return false;
>    832	
>    833		iter.old_spte = kvm_tdp_mmu_read_spte(sp->ptep);
>    834		iter.sptep = sp->ptep;
>    835		iter.level = sp->role.level + 1;
>    836		iter.gfn = sp->gfn;
>    837		iter.as_id = kvm_mmu_page_as_id(sp);
>    838	
>    839	retry:
>    840		/*
>    841		 * Since mmu_lock is held in read mode, it's possible to race with
>    842		 * another CPU which can remove sp from the page table hierarchy.
>    843		 *
>    844		 * No need to re-read iter.old_spte as tdp_mmu_set_spte_atomic() will
>    845		 * update it in the case of failure.
>    846		 */
>  > 847		if (sp->spt != spte_to_child_pt(iter.old_spte, iter.level))

Hmm, I need to wrap spte_to_child_pt() with rcu_access_pointer() before
comparing it to sp->spt.  The following patch makes the sparse warning go
away.

diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 7c7d207ee590..7d5dbfe48c4b 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -820,6 +820,7 @@ static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
 static bool tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	struct tdp_iter iter = {};
+	tdp_ptep_t pt;
 
 	lockdep_assert_held_read(&kvm->mmu_lock);
 
@@ -844,7 +845,8 @@ static bool tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
 	 * No need to re-read iter.old_spte as tdp_mmu_set_spte_atomic() will
 	 * update it in the case of failure.
 	 */
-	if (sp->spt != spte_to_child_pt(iter.old_spte, iter.level))
+	pt = spte_to_child_pt(iter.old_spte, iter.level);
+	if (sp->spt != rcu_access_pointer(pt))
 		return false;
 
 	if (tdp_mmu_set_spte_atomic(kvm, &iter, SHADOW_NONPRESENT_VALUE))
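
For anyone wondering why rcu_access_pointer() is the right tool here rather
than rcu_dereference(): the comparison only looks at the pointer *value* and
never dereferences it, so no RCU read-side critical section is needed; all
that has to happen is stripping sparse's __rcu address-space annotation.
Below is a minimal standalone sketch of the mechanism.  It is not kernel
code: the __rcu, __force and rcu_access_pointer() definitions are simplified
stand-ins for the real ones in include/linux/compiler_types.h and
include/linux/rcupdate.h (the real rcu_access_pointer() also fetches the
pointer with READ_ONCE()), and the file name model.c is made up.

/*
 * model.c - simplified model of sparse's __rcu address-space checking.
 *
 * Build/run normally:   gcc model.c && ./a.out
 * Reproduce the warning: sparse -DBROKEN model.c
 * (sparse defines __CHECKER__ itself, enabling the annotations.)
 */
#include <stdio.h>

#ifdef __CHECKER__
#define __rcu	__attribute__((noderef, address_space(__rcu)))
#define __force	__attribute__((force))
#else
#define __rcu
#define __force
#endif

/* Strip the __rcu annotation with a __force cast; no dereference implied. */
#define rcu_access_pointer(p)	((typeof(*(p)) __force *)(p))

static unsigned long long pt;			/* stands in for the child page table */
static unsigned long long __rcu *spte_child =	/* annotated like tdp_ptep_t (u64 __rcu *) */
	(unsigned long long __force __rcu *)&pt;

int main(void)
{
	unsigned long long *spt = &pt;		/* plain pointer, like sp->spt */

#ifdef BROKEN
	/* sparse: incompatible types in comparison expression (different address spaces) */
	if (spt == spte_child)
#else
	/* Only the pointer value is compared, so the __force cast is enough. */
	if (spt == rcu_access_pointer(spte_child))
#endif
		puts("sp is still in the paging hierarchy");

	return 0;
}

With plain gcc both variants build and run identically, since the annotations
expand to nothing without __CHECKER__; under sparse the BROKEN variant emits
the same "different address spaces" complaint as the robot's report, which is
exactly what the rcu_access_pointer() wrapper in the patch above silences.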