On Fri, Mar 12, 2021 at 03:18:58PM +0000, Steven Price wrote:
> diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> index 77cb2d28f2a4..b31b7a821f90 100644
> --- a/arch/arm64/kvm/mmu.c
> +++ b/arch/arm64/kvm/mmu.c
> @@ -879,6 +879,22 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>  	if (vma_pagesize == PAGE_SIZE && !force_pte)
>  		vma_pagesize = transparent_hugepage_adjust(memslot, hva,
>  							   &pfn, &fault_ipa);
> +
> +	if (fault_status != FSC_PERM && kvm_has_mte(kvm) && pfn_valid(pfn)) {

This pfn_valid() check may be problematic. Following commit eeb0753ba27b
("arm64/mm: Fix pfn_valid() for ZONE_DEVICE based memory"), it returns
true for ZONE_DEVICE memory, but such memory is allowed not to support
MTE. I now wonder if we can get a MAP_ANONYMOUS mapping of a ZONE_DEVICE
pfn even without virtualisation. (An untested sketch of skipping such
pages is at the end of this mail.)

> +		/*
> +		 * VM will be able to see the page's tags, so we must ensure
> +		 * they have been initialised. if PG_mte_tagged is set, tags
> +		 * have already been initialised.
> +		 */
> +		struct page *page = pfn_to_page(pfn);
> +		unsigned long i, nr_pages = vma_pagesize >> PAGE_SHIFT;
> +
> +		for (i = 0; i < nr_pages; i++, page++) {
> +			if (!test_and_set_bit(PG_mte_tagged, &page->flags))
> +				mte_clear_page_tags(page_address(page));
> +		}
> +	}
> +
>  	if (writable)
>  		prot |= KVM_PGTABLE_PROT_W;
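To make the above concrete, a minimal and untested sketch (assuming
is_zone_device_page() is a suitable test in this path) would skip the
tag initialisation for such pages rather than assume they are
MTE-capable:

	if (fault_status != FSC_PERM && kvm_has_mte(kvm) && pfn_valid(pfn)) {
		struct page *page = pfn_to_page(pfn);
		unsigned long i, nr_pages = vma_pagesize >> PAGE_SHIFT;

		for (i = 0; i < nr_pages; i++, page++) {
			/* ZONE_DEVICE memory is allowed not to support MTE */
			if (is_zone_device_page(page))
				continue;
			if (!test_and_set_bit(PG_mte_tagged, &page->flags))
				mte_clear_page_tags(page_address(page));
		}
	}

Whether silently skipping such pages is the right policy, as opposed to
refusing to map them into an MTE-enabled guest at all, is a separate
question.

-- 
Catalin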