From: David Stevens <stevensd@xxxxxxxxxxxx>

Convert usages of the deprecated gfn_to_pfn functions to the new
gfn_to_pfn_page functions.

Signed-off-by: David Stevens <stevensd@xxxxxxxxxxxx>
---
 arch/arm64/kvm/mmu.c | 26 ++++++++++++++++----------
 1 file changed, 16 insertions(+), 10 deletions(-)

diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 0625bf2353c2..56859999b211 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -782,7 +782,7 @@ static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
 static unsigned long
 transparent_hugepage_adjust(struct kvm_memory_slot *memslot,
 			    unsigned long hva, kvm_pfn_t *pfnp,
-			    phys_addr_t *ipap)
+			    struct page **page, phys_addr_t *ipap)
 {
 	kvm_pfn_t pfn = *pfnp;
 
@@ -791,7 +791,7 @@ transparent_hugepage_adjust(struct kvm_memory_slot *memslot,
 	 * sure that the HVA and IPA are sufficiently aligned and that the
 	 * block map is contained within the memslot.
 	 */
-	if (kvm_is_transparent_hugepage(pfn) &&
+	if (*page && kvm_is_transparent_hugepage(pfn) &&
 	    fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
 		/*
 		 * The address we faulted on is backed by a transparent huge
@@ -812,10 +812,11 @@ transparent_hugepage_adjust(struct kvm_memory_slot *memslot,
 		 * page accordingly.
 		 */
 		*ipap &= PMD_MASK;
-		kvm_release_pfn_clean(pfn);
+		put_page(*page);
 		pfn &= ~(PTRS_PER_PMD - 1);
-		kvm_get_pfn(pfn);
 		*pfnp = pfn;
+		*page = pfn_to_page(pfn);
+		get_page(*page);
 
 		return PMD_SIZE;
 	}
@@ -908,6 +909,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	short vma_shift;
 	gfn_t gfn;
 	kvm_pfn_t pfn;
+	struct page *page;
 	bool logging_active = memslot_is_logging(memslot);
 	unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu);
 	unsigned long vma_pagesize, fault_granule;
@@ -1009,8 +1011,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	 */
 	smp_rmb();
 
-	pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
-				   write_fault, &writable, NULL);
+	pfn = __gfn_to_pfn_page_memslot(memslot, gfn, false, NULL,
+					write_fault, &writable, NULL, &page);
 	if (pfn == KVM_PFN_ERR_HWPOISON) {
 		kvm_send_hwpoison_signal(hva, vma_shift);
 		return 0;
@@ -1052,7 +1054,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	 */
 	if (vma_pagesize == PAGE_SIZE && !(force_pte || device))
 		vma_pagesize = transparent_hugepage_adjust(memslot, hva,
-							   &pfn, &fault_ipa);
+							   &pfn, &page,
+							   &fault_ipa);
 
 	if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) {
 		/* Check the VMM hasn't introduced a new VM_SHARED VMA */
@@ -1090,14 +1093,17 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 
 	/* Mark the page dirty only if the fault is handled successfully */
 	if (writable && !ret) {
-		kvm_set_pfn_dirty(pfn);
+		if (page)
+			kvm_set_pfn_dirty(pfn);
 		mark_page_dirty_in_slot(kvm, memslot, gfn);
 	}
 
 out_unlock:
 	spin_unlock(&kvm->mmu_lock);
-	kvm_set_pfn_accessed(pfn);
-	kvm_release_pfn_clean(pfn);
+	if (page) {
+		kvm_set_pfn_accessed(pfn);
+		put_page(page);
+	}
 	return ret != -EAGAIN ? ret : 0;
 }
 
-- 
2.33.0.rc2.250.ged5fa647cd-goog
_______________________________________________
kvmarm mailing list
kvmarm@xxxxxxxxxxxxxxxxxxxxx
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
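
A note on the pattern this conversion establishes, for anyone updating
other callers of the deprecated API: __gfn_to_pfn_page_memslot() returns
the backing struct page through an out parameter, and that page is NULL
when the pfn is not backed by a struct page (e.g. some VM_PFNMAP/device
mappings), so every refcount and page-flag operation must be guarded by
a NULL check. The sketch below is illustrative only and not part of the
patch; example_fault_path() is a hypothetical helper that assumes the
__gfn_to_pfn_page_memslot() signature used in the diff above.

/* Illustrative caller-side sketch of the gfn_to_pfn_page pattern. */
static int example_fault_path(struct kvm_memory_slot *memslot, gfn_t gfn,
			      bool write_fault)
{
	struct page *page;
	bool writable;
	kvm_pfn_t pfn;

	pfn = __gfn_to_pfn_page_memslot(memslot, gfn, false, NULL,
					write_fault, &writable, NULL, &page);
	if (is_error_noslot_pfn(pfn))
		return -EFAULT;

	/* ... install the mapping under the appropriate lock ... */

	if (page) {
		/* Only a pfn with a struct page holds a reference. */
		if (writable)
			kvm_set_pfn_dirty(pfn);
		kvm_set_pfn_accessed(pfn);
		put_page(page);
	}
	return 0;
}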