The page requested for page-in can sometimes have transient
references, and hence cannot be migrated immediately. Retry a few
times before returning an error.

The H_SVM_PAGE_IN interface is enhanced to return H_BUSY if the page
is not in a migratable state.

Cc: Paul Mackerras <paulus@xxxxxxxxxx>
Cc: Benjamin Herrenschmidt <benh@xxxxxxxxxxxxxxxxxxx>
Cc: Michael Ellerman <mpe@xxxxxxxxxxxxxx>
Cc: Bharata B Rao <bharata@xxxxxxxxxxxxx>
Cc: Aneesh Kumar K.V <aneesh.kumar@xxxxxxxxxxxxx>
Cc: Sukadev Bhattiprolu <sukadev@xxxxxxxxxxxxxxxxxx>
Cc: Laurent Dufour <ldufour@xxxxxxxxxxxxx>
Cc: Thiago Jung Bauermann <bauerman@xxxxxxxxxxxxx>
Cc: David Gibson <david@xxxxxxxxxxxxxxxxxxxxx>
Cc: Claudio Carvalho <cclaudio@xxxxxxxxxxxxx>
Cc: kvm-ppc@xxxxxxxxxxxxxxx
Cc: linuxppc-dev@xxxxxxxxxxxxxxxx
Signed-off-by: Ram Pai <linuxram@xxxxxxxxxx>
---
 Documentation/powerpc/ultravisor.rst |  1 +
 arch/powerpc/kvm/book3s_hv_uvmem.c   | 54 +++++++++++++++++++++---------------
 2 files changed, 33 insertions(+), 22 deletions(-)

diff --git a/Documentation/powerpc/ultravisor.rst b/Documentation/powerpc/ultravisor.rst
index d98fc85..638d1a7 100644
--- a/Documentation/powerpc/ultravisor.rst
+++ b/Documentation/powerpc/ultravisor.rst
@@ -1034,6 +1034,7 @@ Return values
 	* H_PARAMETER	if ``guest_pa`` is invalid.
 	* H_P2		if ``flags`` is invalid.
 	* H_P3		if ``order`` of page is invalid.
+	* H_BUSY	if ``page`` is not in a state to be paged in

 Description
 ~~~~~~~~~~~
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index 12ed52a..c9bdef6 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -843,7 +843,7 @@ unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
 	struct vm_area_struct *vma;
 	int srcu_idx;
 	unsigned long gfn = gpa >> page_shift;
-	int ret;
+	int ret, repeat_count = REPEAT_COUNT;

 	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
 		return H_UNSUPPORTED;
@@ -857,34 +857,44 @@ unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
 	if (flags & H_PAGE_IN_SHARED)
 		return kvmppc_share_page(kvm, gpa, page_shift);

-	ret = H_PARAMETER;
 	srcu_idx = srcu_read_lock(&kvm->srcu);
-	down_write(&kvm->mm->mmap_sem);

-	start = gfn_to_hva(kvm, gfn);
-	if (kvm_is_error_hva(start))
-		goto out;
-
-	mutex_lock(&kvm->arch.uvmem_lock);
 	/* Fail the page-in request of an already paged-in page */
-	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
-		goto out_unlock;
+	mutex_lock(&kvm->arch.uvmem_lock);
+	ret = kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL);
+	mutex_unlock(&kvm->arch.uvmem_lock);
+	if (ret) {
+		srcu_read_unlock(&kvm->srcu, srcu_idx);
+		return H_PARAMETER;
+	}

-	end = start + (1UL << page_shift);
-	vma = find_vma_intersection(kvm->mm, start, end);
-	if (!vma || vma->vm_start > start || vma->vm_end < end)
-		goto out_unlock;
+	do {
+		ret = H_PARAMETER;
+		down_write(&kvm->mm->mmap_sem);

-	if (kvmppc_svm_migrate_page(vma, start, end, gpa, kvm, page_shift,
-				true))
-		goto out_unlock;
+		start = gfn_to_hva(kvm, gfn);
+		if (kvm_is_error_hva(start)) {
+			up_write(&kvm->mm->mmap_sem);
+			break;
+		}

-	ret = H_SUCCESS;
+		end = start + (1UL << page_shift);
+		vma = find_vma_intersection(kvm->mm, start, end);
+		if (!vma || vma->vm_start > start || vma->vm_end < end) {
+			up_write(&kvm->mm->mmap_sem);
+			break;
+		}
+
+		mutex_lock(&kvm->arch.uvmem_lock);
+		ret = kvmppc_svm_migrate_page(vma, start, end, gpa, kvm, page_shift, true);
+		mutex_unlock(&kvm->arch.uvmem_lock);
+
+		up_write(&kvm->mm->mmap_sem);
+	} while (ret == -2 && repeat_count--);	/* -2: transient references, retry */
+
+	if (ret == -2)
+		ret = H_BUSY;

-out_unlock:
-	mutex_unlock(&kvm->arch.uvmem_lock);
-out:
-	up_write(&kvm->mm->mmap_sem);
 	srcu_read_unlock(&kvm->srcu, srcu_idx);
 	return ret;
 }
--
1.8.3.1
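
A note for reviewers: below is a minimal caller-side sketch of how the new
H_BUSY return is meant to be consumed. do_page_in_hcall(),
page_in_with_retry(), and PAGE_IN_RETRIES are hypothetical placeholders, not
part of this patch or of any existing API; only the retry-on-H_BUSY contract
comes from the patch itself. The H_SUCCESS (0) and H_BUSY (1) values match
arch/powerpc/include/asm/hvcall.h.

/* Hypothetical caller-side sketch; not part of this patch. */
#define H_SUCCESS	0	/* matches asm/hvcall.h */
#define H_BUSY		1	/* matches asm/hvcall.h */

#define PAGE_IN_RETRIES	10	/* illustrative retry bound */

/* Stub standing in for whatever wrapper issues H_SVM_PAGE_IN. */
static long do_page_in_hcall(unsigned long gpa, unsigned long flags,
			     unsigned long order)
{
	(void)gpa; (void)flags; (void)order;
	return H_SUCCESS;
}

static long page_in_with_retry(unsigned long gpa, unsigned long flags,
			       unsigned long order)
{
	long rc;
	int tries = PAGE_IN_RETRIES;

	/* H_BUSY now means "page has transient references, try again". */
	do {
		rc = do_page_in_hcall(gpa, flags, order);
	} while (rc == H_BUSY && --tries);

	return rc;
}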