---
arch/powerpc/kvm/book3s_64_vio_hv.c | 92 ++++++++++++++++++++++++++++++-------
1 file changed, 76 insertions(+), 16 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index 44be73e..af155f6 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -180,6 +180,44 @@ long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
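+/*
+ * Return the mm context of the vcpu's run task, or NULL if the task
+ * or its mm has already gone away.
+ */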
+static mm_context_t *kvmppc_mm_context(struct kvm_vcpu *vcpu)
+{
+ struct task_struct *task;
+
+ task = vcpu->arch.run_task;
+ if (unlikely(!task || !task->mm))
+ return NULL;
+
+ return &task->mm->context;
+}
+
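+/* Whether the vcpu's mm has any memory pre-registered for IOMMU. */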
+static inline bool kvmppc_preregistered(struct kvm_vcpu *vcpu)
+{
+ mm_context_t *mm = kvmppc_mm_context(vcpu);
+
+ if (unlikely(!mm))
+ return false;
+
+ return mm_iommu_preregistered(mm);
+}
+
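+/* Real mode lookup of a pre-registered region covering ua..ua + size. */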
+static struct mm_iommu_table_group_mem_t *kvmppc_rm_iommu_lookup(
+ struct kvm_vcpu *vcpu, unsigned long ua, unsigned long size)
+{
+ mm_context_t *mm = kvmppc_mm_context(vcpu);
+
+ if (unlikely(!mm))
+ return NULL;
+
+ return mm_iommu_lookup_rm(mm, ua, size);
+}
+
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
unsigned long ioba, unsigned long tce)
{
@@ -261,23 +293,44 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
if (ret != H_SUCCESS)
return ret;
- if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
- return H_TOO_HARD;
+ if (kvmppc_preregistered(vcpu)) {
+ /*
+ * We get here if guest memory was pre-registered, which is
+ * normally the VFIO case, so the gpa->hpa translation does
+ * not depend on the HPT.
+ */
+ struct mm_iommu_table_group_mem_t *mem;
- rmap = (void *) vmalloc_to_phys(rmap);
+ if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL))
+ return H_TOO_HARD;
- /*
- * Synchronize with the MMU notifier callbacks in
- * book3s_64_mmu_hv.c (kvm_unmap_hva_hv etc.).
- * While we have the rmap lock, code running on other CPUs
- * cannot finish unmapping the host real page that backs
- * this guest real page, so we are OK to access the host
- * real page.
- */
- lock_rmap(rmap);
- if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
- ret = H_TOO_HARD;
- goto unlock_exit;
+ mem = kvmppc_rm_iommu_lookup(vcpu, ua, IOMMU_PAGE_SIZE_4K);
+ if (!mem || mm_iommu_rm_ua_to_hpa(mem, ua, &tces))
+ return H_TOO_HARD;
+ } else {
+ /*
+ * This is the emulated device case.
+ * Memory does not need to be pre-registered here, so take
+ * the rmap lock and do __find_linux_pte_or_hugepte().
+ */
+ if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
+ return H_TOO_HARD;
+
+ rmap = (void *) vmalloc_to_phys(rmap);
+
+ /*
+ * Synchronize with the MMU notifier callbacks in
+ * book3s_64_mmu_hv.c (kvm_unmap_hva_hv etc.).
+ * While we have the rmap lock, code running on other CPUs
+ * cannot finish unmapping the host real page that backs
+ * this guest real page, so we are OK to access the host
+ * real page.
+ */
+ lock_rmap(rmap);
+ if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
+ ret = H_TOO_HARD;
+ goto unlock_exit;
+ }
}
for (i = 0; i < npages; ++i) {
@@ -291,7 +344,8 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
}
unlock_exit:
- unlock_rmap(rmap);
+ if (rmap)
+ unlock_rmap(rmap);