Re: [PATCH v10 07/14] KVM: ARM: Memory virtualization setup

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On Sun, Aug 19, 2012 at 9:00 AM, Avi Kivity <avi@xxxxxxxxxx> wrote:
> On 08/19/2012 12:38 PM, Peter Maydell wrote:
>> On 19 August 2012 05:34, Christoffer Dall <c.dall@xxxxxxxxxxxxxxxxxxxxxx> wrote:
>>> On Thu, Aug 16, 2012 at 2:25 PM, Alexander Graf <agraf@xxxxxxx> wrote:
>>>> A single hva can have multiple gpas mapped, no? At least that's what I gathered
>>>> from the discussion about my attempt to a function similar to this :).
>>
>>> I don't think this is the case for ARM, can you provide an example? We
>>> use gfn_to_pfn_prot and only allow user memory regions. What you
>>> suggest would be multiple physical addresses pointing to the same
>>> memory bank, I don't think that makes any sense on ARM hardware, for
>>> x86 and PPC I don't know.
>>
>> I don't know what an hva is,
>
> host virtual address
>
> (see Documentation/virtual/kvm/mmu.txt for more TLAs in this area).
>
>  but yes, ARM boards can have the same
>> block of RAM aliased into multiple places in the physical address space.
>> (we don't currently bother to implement the aliases in qemu's vexpress-a15
>> though because it's a bunch of mappings of the low 2GB into high
>> addresses mostly intended to let you test LPAE code without having to
>> put lots of RAM on the hardware).

I stand corrected.

>
> Even if it weren't common, the API allows it, so we must behave sensibly.
>

true, this should be a solution:

commit 2a8661fd7e6c15889a20a4547bd7861e84b778a8
Author: Christoffer Dall <c.dall@xxxxxxxxxxxxxxxxxxxxxx>
Date:   Sun Aug 19 15:52:10 2012 -0400

    KVM: ARM: A single hva can map multiple gpas

    Handle mmu notifier ops for every such mapping.

    Signed-off-by: Christoffer Dall <c.dall@xxxxxxxxxxxxxxxxxxxxxx>

diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 3df4fa8..9b23230 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -754,11 +754,14 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return ret ? ret : 1;
 }

-static bool hva_to_gpa(struct kvm *kvm, unsigned long hva, gpa_t *gpa)
+static int handle_hva_to_gpa(struct kvm *kvm, unsigned long hva,
+			     void (*handler)(struct kvm *kvm, unsigned long hva,
+					     gpa_t gpa, void *data),
+			     void *data)
 {
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
-	bool found = false;
+	int cnt = 0;

 	slots = kvm_memslots(kvm);

@@ -769,31 +772,36 @@ static bool hva_to_gpa(struct kvm *kvm, unsigned long hva, gpa_t *gpa)

 		end = start + (memslot->npages << PAGE_SHIFT);
 		if (hva >= start && hva < end) {
+			gpa_t gpa;
 			gpa_t gpa_offset = hva - start;
-			*gpa = (memslot->base_gfn << PAGE_SHIFT) + gpa_offset;
-			found = true;
-			/* no overlapping memslots allowed: break */
-			break;
+			gpa = (memslot->base_gfn << PAGE_SHIFT) + gpa_offset;
+			handler(kvm, hva, gpa, data);
+			cnt++;
 		}
 	}

-	return found;
+	return cnt;
+}
+
+static void kvm_unmap_hva_handler(struct kvm *kvm, unsigned long hva,
+				  gpa_t gpa, void *data)
+{
+	spin_lock(&kvm->arch.pgd_lock);
+	stage2_clear_pte(kvm, gpa);
+	spin_unlock(&kvm->arch.pgd_lock);
 }

 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
 {
-	bool found;
-	gpa_t gpa;
+	int found;

 	if (!kvm->arch.pgd)
 		return 0;

-	found = hva_to_gpa(kvm, hva, &gpa);
-	if (found) {
-		spin_lock(&kvm->arch.pgd_lock);
-		stage2_clear_pte(kvm, gpa);
-		spin_unlock(&kvm->arch.pgd_lock);
-	}
+	found = handle_hva_to_gpa(kvm, hva, &kvm_unmap_hva_handler, NULL);
+	if (found > 0)
+		__kvm_tlb_flush_vmid(kvm);
+
 	return 0;
 }

@@ -814,21 +822,27 @@ int kvm_unmap_hva_range(struct kvm *kvm,
 	return 0;
 }

+static void kvm_set_spte_handler(struct kvm *kvm, unsigned long hva,
+				 gpa_t gpa, void *data)
+{
+	pte_t *pte = (pte_t *)data;
+
+	spin_lock(&kvm->arch.pgd_lock);
+	stage2_set_pte(kvm, NULL, gpa, pte);
+	spin_unlock(&kvm->arch.pgd_lock);
+}
+
+
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 {
-	gpa_t gpa;
-	bool found;
+	int found;

 	if (!kvm->arch.pgd)
 		return;

-	found = hva_to_gpa(kvm, hva, &gpa);
-	if (found) {
-		spin_lock(&kvm->arch.pgd_lock);
-		stage2_set_pte(kvm, NULL, gpa, &pte);
-		spin_unlock(&kvm->arch.pgd_lock);
+	found = handle_hva_to_gpa(kvm, hva, &kvm_set_spte_handler, &pte);
+	if (found > 0)
 		__kvm_tlb_flush_vmid(kvm);
-	}
 }

 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)

--
_______________________________________________
kvmarm mailing list
kvmarm@xxxxxxxxxxxxxxxxxxxxx
https://lists.cs.columbia.edu/cucslists/listinfo/kvmarm


[Index of Archives]     [Linux KVM]     [Spice Development]     [Libvirt]     [Libvirt Users]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]

  Powered by Linux