Re: [PATCH 3/4] arm/arm64: KVM: Flush caches to memory on unmap

On Thu, Jan 08, 2015 at 11:59:08AM +0000, Marc Zyngier wrote:
> Let's assume a guest has created an uncached mapping, and written
> to that page. Let's also assume that the host uses a cache-coherent
> IO subsystem. Let's finally assume that the host is under memory
> pressure and starts to swap things out.
> 
> Before this "uncached" page is evicted, we need to make sure it
> gets invalidated, or the IO subsystem is going to swap out the
> cached view, losing the data that has been written there.
> 
> Signed-off-by: Marc Zyngier <marc.zyngier@xxxxxxx>
> ---
>  arch/arm/include/asm/kvm_mmu.h   | 31 +++++++++++++++++++++++++++
>  arch/arm/kvm/mmu.c               | 46 +++++++++++++++++++++++++++-------------
>  arch/arm64/include/asm/kvm_mmu.h | 18 ++++++++++++++++
>  3 files changed, 80 insertions(+), 15 deletions(-)
> 
> diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
> index 63e0ecc..7ceb836 100644
> --- a/arch/arm/include/asm/kvm_mmu.h
> +++ b/arch/arm/include/asm/kvm_mmu.h
> @@ -44,6 +44,7 @@
>  
>  #ifndef __ASSEMBLY__
>  
> +#include <linux/highmem.h>
>  #include <asm/cacheflush.h>
>  #include <asm/pgalloc.h>
>  
> @@ -190,6 +191,36 @@ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
>  
>  #define kvm_virt_to_phys(x)		virt_to_idmap((unsigned long)(x))
>  
> +static inline void __kvm_flush_dcache_pte(pte_t pte)
> +{
> +	void *va = kmap_atomic(pte_page(pte));
> +
> +	kvm_flush_dcache_to_poc(va, PAGE_SIZE);
> +
> +	kunmap_atomic(va);
> +}
> +
> +static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
> +{
> +	unsigned long size = PMD_SIZE;
> +	pfn_t pfn = pmd_pfn(pmd);
> +
> +	while (size) {
> +		void *va = kmap_atomic_pfn(pfn);
> +
> +		kvm_flush_dcache_to_poc(va, PAGE_SIZE);
> +
> +		pfn++;
> +		size -= PAGE_SIZE;
> +
> +		kunmap_atomic(va);
> +	}
> +}
> +
> +static inline void __kvm_flush_dcache_pud(pud_t pud)
> +{
> +}
> +
>  void stage2_flush_vm(struct kvm *kvm);
>  
>  #endif	/* !__ASSEMBLY__ */
> diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
> index 1dc9778..1f5b793 100644
> --- a/arch/arm/kvm/mmu.c
> +++ b/arch/arm/kvm/mmu.c
> @@ -58,6 +58,21 @@ static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
>  		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
>  }
>  
> +static void kvm_flush_dcache_pte(pte_t pte)
> +{
> +	__kvm_flush_dcache_pte(pte);
> +}
> +
> +static void kvm_flush_dcache_pmd(pmd_t pmd)
> +{
> +	__kvm_flush_dcache_pmd(pmd);
> +}
> +
> +static void kvm_flush_dcache_pud(pud_t pud)
> +{
> +	__kvm_flush_dcache_pud(pud);
> +}
> +
>  static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
>  				  int min, int max)
>  {
> @@ -128,9 +143,12 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
>  	start_pte = pte = pte_offset_kernel(pmd, addr);
>  	do {
>  		if (!pte_none(*pte)) {
> +			pte_t old_pte = *pte;
>  			kvm_set_pte(pte, __pte(0));
> -			put_page(virt_to_page(pte));

Was this a bug beforehand, in that we released the page before we
flushed the TLB?
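
(For reference, as I read the diff, the ordering in unmap_ptes() changes
from

	kvm_set_pte(pte, __pte(0));
	put_page(virt_to_page(pte));
	kvm_tlb_flush_vmid_ipa(kvm, addr);

to

	kvm_set_pte(pte, __pte(0));
	kvm_tlb_flush_vmid_ipa(kvm, addr);
	/* dcache maintenance for non-device mappings goes here */
	put_page(virt_to_page(pte));

so I'm wondering whether the old ordering was a problem in its own
right.)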

>  			kvm_tlb_flush_vmid_ipa(kvm, addr);
> +			if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
> +				kvm_flush_dcache_pte(old_pte);

This is confusing me: we are only flushing the cache for cacheable
stage-2 mappings?  Weren't we trying to flush the cache for uncached
mappings?  (We obviously also need to flush a cacheable stage-2 mapping
where the guest maps it as uncached, but we don't know that easily.)

Am I missing something completely here?

In any case we probably need a comment here explaining this.
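
Even something short would help; I'm only guessing at the rationale, so
the comment text below is just a sketch (the check itself is straight
from the patch):

	/*
	 * Device (MMIO) mappings are never brought into the host's
	 * D-cache, so there is nothing to clean to the PoC for them.
	 */
	if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
		kvm_flush_dcache_pte(old_pte);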

> +			put_page(virt_to_page(pte));
>  		}
>  	} while (pte++, addr += PAGE_SIZE, addr != end);
>  
> @@ -149,8 +167,10 @@ static void unmap_pmds(struct kvm *kvm, pud_t *pud,
>  		next = kvm_pmd_addr_end(addr, end);
>  		if (!pmd_none(*pmd)) {
>  			if (kvm_pmd_huge(*pmd)) {
> +				pmd_t old_pmd = *pmd;
>  				pmd_clear(pmd);
>  				kvm_tlb_flush_vmid_ipa(kvm, addr);
> +				kvm_flush_dcache_pmd(old_pmd);
>  				put_page(virt_to_page(pmd));
>  			} else {
>  				unmap_ptes(kvm, pmd, addr, next);
> @@ -173,8 +193,10 @@ static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
>  		next = kvm_pud_addr_end(addr, end);
>  		if (!pud_none(*pud)) {
>  			if (pud_huge(*pud)) {
> +				pud_t old_pud = *pud;
>  				pud_clear(pud);
>  				kvm_tlb_flush_vmid_ipa(kvm, addr);
> +				kvm_flush_dcache_pud(old_pud);
>  				put_page(virt_to_page(pud));
>  			} else {
>  				unmap_pmds(kvm, pud, addr, next);
> @@ -209,10 +231,8 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
>  
>  	pte = pte_offset_kernel(pmd, addr);
>  	do {
> -		if (!pte_none(*pte)) {
> -			hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
> -			kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE);
> -		}
> +		if (!pte_none(*pte))
> +			kvm_flush_dcache_pte(*pte);
>  	} while (pte++, addr += PAGE_SIZE, addr != end);
>  }
>  
> @@ -226,12 +246,10 @@ static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
>  	do {
>  		next = kvm_pmd_addr_end(addr, end);
>  		if (!pmd_none(*pmd)) {
> -			if (kvm_pmd_huge(*pmd)) {
> -				hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
> -				kvm_flush_dcache_to_poc((void*)hva, PMD_SIZE);
> -			} else {
> +			if (kvm_pmd_huge(*pmd))
> +				kvm_flush_dcache_pmd(*pmd);
> +			else
>  				stage2_flush_ptes(kvm, pmd, addr, next);
> -			}
>  		}
>  	} while (pmd++, addr = next, addr != end);
>  }
> @@ -246,12 +264,10 @@ static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
>  	do {
>  		next = kvm_pud_addr_end(addr, end);
>  		if (!pud_none(*pud)) {
> -			if (pud_huge(*pud)) {
> -				hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
> -				kvm_flush_dcache_to_poc((void*)hva, PUD_SIZE);
> -			} else {
> +			if (pud_huge(*pud))
> +				kvm_flush_dcache_pud(*pud);
> +			else
>  				stage2_flush_pmds(kvm, pud, addr, next);
> -			}
>  		}
>  	} while (pud++, addr = next, addr != end);
>  }
> diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
> index 14a74f1..b7419f5 100644
> --- a/arch/arm64/include/asm/kvm_mmu.h
> +++ b/arch/arm64/include/asm/kvm_mmu.h
> @@ -260,6 +260,24 @@ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
>  
>  #define kvm_virt_to_phys(x)		__virt_to_phys((unsigned long)(x))
>  
> +static inline void __kvm_flush_dcache_pte(pte_t pte)
> +{
> +	struct page *page = pte_page(pte);
> +	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
> +}
> +
> +static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
> +{
> +	struct page *page = pmd_page(pmd);
> +	kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
> +}
> +
> +static inline void __kvm_flush_dcache_pud(pud_t pud)
> +{
> +	struct page *page = pud_page(pud);
> +	kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
> +}
> +
>  void stage2_flush_vm(struct kvm *kvm);
>  
>  #endif /* __ASSEMBLY__ */
> -- 
> 2.1.4
> 
Thanks,

-Christoffer


