Re: [PATCH v9 5/8] KVM: arm64: Support PUD hugepage in stage2_is_exec()

On 10/31/2018 11:27 PM, Punit Agrawal wrote:
> In preparation for creating PUD hugepages at stage 2, add support for
> detecting execute permissions on PUD page table entries. Faults due to
> lack of execute permissions on page table entries are used to perform
> i-cache invalidation on first execute.
> 
> Provide trivial implementations of arm32 helpers to allow sharing of
> code.
> 
> Signed-off-by: Punit Agrawal <punit.agrawal@xxxxxxx>
> Reviewed-by: Suzuki K Poulose <suzuki.poulose@xxxxxxx>
> Cc: Christoffer Dall <christoffer.dall@xxxxxxx>
> Cc: Marc Zyngier <marc.zyngier@xxxxxxx>
> Cc: Russell King <linux@xxxxxxxxxxxxxxx>
> Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
> Cc: Will Deacon <will.deacon@xxxxxxx>
> ---
>  arch/arm/include/asm/kvm_mmu.h         |  6 +++
>  arch/arm64/include/asm/kvm_mmu.h       |  5 +++
>  arch/arm64/include/asm/pgtable-hwdef.h |  2 +
>  virt/kvm/arm/mmu.c                     | 53 +++++++++++++++++++++++---
>  4 files changed, 61 insertions(+), 5 deletions(-)
> 
> diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
> index 37bf85d39607..839a619873d3 100644
> --- a/arch/arm/include/asm/kvm_mmu.h
> +++ b/arch/arm/include/asm/kvm_mmu.h
> @@ -102,6 +102,12 @@ static inline bool kvm_s2pud_readonly(pud_t *pud)
>  	return false;
>  }
>  
> +static inline bool kvm_s2pud_exec(pud_t *pud)
> +{
> +	BUG();
> +	return false;
> +}
> +
>  static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
>  {
>  	pte_val(pte) |= L_PTE_S2_RDWR;
> diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
> index 8da6d1b2a196..c755b37b3f92 100644
> --- a/arch/arm64/include/asm/kvm_mmu.h
> +++ b/arch/arm64/include/asm/kvm_mmu.h
> @@ -261,6 +261,11 @@ static inline bool kvm_s2pud_readonly(pud_t *pudp)
>  	return kvm_s2pte_readonly((pte_t *)pudp);
>  }
>  
> +static inline bool kvm_s2pud_exec(pud_t *pudp)
> +{
> +	return !(READ_ONCE(pud_val(*pudp)) & PUD_S2_XN);
> +}
> +
>  #define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)
>  
>  #ifdef __PAGETABLE_PMD_FOLDED
> diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
> index 1d7d8da2ef9b..336e24cddc87 100644
> --- a/arch/arm64/include/asm/pgtable-hwdef.h
> +++ b/arch/arm64/include/asm/pgtable-hwdef.h
> @@ -193,6 +193,8 @@
>  #define PMD_S2_RDWR		(_AT(pmdval_t, 3) << 6)   /* HAP[2:1] */
>  #define PMD_S2_XN		(_AT(pmdval_t, 2) << 53)  /* XN[1:0] */
>  
> +#define PUD_S2_XN		(_AT(pudval_t, 2) << 53)  /* XN[1:0] */
> +
>  /*
>   * Memory Attribute override for Stage-2 (MemAttr[3:0])
>   */
> diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
> index 1c669c3c1208..8e44dccd1b47 100644
> --- a/virt/kvm/arm/mmu.c
> +++ b/virt/kvm/arm/mmu.c
> @@ -1083,23 +1083,66 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
>  	return 0;
>  }
>  
> -static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
> +/*
> + * stage2_get_leaf_entry - walk the stage2 VM page tables and return
> + * true if a valid and present leaf-entry is found. A pointer to the
> + * leaf-entry is returned in the appropriate level variable - pudpp,
> + * pmdpp, ptepp.
> + */
> +static bool stage2_get_leaf_entry(struct kvm *kvm, phys_addr_t addr,
> +				  pud_t **pudpp, pmd_t **pmdpp, pte_t **ptepp)
>  {
> +	pud_t *pudp;
>  	pmd_t *pmdp;
>  	pte_t *ptep;
>  
> -	pmdp = stage2_get_pmd(kvm, NULL, addr);
> +	*pudpp = NULL;
> +	*pmdpp = NULL;
> +	*ptepp = NULL;
> +
> +	pudp = stage2_get_pud(kvm, NULL, addr);
> +	if (!pudp || stage2_pud_none(kvm, *pudp) || !stage2_pud_present(kvm, *pudp))
> +		return false;
> +
> +	if (stage2_pud_huge(kvm, *pudp)) {
> +		*pudpp = pudp;
> +		return true;
> +	}
> +
> +	pmdp = stage2_pmd_offset(kvm, pudp, addr);
>  	if (!pmdp || pmd_none(*pmdp) || !pmd_present(*pmdp))
>  		return false;
>  
> -	if (pmd_thp_or_huge(*pmdp))
> -		return kvm_s2pmd_exec(pmdp);
> +	if (pmd_thp_or_huge(*pmdp)) {
> +		*pmdpp = pmdp;
> +		return true;
> +	}
>  
>  	ptep = pte_offset_kernel(pmdp, addr);
>  	if (!ptep || pte_none(*ptep) || !pte_present(*ptep))
>  		return false;
>  
> -	return kvm_s2pte_exec(ptep);
> +	*ptepp = ptep;
> +	return true;
> +}
> +
> +static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
> +{
> +	pud_t *pudp;
> +	pmd_t *pmdp;
> +	pte_t *ptep;
> +	bool found;
> +
> +	found = stage2_get_leaf_entry(kvm, addr, &pudp, &pmdp, &ptep);
> +	if (!found)
> +		return false;
> +
> +	if (pudp)
> +		return kvm_s2pud_exec(pudp);
> +	else if (pmdp)
> +		return kvm_s2pmd_exec(pmdp);
> +	else
> +		return kvm_s2pte_exec(ptep);
>  }

stage2_get_leaf_entry() is not really necessary as a separate function.
It walks down to the leaf entry only to return true/false; at each of
those 'true' return points it could call the corresponding
kvm_s2XXX_exec() helper directly. Passing three different pointers as
arguments and then checking which of them came back non-NULL just to do
a one-line return seems like a lot of machinery without much reason.
stage2_is_exec() could simply be expanded in place to add PUD support,
e.g. something along the lines of the sketch below.
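Untested sketch of what I mean -- PUD support folded straight into
stage2_is_exec(), using only the helpers this patch already relies on
(stage2_get_pud(), stage2_pud_huge(), kvm_s2pud_exec(), etc.):

static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
{
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	/* Walk to the PUD level; bail out if nothing is mapped there. */
	pudp = stage2_get_pud(kvm, NULL, addr);
	if (!pudp || stage2_pud_none(kvm, *pudp) ||
	    !stage2_pud_present(kvm, *pudp))
		return false;

	/* PUD hugepage: check XN directly on the PUD entry. */
	if (stage2_pud_huge(kvm, *pudp))
		return kvm_s2pud_exec(pudp);

	pmdp = stage2_pmd_offset(kvm, pudp, addr);
	if (!pmdp || pmd_none(*pmdp) || !pmd_present(*pmdp))
		return false;

	/* PMD hugepage or THP: check XN on the PMD entry. */
	if (pmd_thp_or_huge(*pmdp))
		return kvm_s2pmd_exec(pmdp);

	ptep = pte_offset_kernel(pmdp, addr);
	if (!ptep || pte_none(*ptep) || !pte_present(*ptep))
		return false;

	return kvm_s2pte_exec(ptep);
}

That keeps the table walk and the permission check in one place and
avoids the output-pointer plumbing.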