From: Quentin Perret <qperret@xxxxxxxxxx>

__pkvm_create_private_mapping() is currently responsible for allocating
VA space in the hypervisor's "private" range and creating stage-1
mappings. In order to allow reusing the VA space allocation logic from
other places, let's factor it out into a standalone function. This will
be used to allocate private VA ranges for hypervisor stack guard pages
in a subsequent patch in this series.

Signed-off-by: Quentin Perret <qperret@xxxxxxxxxx>
[Kalesh - Resolve conflicts and make hyp_alloc_private_va_range
	available outside nvhe/mm.c, update commit message]
Signed-off-by: Kalesh Singh <kaleshsingh@xxxxxxxxxx>
---
 arch/arm64/kvm/hyp/include/nvhe/mm.h |  1 +
 arch/arm64/kvm/hyp/nvhe/mm.c         | 28 +++++++++++++++++++---------
 2 files changed, 20 insertions(+), 9 deletions(-)

diff --git a/arch/arm64/kvm/hyp/include/nvhe/mm.h b/arch/arm64/kvm/hyp/include/nvhe/mm.h
index 2d08510c6cc1..f53fb0e406db 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mm.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mm.h
@@ -21,6 +21,7 @@ int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot);
 int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot);
 unsigned long __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
 					    enum kvm_pgtable_prot prot);
+unsigned long hyp_alloc_private_va_range(size_t size);
 
 static inline void hyp_vmemmap_range(phys_addr_t phys, unsigned long size,
 				     unsigned long *start, unsigned long *end)
diff --git a/arch/arm64/kvm/hyp/nvhe/mm.c b/arch/arm64/kvm/hyp/nvhe/mm.c
index 526a7d6fa86f..e196441e072f 100644
--- a/arch/arm64/kvm/hyp/nvhe/mm.c
+++ b/arch/arm64/kvm/hyp/nvhe/mm.c
@@ -37,6 +37,22 @@ static int __pkvm_create_mappings(unsigned long start, unsigned long size,
 	return err;
 }
 
+unsigned long hyp_alloc_private_va_range(size_t size)
+{
+	unsigned long addr = __io_map_base;
+
+	hyp_assert_lock_held(&pkvm_pgd_lock);
+	__io_map_base += PAGE_ALIGN(size);
+
+	/* Are we overflowing on the vmemmap ? */
+	if (__io_map_base > __hyp_vmemmap) {
+		__io_map_base = addr;
+		addr = (unsigned long)ERR_PTR(-ENOMEM);
+	}
+
+	return addr;
+}
+
 unsigned long __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
 					    enum kvm_pgtable_prot prot)
 {
@@ -45,16 +61,10 @@ unsigned long __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
 
 	hyp_spin_lock(&pkvm_pgd_lock);
 
-	size = PAGE_ALIGN(size + offset_in_page(phys));
-	addr = __io_map_base;
-	__io_map_base += size;
-
-	/* Are we overflowing on the vmemmap ? */
-	if (__io_map_base > __hyp_vmemmap) {
-		__io_map_base -= size;
-		addr = (unsigned long)ERR_PTR(-ENOMEM);
+	size = size + offset_in_page(phys);
+	addr = hyp_alloc_private_va_range(size);
+	if (IS_ERR((void *)addr))
 		goto out;
-	}
 
 	err = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, size, phys, prot);
 	if (err) {
-- 
2.35.1.265.g69c8d7142f-goog
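
For context, below is a minimal sketch of how a later patch in this
series might consume the new helper to place a guard page below a
per-CPU hypervisor stack. This is illustrative only, not part of the
patch: the function name hyp_create_stack() and the two-page layout are
assumptions; only hyp_alloc_private_va_range(), kvm_pgtable_hyp_map()
and the pkvm_pgd_lock discipline come from the code above. Note that
the helper asserts pkvm_pgd_lock is held, so callers must take it.

/*
 * Hypothetical example: reserve a private VA range of two pages and
 * map only the upper one, leaving the lower page unmapped as a guard
 * so a stack overflow faults instead of silently corrupting memory.
 */
static int hyp_create_stack(phys_addr_t stack_pa, unsigned long *stack_va)
{
	unsigned long addr;
	int err;

	hyp_spin_lock(&pkvm_pgd_lock);

	/* Guard page + stack page: allocate VA for both at once. */
	addr = hyp_alloc_private_va_range(2 * PAGE_SIZE);
	if (IS_ERR((void *)addr)) {
		err = PTR_ERR((void *)addr);
		goto out;
	}

	/* Map the upper page only; the lower page stays unmapped. */
	err = kvm_pgtable_hyp_map(&pkvm_pgtable, addr + PAGE_SIZE,
				  PAGE_SIZE, stack_pa, PAGE_HYP);
	if (!err)
		*stack_va = addr + 2 * PAGE_SIZE;	/* stack grows down */
out:
	hyp_spin_unlock(&pkvm_pgd_lock);
	return err;
}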