From: Quentin Perret <qperret@xxxxxxxxxx>

__pkvm_create_private_mapping() is currently responsible for allocating
VA space in the hypervisor's "private" range and creating stage-1
mappings. In order to allow reusing the VA space allocation logic from
other places, let's factor it out into a standalone function.

Signed-off-by: Quentin Perret <qperret@xxxxxxxxxx>
---
 arch/arm64/kvm/hyp/nvhe/mm.c | 28 +++++++++++++++++++---------
 1 file changed, 19 insertions(+), 9 deletions(-)

diff --git a/arch/arm64/kvm/hyp/nvhe/mm.c b/arch/arm64/kvm/hyp/nvhe/mm.c
index 168e7fbe9a3c..4377b067dc0e 100644
--- a/arch/arm64/kvm/hyp/nvhe/mm.c
+++ b/arch/arm64/kvm/hyp/nvhe/mm.c
@@ -37,6 +37,22 @@ static int __pkvm_create_mappings(unsigned long start, unsigned long size,
 	return err;
 }
 
+static unsigned long hyp_alloc_private_va_range(size_t size)
+{
+	unsigned long addr = __io_map_base;
+
+	hyp_assert_lock_held(&pkvm_pgd_lock);
+	__io_map_base += PAGE_ALIGN(size);
+
+	/* Are we overflowing on the vmemmap ? */
+	if (__io_map_base > __hyp_vmemmap) {
+		__io_map_base = addr;
+		addr = (unsigned long)ERR_PTR(-ENOMEM);
+	}
+
+	return addr;
+}
+
 unsigned long __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
 					    enum kvm_pgtable_prot prot)
 {
@@ -45,16 +61,10 @@ unsigned long __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
 
 	hyp_spin_lock(&pkvm_pgd_lock);
 
-	size = PAGE_ALIGN(size + offset_in_page(phys));
-	addr = __io_map_base;
-	__io_map_base += size;
-
-	/* Are we overflowing on the vmemmap ? */
-	if (__io_map_base > __hyp_vmemmap) {
-		__io_map_base -= size;
-		addr = (unsigned long)ERR_PTR(-ENOMEM);
+	size = size + offset_in_page(phys);
+	addr = hyp_alloc_private_va_range(size);
+	if (IS_ERR((void *)addr))
 		goto out;
-	}
 
 	err = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, size, phys, prot);
 	if (err) {
-- 
2.36.1.124.g0e6072fb45-goog
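
[Editor's note, illustration only -- not part of the patch above.] A minimal
sketch of how a later caller might reuse hyp_alloc_private_va_range() once VA
allocation is decoupled from mapping, for example to reserve an unmapped guard
page below a mapped page. The function name pkvm_map_stack_with_guard() and the
guard-page use case are hypothetical; only hyp_alloc_private_va_range(),
kvm_pgtable_hyp_map(), pkvm_pgtable, pkvm_pgd_lock and PAGE_HYP come from the
existing hyp code.

/*
 * Illustrative sketch only. Splitting VA allocation from mapping lets a
 * caller reserve more private VA than it actually maps, leaving the
 * bottom page unmapped as a guard.
 */
static unsigned long pkvm_map_stack_with_guard(phys_addr_t stack_page)
{
	unsigned long va;
	int err;

	hyp_spin_lock(&pkvm_pgd_lock);

	/* Reserve two pages of private VA: guard page + stack page. */
	va = hyp_alloc_private_va_range(2 * PAGE_SIZE);
	if (IS_ERR((void *)va))
		goto out;

	/*
	 * Map only the upper page; the lower page stays unmapped, so an
	 * overflow past the stack base faults instead of corrupting data.
	 */
	err = kvm_pgtable_hyp_map(&pkvm_pgtable, va + PAGE_SIZE, PAGE_SIZE,
				  stack_page, PAGE_HYP);
	if (err)
		va = (unsigned long)ERR_PTR(err);
out:
	hyp_spin_unlock(&pkvm_pgd_lock);
	return va;	/* base of the reserved range on success */
}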