There is very little reason for kvm_hyp_pgd and kvm_hyp_pgd_mutex
to be accessed outside of mmu.c, yet kvm_hyp_pgd is passed as an
argument to most functions.

Instead, define a set of functions to access it, and make it private
to mmu.c.

Signed-off-by: Marc Zyngier <marc.zyngier at arm.com>
---
 arch/arm/include/asm/kvm_mmu.h |   11 +++++----
 arch/arm/kvm/arm.c             |   38 ++++++++++++++++--------------------
 arch/arm/kvm/mmu.c             |   42 ++++++++++++++++++++++++++++-----------
 3 files changed, 53 insertions(+), 38 deletions(-)

diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 71d6df4..4569756 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -27,12 +27,13 @@
 #define PTRS_PER_PGD2	512
 #define PGD2_ORDER	get_order(PTRS_PER_PGD2 * sizeof(pgd_t))
 
-extern pgd_t *kvm_hyp_pgd;
-extern struct mutex kvm_hyp_pgd_mutex;
+int create_hyp_mappings(void *from, void *to);
+int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
+void free_hyp_pmds(void);
 
-int create_hyp_mappings(pgd_t *hyp_pgd, void *from, void *to);
-int create_hyp_io_mappings(pgd_t *hyp_pgd, void *from, void *to, phys_addr_t);
-void free_hyp_pmds(pgd_t *hyp_pgd);
+int kvm_hyp_pgd_alloc(void);
+pgd_t *kvm_hyp_pgd_get(void);
+void kvm_hyp_pgd_free(void);
 
 int kvm_alloc_stage2_pgd(struct kvm *kvm);
 void kvm_free_stage2_pgd(struct kvm *kvm);
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 317ec19..a73c613 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -111,7 +111,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 		goto out_fail_alloc;
 	mutex_init(&kvm->arch.pgd_mutex);
 
-	ret = create_hyp_mappings(kvm_hyp_pgd, kvm, kvm + 1);
+	ret = create_hyp_mappings(kvm, kvm + 1);
 	if (ret)
 		goto out_free_stage2_pgd;
 
@@ -210,7 +210,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 	if (err)
 		goto free_vcpu;
 
-	err = create_hyp_mappings(kvm_hyp_pgd, vcpu, vcpu + 1);
+	err = create_hyp_mappings(vcpu, vcpu + 1);
 	if (err)
 		goto free_vcpu;
 
@@ -627,7 +627,7 @@ static void cpu_init_hyp_mode(void *vector)
 
 	cpu_set_vector(vector);
 
-	pgd_ptr = virt_to_phys(kvm_hyp_pgd);
+	pgd_ptr = virt_to_phys(kvm_hyp_pgd_get());
 	stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
 	hyp_stack_ptr = stack_page + PAGE_SIZE;
 
@@ -672,11 +672,9 @@ static int init_hyp_mode(void)
 	/*
 	 * Allocate Hyp level-1 page table
 	 */
-	kvm_hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
-	if (!kvm_hyp_pgd) {
-		err = -ENOMEM;
+	err = kvm_hyp_pgd_alloc();
+	if (err)
 		goto out_free_stack_pages;
-	}
 
 	init_phys_addr = virt_to_phys(__kvm_hyp_init);
 	init_end_phys_addr = virt_to_phys(__kvm_hyp_init_end);
@@ -685,8 +683,9 @@ static int init_hyp_mode(void)
 	/*
 	 * Create identity mapping for the init code.
 	 */
-	hyp_idmap_add(kvm_hyp_pgd, (unsigned long)init_phys_addr,
-		      (unsigned long)init_end_phys_addr);
+	hyp_idmap_add(kvm_hyp_pgd_get(),
+		      (unsigned long)init_phys_addr,
+		      (unsigned long)init_end_phys_addr);
 
 	/*
 	 * Execute the init code on each CPU.
@@ -703,14 +702,14 @@ static int init_hyp_mode(void)
 	/*
 	 * Unmap the identity mapping
 	 */
-	hyp_idmap_del(kvm_hyp_pgd, (unsigned long)init_phys_addr,
-		      (unsigned long)init_end_phys_addr);
+	hyp_idmap_del(kvm_hyp_pgd_get(),
+		      (unsigned long)init_phys_addr,
+		      (unsigned long)init_end_phys_addr);
 
 	/*
 	 * Map the Hyp-code called directly from the host
 	 */
-	err = create_hyp_mappings(kvm_hyp_pgd,
-				  __kvm_hyp_code_start, __kvm_hyp_code_end);
+	err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
 	if (err) {
 		kvm_err("Cannot map world-switch code\n");
 		goto out_free_mappings;
@@ -721,8 +720,7 @@ static int init_hyp_mode(void)
 	 */
 	for_each_possible_cpu(cpu) {
 		char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
-		err = create_hyp_mappings(kvm_hyp_pgd,
-					  stack_page, stack_page + PAGE_SIZE);
+		err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE);
 
 		if (err) {
 			kvm_err("Cannot map hyp stack\n");
@@ -739,12 +737,11 @@ static int init_hyp_mode(void)
 	return 0;
 
 out_free_mappings:
-	free_hyp_pmds(kvm_hyp_pgd);
-	kfree(kvm_hyp_pgd);
+	free_hyp_pmds();
+	kvm_hyp_pgd_free();
 out_free_stack_pages:
 	for_each_possible_cpu(cpu)
 		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
-	kvm_hyp_pgd = NULL;
 
 	return err;
 }
@@ -778,11 +775,10 @@ void kvm_arch_exit(void)
 {
 	int cpu;
 
-	free_hyp_pmds(kvm_hyp_pgd);
+	free_hyp_pmds();
 	for_each_possible_cpu(cpu)
 		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
-	kfree(kvm_hyp_pgd);
-	kvm_hyp_pgd = NULL;
+	kvm_hyp_pgd_free();
 }
 
 static int arm_init(void)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 36bccaa..8d8530f 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -26,8 +26,8 @@
 
 #include "trace.h"
 
-pgd_t *kvm_hyp_pgd;
-DEFINE_MUTEX(kvm_hyp_pgd_mutex);
+static pgd_t *kvm_hyp_pgd;
+static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
 
 static void free_ptes(pmd_t *pmd, unsigned long addr)
 {
@@ -45,12 +45,11 @@ static void free_ptes(pmd_t *pmd, unsigned long addr)
 
 /**
  * free_hyp_pmds - free a Hyp-mode level-2 tables and child level-3 tables
- * @hypd_pgd:	The Hyp-mode page table pointer
  *
 * Assumes this is a page table used strictly in Hyp-mode and therefore contains
 * only mappings in the kernel memory area, which is above PAGE_OFFSET.
 */
-void free_hyp_pmds(pgd_t *hyp_pgd)
+void free_hyp_pmds(void)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -59,7 +58,7 @@ void free_hyp_pmds(pgd_t *hyp_pgd)
 
 	mutex_lock(&kvm_hyp_pgd_mutex);
 	for (addr = PAGE_OFFSET; addr != 0; addr += PGDIR_SIZE) {
-		pgd = hyp_pgd + pgd_index(addr);
+		pgd = kvm_hyp_pgd + pgd_index(addr);
 		pud = pud_offset(pgd, addr);
 
 		BUG_ON(pud_bad(*pud));
@@ -141,7 +140,6 @@ static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
 
 /**
  * create_hyp_mappings - map a kernel virtual address range in Hyp mode
- * @hyp_pgd:	The allocated hypervisor level-1 table
  * @from:	The virtual kernel start address of the range
  * @to:		The virtual kernel end address of the range (exclusive)
  *
@@ -150,7 +148,7 @@ static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
 *
 * Note: Wrapping around zero in the "to" address is not supported.
 */
-static int __create_hyp_mappings(pgd_t *hyp_pgd, void *from, void *to,
+static int __create_hyp_mappings(void *from, void *to,
 				 hyp_pte_map_fn_t map_fn,
 				 unsigned long *pfn_base)
 {
@@ -168,7 +166,7 @@ static int __create_hyp_mappings(pgd_t *hyp_pgd, void *from, void *to,
 
 	mutex_lock(&kvm_hyp_pgd_mutex);
 	for (addr = start; addr < end; addr = next) {
-		pgd = hyp_pgd + pgd_index(addr);
+		pgd = kvm_hyp_pgd + pgd_index(addr);
 		pud = pud_offset(pgd, addr);
 
 		if (pud_none_or_clear_bad(pud)) {
@@ -191,15 +189,15 @@ out:
 	return err;
 }
 
-int create_hyp_mappings(pgd_t *hyp_pgd, void *from, void *to)
+int create_hyp_mappings(void *from, void *to)
 {
-	return __create_hyp_mappings(hyp_pgd, from, to, create_hyp_pte_mappings, NULL);
+	return __create_hyp_mappings(from, to, create_hyp_pte_mappings, NULL);
 }
 
-int create_hyp_io_mappings(pgd_t *hyp_pgd, void *from, void *to, phys_addr_t addr)
+int create_hyp_io_mappings(void *from, void *to, phys_addr_t addr)
 {
 	unsigned long pfn = __phys_to_pfn(addr);
-	return __create_hyp_mappings(hyp_pgd, from, to, create_hyp_pte_io_mappings, &pfn);
+	return __create_hyp_mappings(from, to, create_hyp_pte_io_mappings, &pfn);
 }
 
 /**
@@ -701,3 +699,23 @@ int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
 
 	return 0;
 }
+
+int kvm_hyp_pgd_alloc(void)
+{
+	kvm_hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
+	if (!kvm_hyp_pgd)
+		return -ENOMEM;
+
+	return 0;
+}
+
+pgd_t *kvm_hyp_pgd_get(void)
+{
+	return kvm_hyp_pgd;
+}
+
+void kvm_hyp_pgd_free(void)
+{
+	kfree(kvm_hyp_pgd);
+	kvm_hyp_pgd = NULL;
+}
-- 
1.7.7.1
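
For readers new to the idiom: the change is plain encapsulation. The pgd
pointer and its mutex become file-local (static) in mmu.c, and the three
small accessors are the only way to reach them, so no other file can
allocate, free, or reassign the table behind mmu.c's back. Below is a
minimal, self-contained userspace sketch of the same pattern, only an
analogue of the patch: calloc/free and pthread_mutex_t stand in for
kzalloc/kfree and the kernel mutex, and every name in it (pgd_entry_t,
hyp_pgd_alloc, and so on) is illustrative, not taken from the kernel.

	/* Build with: cc sketch.c -lpthread */
	#include <stdlib.h>
	#include <pthread.h>

	typedef unsigned long pgd_entry_t;	/* stand-in for pgd_t */
	#define PTRS_PER_PGD	2048		/* illustrative size only */

	/* Private to this file, like kvm_hyp_pgd in mmu.c after the patch. */
	static pgd_entry_t *hyp_pgd;
	static pthread_mutex_t hyp_pgd_mutex = PTHREAD_MUTEX_INITIALIZER;

	int hyp_pgd_alloc(void)
	{
		/* calloc zero-fills, mirroring kzalloc. */
		hyp_pgd = calloc(PTRS_PER_PGD, sizeof(pgd_entry_t));
		return hyp_pgd ? 0 : -1;	/* the kernel returns -ENOMEM */
	}

	pgd_entry_t *hyp_pgd_get(void)
	{
		return hyp_pgd;
	}

	void hyp_pgd_free(void)
	{
		free(hyp_pgd);
		hyp_pgd = NULL;		/* guard against double free */
	}

	/* A caller never touches the pointer directly: */
	int main(void)
	{
		if (hyp_pgd_alloc())
			return 1;
		pthread_mutex_lock(&hyp_pgd_mutex);
		/* ... walk hyp_pgd_get() here, as __create_hyp_mappings
		 * walks kvm_hyp_pgd under kvm_hyp_pgd_mutex ... */
		pthread_mutex_unlock(&hyp_pgd_mutex);
		hyp_pgd_free();
		return 0;
	}

Note that the mutex stays private too: in the patch, locking happens
inside mmu.c (free_hyp_pmds, __create_hyp_mappings), so callers in arm.c
need no knowledge of it.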