[Android-virt] [PATCH v2] ARM: KVM: Move HYP idmap to be section based

Rework the HYP idmap code to be more consistent with what the kernel
does:

- Move the HYP pgd to the core code (so other hypervisors can
  benefit from it)
- Populate this pgd with an identity mapping of the code contained
  in the .hyp.idmap.text section
- Offer a method to drop this identity mapping
- Make all the above depend on CONFIG_ARM_VIRT_EXT

This makes the core code more generic and removes some of the
clutter from the KVM init.

Cc: Will Deacon <will.deacon@arm.com>
Cc: Christoffer Dall <c.dall@virtualopensystems.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
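Note for reviewers: a minimal sketch (not part of the patch) of how a
hypervisor init path is expected to consume the core-provided idmap after
this change. hyp_pgd and hyp_idmap_teardown() are introduced below; the
wrapper name and the extern declarations mirroring the KVM code are
illustrative assumptions only:

	#include <linux/types.h>
	#include <linux/cpumask.h>
	#include <linux/smp.h>
	#include <asm/idmap.h>
	#include <asm/memory.h>

	/* Assumed to be provided by the hypervisor code, as KVM does below. */
	extern char __kvm_hyp_init[];			/* HYP init code, in .hyp.idmap.text */
	extern void cpu_init_hyp_mode(void *vector);	/* per-CPU HYP setup */

	static int example_enable_hyp(void)
	{
		phys_addr_t init_phys_addr;
		int cpu;

		/*
		 * hyp_pgd already holds a section-based identity mapping of
		 * .hyp.idmap.text, built by the early_initcall added to
		 * arch/arm/mm/idmap.c by this patch.
		 */
		init_phys_addr = virt_to_phys(__kvm_hyp_init);

		/* Enter HYP mode on each CPU through the identity mapping. */
		for_each_online_cpu(cpu)
			smp_call_function_single(cpu, cpu_init_hyp_mode,
						 (void *)(long)init_phys_addr, 1);

		/* The idmap is only needed during init; drop it afterwards. */
		hyp_idmap_teardown();

		return 0;
	}
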
 arch/arm/include/asm/idmap.h   |    6 +++++
 arch/arm/include/asm/pgtable.h |    5 ----
 arch/arm/kernel/vmlinux.lds.S  |    6 ++++-
 arch/arm/kvm/arm.c             |   48 +++++----------------------------
 arch/arm/kvm/init.S            |    4 ++-
 arch/arm/kvm/mmu.c             |   26 +++---------------
 arch/arm/mm/idmap.c            |   58 ++++++++++++++++++++++++++--------------
 7 files changed, 62 insertions(+), 91 deletions(-)

diff --git a/arch/arm/include/asm/idmap.h b/arch/arm/include/asm/idmap.h
index bf863ed..61c034e 100644
--- a/arch/arm/include/asm/idmap.h
+++ b/arch/arm/include/asm/idmap.h
@@ -11,4 +11,10 @@ extern pgd_t *idmap_pgd;
 
 void setup_mm_for_reboot(void);
 
+#ifdef CONFIG_ARM_VIRT_EXT
+extern pgd_t *hyp_pgd;
+
+void hyp_idmap_teardown(void);
+#endif
+
 #endif	/* __ASM_IDMAP_H */
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index fffc01f..bb014af 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -315,11 +315,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 
 #define pgtable_cache_init() do { } while (0)
 
-#ifdef CONFIG_KVM_ARM_HOST
-void hyp_idmap_add(pgd_t *, unsigned long, unsigned long);
-void hyp_idmap_del(pgd_t *pgd, unsigned long addr, unsigned long end);
-#endif
-
 #endif /* !__ASSEMBLY__ */
 
 #endif /* CONFIG_MMU */
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 43a31fb..33da40a 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -19,7 +19,11 @@
 	ALIGN_FUNCTION();						\
 	VMLINUX_SYMBOL(__idmap_text_start) = .;				\
 	*(.idmap.text)							\
-	VMLINUX_SYMBOL(__idmap_text_end) = .;
+	VMLINUX_SYMBOL(__idmap_text_end) = .;				\
+	ALIGN_FUNCTION();						\
+	VMLINUX_SYMBOL(__hyp_idmap_text_start) = .;			\
+	*(.hyp.idmap.text)						\
+	VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
 
 #ifdef CONFIG_HOTPLUG_CPU
 #define ARM_CPU_DISCARD(x)
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 3693ae8..93ea924 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -32,6 +32,7 @@
 #include <asm/uaccess.h>
 #include <asm/ptrace.h>
 #include <asm/mman.h>
+#include <asm/idmap.h>
 #include <asm/tlbflush.h>
 #include <asm/cputype.h>
 #include <asm/kvm_arm.h>
@@ -697,7 +698,7 @@ static void cpu_init_hyp_mode(void *vector)
 
 	cpu_set_vector(vector);
 
-	pgd_ptr = virt_to_phys(kvm_hyp_pgd_get());
+	pgd_ptr = virt_to_phys(hyp_pgd);
 	stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
 	hyp_stack_ptr = stack_page + PAGE_SIZE;
 
@@ -718,7 +719,7 @@ static void cpu_init_hyp_mode(void *vector)
  */
 static int init_hyp_mode(void)
 {
-	phys_addr_t init_phys_addr, init_end_phys_addr;
+	phys_addr_t init_phys_addr;
 	int cpu;
 	int err = 0;
 
@@ -738,30 +739,14 @@ static int init_hyp_mode(void)
 	}
 
 	/*
-	 * Allocate Hyp level-1 page table
-	 */
-	err = kvm_hyp_pgd_alloc();
-	if (err)
-		goto out_free_stack_pages;
-
-	init_phys_addr = virt_to_phys(__kvm_hyp_init);
-	init_end_phys_addr = virt_to_phys(__kvm_hyp_init_end);
-	BUG_ON(init_phys_addr & 0x1f);
-
-	/*
-	 * Create identity mapping for the init code.
-	 */
-	hyp_idmap_add(kvm_hyp_pgd_get(),
-		      (unsigned long)init_phys_addr,
-		      (unsigned long)init_end_phys_addr);
-
-	/*
 	 * Execute the init code on each CPU.
 	 *
 	 * Note: The stack is not mapped yet, so don't do anything else than
 	 * initializing the hypervisor mode on each CPU using a local stack
 	 * space for temporary storage.
 	 */
+	init_phys_addr = virt_to_phys(__kvm_hyp_init);
+
 	for_each_online_cpu(cpu) {
 		smp_call_function_single(cpu, cpu_init_hyp_mode,
 					 (void *)(long)init_phys_addr, 1);
@@ -770,9 +755,7 @@ static int init_hyp_mode(void)
 	/*
 	 * Unmap the identity mapping
 	 */
-	hyp_idmap_del(kvm_hyp_pgd_get(),
-		      (unsigned long)init_phys_addr,
-		      (unsigned long)init_end_phys_addr);
+	hyp_idmap_teardown();
 
 	/*
 	 * Map the Hyp-code called directly from the host
@@ -820,7 +803,6 @@ static int init_hyp_mode(void)
 	return 0;
 out_free_mappings:
 	free_hyp_pmds();
-	kvm_hyp_pgd_free();
 out_free_stack_pages:
 	for_each_possible_cpu(cpu)
 		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
@@ -864,21 +846,13 @@ static void cpu_exit_hyp_mode(void *vector)
 
 static int exit_hyp_mode(void)
 {
-	phys_addr_t exit_phys_addr, exit_end_phys_addr;
+	phys_addr_t exit_phys_addr;
 	int cpu;
 
 	exit_phys_addr = virt_to_phys(__kvm_hyp_exit);
-	exit_end_phys_addr = virt_to_phys(__kvm_hyp_exit_end);
 	BUG_ON(exit_phys_addr & 0x1f);
 
 	/*
-	 * Create identity mapping for the exit code.
-	 */
-	hyp_idmap_add(kvm_hyp_pgd_get(),
-		      (unsigned long)exit_phys_addr,
-		      (unsigned long)exit_end_phys_addr);
-
-	/*
 	 * Execute the exit code on each CPU.
 	 *
 	 * Note: The stack is not mapped yet, so don't do anything else than
@@ -890,13 +864,6 @@ static int exit_hyp_mode(void)
 					 (void *)(long)exit_phys_addr, 1);
 	}
 
-	/*
-	 * Unmap the identity mapping
-	 */
-	hyp_idmap_del(kvm_hyp_pgd_get(),
-		      (unsigned long)exit_phys_addr,
-		      (unsigned long)exit_end_phys_addr);
-
 	return 0;
 }
 
@@ -909,7 +876,6 @@ void kvm_arch_exit(void)
 	free_hyp_pmds();
 	for_each_possible_cpu(cpu)
 		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
-	kvm_hyp_pgd_free();
 }
 
 static int arm_init(void)
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
index 506e77c..40f0b27 100644
--- a/arch/arm/kvm/init.S
+++ b/arch/arm/kvm/init.S
@@ -28,6 +28,7 @@
 @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
 	.text
 	.arm
+	.pushsection	.hyp.idmap.text, "ax"
 	.align 12
 __kvm_hyp_init:
 	.globl __kvm_hyp_init
@@ -120,7 +121,6 @@ __do_hyp_init:
 	.globl __kvm_hyp_init_end
 __kvm_hyp_init_end:
 
-
 	.align 12
 __kvm_hyp_exit:
 	.globl __kvm_hyp_exit
@@ -147,3 +147,5 @@ __do_hyp_exit:
 
 	.globl __kvm_hyp_exit_end
 __kvm_hyp_exit_end:
+
+	.popsection
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 5dd30da..1b3db42 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -18,6 +18,7 @@
 #include <linux/kvm_host.h>
 #include <linux/io.h>
 #include <trace/events/kvm.h>
+#include <asm/idmap.h>
 #include <asm/pgalloc.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_mmu.h>
@@ -27,7 +28,6 @@
 
 #include "trace.h"
 
-static pgd_t *kvm_hyp_pgd;
 static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
 
 static void free_ptes(pmd_t *pmd, unsigned long addr)
@@ -59,7 +59,7 @@ void free_hyp_pmds(void)
 
 	mutex_lock(&kvm_hyp_pgd_mutex);
 	for (addr = PAGE_OFFSET; addr != 0; addr += PGDIR_SIZE) {
-		pgd = kvm_hyp_pgd + pgd_index(addr);
+		pgd = hyp_pgd + pgd_index(addr);
 		pud = pud_offset(pgd, addr);
 
 		if (pud_none(*pud))
@@ -156,7 +156,7 @@ static int __create_hyp_mappings(void *from, void *to,
 
 	mutex_lock(&kvm_hyp_pgd_mutex);
 	for (addr = start; addr < end; addr = next) {
-		pgd = kvm_hyp_pgd + pgd_index(addr);
+		pgd = hyp_pgd + pgd_index(addr);
 		pud = pud_offset(pgd, addr);
 
 		if (pud_none_or_clear_bad(pud)) {
@@ -730,23 +730,3 @@ int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
 
 	return 0;
 }
-
-int kvm_hyp_pgd_alloc(void)
-{
-	kvm_hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
-	if (!kvm_hyp_pgd)
-		return -ENOMEM;
-
-	return 0;
-}
-
-pgd_t *kvm_hyp_pgd_get(void)
-{
-	return kvm_hyp_pgd;
-}
-
-void kvm_hyp_pgd_free(void)
-{
-	kfree(kvm_hyp_pgd);
-	kvm_hyp_pgd = NULL;
-}
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 87d00ae..60557e3 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -1,5 +1,6 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/slab.h>
 
 #include <asm/cputype.h>
 #include <asm/idmap.h>
@@ -60,11 +61,18 @@ static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
 	} while (pud++, addr = next, addr != end);
 }
 
-static void identity_mapping_add(pgd_t *pgd, unsigned long addr,
-				 unsigned long end, unsigned long prot)
+static void identity_mapping_add(pgd_t *pgd, const char *text_start,
+				 const char *text_end, unsigned long prot)
 {
+	unsigned long addr, end;
 	unsigned long next;
 
+	addr = virt_to_phys(text_start);
+	end = virt_to_phys(text_end);
+
+	pr_info("Setting up static %sidentity map for 0x%llx - 0x%llx\n",
+		prot ? "HYP " : "",
+		(long long)addr, (long long)end);
 	prot |= PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
 
 	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
@@ -81,30 +89,19 @@ extern char  __idmap_text_start[], __idmap_text_end[];
 
 static int __init init_static_idmap(void)
 {
-	phys_addr_t idmap_start, idmap_end;
-
 	idmap_pgd = pgd_alloc(&init_mm);
 	if (!idmap_pgd)
 		return -ENOMEM;
 
-	/* Add an identity mapping for the physical address of the section. */
-	idmap_start = virt_to_phys((void *)__idmap_text_start);
-	idmap_end = virt_to_phys((void *)__idmap_text_end);
-
-	pr_info("Setting up static identity map for 0x%llx - 0x%llx\n",
-		(long long)idmap_start, (long long)idmap_end);
-	identity_mapping_add(idmap_pgd, idmap_start, idmap_end, 0);
+	identity_mapping_add(idmap_pgd, __idmap_text_start,
+			     __idmap_text_end, 0);
 
 	return 0;
 }
 early_initcall(init_static_idmap);
 
-#ifdef CONFIG_KVM_ARM_HOST
-void hyp_idmap_add(pgd_t *pgd, unsigned long addr, unsigned long end)
-{
-	identity_mapping_add(pgd, addr, end, PMD_SECT_AP1);
-}
-EXPORT_SYMBOL_GPL(hyp_idmap_add);
+#ifdef CONFIG_ARM_VIRT_EXT
+pgd_t *hyp_pgd;
 
 static void hyp_idmap_del_pmd(pgd_t *pgd, unsigned long addr)
 {
@@ -113,17 +110,25 @@ static void hyp_idmap_del_pmd(pgd_t *pgd, unsigned long addr)
 
 	pud = pud_offset(pgd, addr);
 	pmd = pmd_offset(pud, addr);
-	pmd_free(NULL, pmd);
 	pud_clear(pud);
+	clean_pmd_entry(pmd);
+	pmd_free(NULL, (pmd_t *)((unsigned long)pmd & PAGE_MASK));
 }
 
+extern char  __hyp_idmap_text_start[], __hyp_idmap_text_end[];
+
 /*
  * This version actually frees the underlying pmds for all pgds in range and
  * clear the pgds themselves afterwards.
  */
-void hyp_idmap_del(pgd_t *pgd, unsigned long addr, unsigned long end)
+void hyp_idmap_teardown(void)
 {
+	unsigned long addr, end;
 	unsigned long next;
+	pgd_t *pgd = hyp_pgd;
+
+	addr = virt_to_phys(__hyp_idmap_text_start);
+	end = virt_to_phys(__hyp_idmap_text_end);
 
 	pgd += pgd_index(addr);
 	do {
@@ -132,7 +137,20 @@ void hyp_idmap_del(pgd_t *pgd, unsigned long addr, unsigned long end)
 			hyp_idmap_del_pmd(pgd, addr);
 	} while (pgd++, addr = next, addr < end);
 }
-EXPORT_SYMBOL_GPL(hyp_idmap_del);
+EXPORT_SYMBOL_GPL(hyp_idmap_teardown);
+
+static int __init hyp_init_static_idmap(void)
+{
+	hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
+	if (!hyp_pgd)
+		return -ENOMEM;
+
+	identity_mapping_add(hyp_pgd, __hyp_idmap_text_start,
+			     __hyp_idmap_text_end, PMD_SECT_AP1);
+
+	return 0;
+}
+early_initcall(hyp_init_static_idmap);
 #endif
 
 /*
-- 
1.7.10.3