[PATCH 3/3] mips: add 48-bit virtual address space support using a 4th level of page tables

This is a set of patches that adds 48-bit virtual address space support
on MIPS to kernel 3.10. It includes a port of an existing patch for the
16K and 64K page sizes, plus 4-level page table support for the rest of
the supported page sizes.
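
The arithmetic behind this is simple: each page table level occupies one
page of 8-byte pointers, so it resolves PAGE_SHIFT - 3 bits of the
virtual address on top of the PAGE_SHIFT bits covered by the page
offset, and a higher-order PGD adds PGD_ORDER more bits. A small
userspace calculation (illustration only, not part of the patch)
reproduces the numbers in the pgtable-64.h comment below:

	#include <stdio.h>

	/* VA bits resolved by 'levels' one-page tables of 8-byte pointers. */
	static unsigned int va_bits(unsigned int page_shift,
				    unsigned int levels,
				    unsigned int pgd_order)
	{
		return page_shift + levels * (page_shift - 3) + pgd_order;
	}

	int main(void)
	{
		printf("4K,  4 levels:             %u\n", va_bits(12, 4, 0)); /* 48 */
		printf("8K,  4 levels:             %u\n", va_bits(13, 4, 0)); /* 53 */
		printf("16K, 3 levels, 2-page PGD: %u\n", va_bits(14, 3, 1)); /* 48 */
		printf("32K, 3 levels:             %u\n", va_bits(15, 3, 0)); /* 51 */
		printf("64K, 3 levels:             %u\n", va_bits(16, 3, 0)); /* 55 */
		return 0;
	}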

Cc: David Daney <ddaney@xxxxxxxxxxxxxxxxxx>
Cc: Ralf Baechle <ralf@xxxxxxxxxxxxxx>
Cc: linux-mips@xxxxxxxxxxxxxx

Signed-off-by: Alex Belits <alex.belits@xxxxxxxxxx>

---
 arch/mips/Kconfig                  |   3 -
 arch/mips/include/asm/pgalloc.h    |  32 +++++
 arch/mips/include/asm/pgtable-64.h | 262 +++++++++++++++++++++++++++++++++++--
 arch/mips/include/asm/pgtable.h    |  13 +-
 arch/mips/include/asm/processor.h  |   2 +-
 arch/mips/kernel/asm-offsets.c     |   9 ++
 arch/mips/mm/init.c                |   3 +
 arch/mips/mm/pgtable-64.c          |  29 ++++
 arch/mips/mm/tlbex.c               |  30 ++++-
 9 files changed, 368 insertions(+), 15 deletions(-)
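
For reviewers: with the pud level unfolded, the generic walk that the
new definitions plug into proceeds pgd -> pud -> pmd -> pte. A
simplified sketch (illustration only; the lookup_pte() helper is
hypothetical and all validity checks are omitted):

	static pte_t *lookup_pte(struct mm_struct *mm, unsigned long addr)
	{
		pgd_t *pgd = pgd_offset(mm, addr);  /* top level, indexed by PGDIR_SHIFT */
		pud_t *pud = pud_offset(pgd, addr); /* new level, indexed by PUD_SHIFT */
		pmd_t *pmd = pmd_offset(pud, addr); /* indexed by PMD_SHIFT */

		return pte_offset(pmd, addr);       /* one entry per page */
	}

The tlbex.c changes below emit the same extra step (one shift, one mask,
one load) into the generated TLB refill handlers.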

diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 2ee3067..e514a81 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1756,7 +1756,6 @@ choice
 config PAGE_SIZE_4KB
 	bool "4kB"
 	depends on !CPU_LOONGSON2
-	depends on !MIPS_VA_BITS_48
 	help
 	 This option select the standard 4kB Linux page size.  On some
 	 R3000-family processors this is the only available page size.  Using
@@ -1766,7 +1765,6 @@ config PAGE_SIZE_4KB
 config PAGE_SIZE_8KB
 	bool "8kB"
 	depends on CPU_R8000 || CPU_CAVIUM_OCTEON
-	depends on !MIPS_VA_BITS_48
 	help
 	  Using 8kB page size will result in higher performance kernel at
 	  the price of higher memory consumption.  This option is available
@@ -1785,7 +1783,6 @@ config PAGE_SIZE_16KB
 config PAGE_SIZE_32KB
 	bool "32kB"
 	depends on CPU_CAVIUM_OCTEON
-	depends on !MIPS_VA_BITS_48
 	help
 	  Using 32kB page size will result in higher performance kernel at
 	  the price of higher memory consumption.  This option is available
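
Note: with the three "depends on !MIPS_VA_BITS_48" restrictions above
removed, the 48-bit address space becomes selectable with any page size,
e.g. this hypothetical .config fragment (assuming the MIPS_VA_BITS_48
option introduced earlier in this series):

	CONFIG_PAGE_SIZE_4KB=y
	CONFIG_MIPS_VA_BITS_48=y
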
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index 881d18b4..97d4254 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -29,7 +29,18 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 /*
  * Initialize a new pmd table with invalid pointers.
  */
+#ifndef __PAGETABLE_PMD_FOLDED
 extern void pmd_init(unsigned long page, unsigned long pagetable);
+#endif
+
+#ifndef __PAGETABLE_PUD_FOLDED
+
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+{
+	set_pgd(pgd, __pgd((unsigned long)pud));
+}
+
+#endif
 
 #ifndef __PAGETABLE_PMD_FOLDED
 
@@ -104,6 +115,27 @@ do {							\
 	tlb_remove_page((tlb), pte);			\
 } while (0)
 
+#ifndef __PAGETABLE_PUD_FOLDED
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+	pud_t *pud;
+
+	pud = (pud_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, PUD_ORDER);
+	if (pud)
+		pud_init((unsigned long)pud, (unsigned long)invalid_pmd_table);
+	return pud;
+}
+
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+	free_pages((unsigned long)pud, PUD_ORDER);
+}
+
+#define __pud_free_tlb(tlb, x, addr)	pud_free((tlb)->mm, x)
+
+#endif
+
 #ifndef __PAGETABLE_PMD_FOLDED
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index e9805ad..7193212 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -17,13 +17,140 @@
 #include <asm/cachectl.h>
 #include <asm/fixmap.h>
 
-#if defined(CONFIG_PAGE_SIZE_64KB) && !defined(CONFIG_MIPS_VA_BITS_48)
+
+/*
+ * Memory mapping options:
+ *
+ * Virtual address bits supported by tables:
+ * +------+---------+----------+
+ * | Page | Default | "48 bit" |
+ * +------+---------+----------+
+ * | 4K   |  40     |  48      |
+ * | 8K   |  43     |  53      |
+ * | 16K  |  47     |  48      |
+ * | 32K  |  51     |  51      |
+ * | 64K  |  42     |  55      |
+ * +------+---------+----------+
+ *
+ *
+ * 4K pages:
+ * With two levels of page tables, one 4K page per level, and 4K page:
+ * PTE provides 12 + 9 = 21 bits
+ * PGD provides 9 bits
+ * Total 30 bits (not used).
+ *
+ * With three levels of page tables, one 4K page per level, and 4K page:
+ * PTE provides 12 + 9 = 21 bits
+ * PMD provides 9 bits
+ * PGD provides 9 bits
+ * Total 39 bits (not used).
+ *
+ * With three levels of page tables, one 4K page per level except for
+ * two-page PGD, and 4K page:
+ * PTE provides 12 + 9 = 21 bits
+ * PMD provides 9 bits
+ * PGD provides 10 bits
+ * Total 40 bits (the default for 4K pages).
+ *
+ * With four levels of page tables, one 4K page per level, and 4K page:
+ * PTE provides 12 + 9 = 21 bits
+ * PMD provides 9 bits
+ * PUD provides 9 bits
+ * PGD provides 9 bits
+ * Total 48 bits (used when 48 bit address is enabled with 4K pages).
+ *
+ *
+ * 8K pages:
+ * With two levels of page tables, one 8K page per level, and 8K page:
+ * PTE provides 13 + 10 = 23 bits
+ * PGD provides 10 bits
+ * Total 33 bits (not used).
+ *
+ * With three levels of page tables, one 8K page per level, and 8K page:
+ * PTE provides 13 + 10 = 23 bits
+ * PMD provides 10 bits
+ * PGD provides 10 bits
+ * Total 43 bits (what is set when 8K pages are selected).
+ *
+ * With four levels of page tables, one 8K page per level, and 8K page:
+ * PTE provides 13 + 10 = 23 bits
+ * PMD provides 10 bits
+ * PUD provides 10 bits
+ * PGD provides 10 bits
+ * Total 53 bits (used when 48 bit address is enabled with 8K pages).
+ *
+ *
+ * 16K pages:
+ * With two levels of page tables, one 16K page per level, and 16K page:
+ * PTE provides 14 + 11 = 25 bits
+ * PGD provides 11 bits
+ * Total 36 bits (not used).
+ *
+ * With three levels of page tables, one 16K page per level, and 16K page:
+ * PTE provides 14 + 11 = 25 bits
+ * PMD provides 11 bits
+ * PGD provides 11 bits
+ * Total 47 bits (the default for 16K pages).
+ *
+ * With three levels of page tables, one 16K page per level except for
+ * two-page PGD, and 16K page:
+ * PTE provides 14 + 11 = 25 bits
+ * PMD provides 11 bits
+ * PGD provides 12 bits
+ * Total 48 bits (used when 48 bit address is enabled with 16K pages).
+ *
+ *
+ * 32K pages:
+ * With two levels of page tables, one 32K page per level, and 32K page:
+ * PTE provides 15 + 12 = 27 bits
+ * PGD provides 12 bits
+ * Total 39 bits (not used).
+ *
+ * With three levels of page tables, one 32K page per level, and 32K page:
+ * PTE provides 15 + 12 = 27 bits
+ * PMD provides 12 bits
+ * PGD provides 12 bits
+ * Total 51 bits (the default for 32K pages).
+ *
+ *
+ * 64K pages:
+ * With two levels of page tables, one 64K page per level, and 64K page:
+ * PTE provides 16 + 13 = 29 bits
+ * PGD provides 13 bits
+ * Total 42 bits (the default for 64K pages).
+ *
+ * With three levels of page tables, one 64K page per level, and 64K page:
+ * PTE provides 16 + 13 = 29 bits
+ * PMD provides 13 bits
+ * PGD provides 13 bits
+ * Total 55 bits (used when 48 bit address is enabled with 64K pages).
+ *
+ * The number of virtual address bits actually usable cannot exceed
+ * 48, or whatever the CPU supports, whichever is smaller; see
+ * arch/mips/include/asm/processor.h.
+ *
+ */
+
+#ifdef CONFIG_MIPS_VA_BITS_48
+/* 48-bit virtual memory */
+#if !defined(CONFIG_PAGE_SIZE_4KB) && !defined(CONFIG_PAGE_SIZE_8KB)
+/* All page sizes except 4K and 8K will use three-level page tables */
+#include <asm-generic/pgtable-nopud.h>
+#endif
+/* 4K and 8K pages use four-level page tables */
+#else
+/* Reduced (below 48 bit) virtual memory size */
+#ifdef CONFIG_PAGE_SIZE_64KB
+/* Two-level page table */
 #include <asm-generic/pgtable-nopmd.h>
 #else
+/* All other page sizes will use three-level page tables */
 #include <asm-generic/pgtable-nopud.h>
 #endif
+#endif
 
 /*
+ * Default configuration with 4K pages:
+ *
  * Each address space has 2 4K pages as its page directory, giving 1024
  * (== PTRS_PER_PGD) 8 byte pointers to pmd tables. Each pmd table is a
  * single 4K page, giving 512 (== PTRS_PER_PMD) 8 byte pointers to page
@@ -42,8 +169,9 @@
  * fault address - VMALLOC_START.
  */
 
+#ifdef __PAGETABLE_PUD_FOLDED
 
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
+/* Here PGDIR_SHIFT determines what a third-level page table entry can map */
 #ifdef __PAGETABLE_PMD_FOLDED
 #define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
 #else
@@ -56,9 +184,29 @@
 
 #define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
 #endif
+#else
+/* PUD is not folded */
+
+/* PMD_SHIFT determines the size of the area a second-level page table can map */
+#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
+#define PMD_SIZE	(1UL << PMD_SHIFT)
+#define PMD_MASK	(~(PMD_SIZE-1))
+
+/* PUD_SHIFT determines the size of the area a third-level page table can map */
+
+#define PUD_SHIFT	(PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
+#define PUD_SIZE	(1UL << PUD_SHIFT)
+#define PUD_MASK	(~(PUD_SIZE-1))
+
+/* Here PGDIR_SHIFT determines what a fourth-level page table entry can map */
+
+#define PGDIR_SHIFT	(PUD_SHIFT + (PAGE_SHIFT + PUD_ORDER - 3))
+#endif
+
 #define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 
+
 /*
  * For 4kB page size we use a 3 level page tree and an 8kB pgd, which
  * permits us mapping 40 bits of virtual address space.
@@ -71,24 +219,34 @@
  * two levels would be easy to implement.
  *
  * For 16kB page size we use a 3 level page tree which permits a total of
- * 47 bits of virtual address space.  We could add a third level but it seems
- * like at the moment there's no need for this.
+ * 47 bits of virtual address space.
  *
  * For 64kB page size we use a 2 level page table tree for a total of 42 bits
  * of virtual address space.
  */
 #ifdef CONFIG_PAGE_SIZE_4KB
+#ifdef CONFIG_MIPS_VA_BITS_48
+#define PGD_ORDER		0
+#define PUD_ORDER		0
+#else
 #define PGD_ORDER		1
 #define PUD_ORDER		aieeee_attempt_to_allocate_pud
+#endif
 #define PMD_ORDER		0
 #define PTE_ORDER		0
 #endif
+
 #ifdef CONFIG_PAGE_SIZE_8KB
 #define PGD_ORDER		0
+#ifdef CONFIG_MIPS_VA_BITS_48
+#define PUD_ORDER		0
+#else
 #define PUD_ORDER		aieeee_attempt_to_allocate_pud
+#endif
 #define PMD_ORDER		0
 #define PTE_ORDER		0
 #endif
+
 #ifdef CONFIG_PAGE_SIZE_16KB
 #ifdef CONFIG_MIPS_VA_BITS_48
 #define PGD_ORDER		1
@@ -99,12 +257,14 @@
 #define PMD_ORDER		0
 #define PTE_ORDER		0
 #endif
+
 #ifdef CONFIG_PAGE_SIZE_32KB
 #define PGD_ORDER		0
 #define PUD_ORDER		aieeee_attempt_to_allocate_pud
 #define PMD_ORDER		0
 #define PTE_ORDER		0
 #endif
+
 #ifdef CONFIG_PAGE_SIZE_64KB
 #define PGD_ORDER		0
 #define PUD_ORDER		aieeee_attempt_to_allocate_pud
@@ -117,6 +277,9 @@
 #endif
 
 #define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
+#ifndef __PAGETABLE_PUD_FOLDED
+#define PTRS_PER_PUD	((PAGE_SIZE << PUD_ORDER) / sizeof(pud_t))
+#endif
 #ifndef __PAGETABLE_PMD_FOLDED
 #define PTRS_PER_PMD	((PAGE_SIZE << PMD_ORDER) / sizeof(pmd_t))
 #endif
@@ -131,10 +294,18 @@
  * reliably trap.
  */
 #define VMALLOC_START		(MAP_BASE + (2 * PAGE_SIZE))
+#ifdef __PAGETABLE_PUD_FOLDED
 #define VMALLOC_END	\
 	(MAP_BASE + \
 	 min(PTRS_PER_PGD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, \
 	     (1UL << cpu_vmbits)) - (1UL << 32))
+#else
+#define VMALLOC_END	\
+	(MAP_BASE + \
+	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE \
+	     * PAGE_SIZE,					       \
+	     (1UL << cpu_vmbits)) - (1UL << 32))
+#endif
 
 #if defined(CONFIG_MODULES) && defined(KBUILD_64BIT_SYM32) && \
 	VMALLOC_START != CKSSEG
@@ -149,12 +320,28 @@
 #define pmd_ERROR(e) \
 	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
 #endif
+#ifndef __PAGETABLE_PUD_FOLDED
+#define pud_ERROR(e) \
+	printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
+#endif
 #define pgd_ERROR(e) \
 	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
 
 extern pte_t invalid_pte_table[PTRS_PER_PTE];
 extern pte_t empty_bad_page_table[PTRS_PER_PTE];
 
+#ifndef __PAGETABLE_PUD_FOLDED
+/*
+ * For 4-level pagetables we define these ourselves, for 3-level the
+ * definitions are supplied by <asm-generic/pgtable-nopud.h>, and for
+ * 2-level by <asm-generic/pgtable-nopmd.h>.
+ */
+typedef struct { unsigned long pud; } pud_t;
+#define pud_val(x)	((x).pud)
+#define __pud(x)	((pud_t) { (x) })
+
+extern pud_t invalid_pud_table[PTRS_PER_PUD];
+#endif
 
 #ifndef __PAGETABLE_PMD_FOLDED
 /*
@@ -200,6 +387,33 @@ static inline void pmd_clear(pmd_t *pmdp)
 {
 	pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
 }
+
+#ifndef __PAGETABLE_PUD_FOLDED
+
+/*
+ * Empty pgd entries point to the invalid_pud_table.
+ */
+static inline int pgd_none(pgd_t pgd)
+{
+	return pgd_val(pgd) == (unsigned long) invalid_pud_table;
+}
+
+static inline int pgd_bad(pgd_t pgd)
+{
+	return pgd_val(pgd) & ~PAGE_MASK;
+}
+
+static inline int pgd_present(pgd_t pgd)
+{
+	return pgd_val(pgd) != (unsigned long) invalid_pud_table;
+}
+
+static inline void pgd_clear(pgd_t *pgdp)
+{
+	pgd_val(*pgdp) = ((unsigned long) invalid_pud_table);
+}
+#endif
+
 #ifndef __PAGETABLE_PMD_FOLDED
 
 /*
@@ -238,18 +452,40 @@ static inline void pud_clear(pud_t *pudp)
 #endif
 
 #define __pgd_offset(address)	pgd_index(address)
-#define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
+#ifndef __PAGETABLE_PUD_FOLDED
+#define __pud_offset(address)	pud_index(address)
+#else
+#define __pud_offset(address)  (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
+#endif
 #define __pmd_offset(address)	pmd_index(address)
 
 /* to find an entry in a kernel page-table-directory */
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
 
 #define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+#ifndef __PAGETABLE_PUD_FOLDED
+#define pud_index(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
+#endif
 #define pmd_index(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
 
 /* to find an entry in a page-table-directory */
 #define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))
 
+#ifndef __PAGETABLE_PUD_FOLDED
+static inline unsigned long pgd_page_vaddr(pgd_t pgd)
+{
+	return pgd_val(pgd);
+}
+
+/*
+ * Find an entry in the upper-level page table (directly below the
+ * global directory)..
+ */
+static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
+{
+	return (pud_t *) pgd_page_vaddr(*pgd) + pud_index(address);
+}
+#endif
+
 #ifndef __PAGETABLE_PMD_FOLDED
 static inline unsigned long pud_page_vaddr(pud_t pud)
 {
@@ -258,14 +494,19 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
 #define pud_phys(pud)		virt_to_phys((void *)pud_val(pud))
 #define pud_page(pud)		(pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))
 
-/* Find an entry in the second-level page table.. */
+/*
+ * Find an entry in the middle-level page table (below the upper level
+ * if present, otherwise directly below the global directory)..
+ */
 static inline pmd_t *pmd_offset(pud_t * pud, unsigned long address)
 {
 	return (pmd_t *) pud_page_vaddr(*pud) + pmd_index(address);
 }
 #endif
 
-/* Find an entry in the third-level page table.. */
+/*
+ * Find an entry in the lowest-level (pte) page table..
+ */
 #define __pte_offset(address)						\
 	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 #define pte_offset(dir, address)					\
@@ -277,10 +518,15 @@ static inline pmd_t *pmd_offset(pud_t * pud, unsigned long address)
 #define pte_unmap(pte) ((void)(pte))
 
 /*
- * Initialize a new pgd / pmd table with invalid pointers.
+ * Initialize a new pgd / pud / pmd table with invalid pointers.
  */
 extern void pgd_init(unsigned long page);
+#ifndef __PAGETABLE_PUD_FOLDED
+extern void pud_init(unsigned long page, unsigned long pagetable);
+#endif
+#ifndef __PAGETABLE_PMD_FOLDED
 extern void pmd_init(unsigned long page, unsigned long pagetable);
+#endif
 
 /*
  * Non-present pages:  high 24 bits are offset, next 8 bits type,
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index e821de7..12e3fce 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -201,20 +201,29 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
 #endif
 
 /*
- * (pmds are folded into puds so this doesn't get actually called,
+ * (pmds may be folded into puds, so this may not actually get called,
  * but the define is needed for a generic inline function.)
  */
 #define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while(0)
 
 #ifndef __PAGETABLE_PMD_FOLDED
 /*
- * (puds are folded into pgds so this doesn't get actually called,
+ * (puds may be folded into pgds, so this may not actually get called,
  * but the define is needed for a generic inline function.)
  */
 #define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0)
 #endif
 
+#ifndef __PAGETABLE_PUD_FOLDED
+/*
+ * When the pud level is not folded the pgd is a real directory level,
+ * so set_pgd() actually gets called (e.g. from pgd_populate()).
+ */
+#define set_pgd(pgdptr, pgdval) do { *(pgdptr) = (pgdval); } while (0)
+#endif
+
 #define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
+#define PUD_T_LOG2	(__builtin_ffs(sizeof(pud_t)) - 1)
 #define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
 #define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)
 
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 083a56f..b5fc9de 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -74,7 +74,7 @@ extern unsigned int vced_count, vcei_count;
 #ifdef CONFIG_MIPS_VA_BITS_48
 #define TASK_SIZE64	(0x1UL << ((cpu_data[0].vmbits > 48) ? 48 : cpu_data[0].vmbits))
 #else
-#define TASK_SIZE64     0x10000000000UL
+#define TASK_SIZE64	0x10000000000UL
 #endif
 #define TASK_SIZE (test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
 
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 0845091..84b6db7 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -180,26 +180,35 @@ void output_mm_defines(void)
 	OFFSET(MM_CONTEXT, mm_struct, context);
 	BLANK();
 	DEFINE(_PGD_T_SIZE, sizeof(pgd_t));
+	DEFINE(_PUD_T_SIZE, sizeof(pud_t));
 	DEFINE(_PMD_T_SIZE, sizeof(pmd_t));
 	DEFINE(_PTE_T_SIZE, sizeof(pte_t));
 	BLANK();
 	DEFINE(_PGD_T_LOG2, PGD_T_LOG2);
+#ifndef __PAGETABLE_PUD_FOLDED
+	DEFINE(_PUD_T_LOG2, PUD_T_LOG2);
+#endif
 #ifndef __PAGETABLE_PMD_FOLDED
 	DEFINE(_PMD_T_LOG2, PMD_T_LOG2);
 #endif
 	DEFINE(_PTE_T_LOG2, PTE_T_LOG2);
 	BLANK();
 	DEFINE(_PGD_ORDER, PGD_ORDER);
+#ifndef __PAGETABLE_PUD_FOLDED
+	DEFINE(_PUD_ORDER, PUD_ORDER);
+#endif
 #ifndef __PAGETABLE_PMD_FOLDED
 	DEFINE(_PMD_ORDER, PMD_ORDER);
 #endif
 	DEFINE(_PTE_ORDER, PTE_ORDER);
 	BLANK();
 	DEFINE(_PMD_SHIFT, PMD_SHIFT);
+	DEFINE(_PUD_SHIFT, PUD_SHIFT);
 	DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT);
 	BLANK();
 	DEFINE(_PTRS_PER_PGD, PTRS_PER_PGD);
 	DEFINE(_PTRS_PER_PMD, PTRS_PER_PMD);
+	DEFINE(_PTRS_PER_PUD, PTRS_PER_PUD);
 	DEFINE(_PTRS_PER_PTE, PTRS_PER_PTE);
 	BLANK();
 	DEFINE(_PAGE_SHIFT, PAGE_SHIFT);
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 9b973e0..25d7ce5 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -466,6 +466,9 @@ unsigned long pgd_current[NR_CPUS];
  * it in the linker script.
  */
 pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
+#ifndef __PAGETABLE_PUD_FOLDED
+pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
+#endif
 #ifndef __PAGETABLE_PMD_FOLDED
 pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
 #endif
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index e8adc00..a942256 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -18,11 +18,15 @@ void pgd_init(unsigned long page)
 	unsigned long *p, *end;
 	unsigned long entry;
 
+#ifdef __PAGETABLE_PUD_FOLDED
 #ifdef __PAGETABLE_PMD_FOLDED
 	entry = (unsigned long)invalid_pte_table;
 #else
 	entry = (unsigned long)invalid_pmd_table;
 #endif
+#else
+	entry = (unsigned long)invalid_pud_table;
+#endif
 
 	p = (unsigned long *) page;
 	end = p + PTRS_PER_PGD;
@@ -40,6 +44,28 @@ void pgd_init(unsigned long page)
 	} while (p != end);
 }
 
+#ifndef __PAGETABLE_PUD_FOLDED
+void pud_init(unsigned long addr, unsigned long pagetable)
+{
+	unsigned long *p, *end;
+
+	p = (unsigned long *) addr;
+	end = p + PTRS_PER_PUD;
+
+	do {
+		p[0] = pagetable;
+		p[1] = pagetable;
+		p[2] = pagetable;
+		p[3] = pagetable;
+		p[4] = pagetable;
+		p += 8;
+		p[-3] = pagetable;
+		p[-2] = pagetable;
+		p[-1] = pagetable;
+	} while (p != end);
+}
+#endif
+
 #ifndef __PAGETABLE_PMD_FOLDED
 void pmd_init(unsigned long addr, unsigned long pagetable)
 {
@@ -102,6 +128,9 @@ void __init pagetable_init(void)
 #ifndef __PAGETABLE_PMD_FOLDED
 	pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table);
 #endif
+#ifndef __PAGETABLE_PUD_FOLDED
+	pud_init((unsigned long)invalid_pud_table, (unsigned long)invalid_pmd_table);
+#endif
 	pgd_base = swapper_pg_dir;
 	/*
 	 * Fixed mappings:
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index a91a7a9..2c6ac9e 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -862,6 +862,13 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 
 	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
 	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
+#ifndef __PAGETABLE_PUD_FOLDED
+	uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
+	uasm_i_ld(p, ptr, 0, ptr); /* get pud pointer */
+	uasm_i_dsrl_safe(p, tmp, tmp, PUD_SHIFT-3); /* get pud offset in bytes */
+	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PUD - 1)<<3);
+	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pud offset */
+#endif
 #ifndef __PAGETABLE_PMD_FOLDED
 	uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
 	uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
@@ -1188,10 +1195,29 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
 		UASM_i_LWX(p, LOC_PTEP, scratch, ptr);
 	} else {
 		uasm_i_daddu(p, ptr, ptr, scratch); /* add in pgd offset */
-		uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pmd pointer */
+		uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pud, pmd or pte pointer */
 	}
 
 #ifndef __PAGETABLE_PMD_FOLDED
+#ifndef __PAGETABLE_PUD_FOLDED
+	/* LOC_PTEP is ptr, and it contains a pointer to the PUD entry */
+	/* tmp contains the address */
+	/* get pud offset in bytes */
+	uasm_i_dsrl_safe(p, scratch, tmp, PUD_SHIFT - 3);
+	uasm_i_andi(p, scratch, scratch, (PTRS_PER_PUD - 1) << 3);
+
+	if (use_lwx_insns()) {
+		UASM_i_LWX(p, ptr, scratch, ptr);
+	} else {
+		uasm_i_daddu(p, ptr, ptr, scratch); /* add in pud offset */
+		UASM_i_LW(p, ptr, 0, ptr);
+	}
+	/* ptr now contains a pointer to the PMD entry */
+	/* tmp contains the address */
+#endif
+
+	/* LOC_PTEP is ptr, and it contains a pointer to the PMD entry */
+	/* tmp contains the address */
 	/* get pmd offset in bytes */
 	uasm_i_dsrl_safe(p, scratch, tmp, PMD_SHIFT - 3);
 	uasm_i_andi(p, scratch, scratch, (PTRS_PER_PMD - 1) << 3);
@@ -1203,6 +1229,8 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
 		uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */
 		UASM_i_LW(p, scratch, 0, ptr);
 	}
+	/* scratch contains a pointer to the PTE entry */
+	/* tmp contains context */
 #endif
 	/* Adjust the context during the load latency. */
 	build_adjust_context(p, tmp);
-- 
2.8.1