Fix the non-linear memory mapping done via remap_file_pages(): it didn't
work on any MIPS CPU because the page offset encoded into the file PTE
clashed with _PAGE_FILE and some other page protection bits which must be
left zero for this kind of PTE.
The patch has been tested on MIPS32, MIPS64, and Alchemy CPUs; only the
R3000/TX3927 part hasn't been tested, for lack of time.
Signed-off-by: Konstantin Baydarov <kbaidarov@xxxxxxxxxxxxx>
Signed-off-by: Sergei Shtylyov <sshtylyov@xxxxxxxxxxxxx>
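
For reference, below is a minimal user-space sketch (not part of the patch)
of the new generic 32-bit encoding, i.e. the last #else branch in
pgtable-32.h.  The _PAGE_FILE value and PTE_TAKEN_MASK used here are
illustrative assumptions -- the real definitions live in
include/asm-mips/pgtable-bits.h and depend on the CPU configuration.  It
just checks that an offset round-trips through pgoff_to_pte() /
pte_to_pgoff() without leaking into the reserved protection bits:

/*
 * User-space sketch of the new generic 32-bit file-PTE encoding.
 * _PAGE_FILE is an assumed, illustrative value; the real one comes
 * from asm-mips/pgtable-bits.h and varies with the CPU configuration.
 */
#include <assert.h>
#include <stdio.h>

#define _PAGE_FILE		(1UL << 4)	/* assumption, for illustration only */
#define PTE_FILE_MAX_BITS	27

/* Bits 0, 3, 4, 6, and 7 of the PTE must stay clear of the offset. */
#define PTE_TAKEN_MASK		((1UL << 0) | (1UL << 3) | (1UL << 4) | \
				 (1UL << 6) | (1UL << 7))

/* Same bit shuffling as the new pgoff_to_pte()/pte_to_pgoff() macros. */
static unsigned long pgoff_to_pte(unsigned long off)
{
	return (((off) & 0x3) << 1) | (((off) & 0x4) << 3) |
	       (((off) >> 3) << 8) | _PAGE_FILE;
}

static unsigned long pte_to_pgoff(unsigned long pte)
{
	return ((pte >> 1) & 0x3) | ((pte >> 3) & 0x4) | ((pte >> 8) << 3);
}

int main(void)
{
	unsigned long off;

	for (off = 0; off < (1UL << PTE_FILE_MAX_BITS); off += 12345) {
		unsigned long pte = pgoff_to_pte(off);

		/* The offset must survive the round trip... */
		assert(pte_to_pgoff(pte) == off);
		/* ...and must not leak into the reserved/protection bits. */
		assert((pte & PTE_TAKEN_MASK) == _PAGE_FILE);
	}
	printf("file PTE encoding round-trips cleanly\n");
	return 0;
}
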
diff --git a/include/asm-mips/pgtable-32.h b/include/asm-mips/pgtable-32.h
index 4d6bc45..e88cbd9 100644
--- a/include/asm-mips/pgtable-32.h
+++ b/include/asm-mips/pgtable-32.h
@@ -177,16 +177,19 @@ pfn_pte(unsigned long pfn, pgprot_t prot
((swp_entry_t) { ((type) << 10) | ((offset) << 15) })
/*
- * Bits 0, 1, 2, 9 and 10 are taken, split up the 27 bits of offset
+ * Bits 0, 3, 4, 8, and 9 are taken, split up 27 bits of offset
* into this range:
*/
#define PTE_FILE_MAX_BITS 27
-#define pte_to_pgoff(_pte) \
- ((((_pte).pte >> 3) & 0x3f ) + (((_pte).pte >> 11) << 8 ))
-
-#define pgoff_to_pte(off) \
- ((pte_t) { (((off) & 0x3f) << 3) + (((off) >> 8) << 11) + _PAGE_FILE })
+#define pte_to_pgoff(_pte) ((((_pte).pte >> 1 ) & 0x03) | \
+ (((_pte).pte >> 3 ) & 0x1c) | \
+ (((_pte).pte >> 10) << 5 ))
+
+#define pgoff_to_pte(off) ((pte_t) { (((off) & 0x03) << 1 ) | \
+ (((off) & 0x1c) << 3 ) | \
+ (((off) >> 5 ) << 10) | \
+ _PAGE_FILE })
#else
@@ -196,24 +199,31 @@ pfn_pte(unsigned long pfn, pgprot_t prot
#define __swp_entry(type,offset) \
((swp_entry_t) { ((type) << 8) | ((offset) << 13) })
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
/*
- * Bits 0, 1, 2, 7 and 8 are taken, split up the 27 bits of offset
+ * Bits 0 and 1 of pte_high are taken, split up 30 bits of offset
* into this range:
*/
-#define PTE_FILE_MAX_BITS 27
+#define PTE_FILE_MAX_BITS 30
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
- /* fixme */
-#define pte_to_pgoff(_pte) (((_pte).pte_high >> 6) + ((_pte).pte_high & 0x3f))
-#define pgoff_to_pte(off) \
- ((pte_t){(((off) & 0x3f) + ((off) << 6) + _PAGE_FILE)})
+#define pte_to_pgoff(_pte) ((_pte).pte_high >> 2)
+#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) << 2 })
#else
-#define pte_to_pgoff(_pte) \
- ((((_pte).pte >> 3) & 0x1f ) + (((_pte).pte >> 9) << 6 ))
+/*
+ * Bits 0, 3, 4, 6, and 7 are taken, split up 27 bits of offset
+ * into this range:
+ */
+#define PTE_FILE_MAX_BITS 27
-#define pgoff_to_pte(off) \
- ((pte_t) { (((off) & 0x1f) << 3) + (((off) >> 6) << 9) + _PAGE_FILE })
+#define pte_to_pgoff(_pte) ((((_pte).pte >> 1) & 0x3) | \
+ (((_pte).pte >> 3) & 0x4) | \
+ (((_pte).pte >> 8) << 3))
+
+#define pgoff_to_pte(off) ((pte_t) { (((off) & 0x3) << 1) | \
+ (((off) & 0x4) << 3) | \
+ (((off) >> 3 ) << 8) | \
+ _PAGE_FILE })
#endif
#endif
diff --git a/include/asm-mips/pgtable-64.h b/include/asm-mips/pgtable-64.h
index 82166b2..c0f3446 100644
--- a/include/asm-mips/pgtable-64.h
+++ b/include/asm-mips/pgtable-64.h
@@ -224,15 +224,12 @@ static inline pte_t mk_swap_pte(unsigned
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
/*
- * Bits 0, 1, 2, 7 and 8 are taken, split up the 32 bits of offset
- * into this range:
+ * Bits 0, 3, 4, 6, and 7 are taken. Let's leave bits 1, 2, and 5 alone
+ * to make things easier, and only use the upper 56 bits for the page offset...
*/
-#define PTE_FILE_MAX_BITS 32
+#define PTE_FILE_MAX_BITS 56
-#define pte_to_pgoff(_pte) \
- ((((_pte).pte >> 3) & 0x1f ) + (((_pte).pte >> 9) << 6 ))
-
-#define pgoff_to_pte(off) \
- ((pte_t) { (((off) & 0x1f) << 3) + (((off) >> 6) << 9) + _PAGE_FILE })
+#define pte_to_pgoff(_pte) ((_pte).pte >> 8)
+#define pgoff_to_pte(off) ((pte_t) { ((off) << 8) | _PAGE_FILE })
#endif /* _ASM_PGTABLE_64_H */