Hello.
Sergei Shtylyov wrote:
With 64-bit physical addresses enabled, 'swapon' was causing a kernel oops
on Alchemy CPUs (MIPS32) because the swap entry type field corrupted the
_PAGE_FILE bit in pte_low. So, change the layout of the swap entry to use
all bits except _PAGE_PRESENT and _PAGE_FILE (the hardware protection bits
are loaded from pte_high, which should be cleared by the
__swp_entry_to_pte() macro) -- this gives 25 bits for the swap entry
offset.
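
For illustration, here's a standalone sketch of the resulting layout; the
_PAGE_PRESENT/_PAGE_FILE positions are inferred from the bits the new
macros skip (bits 6 and 10), not quoted from the kernel headers. The type
lands in bits 0-4, offset bit 0 in bit 5, offset bits 1-3 in bits 7-9, and
offset bits 4-24 in bits 11-31:

#include <assert.h>
#include <stdio.h>

/* Assumed positions, inferred from the bits the macros skip. */
#define _PAGE_PRESENT	(1 << 6)
#define _PAGE_FILE	(1 << 10)

/* Pack/unpack exactly as the new __swp_entry()/__swp_offset() do. */
static unsigned long swp_entry(unsigned long type, unsigned long offset)
{
	return (type & 0x1f) |
	       ((offset & 0x1) << 5) |
	       ((offset & 0xe) << 6) |
	       ((offset >> 4) << 11);
}

static unsigned long swp_offset(unsigned long val)
{
	return ((val >> 5) & 0x1) |
	       ((val >> 6) & 0xe) |
	       ((val >> 11) << 4);
}

int main(void)
{
	unsigned long type, offset = 0x1ffffff;	/* all 25 offset bits set */

	for (type = 0; type < 32; type++) {
		unsigned long val = swp_entry(type, offset);

		assert(!(val & (_PAGE_PRESENT | _PAGE_FILE)));
		assert((val & 0x1f) == type);
		assert(swp_offset(val) == offset);
	}
	printf("swap entries never touch _PAGE_PRESENT/_PAGE_FILE\n");
	return 0;
}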
Hm, just noticed that this fix renders set_pte()/pte_clear() erroneous by
reusing _PAGE_GLOBAL (bit 0) in the pte_low field of pte_t -- pte_high
should have been used instead, or those macros fixed. So, refrain from
committing as yet...
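
To show the conflict concretely, here's a much simplified sketch of the
buddy handling those helpers do on this configuration (from memory, not
the literal kernel source; pte_clear() similarly preserves the pair's
global bit):

typedef struct { unsigned int pte_low, pte_high; } pte_t;

#define _PAGE_GLOBAL	(1 << 0)	/* bit 0 of pte_low */

/* The even/odd PTEs of a pair share one TLB entry. */
static pte_t *ptep_buddy(pte_t *ptep)
{
	return (pte_t *)((unsigned long)ptep ^ sizeof(pte_t));
}

static void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	ptep->pte_low = pte.pte_low;

	/*
	 * The global bit must be kept consistent across the pair.
	 * With the layout above, any odd swap type sets bit 0 of
	 * pte_low, so the swap entry is mistaken for a global
	 * mapping and the buddy PTE gets marked global by mistake.
	 */
	if (pte.pte_low & _PAGE_GLOBAL)
		ptep_buddy(ptep)->pte_low |= _PAGE_GLOBAL;
}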
WBR, Sergei
Signed-off-by: Konstantin Baydarov <kbaidarov@xxxxxxxxxxxxx>
Signed-off-by: Sergei Shtylyov <sshtylyov@xxxxxxxxxxxxx>
------------------------------------------------------------------------
diff --git a/include/asm-mips/pgtable-32.h b/include/asm-mips/pgtable-32.h
index 4d6bc45..b0ad112 100644
--- a/include/asm-mips/pgtable-32.h
+++ b/include/asm-mips/pgtable-32.h
@@ -190,11 +190,27 @@ pfn_pte(unsigned long pfn, pgprot_t prot
 #else
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+/*
+ * For a 36-bit physical address we store the swap entry in pte_low and 0
+ * in pte_high, which gives us 25 bits available for the offset...
+ */
+#define __swp_type(x)		((x).val & 0x1f)
+#define __swp_offset(x)		((((x).val >> 5) & 0x1) | \
+				 (((x).val >> 6) & 0xe) | \
+				 (((x).val >> 11) << 4))
+#define __swp_entry(type,offset)	\
+	((swp_entry_t) { ((type) & 0x1f) | \
+			 (((offset) & 0x1) << 5) | \
+			 (((offset) & 0xe) << 6) | \
+			 (((offset) >> 4) << 11) })
+#else
 /* Swap entries must have VALID and GLOBAL bits cleared. */
 #define __swp_type(x)		(((x).val >> 8) & 0x1f)
 #define __swp_offset(x)		((x).val >> 13)
 #define __swp_entry(type,offset)	\
 	((swp_entry_t) { ((type) << 8) | ((offset) << 13) })
+#endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) */
 /*
  * Bits 0, 1, 2, 7 and 8 are taken, split up the 27 bits of offset
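
For completeness: the changelog's point about clearing pte_high suggests
conversion macros along these lines (an assumption -- they're not in the
excerpt, and the pte_low-first field order of the two-word pte_t is
assumed as well):

/* Keep the swap entry in pte_low; clearing pte_high keeps the
 * hardware protection bits (loaded from pte_high) zero. */
#define __pte_to_swp_entry(pte)	((swp_entry_t) { (pte).pte_low })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val, 0 })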