[PATCH 6.13 520/623] s390/mm: Allow large pages for KASAN shadow mapping

6.13-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Vasily Gorbik <gor@xxxxxxxxxxxxx>

[ Upstream commit e70452c4ba2ce1e24a3fdc18bd623edb7b56013c ]

Commit c98d2ecae08f ("s390/mm: Uncouple physical vs virtual address
spaces") introduced a large_allowed() helper that restricts which mapping
modes can use large pages. This change unintentionally prevented KASAN
shadow mappings from using large pages, despite there being no reason
to avoid them. In fact, large pages are preferred for performance.

Since commit d8073dc6bc04 ("s390/mm: Allow large pages only for aligned
physical addresses"), both can_large_pud() and can_large_pmd() call _pa()
to check whether large-page physical addresses are aligned. However,
_pa() has a side effect: in POPULATE_KASAN_MAP_SHADOW mode it allocates
memory.
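
To illustrate the hazard, here is a minimal sketch (not the kernel
code; alloc_shadow_backing() is a hypothetical stand-in for the boot
allocator call, which is elided from the hunks below):

	static unsigned long resolve_sketch(unsigned long addr, unsigned long size,
					    enum populate_mode mode)
	{
		switch (mode) {
		case POPULATE_DIRECT:
			/* Pure: a plain address translation, safe to call twice. */
			return addr;
		case POPULATE_KASAN_MAP_SHADOW:
			/*
			 * Impure: every call allocates fresh backing memory for
			 * the shadow, so calling this helper merely to probe
			 * alignment leaks an allocation.
			 */
			return alloc_shadow_backing(size);
		default:
			return INVALID_PHYS_ADDR;
		}
	}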

Rename large_allowed() to large_page_mapping_allowed() and add
POPULATE_KASAN_MAP_SHADOW to the allowed list to restore large page
mappings for KASAN shadows.

While large_page_mapping_allowed() isn't strictly necessary with the
current mapping modes, since the disallowed modes either don't map
anything or fail the alignment and size checks, keep it for clarity.

Rename _pa() to resolve_pa_may_alloc() for clarity and to emphasize
its existing side effect. Rework can_large_pud()/can_large_pmd() to
take the side effect into consideration and actually return the
physical address instead of just checking conditions.

Fixes: c98d2ecae08f ("s390/mm: Uncouple physical vs virtual address spaces")
Reviewed-by: Alexander Gordeev <agordeev@xxxxxxxxxxxxx>
Signed-off-by: Vasily Gorbik <gor@xxxxxxxxxxxxx>
Signed-off-by: Alexander Gordeev <agordeev@xxxxxxxxxxxxx>
Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>
---
 arch/s390/boot/vmem.c | 74 +++++++++++++++++++++++++++++--------------
 1 file changed, 50 insertions(+), 24 deletions(-)

diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c
index 3fa28db2fe59f..41f0159339dbb 100644
--- a/arch/s390/boot/vmem.c
+++ b/arch/s390/boot/vmem.c
@@ -13,6 +13,7 @@
 #include "decompressor.h"
 #include "boot.h"
 
+#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
 struct ctlreg __bootdata_preserved(s390_invalid_asce);
 
 #ifdef CONFIG_PROC_FS
@@ -236,11 +237,12 @@ static pte_t *boot_pte_alloc(void)
 	return pte;
 }
 
-static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_mode mode)
+static unsigned long resolve_pa_may_alloc(unsigned long addr, unsigned long size,
+					  enum populate_mode mode)
 {
 	switch (mode) {
 	case POPULATE_NONE:
-		return -1;
+		return INVALID_PHYS_ADDR;
 	case POPULATE_DIRECT:
 		return addr;
 	case POPULATE_LOWCORE:
@@ -258,33 +260,55 @@ static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_m
 		return addr;
 #endif
 	default:
-		return -1;
+		return INVALID_PHYS_ADDR;
 	}
 }
 
-static bool large_allowed(enum populate_mode mode)
+static bool large_page_mapping_allowed(enum populate_mode mode)
 {
-	return (mode == POPULATE_DIRECT) || (mode == POPULATE_IDENTITY) || (mode == POPULATE_KERNEL);
+	switch (mode) {
+	case POPULATE_DIRECT:
+	case POPULATE_IDENTITY:
+	case POPULATE_KERNEL:
+#ifdef CONFIG_KASAN
+	case POPULATE_KASAN_MAP_SHADOW:
+#endif
+		return true;
+	default:
+		return false;
+	}
 }
 
-static bool can_large_pud(pud_t *pu_dir, unsigned long addr, unsigned long end,
-			  enum populate_mode mode)
+static unsigned long try_get_large_pud_pa(pud_t *pu_dir, unsigned long addr, unsigned long end,
+					  enum populate_mode mode)
 {
-	unsigned long size = end - addr;
+	unsigned long pa, size = end - addr;
+
+	if (!machine.has_edat2 || !large_page_mapping_allowed(mode) ||
+	    !IS_ALIGNED(addr, PUD_SIZE) || (size < PUD_SIZE))
+		return INVALID_PHYS_ADDR;
 
-	return machine.has_edat2 && large_allowed(mode) &&
-	       IS_ALIGNED(addr, PUD_SIZE) && (size >= PUD_SIZE) &&
-	       IS_ALIGNED(_pa(addr, size, mode), PUD_SIZE);
+	pa = resolve_pa_may_alloc(addr, size, mode);
+	if (!IS_ALIGNED(pa, PUD_SIZE))
+		return INVALID_PHYS_ADDR;
+
+	return pa;
 }
 
-static bool can_large_pmd(pmd_t *pm_dir, unsigned long addr, unsigned long end,
-			  enum populate_mode mode)
+static unsigned long try_get_large_pmd_pa(pmd_t *pm_dir, unsigned long addr, unsigned long end,
+					  enum populate_mode mode)
 {
-	unsigned long size = end - addr;
+	unsigned long pa, size = end - addr;
+
+	if (!machine.has_edat1 || !large_page_mapping_allowed(mode) ||
+	    !IS_ALIGNED(addr, PMD_SIZE) || (size < PMD_SIZE))
+		return INVALID_PHYS_ADDR;
+
+	pa = resolve_pa_may_alloc(addr, size, mode);
+	if (!IS_ALIGNED(pa, PMD_SIZE))
+		return INVALID_PHYS_ADDR;
 
-	return machine.has_edat1 && large_allowed(mode) &&
-	       IS_ALIGNED(addr, PMD_SIZE) && (size >= PMD_SIZE) &&
-	       IS_ALIGNED(_pa(addr, size, mode), PMD_SIZE);
+	return pa;
 }
 
 static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long end,
@@ -298,7 +322,7 @@ static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long e
 		if (pte_none(*pte)) {
 			if (kasan_pte_populate_zero_shadow(pte, mode))
 				continue;
-			entry = __pte(_pa(addr, PAGE_SIZE, mode));
+			entry = __pte(resolve_pa_may_alloc(addr, PAGE_SIZE, mode));
 			entry = set_pte_bit(entry, PAGE_KERNEL);
 			if (!machine.has_nx)
 				entry = clear_pte_bit(entry, __pgprot(_PAGE_NOEXEC));
@@ -313,7 +337,7 @@ static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long e
 static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long end,
 				 enum populate_mode mode)
 {
-	unsigned long next, pages = 0;
+	unsigned long pa, next, pages = 0;
 	pmd_t *pmd, entry;
 	pte_t *pte;
 
@@ -323,8 +347,9 @@ static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long e
 		if (pmd_none(*pmd)) {
 			if (kasan_pmd_populate_zero_shadow(pmd, addr, next, mode))
 				continue;
-			if (can_large_pmd(pmd, addr, next, mode)) {
-				entry = __pmd(_pa(addr, _SEGMENT_SIZE, mode));
+			pa = try_get_large_pmd_pa(pmd, addr, next, mode);
+			if (pa != INVALID_PHYS_ADDR) {
+				entry = __pmd(pa);
 				entry = set_pmd_bit(entry, SEGMENT_KERNEL);
 				if (!machine.has_nx)
 					entry = clear_pmd_bit(entry, __pgprot(_SEGMENT_ENTRY_NOEXEC));
@@ -346,7 +371,7 @@ static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long e
 static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long end,
 				 enum populate_mode mode)
 {
-	unsigned long next, pages = 0;
+	unsigned long pa, next, pages = 0;
 	pud_t *pud, entry;
 	pmd_t *pmd;
 
@@ -356,8 +381,9 @@ static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long e
 		if (pud_none(*pud)) {
 			if (kasan_pud_populate_zero_shadow(pud, addr, next, mode))
 				continue;
-			if (can_large_pud(pud, addr, next, mode)) {
-				entry = __pud(_pa(addr, _REGION3_SIZE, mode));
+			pa = try_get_large_pud_pa(pud, addr, next, mode);
+			if (pa != INVALID_PHYS_ADDR) {
+				entry = __pud(pa);
 				entry = set_pud_bit(entry, REGION3_KERNEL);
 				if (!machine.has_nx)
 					entry = clear_pud_bit(entry, __pgprot(_REGION_ENTRY_NOEXEC));
-- 
2.39.5
