On 06/02/2020 at 03:58, Jason Yan wrote:
Some code refactoring in kaslr_legal_offset() and kaslr_early_init(). No
functional change. This is a preparation for KASLR on fsl_booke64.
Signed-off-by: Jason Yan <yanaijie@xxxxxxxxxx>
Cc: Scott Wood <oss@xxxxxxxxxxxx>
Cc: Diana Craciun <diana.craciun@xxxxxxx>
Cc: Michael Ellerman <mpe@xxxxxxxxxxxxxx>
Cc: Christophe Leroy <christophe.leroy@xxxxxx>
Cc: Benjamin Herrenschmidt <benh@xxxxxxxxxxxxxxxxxxx>
Cc: Paul Mackerras <paulus@xxxxxxxxx>
Cc: Nicholas Piggin <npiggin@xxxxxxxxx>
Cc: Kees Cook <keescook@xxxxxxxxxxxx>
---
arch/powerpc/mm/nohash/kaslr_booke.c | 40 ++++++++++++++--------------
1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/arch/powerpc/mm/nohash/kaslr_booke.c b/arch/powerpc/mm/nohash/kaslr_booke.c
index 4a75f2d9bf0e..07b036e98353 100644
--- a/arch/powerpc/mm/nohash/kaslr_booke.c
+++ b/arch/powerpc/mm/nohash/kaslr_booke.c
@@ -25,6 +25,7 @@ struct regions {
unsigned long pa_start;
unsigned long pa_end;
unsigned long kernel_size;
+ unsigned long linear_sz;
unsigned long dtb_start;
unsigned long dtb_end;
unsigned long initrd_start;
@@ -260,11 +261,23 @@ static __init void get_cell_sizes(const void *fdt, int node, int *addr_cells,
*size_cells = fdt32_to_cpu(*prop);
}
-static unsigned long __init kaslr_legal_offset(void *dt_ptr, unsigned long index,
- unsigned long offset)
+static unsigned long __init kaslr_legal_offset(void *dt_ptr, unsigned long random)
{
unsigned long koffset = 0;
unsigned long start;
+ unsigned long index;
+ unsigned long offset;
+
+ /*
+ * Decide which 64M we want to start
+ * Only use the low 8 bits of the random seed
+ */
+ index = random & 0xFF;
+ index %= regions.linear_sz / SZ_64M;
+
+ /* Decide offset inside 64M */
+ offset = random % (SZ_64M - regions.kernel_size);
+ offset = round_down(offset, SZ_16K);
while ((long)index >= 0) {
offset = memstart_addr + index * SZ_64M + offset;
@@ -289,10 +302,9 @@ static inline __init bool kaslr_disabled(void)
static unsigned long __init kaslr_choose_location(void *dt_ptr, phys_addr_t size,
unsigned long kernel_sz)
{
- unsigned long offset, random;
+ unsigned long random;
unsigned long ram, linear_sz;
u64 seed;
- unsigned long index;
kaslr_get_cmdline(dt_ptr);
if (kaslr_disabled())
@@ -333,22 +345,12 @@ static unsigned long __init kaslr_choose_location(void *dt_ptr, phys_addr_t size
regions.dtb_start = __pa(dt_ptr);
regions.dtb_end = __pa(dt_ptr) + fdt_totalsize(dt_ptr);
regions.kernel_size = kernel_sz;
+ regions.linear_sz = linear_sz;
get_initrd_range(dt_ptr);
get_crash_kernel(dt_ptr, ram);
- /*
- * Decide which 64M we want to start
- * Only use the low 8 bits of the random seed
- */
- index = random & 0xFF;
- index %= linear_sz / SZ_64M;
-
- /* Decide offset inside 64M */
- offset = random % (SZ_64M - kernel_sz);
- offset = round_down(offset, SZ_16K);
-
- return kaslr_legal_offset(dt_ptr, index, offset);
+ return kaslr_legal_offset(dt_ptr, random);
}
/*
@@ -358,8 +360,6 @@ static unsigned long __init kaslr_choose_location(void *dt_ptr, phys_addr_t size
*/
notrace void __init kaslr_early_init(void *dt_ptr, phys_addr_t size)
{
- unsigned long tlb_virt;
- phys_addr_t tlb_phys;
unsigned long offset;
unsigned long kernel_sz;
@@ -375,8 +375,8 @@ notrace void __init kaslr_early_init(void *dt_ptr, phys_addr_t size)
is_second_reloc = 1;
if (offset >= SZ_64M) {
- tlb_virt = round_down(kernstart_virt_addr, SZ_64M);
- tlb_phys = round_down(kernstart_addr, SZ_64M);
+ unsigned long tlb_virt = round_down(kernstart_virt_addr, SZ_64M);
+ phys_addr_t tlb_phys = round_down(kernstart_addr, SZ_64M);

That looks like cleanup unrelated to the patch itself.
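
If reducing the scope of tlb_virt/tlb_phys is wanted, that would rather be a trivial patch of its own. As far as this patch is concerned, the hunk can simply be dropped and kaslr_early_init() keeps its declarations at function scope as today (rough, untested sketch only to illustrate, with unrelated lines elided as ...):

	notrace void __init kaslr_early_init(void *dt_ptr, phys_addr_t size)
	{
		unsigned long tlb_virt;
		phys_addr_t tlb_phys;
		unsigned long offset;
		unsigned long kernel_sz;
		...
		is_second_reloc = 1;

		if (offset >= SZ_64M) {
			tlb_virt = round_down(kernstart_virt_addr, SZ_64M);
			tlb_phys = round_down(kernstart_addr, SZ_64M);

			/* Create kernel map to relocate in */
			create_kaslr_tlb_entry(1, tlb_virt, tlb_phys);
		}
		...
	}
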
/* Create kernel map to relocate in */
create_kaslr_tlb_entry(1, tlb_virt, tlb_phys);

Christophe