+
+ for (i = 0; i < size / sizeof(hash); i++) {
+ /* Rotate by odd number of bits and XOR. */
+ hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
+ hash ^= ptr[i];
+ }
+
+ return hash;
+}
+
+/* Attempt to create a simple but unpredictable starting entropy. */
+static unsigned long __init get_boot_seed(void *fdt)
+{
+ unsigned long hash = 0;
+
+ hash = rotate_xor(hash, build_str, sizeof(build_str));
+ hash = rotate_xor(hash, fdt, fdt_totalsize(fdt));
+
+ return hash;
+}
+
+static __init u64 get_kaslr_seed(void *fdt)
+{
+ int node, len;
+ fdt64_t *prop;
+ u64 ret;
+
+ node = fdt_path_offset(fdt, "/chosen");
+ if (node < 0)
+ return 0;
+
+ prop = fdt_getprop_w(fdt, node, "kaslr-seed", &len);
+ if (!prop || len != sizeof(u64))
+ return 0;
+
+ ret = fdt64_to_cpu(*prop);
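+ /* Wipe the seed so it cannot be read back from the device tree later. */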
+ *prop = 0;
+ return ret;
+}
+
+static __init bool regions_overlap(u32 s1, u32 e1, u32 s2, u32 e2)
+{
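+ /* Ranges are inclusive, so touching ranges count as overlapping. */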
+ return e1 >= s2 && e2 >= s1;
+}
+
+static __init bool overlaps_reserved_region(const void *fdt, u32 start,
+ u32 end, struct regions *regions)
+{
+ int subnode, len, i;
+ u64 base, size;
+
+ /* check for overlap with /memreserve/ entries */
+ for (i = 0; i < fdt_num_mem_rsv(fdt); i++) {
+ if (fdt_get_mem_rsv(fdt, i, &base, &size) < 0)
+ continue;
+ if (regions_overlap(start, end, base, base + size))
+ return true;
+ }
+
+ if (regions->reserved_mem < 0)
+ return false;
+
+ /* check for overlap with static reservations in /reserved-memory */
+ for (subnode = fdt_first_subnode(fdt, regions->reserved_mem);
+ subnode >= 0;
+ subnode = fdt_next_subnode(fdt, subnode)) {
+ const fdt32_t *reg;
+ u64 rsv_end;
+
+ len = 0;
+ reg = fdt_getprop(fdt, subnode, "reg", &len);
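+ /* Each "reg" entry: <addr_cells> address cells then <size_cells> size cells, 4 bytes per cell. */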
+ while (len >= 4 * (regions->reserved_mem_addr_cells +
+ regions->reserved_mem_size_cells)) {
+ base = fdt32_to_cpu(reg[0]);
+ if (regions->reserved_mem_addr_cells == 2)
+ base = (base << 32) | fdt32_to_cpu(reg[1]);
+
+ reg += regions->reserved_mem_addr_cells;
+ len -= 4 * regions->reserved_mem_addr_cells;
+
+ size = fdt32_to_cpu(reg[0]);
+ if (regions->reserved_mem_size_cells == 2)
+ size = (size << 32) | fdt32_to_cpu(reg[1]);
+
+ reg += regions->reserved_mem_size_cells;
+ len -= 4 * regions->reserved_mem_size_cells;
+
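+ /* A reservation starting above the linear mapping cannot overlap. */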
+ if (base >= regions->pa_end)
+ continue;
+
+ rsv_end = min(base + size, (u64)U32_MAX);
+
+ if (regions_overlap(start, end, base, rsv_end))
+ return true;
+ }
+ }
+ return false;
+}
+
+static __init bool overlaps_region(const void *fdt, u32 start,
+ u32 end, struct regions *regions)
+{
+ if (regions_overlap(start, end, regions->dtb_start,
+ regions->dtb_end))
+ return true;
+
+ if (regions_overlap(start, end, regions->initrd_start,
+ regions->initrd_end))
+ return true;
+
+ if (regions_overlap(start, end, regions->crash_start,
+ regions->crash_end))
+ return true;
+
+ return overlaps_reserved_region(fdt, start, end, regions);
+}
+
+static void __init get_crash_kernel(void *fdt, unsigned long size,
+ struct regions *regions)
+{
+#ifdef CONFIG_CRASH_CORE
+ unsigned long long crash_size, crash_base;
+ int ret;
+
+ ret = parse_crashkernel(boot_command_line, size, &crash_size,
+ &crash_base);
+ if (ret != 0 || crash_size == 0)
+ return;
+ if (crash_base == 0)
+ crash_base = KDUMP_KERNELBASE;
+
+ regions->crash_start = (unsigned long)crash_base;
+ regions->crash_end = (unsigned long)(crash_base + crash_size);
+
+ DBG("crash_base=0x%llx crash_size=0x%llx\n", crash_base, crash_size);
+#endif
+}
+
+static void __init get_initrd_range(void *fdt, struct regions *regions)
+{
+ u64 start, end;
+ int node, len;
+ const __be32 *prop;
+
+ node = fdt_path_offset(fdt, "/chosen");
+ if (node < 0)
+ return;
+
+ prop = fdt_getprop(fdt, node, "linux,initrd-start", &len);
+ if (!prop)
+ return;
+ start = of_read_number(prop, len / 4);
+
+ prop = fdt_getprop(fdt, node, "linux,initrd-end", &len);
+ if (!prop)
+ return;
+ end = of_read_number(prop, len / 4);
+
+ regions->initrd_start = (unsigned long)start;
+ regions->initrd_end = (unsigned long)end;
+
+ DBG("initrd_start=0x%llx initrd_end=0x%llx\n", start, end);
+}
+
+static __init unsigned long get_usable_offset(const void *fdt, struct regions *regions,
+ unsigned long start)
+{
+ unsigned long pa;
+ unsigned long pa_end;
+
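+ /* Search downwards in 16K steps for a window that avoids all reserved regions. */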
+ for (pa = start; pa > regions->pa_start; pa -= SZ_16K) {
+ pa_end = pa + regions->kernel_size;
+ if (overlaps_region(fdt, pa, pa_end, regions))
+ continue;
+
+ return pa;
+ }
+ return 0;
+}
+
+static __init void get_cell_sizes(const void *fdt, int node, int *addr_cells,
+ int *size_cells)
+{
+ const fdt32_t *prop;
+ int len;
+
+ /*
+ * Retrieve the #address-cells and #size-cells properties
+ * from the 'node', or use the default if not provided.
+ */
+ *addr_cells = *size_cells = 1;
+
+ prop = fdt_getprop(fdt, node, "#address-cells", &len);
+ if (len == 4)
+ *addr_cells = fdt32_to_cpu(*prop);
+ prop = fdt_getprop(fdt, node, "#size-cells", &len);
+ if (len == 4)
+ *size_cells = fdt32_to_cpu(*prop);
+}
+
static unsigned long __init kaslr_choose_location(void *dt_ptr, phys_addr_t size,
unsigned long kernel_sz)
{
- /* return a fixed offset of 64M for now */
- return SZ_64M;
+ unsigned long offset, random;
+ unsigned long ram, linear_sz;
+ unsigned long kaslr_offset;
+ u64 seed;
+ struct regions regions;
+ unsigned long index;
+
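+ /* Seed the hash with the kernel build string and the FDT contents. */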
+ random = get_boot_seed(dt_ptr);
+
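+ /* Mix in the timebase register as an additional entropy source. */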
+ seed = get_tb() << 32;
+ seed ^= get_tb();
+ random = rotate_xor(random, &seed, sizeof(seed));
+
+ /*
+ * Retrieve (and wipe) the seed from the FDT
+ */
+ seed = get_kaslr_seed(dt_ptr);
+ if (seed)
+ random = rotate_xor(random, &seed, sizeof(seed));
+
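+ /* Size the linear mapping and cap the randomization range at 512M. */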
+ ram = min((phys_addr_t)__max_low_memory, size);
+ ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true);
+ linear_sz = min(ram, (unsigned long)SZ_512M);
+
+ /* If the linear size is smaller than 64M, do not randomize */
+ if (linear_sz < SZ_64M)
+ return 0;
+
+ memset(&regions, 0, sizeof(regions));
+
+ /* check for a reserved-memory node and record its cell sizes */
+ regions.reserved_mem = fdt_path_offset(dt_ptr, "/reserved-memory");
+ if (regions.reserved_mem >= 0)
+ get_cell_sizes(dt_ptr, regions.reserved_mem,
+ &regions.reserved_mem_addr_cells,
+ &regions.reserved_mem_size_cells);
+
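+ /* Candidates span the linear mapping; record the regions to avoid. */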
+ regions.pa_start = 0;
+ regions.pa_end = linear_sz;
+ regions.dtb_start = __pa(dt_ptr);
+ regions.dtb_end = __pa(dt_ptr) + fdt_totalsize(dt_ptr);
+ regions.kernel_size = kernel_sz;
+
+ get_initrd_range(dt_ptr, &regions);
+ get_crash_kernel(dt_ptr, ram, &regions);
+
+ /*
+ * Decide which 64M block we want to start in.
+ * Only use the low 8 bits of the random seed.
+ */
+ index = random & 0xFF;
+ index %= linear_sz / SZ_64M;
+
+ /* Decide offset inside 64M */
+ if (index == 0) {
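+ /*
+ * The running kernel image occupies the start of the first block,
+ * so keep the new offset clear of the current image.
+ */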
+ offset = random % (SZ_64M - round_up(kernel_sz, SZ_16K) * 2);
+ offset += round_up(kernel_sz, SZ_16K);
+ offset = round_up(offset, SZ_16K);
+ } else {
+ offset = random % (SZ_64M - kernel_sz);
+ offset = round_down(offset, SZ_16K);
+ }
+
+ while ((long)index >= 0) {
+ /* Try the chosen 64M block first, then fall back to lower blocks. */
+ kaslr_offset = get_usable_offset(dt_ptr, &regions,
+ offset + index * SZ_64M);
+ if (kaslr_offset)
+ break;
+ index--;
+ }
+
+ /* Did not find any usable region? Give up on randomization. */
+ if ((long)index < 0)
+ kaslr_offset = 0;
+
+ return kaslr_offset;
}
/*
@@ -59,6 +375,8 @@ notrace void __init kaslr_early_init(void *dt_ptr, phys_addr_t size)
kernel_sz = (unsigned long)_end - KERNELBASE;
+ kaslr_get_cmdline(dt_ptr);
+
offset = kaslr_choose_location(dt_ptr, size, kernel_sz);
if (offset == 0)