Hi Mike, On Thu, Feb 06 2025, Mike Rapoport wrote: [...] > @@ -444,7 +576,141 @@ static void kho_reserve_scratch(void) > kho_enable = false; > } > > +/* > + * Scan the DT for any memory ranges and make sure they are reserved in > + * memblock, otherwise they will end up in a weird state on free lists. > + */ > +static void kho_init_reserved_pages(void) > +{ > + const void *fdt = kho_get_fdt(); > + int offset = 0, depth = 0, initial_depth = 0, len; > + > + if (!fdt) > + return; > + > + /* Go through the mem list and add 1 for each reference */ > + for (offset = 0; > + offset >= 0 && depth >= initial_depth; > + offset = fdt_next_node(fdt, offset, &depth)) { > + const struct kho_mem *mems; > + u32 i; > + > + mems = fdt_getprop(fdt, offset, "mem", &len); > + if (!mems || len & (sizeof(*mems) - 1)) > + continue; > + > + for (i = 0; i < len; i += sizeof(*mems)) { > + const struct kho_mem *mem = &mems[i]; i goes from 0 to len in steps of 16, but you use it to dereference an array of type struct kho_mem. So you end up looking at only one of every 16 mems and doing an out-of-bounds access. I found this when testing the memfd patches and any time the file was more than 1 page, it started to crash randomly. Below patch should fix that: ---- 8< ---- diff --git a/kernel/kexec_handover.c b/kernel/kexec_handover.c index c26753d613cbc..40d1d8ac68d44 100644 --- a/kernel/kexec_handover.c +++ b/kernel/kexec_handover.c @@ -685,13 +685,15 @@ static void kho_init_reserved_pages(void) offset >= 0 && depth >= initial_depth; offset = fdt_next_node(fdt, offset, &depth)) { const struct kho_mem *mems; - u32 i; + u32 i, nr_mems; mems = fdt_getprop(fdt, offset, "mem", &len); if (!mems || len & (sizeof(*mems) - 1)) continue; - for (i = 0; i < len; i += sizeof(*mems)) { + nr_mems = len / sizeof(*mems); + + for (i = 0; i < nr_mems; i++) { const struct kho_mem *mem = &mems[i]; memblock_reserve(mem->addr, mem->size); ---- >8 ---- [...] -- Regards, Pratyush Yadav