Print a list of pages to be copied if debugging is enabled. Consecutive
entries are merged to reduce screen clutter.

Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Eric Biederman <ebiederm@xmission.com>
---
 kernel/kexec.c | 76 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 76 insertions(+)

diff --git a/kernel/kexec.c b/kernel/kexec.c
index 490afc03627e..e25022ac229e 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1073,6 +1073,80 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
 }
 #endif
 
+#ifdef DEBUG
+struct kimage_block {
+	unsigned long dst, src, len;
+};
+
+static void kimage_print_block(const struct kimage_block *block)
+{
+	pr_info("Copy from 0x%lx-0x%lx to 0x%lx-0x%lx (0x%lx bytes)\n",
+		block->src, block->src + block->len - 1, block->dst,
+		block->dst + block->len - 1, block->len);
+}
+
+static void kimage_print(const struct kimage *image)
+{
+	void *control_code_page;
+	const kimage_entry_t *ptr;
+	kimage_entry_t entry;
+	struct kimage_block block;
+	unsigned long dnext = KIMAGE_NO_DEST, snext = KIMAGE_NO_DEST;
+	unsigned long total = 0;
+
+	control_code_page = page_address(image->control_code_page);
+	pr_info("Control code page 0x%p (phys 0x%lx)\n",
+		control_code_page, virt_to_phys(control_code_page));
+
+	ptr = &image->head;
+	block.dst = KIMAGE_NO_DEST;
+	block.src = KIMAGE_NO_DEST;
+	block.len = 0;
+	while ((entry = *ptr)) {
+		if (entry & IND_DONE)
+			break;
+
+		if (entry & IND_DESTINATION) {
+			if (block.len > 0) {
+				kimage_print_block(&block);
+				total += block.len;
+			}
+			dnext = block.dst = entry & PAGE_MASK;
+			block.src = KIMAGE_NO_DEST;
+			block.len = 0;
+		}
+
+		if (entry & IND_SOURCE) {
+			if (!block.len) {
+				snext = block.src = entry & PAGE_MASK;
+			} else if ((entry & PAGE_MASK) != snext) {
+				kimage_print_block(&block);
+				total += block.len;
+				block.dst = dnext;
+				snext = block.src = entry & PAGE_MASK;
+				block.len = 0;
+			}
+			dnext += PAGE_SIZE;
+			snext += PAGE_SIZE;
+			block.len += PAGE_SIZE;
+		}
+
+		if (entry & IND_INDIRECTION) {
+			pr_info("Indirection page 0x%lx\n", entry & PAGE_MASK);
+			ptr = phys_to_virt(entry & PAGE_MASK);
+		} else
+			ptr++;
+	}
+	if (block.len) {
+		kimage_print_block(&block);
+		total += block.len;
+	}
+	pr_info("Total: 0x%lx/%ld bytes\n", total, total);
+}
+#else
+static inline void kimage_print(const struct kimage *image) {}
+#endif
+
 void crash_kexec(struct pt_regs *regs)
 {
 	/* Take the kexec_mutex here to prevent sys_kexec_load
@@ -1090,6 +1164,7 @@ void crash_kexec(struct pt_regs *regs)
 			crash_setup_regs(&fixed_regs, regs);
 			crash_save_vmcoreinfo();
 			machine_crash_shutdown(&fixed_regs);
+			kimage_print(kexec_crash_image);
 			machine_kexec(kexec_crash_image);
 		}
 		mutex_unlock(&kexec_mutex);
@@ -1680,6 +1755,7 @@ int kernel_kexec(void)
 		machine_shutdown();
 	}
 
+	kimage_print(kexec_image);
 	machine_kexec(kexec_image);
 
 #ifdef CONFIG_KEXEC_JUMP
-- 
1.7.9.5
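
For reviewers who want to poke at the merging logic without booting a
kernel, below is a minimal standalone userspace sketch of the same
coalescing walk. The IND_* flag values follow include/linux/kexec.h, but
the entry list, addresses, and the print_block()/NO_DEST names are made
up for illustration; this is not the kernel code, just the algorithm on
a hand-built list (no indirection pages).

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE	0x1000UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define IND_DESTINATION	0x1UL
#define IND_INDIRECTION	0x2UL
#define IND_DONE	0x4UL
#define IND_SOURCE	0x8UL
#define NO_DEST		(~0UL)

struct block { unsigned long dst, src, len; };

static unsigned long total;

static void print_block(const struct block *b)
{
	printf("Copy from 0x%lx-0x%lx to 0x%lx-0x%lx (0x%lx bytes)\n",
	       b->src, b->src + b->len - 1, b->dst,
	       b->dst + b->len - 1, b->len);
	total += b->len;
}

int main(void)
{
	/* Fake image: two contiguous sources, a gap, a new destination. */
	unsigned long entries[] = {
		0x100000 | IND_DESTINATION,
		0x200000 | IND_SOURCE,
		0x201000 | IND_SOURCE,	/* contiguous: merged */
		0x300000 | IND_SOURCE,	/* gap: flushes the block */
		0x400000 | IND_DESTINATION,
		0x500000 | IND_SOURCE,
		IND_DONE,
	};
	struct block b = { NO_DEST, NO_DEST, 0 };
	unsigned long dnext = NO_DEST, snext = NO_DEST;

	for (size_t i = 0; !(entries[i] & IND_DONE); i++) {
		unsigned long entry = entries[i];

		if (entry & IND_DESTINATION) {
			if (b.len)
				print_block(&b);
			dnext = b.dst = entry & PAGE_MASK;
			b.src = NO_DEST;
			b.len = 0;
		}
		if (entry & IND_SOURCE) {
			if (!b.len) {
				snext = b.src = entry & PAGE_MASK;
			} else if ((entry & PAGE_MASK) != snext) {
				print_block(&b);
				b.dst = dnext;
				snext = b.src = entry & PAGE_MASK;
				b.len = 0;
			}
			dnext += PAGE_SIZE;
			snext += PAGE_SIZE;
			b.len += PAGE_SIZE;
		}
	}
	if (b.len)
		print_block(&b);
	printf("Total: 0x%lx/%lu bytes\n", total, total);
	return 0;
}

Built with "cc -o kimage_demo kimage_demo.c", this prints three merged
blocks (0x2000, 0x1000 and 0x1000 bytes) and "Total: 0x4000/16384
bytes", mirroring the dmesg format the patch produces.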