On 2013/03/02 16:36, HATAYAMA Daisuke wrote:
> Allocate the buffer for the ELF headers on a page-size aligned boundary to
> satisfy the mmap() requirement. For this, __get_free_pages() is used
> instead of kmalloc().
>
> Also, a later patch will decrease the actually used buffer size for the ELF
> headers, so it's necessary to keep the original buffer size and the actually
> used buffer size separately. elfcorebuf_sz_orig keeps the original one
> and elfcorebuf_sz the actually used one.
>
> Signed-off-by: HATAYAMA Daisuke <d.hatayama at jp.fujitsu.com>
> ---
>
>  fs/proc/vmcore.c |   30 +++++++++++++++++++++---------
>  1 files changed, 21 insertions(+), 9 deletions(-)
>
> diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
> index b5c9e33..1b02d01 100644
> --- a/fs/proc/vmcore.c
> +++ b/fs/proc/vmcore.c
> @@ -31,6 +31,7 @@ static LIST_HEAD(vmcore_list);
>  /* Stores the pointer to the buffer containing kernel elf core headers. */
>  static char *elfcorebuf;
>  static size_t elfcorebuf_sz;
> +static size_t elfcorebuf_sz_orig;
>
>  /* Total size of vmcore file. */
>  static u64 vmcore_size;
> @@ -610,26 +611,31 @@ static int __init parse_crash_elf64_headers(void)
>
>  	/* Read in all elf headers. */
>  	elfcorebuf_sz = ehdr.e_phoff + ehdr.e_phnum * sizeof(Elf64_Phdr);
> -	elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL);
> +	elfcorebuf_sz_orig = elfcorebuf_sz;
> +	elfcorebuf = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
> +					       get_order(elfcorebuf_sz_orig));
>  	if (!elfcorebuf)
>  		return -ENOMEM;
>  	addr = elfcorehdr_addr;
>  	rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0);
>  	if (rc < 0) {
> -		kfree(elfcorebuf);
> +		free_pages((unsigned long)elfcorebuf,
> +			   get_order(elfcorebuf_sz_orig));
>  		return rc;
>  	}
>
>  	/* Merge all PT_NOTE headers into one. */
>  	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
>  	if (rc) {
> -		kfree(elfcorebuf);
> +		free_pages((unsigned long)elfcorebuf,
> +			   get_order(elfcorebuf_sz_orig));
>  		return rc;
>  	}
>  	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
>  						  &vmcore_list);
>  	if (rc) {
> -		kfree(elfcorebuf);
> +		free_pages((unsigned long)elfcorebuf,
> +			   get_order(elfcorebuf_sz_orig));
>  		return rc;
>  	}
>  	set_vmcore_list_offsets_elf64(elfcorebuf, &vmcore_list);
> @@ -665,26 +671,31 @@ static int __init parse_crash_elf32_headers(void)
>
>  	/* Read in all elf headers. */
>  	elfcorebuf_sz = ehdr.e_phoff + ehdr.e_phnum * sizeof(Elf32_Phdr);
> -	elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL);
> +	elfcorebuf_sz_orig = elfcorebuf_sz;
> +	elfcorebuf = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
> +					       get_order(elfcorebuf_sz));

Why not elfcorebuf_sz_orig here?

>  	if (!elfcorebuf)
>  		return -ENOMEM;
>  	addr = elfcorehdr_addr;
>  	rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0);
>  	if (rc < 0) {
> -		kfree(elfcorebuf);
> +		free_pages((unsigned long)elfcorebuf,
> +			   get_order(elfcorebuf_sz_orig));
>  		return rc;
>  	}
>
>  	/* Merge all PT_NOTE headers into one. */
>  	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
>  	if (rc) {
> -		kfree(elfcorebuf);
> +		free_pages((unsigned long)elfcorebuf,
> +			   get_order(elfcorebuf_sz_orig));
>  		return rc;
>  	}
>  	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
>  						  &vmcore_list);
>  	if (rc) {
> -		kfree(elfcorebuf);
> +		free_pages((unsigned long)elfcorebuf,
> +			   get_order(elfcorebuf_sz_orig));
>  		return rc;
>  	}
>  	set_vmcore_list_offsets_elf32(elfcorebuf, &vmcore_list);
> @@ -766,7 +777,8 @@ void vmcore_cleanup(void)
>  		list_del(&m->list);
>  		kfree(m);
>  	}
> -	kfree(elfcorebuf);
> +	free_pages((unsigned long)elfcorebuf,
> +		   get_order(elfcorebuf_sz_orig));
>  	elfcorebuf = NULL;
>  }
>  EXPORT_SYMBOL_GPL(vmcore_cleanup);
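For readers following the thread, here is a minimal sketch of the allocation/free pairing the patch introduces (hypothetical names, not the actual vmcore.c code): __get_free_pages() returns a page-aligned region, which kmalloc() does not guarantee for an arbitrary size like the ELF header area, and free_pages() must always be passed the order derived from the size used at allocation time, even after the logically used size has been reduced.

	/* Illustrative only; assumes kernel context, names are made up. */
	#include <linux/errno.h>
	#include <linux/gfp.h>
	#include <linux/mm.h>

	static char *buf;
	static size_t buf_sz;		/* logically used size; may shrink later */
	static size_t buf_sz_orig;	/* allocation size; must be used when freeing */

	static int alloc_hdr_buf(size_t sz)
	{
		buf_sz = sz;
		buf_sz_orig = sz;
		/* Page-aligned, zeroed allocation, suitable for mmap()ing later. */
		buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					       get_order(buf_sz_orig));
		return buf ? 0 : -ENOMEM;
	}

	static void free_hdr_buf(void)
	{
		/* Free with the order of the original size, not the shrunken buf_sz. */
		free_pages((unsigned long)buf, get_order(buf_sz_orig));
		buf = NULL;
	}

This is also why the question above matters: whichever size is passed to __get_free_pages() at allocation time is the one whose order must be used consistently when freeing.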