If there's some vmcore object that doesn't satisfy the page-size boundary
requirement, remap_pfn_range() fails to remap it to user-space. The only
objects that can possibly violate the requirement are ELF note segments.
The memory chunks corresponding to PT_LOAD entries are guaranteed to
satisfy the page-size boundary requirement by the copy from old memory
to a buffer in the 2nd kernel, done in a later patch.

Signed-off-by: HATAYAMA Daisuke <d.hatayama at jp.fujitsu.com>
---
 fs/proc/vmcore.c |   22 ++++++++++++++++++++++
 1 files changed, 22 insertions(+), 0 deletions(-)

diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index e432946..5582aaa 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -38,6 +38,8 @@ static u64 vmcore_size;
 
 static struct proc_dir_entry *proc_vmcore = NULL;
 
+static bool support_mmap_vmcore;
+
 /*
  * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
  * The called function has to take care of module refcounting.
@@ -897,6 +899,7 @@ static int __init parse_crash_elf_headers(void)
 static int __init vmcore_init(void)
 {
 	int rc = 0;
+	struct vmcore *m;
 
 	/* If elfcorehdr= has been passed in cmdline, then capture the dump.*/
 	if (!(is_vmcore_usable()))
@@ -907,6 +910,25 @@ static int __init vmcore_init(void)
 		return rc;
 	}
 
+	/* If some object doesn't satisfy the PAGE_SIZE boundary
+	 * requirement, mmap_vmcore() is not exported to
+	 * user-space. */
+	support_mmap_vmcore = true;
+	list_for_each_entry(m, &vmcore_list, list) {
+		u64 paddr;
+
+		if (m->flag & MEM_TYPE_CURRENT_KERNEL)
+			paddr = (u64)__pa(m->buf);
+		else
+			paddr = m->paddr;
+
+		if ((m->offset & ~PAGE_MASK) || (paddr & ~PAGE_MASK)
+		    || (m->size & ~PAGE_MASK)) {
+			support_mmap_vmcore = false;
+			break;
+		}
+	}
+
 	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
 	if (proc_vmcore)
 		proc_vmcore->size = vmcore_size;
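
For reference, the alignment test in the hunk above relies on ~PAGE_MASK
being (PAGE_SIZE - 1), i.e. the in-page offset bits: a value is
page-aligned exactly when (x & ~PAGE_MASK) == 0. Below is a minimal,
hypothetical user-space sketch of that same test, not part of the patch;
it assumes 4 KiB pages, whereas the kernel takes PAGE_SIZE/PAGE_MASK from
the architecture headers.

#include <stdio.h>
#include <stdint.h>

/* Assumed 4 KiB page size, for illustration only. */
#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* A value is page-aligned iff its in-page offset bits are all zero. */
static int page_aligned(uint64_t x)
{
	return (x & ~PAGE_MASK) == 0;
}

int main(void)
{
	/* Hypothetical offsets/sizes of vmcore objects. */
	uint64_t vals[] = { 0x0, 0x1000, 0x1234, 0x2000 };
	size_t i;

	for (i = 0; i < sizeof(vals) / sizeof(vals[0]); i++)
		printf("%#llx -> %s\n",
		       (unsigned long long)vals[i],
		       page_aligned(vals[i]) ? "page-aligned"
					     : "not page-aligned");
	return 0;
}

With the sample values above, 0x1234 is the only one reported as not
page-aligned; in vmcore_init() any such offset, physical address, or size
disables mmap support for the whole of /proc/vmcore.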