The patch titled
     Subject: proc/kcore: hold lock during read
has been added to the -mm tree.  Its filename is
     proc-kcore-hold-lock-during-read.patch

This patch should soon appear at
    http://ozlabs.org/~akpm/mmots/broken-out/proc-kcore-hold-lock-during-read.patch
and later at
    http://ozlabs.org/~akpm/mmotm/broken-out/proc-kcore-hold-lock-during-read.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Omar Sandoval <osandov@xxxxxx>
Subject: proc/kcore: hold lock during read

Now that we're using an rwsem, we can hold it during the entirety of
read_kcore() and have a common return path.

This is preparation for the next change.

Link: http://lkml.kernel.org/r/e610dcf31e4dec99c43e8f4353cf6f448c4d4782.1531440458.git.osandov@xxxxxx
Signed-off-by: Omar Sandoval <osandov@xxxxxx>
Cc: Alexey Dobriyan <adobriyan@xxxxxxxxx>
Cc: Bhupesh Sharma <bhsharma@xxxxxxxxxx>
Cc: Eric Biederman <ebiederm@xxxxxxxxxxxx>
Cc: James Morse <james.morse@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

diff -puN fs/proc/kcore.c~proc-kcore-hold-lock-during-read fs/proc/kcore.c
--- a/fs/proc/kcore.c~proc-kcore-hold-lock-during-read
+++ a/fs/proc/kcore.c
@@ -440,19 +440,18 @@ static ssize_t
 read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 {
 	char *buf = file->private_data;
-	ssize_t acc = 0;
 	size_t size, tsz;
 	size_t elf_buflen;
 	int nphdr;
 	unsigned long start;
+	size_t orig_buflen = buflen;
+	int ret = 0;
 
 	down_read(&kclist_lock);
 	size = get_kcore_size(&nphdr, &elf_buflen);
 
-	if (buflen == 0 || *fpos >= size) {
-		up_read(&kclist_lock);
-		return 0;
-	}
+	if (buflen == 0 || *fpos >= size)
+		goto out;
 
 	/* trim buflen to not go beyond EOF */
 	if (buflen > size - *fpos)
@@ -465,28 +464,26 @@ read_kcore(struct file *file, char __use
 		tsz = elf_buflen - *fpos;
 		if (buflen < tsz)
 			tsz = buflen;
-		elf_buf = kzalloc(elf_buflen, GFP_ATOMIC);
+		elf_buf = kzalloc(elf_buflen, GFP_KERNEL);
 		if (!elf_buf) {
-			up_read(&kclist_lock);
-			return -ENOMEM;
+			ret = -ENOMEM;
+			goto out;
 		}
 		elf_kcore_store_hdr(elf_buf, nphdr, elf_buflen);
-		up_read(&kclist_lock);
 		if (copy_to_user(buffer, elf_buf + *fpos, tsz)) {
 			kfree(elf_buf);
-			return -EFAULT;
+			ret = -EFAULT;
+			goto out;
 		}
 		kfree(elf_buf);
 		buflen -= tsz;
 		*fpos += tsz;
 		buffer += tsz;
-		acc += tsz;
 
 		/* leave now if filled buffer already */
 		if (buflen == 0)
-			return acc;
-	} else
-		up_read(&kclist_lock);
+			goto out;
+	}
 
 	/*
 	 * Check to see if our file offset matches with any of
@@ -499,25 +496,29 @@ read_kcore(struct file *file, char __use
 	while (buflen) {
 		struct kcore_list *m;
 
-		down_read(&kclist_lock);
 		list_for_each_entry(m, &kclist_head, list) {
 			if (start >= m->addr && start < (m->addr+m->size))
 				break;
 		}
-		up_read(&kclist_lock);
 
 		if (&m->list == &kclist_head) {
-			if (clear_user(buffer, tsz))
-				return -EFAULT;
+			if (clear_user(buffer, tsz)) {
+				ret = -EFAULT;
+				goto out;
+			}
 		} else if (m->type == KCORE_VMALLOC) {
 			vread(buf, (char *)start, tsz);
 			/* we have to zero-fill user buffer even if no read */
-			if (copy_to_user(buffer, buf, tsz))
-				return -EFAULT;
+			if (copy_to_user(buffer, buf, tsz)) {
+				ret = -EFAULT;
+				goto out;
+			}
 		} else if (m->type == KCORE_USER) {
 			/* User page is handled prior to normal kernel page: */
-			if (copy_to_user(buffer, (char *)start, tsz))
-				return -EFAULT;
+			if (copy_to_user(buffer, (char *)start, tsz)) {
+				ret = -EFAULT;
+				goto out;
+			}
 		} else {
 			if (kern_addr_valid(start)) {
 				/*
@@ -525,26 +526,35 @@ read_kcore(struct file *file, char __use
 				 * hardened user copy kernel text checks.
 				 */
 				if (probe_kernel_read(buf, (void *) start, tsz)) {
-					if (clear_user(buffer, tsz))
-						return -EFAULT;
+					if (clear_user(buffer, tsz)) {
+						ret = -EFAULT;
+						goto out;
+					}
 				} else {
-					if (copy_to_user(buffer, buf, tsz))
-						return -EFAULT;
+					if (copy_to_user(buffer, buf, tsz)) {
+						ret = -EFAULT;
+						goto out;
+					}
 				}
 			} else {
-				if (clear_user(buffer, tsz))
-					return -EFAULT;
+				if (clear_user(buffer, tsz)) {
+					ret = -EFAULT;
+					goto out;
+				}
 			}
 		}
 		buflen -= tsz;
 		*fpos += tsz;
 		buffer += tsz;
-		acc += tsz;
 		start += tsz;
 		tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
 	}
 
-	return acc;
+out:
+	up_read(&kclist_lock);
+	if (ret)
+		return ret;
+	return orig_buflen - buflen;
 }
 
 static int open_kcore(struct inode *inode, struct file *filp)
_

Patches currently in -mm which might be from osandov@xxxxxx are

proc-kcore-dont-grab-lock-for-kclist_add.patch
proc-kcore-replace-kclist_lock-rwlock-with-rwsem.patch
proc-kcore-fix-memory-hotplug-vs-multiple-opens-race.patch
proc-kcore-hold-lock-during-read.patch
proc-kcore-clean-up-elf-header-generation.patch
proc-kcore-optimize-multiple-page-reads.patch
proc-kcore-add-vmcoreinfo-note-to-proc-kcore.patch

--
To unsubscribe from this list: send the line "unsubscribe mm-commits" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html
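
For readers who want the locking pattern in isolation: the sketch below is a
minimal, self-contained userspace approximation -- not part of the patch -- of
what the change does in read_kcore(): take the reader lock once, keep it
across the whole copy loop, and funnel every exit through a single label that
drops the lock and returns either the error or the number of bytes consumed
(orig_buflen - buflen).  It uses a pthreads rwlock in place of the kernel
rwsem, and all names here (demo_read, copy_chunk, demo_lock, demo_data) are
made up for the example.

/*
 * Illustrative sketch of "hold the lock for the whole read, one common
 * return path".  Assumed/hypothetical names only; not kernel code.
 */
#include <errno.h>
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

static pthread_rwlock_t demo_lock = PTHREAD_RWLOCK_INITIALIZER;
static const char demo_data[] = "core contents";

/* Copy one chunk of demo_data; 0 on success, -EFAULT-style error on overrun. */
static int copy_chunk(char *dst, size_t off, size_t len)
{
	if (off + len > sizeof(demo_data))
		return -EFAULT;
	memcpy(dst, demo_data + off, len);
	return 0;
}

static ssize_t demo_read(char *buffer, size_t buflen, size_t *fpos)
{
	size_t orig_buflen = buflen;
	int ret = 0;

	pthread_rwlock_rdlock(&demo_lock);	/* held for the entire read */

	while (buflen) {
		size_t tsz = buflen > 4 ? 4 : buflen;	/* tiny "page" size */

		ret = copy_chunk(buffer, *fpos, tsz);
		if (ret)
			goto out;		/* common error path */

		buflen -= tsz;
		buffer += tsz;
		*fpos += tsz;
	}
out:
	pthread_rwlock_unlock(&demo_lock);	/* single unlock site */
	if (ret)
		return ret;
	return orig_buflen - buflen;		/* bytes actually copied */
}

int main(void)
{
	char buf[8];
	size_t pos = 0;
	ssize_t n = demo_read(buf, sizeof(buf), &pos);

	printf("read %zd bytes\n", n);

	/* A second read overruns the demo data and exercises the error path. */
	n = demo_read(buf, sizeof(buf), &pos);
	printf("second read returned %zd\n", n);
	return 0;
}

Built with cc -pthread, the first call copies 8 bytes and the second returns
-EFAULT (-14); both the success and error paths leave through the same unlock,
which is the point of the patch's restructuring.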