copy_page_from_iter_atomic() already handles !highmem compound pages
correctly, but if we are passed a highmem compound page, each base page
needs to be mapped & unmapped individually.

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 lib/iov_iter.c | 43 ++++++++++++++++++++++++++++---------------
 1 file changed, 28 insertions(+), 15 deletions(-)

diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 960223ed9199..1a3fbda0c508 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -857,24 +857,37 @@ size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
 }
 EXPORT_SYMBOL(iov_iter_zero);
 
-size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
-				  struct iov_iter *i)
+size_t copy_page_from_iter_atomic(struct page *page, unsigned offset,
+				  size_t bytes, struct iov_iter *i)
 {
-	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
-	if (!page_copy_sane(page, offset, bytes)) {
-		kunmap_atomic(kaddr);
+	size_t n, copied = 0;
+
+	if (!page_copy_sane(page, offset, bytes))
 		return 0;
-	}
-	if (WARN_ON_ONCE(!i->data_source)) {
-		kunmap_atomic(kaddr);
+	if (WARN_ON_ONCE(!i->data_source))
 		return 0;
-	}
-	iterate_and_advance(i, bytes, base, len, off,
-		copyin(p + off, base, len),
-		memcpy_from_iter(i, p + off, base, len)
-	)
-	kunmap_atomic(kaddr);
-	return bytes;
+
+	do {
+		char *kaddr;
+
+		n = bytes - copied;
+		if (PageHighMem(page)) {
+			page += offset / PAGE_SIZE;
+			offset %= PAGE_SIZE;
+			n = min_t(size_t, n, PAGE_SIZE - offset);
+		}
+
+		kaddr = kmap_atomic(page) + offset;
+		iterate_and_advance(i, n, base, len, off,
+			copyin(kaddr + off, base, len),
+			memcpy_from_iter(i, kaddr + off, base, len)
+		)
+		kunmap_atomic(kaddr);
+		copied += n;
+		offset += n;
+	} while (PageHighMem(page) && copied != bytes && n > 0);
+
+	return copied;
 }
 EXPORT_SYMBOL(copy_page_from_iter_atomic);
-- 
2.39.2
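
For context, the per-base-page mapping pattern the patch adopts can be
read in isolation. Below is a minimal standalone sketch (not part of the
patch) of the same pattern, reduced to a plain memcpy().
copy_to_compound_page() is a hypothetical name used only for
illustration; PageHighMem(), kmap_atomic()/kunmap_atomic(), min_t() and
PAGE_SIZE are the real kernel interfaces the patch itself relies on.

#include <linux/highmem.h>	/* kmap_atomic(), kunmap_atomic() */
#include <linux/minmax.h>	/* min_t() (linux/kernel.h on older trees) */
#include <linux/mm.h>		/* struct page, PageHighMem() */
#include <linux/string.h>	/* memcpy() */

/* Hypothetical helper, for illustration only. */
static void copy_to_compound_page(struct page *page, unsigned offset,
				  const char *src, size_t bytes)
{
	size_t copied = 0;

	do {
		size_t n = bytes - copied;
		char *kaddr;

		if (PageHighMem(page)) {
			/* Step to the base page that contains @offset. */
			page += offset / PAGE_SIZE;
			offset %= PAGE_SIZE;
			/*
			 * kmap_atomic() maps a single base page, so cap
			 * this iteration's copy at the end of that page.
			 */
			n = min_t(size_t, n, PAGE_SIZE - offset);
		}

		kaddr = kmap_atomic(page);
		memcpy(kaddr + offset, src + copied, n);
		kunmap_atomic(kaddr);

		copied += n;
		offset += n;
	} while (copied != bytes);
}

!highmem pages, compound or not, are contiguous in the kernel's direct
map, so the PageHighMem() branch is skipped, n covers all remaining
bytes, and the loop body runs exactly once. One difference from the
patched function: memcpy() cannot make partial progress, so terminating
on copied != bytes is enough here, whereas iterate_and_advance() may
copy fewer than n bytes, which is why the patch also stops when no
progress is made (n > 0) and returns the number of bytes actually
copied.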