From: Nadav Amit <namit@xxxxxxxxxx>

copy_page_from_iter_iovec() cannot be used when preemption is enabled.

Change copy_page_from_iter_iovec() into __copy_page_from_iter_iovec()
with an additional parameter that says whether the caller runs in
atomic context. When __copy_page_from_iter_iovec() is used in an atomic
context, it fails gracefully instead of deadlocking, and the caller is
expected to recover from such a failure gracefully.

Cc: Jens Axboe <axboe@xxxxxxxxx>
Cc: Andrea Arcangeli <aarcange@xxxxxxxxxx>
Cc: Peter Xu <peterx@xxxxxxxxxx>
Cc: Alexander Viro <viro@xxxxxxxxxxxxxxxxxx>
Cc: io-uring@xxxxxxxxxxxxxxx
Cc: linux-fsdevel@xxxxxxxxxxxxxxx
Cc: linux-kernel@xxxxxxxxxxxxxxx
Cc: linux-mm@xxxxxxxxx
Signed-off-by: Nadav Amit <namit@xxxxxxxxxx>
---
 include/linux/uio.h |  3 +++
 lib/iov_iter.c      | 23 +++++++++++++++++------
 2 files changed, 20 insertions(+), 6 deletions(-)

diff --git a/include/linux/uio.h b/include/linux/uio.h
index 72d88566694e..7c90f7371a6f 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -121,6 +121,9 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
 			 struct iov_iter *i);
 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
 			 struct iov_iter *i);
+size_t __copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
+			 struct iov_iter *i, bool atomic);
+
 size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
 
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 1635111c5bd2..e597df6a46a7 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -246,7 +246,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
 }
 
 static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
-			 struct iov_iter *i)
+			 struct iov_iter *i, bool atomic)
 {
 	size_t skip, copy, left, wanted;
 	const struct iovec *iov;
@@ -259,14 +259,15 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
 	if (unlikely(!bytes))
 		return 0;
 
-	might_fault();
+	if (!atomic)
+		might_fault();
 	wanted = bytes;
 	iov = i->iov;
 	skip = i->iov_offset;
 	buf = iov->iov_base + skip;
 	copy = min(bytes, iov->iov_len - skip);
 
-	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
+	if (atomic || (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy))) {
 		kaddr = kmap_atomic(page);
 		to = kaddr + offset;
 
@@ -295,6 +296,9 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
 		buf += copy;
 		kunmap_atomic(kaddr);
 		copy = min(bytes, iov->iov_len - skip);
+		if (atomic)
+			goto done;
+
 	}
 	/* Too bad - revert to non-atomic kmap */
 
@@ -929,8 +933,8 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
 }
 EXPORT_SYMBOL(copy_page_to_iter);
 
-size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
-			 struct iov_iter *i)
+size_t __copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
+			 struct iov_iter *i, bool atomic)
 {
 	if (unlikely(!page_copy_sane(page, offset, bytes)))
 		return 0;
@@ -944,7 +948,14 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
 		kunmap_atomic(kaddr);
 		return wanted;
 	} else
-		return copy_page_from_iter_iovec(page, offset, bytes, i);
+		return copy_page_from_iter_iovec(page, offset, bytes, i, atomic);
+}
+EXPORT_SYMBOL(__copy_page_from_iter);
+
+size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
+			 struct iov_iter *i)
+{
+	return __copy_page_from_iter(page, offset, bytes, i, false);
 }
 EXPORT_SYMBOL(copy_page_from_iter);
-- 
2.25.1
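For illustration, a caller could use the new helper roughly as below. This is
a minimal sketch, not part of the patch: the function example_fill_page(), its
spinlock, and the fallback policy are hypothetical; only __copy_page_from_iter()
and copy_page_from_iter() come from this patch.

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/uio.h>

/*
 * Hypothetical caller: try to fill a page while a spinlock is held, then
 * fall back to the faulting path once the lock has been dropped.
 */
static size_t example_fill_page(struct page *page, struct iov_iter *iter,
				spinlock_t *lock)
{
	size_t copied;

	spin_lock(lock);
	/* atomic == true: may stop short instead of faulting in user pages */
	copied = __copy_page_from_iter(page, 0, PAGE_SIZE, iter, true);
	spin_unlock(lock);

	if (copied == PAGE_SIZE)
		return copied;

	/*
	 * Partial copy: the iterator has already advanced by 'copied' bytes,
	 * so retry only the remainder with the sleeping variant.
	 */
	copied += copy_page_from_iter(page, copied, PAGE_SIZE - copied, iter);
	return copied;
}

The point of the atomic attempt is that it can fail partway through without
sleeping; since the iterator is advanced by whatever was copied, the caller can
complete the remainder with the non-atomic variant once it is allowed to fault.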