From: "Kirill A. Shutemov" <kirill.shutemov@xxxxxxxxxxxxxxx> For now we still write/read at most PAGE_CACHE_SIZE bytes a time. This implementation doesn't cover address spaces with backing store. Signed-off-by: Kirill A. Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx> --- mm/filemap.c | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/mm/filemap.c b/mm/filemap.c index e086ef0..ebd361a 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1177,6 +1177,17 @@ find_page: if (unlikely(page == NULL)) goto no_cached_page; } + if (PageTransCompound(page)) { + struct page *head = compound_trans_head(page); + /* + * We don't yet support huge pages in page cache + * for filesystems with backing device, so pages + * should always be up-to-date. + */ + BUG_ON(ra->ra_pages); + BUG_ON(!PageUptodate(head)); + goto page_ok; + } if (PageReadahead(page)) { page_cache_async_readahead(mapping, ra, filp, page, @@ -2413,8 +2424,13 @@ again: if (mapping_writably_mapped(mapping)) flush_dcache_page(page); + if (PageTransHuge(page)) + offset = pos & ~HPAGE_PMD_MASK; + pagefault_disable(); - copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); + copied = iov_iter_copy_from_user_atomic( + page + (offset >> PAGE_CACHE_SHIFT), + i, offset & ~PAGE_CACHE_MASK, bytes); pagefault_enable(); flush_dcache_page(page); @@ -2437,6 +2453,7 @@ again: * because not all segments in the iov can be copied at * once without a pagefault. */ + offset = pos & ~PAGE_CACHE_MASK; bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, iov_iter_single_seg_count(i)); goto again; -- 1.7.10.4 -- To unsubscribe from this list: send the line "unsubscribe linux-fsdevel" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html