From: Darrick J. Wong <djwong@xxxxxxxxxx>

Support using directly-mapped pages in the buffer cache when the fs
blocksize is less than the page size.  This is not strictly necessary
since the only user of direct-map buffers always uses page-sized
buffers, but I included it here for completeness.

Signed-off-by: Darrick J. Wong <djwong@xxxxxxxxxx>
---
 fs/xfs/xfs_buf.c       |    8 ++++++--
 fs/xfs/xfs_buf_xfile.c |   20 +++++++++++++++++---
 2 files changed, 23 insertions(+), 5 deletions(-)

diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 9b14f317396c..bc386c0a3ed5 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -321,7 +321,7 @@ xfs_buf_free(
 	ASSERT(list_empty(&bp->b_lru));
 
 	if (xfs_buf_is_vmapped(bp))
-		vm_unmap_ram(bp->b_addr, bp->b_page_count);
+		vm_unmap_ram(bp->b_addr - bp->b_offset, bp->b_page_count);
 
 	if (bp->b_flags & _XBF_DIRECT_MAP)
 		xfile_buf_unmap_pages(bp);
@@ -434,6 +434,8 @@ xfs_buf_alloc_pages(
 		XFS_STATS_INC(bp->b_mount, xb_page_retries);
 		memalloc_retry_wait(gfp_mask);
 	}
+
+	bp->b_offset = 0;
 	return 0;
 }
 
@@ -449,7 +451,7 @@ _xfs_buf_map_pages(
 
 	if (bp->b_page_count == 1) {
 		/* A single page buffer is always mappable */
-		bp->b_addr = page_address(bp->b_pages[0]);
+		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
 	} else if (flags & XBF_UNMAPPED) {
 		bp->b_addr = NULL;
 	} else {
@@ -476,6 +478,8 @@
 		if (!bp->b_addr)
 			return -ENOMEM;
+
+		bp->b_addr += bp->b_offset;
 	}
 
 	return 0;
 }
diff --git a/fs/xfs/xfs_buf_xfile.c b/fs/xfs/xfs_buf_xfile.c
index 5757e0521a60..06c5d14f1093 100644
--- a/fs/xfs/xfs_buf_xfile.c
+++ b/fs/xfs/xfs_buf_xfile.c
@@ -163,15 +163,27 @@ xfile_buf_map_pages(
 	gfp_t			gfp_mask = __GFP_NOWARN;
 	const unsigned int	page_align_mask = PAGE_SIZE - 1;
 	unsigned int		m, p, n;
+	unsigned int		first_page_offset;
 	int			error;
 
 	ASSERT(xfile_buftarg_can_direct_map(bp->b_target));
 
-	/* For direct-map buffers, each map has to be page aligned. */
-	for (m = 0, map = bp->b_maps; m < bp->b_map_count; m++, map++)
-		if (BBTOB(map->bm_bn | map->bm_len) & page_align_mask)
+	/*
+	 * For direct-map buffer targets with multiple mappings, the first map
+	 * must end on a page boundary and the rest of the mappings must start
+	 * and end on a page boundary.  For single-mapping buffers, we don't
+	 * care.
+	 */
+	if (bp->b_map_count > 1) {
+		map = &bp->b_maps[0];
+		if (BBTOB(map->bm_bn + map->bm_len) & page_align_mask)
 			return -ENOTBLK;
 
+		for (m = 1, map++; m < bp->b_map_count - 1; m++, map++)
+			if (BBTOB(map->bm_bn | map->bm_len) & page_align_mask)
+				return -ENOTBLK;
+	}
+
 	if (flags & XBF_READ_AHEAD)
 		gfp_mask |= __GFP_NORETRY;
 	else
@@ -182,6 +194,7 @@ xfile_buf_map_pages(
 		return error;
 
 	/* Map in the xfile pages. */
+	first_page_offset = offset_in_page(BBTOB(xfs_buf_daddr(bp)));
 	for (m = 0, p = 0, map = bp->b_maps; m < bp->b_map_count; m++, map++) {
 		for (n = 0; n < map->bm_len; n += BTOBB(PAGE_SIZE)) {
 			unsigned int	len;
@@ -198,6 +211,7 @@
 	}
 
 	bp->b_flags |= _XBF_DIRECT_MAP;
+	bp->b_offset = first_page_offset;
 	return 0;
 
 fail:
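
The xfs_buf.c half of this is pure offset bookkeeping: b_addr always
points at the start of the buffer's data, so the mapping paths add
b_offset after mapping and the teardown path has to subtract it before
handing the address back to vm_unmap_ram().  As a userspace sketch of
that invariant (plain mmap/munmap standing in for vm_map_ram and
vm_unmap_ram, and a toy struct standing in for struct xfs_buf; none of
this is kernel code):

#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Toy stand-in for struct xfs_buf with only the fields this needs. */
struct buf {
	void	*b_addr;	/* points at the data, not the page */
	size_t	b_offset;	/* data offset within the first page */
	size_t	b_maplen;	/* bytes actually mapped */
};

/* Map one page and aim b_addr at a sub-page offset inside it. */
static int buf_map(struct buf *bp, size_t offset)
{
	void	*base = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (base == MAP_FAILED)
		return -1;
	bp->b_offset = offset;
	bp->b_addr = (char *)base + offset;
	bp->b_maplen = getpagesize();
	return 0;
}

/*
 * Teardown subtracts b_offset to recover the page-aligned mapping
 * base, which is what xfs_buf_free() now does before calling
 * vm_unmap_ram().
 */
static void buf_unmap(struct buf *bp)
{
	munmap((char *)bp->b_addr - bp->b_offset, bp->b_maplen);
}

int main(void)
{
	struct buf	bp;

	if (buf_map(&bp, 1024))
		return 1;
	memset(bp.b_addr, 0x58, 512);	/* use the sub-page buffer */
	buf_unmap(&bp);
	return 0;
}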
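
The alignment policy in xfile_buf_map_pages() can be exercised the same
way.  A single map may start anywhere inside a page (the new
first_page_offset handles that); once there are several maps, the first
must end on a page boundary and the middle maps must start and end on
one, because all of the maps land in a single contiguous virtual
mapping.  A standalone mirror of that check, compilable on its own
(PAGE_SIZE, the struct, and the helper are illustrative stand-ins, and
the loop skips the final map just as the patch's loop does):

#include <stdio.h>

#define PAGE_SIZE	4096ULL		/* assumed page size, demo only */
#define BBSHIFT		9		/* XFS basic blocks are 512 bytes */
#define BBTOB(bbs)	((bbs) << BBSHIFT)

/* Stand-in for struct xfs_buf_map: start and length in basic blocks. */
struct map {
	unsigned long long	bm_bn;
	unsigned long long	bm_len;
};

static int direct_map_ok(const struct map *maps, int nmaps)
{
	const unsigned long long	page_align_mask = PAGE_SIZE - 1;
	int				m;

	if (nmaps == 1)
		return 1;	/* single map: any offset is fine */

	/* first map only has to end on a page boundary */
	if (BBTOB(maps[0].bm_bn + maps[0].bm_len) & page_align_mask)
		return 0;

	/* middle maps must start and end page aligned */
	for (m = 1; m < nmaps - 1; m++)
		if (BBTOB(maps[m].bm_bn | maps[m].bm_len) & page_align_mask)
			return 0;

	return 1;
}

int main(void)
{
	/* One 4096-byte page covers eight 512-byte basic blocks. */
	struct map lone = { .bm_bn = 3, .bm_len = 1 };	/* sub-page, unaligned */
	struct map good[] = {
		{ .bm_bn = 5, .bm_len = 3 },	/* ends at block 8: page boundary */
		{ .bm_bn = 64, .bm_len = 8 },
	};
	struct map bad[] = {
		{ .bm_bn = 5, .bm_len = 2 },	/* ends at block 7: mid-page */
		{ .bm_bn = 64, .bm_len = 8 },
	};

	printf("lone unaligned map: %d\n", direct_map_ok(&lone, 1));	/* 1 */
	printf("aligned pair:       %d\n", direct_map_ok(good, 2));	/* 1 */
	printf("misaligned pair:    %d\n", direct_map_ok(bad, 2));	/* 0 */
	return 0;
}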