Commit 0e6e847f, which introduced the xfs_buf_allocate_memory() function, has a bug that causes the function to overestimate the number of pages it needs. The problem is that xfs_buf_alloc() sets b_bn to -1, so effectively every buffer appears to straddle a page boundary. This causes xfs_buf_allocate_memory() to allocate two pages and to use vmalloc() for accessing the data, which slows things down. Fix the code to use the correct block number.

Signed-off-by: Jan Kara <jack@xxxxxxx>
---
 fs/xfs/xfs_buf.c |    7 ++++---
 1 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 172d3cc..b67cc83 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -296,6 +296,7 @@ xfs_buf_free(
 STATIC int
 xfs_buf_allocate_memory(
 	xfs_buf_t		*bp,
+	xfs_daddr_t		blkno,
 	uint			flags)
 {
 	size_t			size;
@@ -334,8 +335,8 @@ xfs_buf_allocate_memory(
 	}
 
 use_alloc_page:
-	start = BBTOB(bp->b_bn) >> PAGE_SHIFT;
-	end = (BBTOB(bp->b_bn + bp->b_length) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	start = BBTOB(blkno) >> PAGE_SHIFT;
+	end = (BBTOB(blkno + bp->b_length) + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	page_count = end - start;
 	error = _xfs_buf_get_pages(bp, page_count, flags);
 	if (unlikely(error))
@@ -552,7 +553,7 @@ xfs_buf_get(
 	if (unlikely(!new_bp))
 		return NULL;
 
-	error = xfs_buf_allocate_memory(new_bp, flags);
+	error = xfs_buf_allocate_memory(new_bp, blkno, flags);
 	if (error) {
 		kmem_zone_free(xfs_buf_zone, new_bp);
 		return NULL;
-- 
1.7.1
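
To illustrate the overestimate, here is a minimal standalone sketch (not the kernel code) of the start/end page arithmetic used above; it assumes BBSHIFT = 9 and PAGE_SHIFT = 12, and the page_count() helper is hypothetical, only mirroring the computation in xfs_buf_allocate_memory(). With blkno = -1 the buffer looks like it starts 3584 bytes into a page (-512 mod 4096), so a single 4k buffer is counted as two pages:

/*
 * Sketch of the page-count calculation, assuming BBSHIFT = 9 and
 * PAGE_SHIFT = 12.  Not the actual kernel code.
 */
#include <stdio.h>
#include <stdint.h>

#define BBSHIFT		9
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define BBTOB(bbs)	((uint64_t)(bbs) << BBSHIFT)	/* basic blocks -> bytes */

/* hypothetical helper mirroring the start/end computation in the patch */
static unsigned int page_count(int64_t blkno, int64_t blen)
{
	uint64_t start = BBTOB(blkno) >> PAGE_SHIFT;
	uint64_t end = (BBTOB(blkno + blen) + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* narrow return type; in the blkno = -1 case the unsigned
	 * wrap-around still truncates to 2 */
	return end - start;
}

int main(void)
{
	/* 4k buffer (8 basic blocks) at a real, page-aligned disk address */
	printf("blkno =  8: %u page(s)\n", page_count(8, 8));	/* prints 1 */
	/* same buffer, but with the uninitialized b_bn of -1 */
	printf("blkno = -1: %u page(s)\n", page_count(-1, 8));	/* prints 2 */
	return 0;
}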