No need to walk the page list if bp->b_addr is valid.  That also means
b_offset doesn't need to be taken into account in the unmapped loop, as
b_offset is only set for kmem-backed buffers, which are always mapped.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 fs/xfs/xfs_buf.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 15bb790359f8..9e0c64511936 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1688,13 +1688,18 @@ xfs_buf_zero(
 {
 	size_t	bend;
 
+	if (bp->b_addr) {
+		memset(bp->b_addr + boff, 0, bsize);
+		return;
+	}
+
 	bend = boff + bsize;
 	while (boff < bend) {
 		struct page	*page;
 		int		page_index, page_offset, csize;
 
-		page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
-		page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
+		page_index = boff >> PAGE_SHIFT;
+		page_offset = boff & ~PAGE_MASK;
 		page = bp->b_pages[page_index];
 		csize = min_t(size_t, PAGE_SIZE - page_offset,
 				      BBTOB(bp->b_length) - boff);
-- 
2.45.2
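
For context, the change splits zeroing into a mapped fast path (one memset over
bp->b_addr) and the existing unmapped per-page loop.  The following is a minimal
user-space sketch of that pattern, loosely reconstructed from the hunk above; the
struct, the names (my_buf, MY_PAGE_*), and the tail of the loop are illustrative
assumptions, not the actual xfs_buf code.

#include <stddef.h>
#include <string.h>

#define MY_PAGE_SHIFT	12
#define MY_PAGE_SIZE	((size_t)1 << MY_PAGE_SHIFT)

struct my_buf {
	void	*addr;		/* non-NULL when the buffer has a contiguous mapping */
	void	**pages;	/* backing page-sized chunks when unmapped */
	size_t	length;		/* total buffer size in bytes */
};

void my_buf_zero(struct my_buf *bp, size_t boff, size_t bsize)
{
	size_t bend;

	/* Mapped buffer: one memset over the contiguous mapping. */
	if (bp->addr) {
		memset((char *)bp->addr + boff, 0, bsize);
		return;
	}

	/* Unmapped buffer: zero one backing page at a time. */
	bend = boff + bsize;
	while (boff < bend) {
		size_t page_index = boff >> MY_PAGE_SHIFT;
		size_t page_offset = boff & (MY_PAGE_SIZE - 1);
		size_t csize = MY_PAGE_SIZE - page_offset;

		/* Don't zero past the requested range. */
		if (csize > bend - boff)
			csize = bend - boff;

		memset((char *)bp->pages[page_index] + page_offset, 0, csize);
		boff += csize;
	}
}

Note that with b_offset dropped from the loop, page_index/page_offset are derived
from boff alone, which only holds because, per the commit message, any buffer that
still has b_offset set is kmem-backed and therefore takes the mapped fast path.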