On Tue, Apr 14, 2015 at 05:26:45PM +1000, Dave Chinner wrote:
> From: Dave Chinner <dchinner@xxxxxxxxxx>
>
> The mapping size calculation is done last in __xfs_get_blocks(), but
> we are going to need the actual mapping size we will use to map the
> direct IO correctly in xfs_map_direct(). Factor out the calculation
> for code clarity, and move the call to be the first operation in
> mapping the extent to the returned buffer.
>
> Signed-off-by: Dave Chinner <dchinner@xxxxxxxxxx>
> ---

Reviewed-by: Brian Foster <bfoster@xxxxxxxxxx>

>  fs/xfs/xfs_aops.c | 79 ++++++++++++++++++++++++++++++++-----------------------
>  1 file changed, 46 insertions(+), 33 deletions(-)
>
> diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
> index 5f7ddd5..8f63520 100644
> --- a/fs/xfs/xfs_aops.c
> +++ b/fs/xfs/xfs_aops.c
> @@ -1249,6 +1249,47 @@ xfs_map_direct(
>  	}
>  }
>
> +
> +/*
> + * If this is O_DIRECT or the mpage code calling tell them how large the mapping
> + * is, so that we can avoid repeated get_blocks calls.
> + *
> + * If the mapping spans EOF, then we have to break the mapping up as the mapping
> + * for blocks beyond EOF must be marked new so that sub block regions can be
> + * correctly zeroed. We can't do this for mappings within EOF unless the mapping
> + * was just allocated or is unwritten, otherwise the callers would overwrite
> + * existing data with zeros. Hence we have to split the mapping into a range up
> + * to and including EOF, and a second mapping for beyond EOF.
> + */
> +static void
> +xfs_map_trim_size(
> +	struct inode		*inode,
> +	sector_t		iblock,
> +	struct buffer_head	*bh_result,
> +	struct xfs_bmbt_irec	*imap,
> +	xfs_off_t		offset,
> +	ssize_t			size)
> +{
> +	xfs_off_t		mapping_size;
> +
> +	mapping_size = imap->br_startoff + imap->br_blockcount - iblock;
> +	mapping_size <<= inode->i_blkbits;
> +
> +	ASSERT(mapping_size > 0);
> +	if (mapping_size > size)
> +		mapping_size = size;
> +	if (offset < i_size_read(inode) &&
> +	    offset + mapping_size >= i_size_read(inode)) {
> +		/* limit mapping to block that spans EOF */
> +		mapping_size = roundup_64(i_size_read(inode) - offset,
> +					  1 << inode->i_blkbits);
> +	}
> +	if (mapping_size > LONG_MAX)
> +		mapping_size = LONG_MAX;
> +
> +	bh_result->b_size = mapping_size;
> +}
> +
>  STATIC int
>  __xfs_get_blocks(
>  	struct inode		*inode,
> @@ -1347,6 +1388,11 @@ __xfs_get_blocks(
>  		goto out_unlock;
>  	}
>
> +	/* trim mapping down to size requested */
> +	if (direct || size > (1 << inode->i_blkbits))
> +		xfs_map_trim_size(inode, iblock, bh_result,
> +				  &imap, offset, size);
> +
>  	if (imap.br_startblock != HOLESTARTBLOCK &&
>  	    imap.br_startblock != DELAYSTARTBLOCK &&
>  	    (create || !ISUNWRITTEN(&imap))) {
> @@ -1388,39 +1434,6 @@ __xfs_get_blocks(
>  		}
>  	}
>
> -	/*
> -	 * If this is O_DIRECT or the mpage code calling tell them how large
> -	 * the mapping is, so that we can avoid repeated get_blocks calls.
> -	 *
> -	 * If the mapping spans EOF, then we have to break the mapping up as the
> -	 * mapping for blocks beyond EOF must be marked new so that sub block
> -	 * regions can be correctly zeroed. We can't do this for mappings within
> -	 * EOF unless the mapping was just allocated or is unwritten, otherwise
> -	 * the callers would overwrite existing data with zeros. Hence we have
> -	 * to split the mapping into a range up to and including EOF, and a
> -	 * second mapping for beyond EOF.
> -	 */
> -	if (direct || size > (1 << inode->i_blkbits)) {
> -		xfs_off_t	mapping_size;
> -
> -		mapping_size = imap.br_startoff + imap.br_blockcount - iblock;
> -		mapping_size <<= inode->i_blkbits;
> -
> -		ASSERT(mapping_size > 0);
> -		if (mapping_size > size)
> -			mapping_size = size;
> -		if (offset < i_size_read(inode) &&
> -		    offset + mapping_size >= i_size_read(inode)) {
> -			/* limit mapping to block that spans EOF */
> -			mapping_size = roundup_64(i_size_read(inode) - offset,
> -						  1 << inode->i_blkbits);
> -		}
> -		if (mapping_size > LONG_MAX)
> -			mapping_size = LONG_MAX;
> -
> -		bh_result->b_size = mapping_size;
> -	}
> -
>  	return 0;
>
>  out_unlock:
> --
> 2.0.0
>
> _______________________________________________
> xfs mailing list
> xfs@xxxxxxxxxxx
> http://oss.sgi.com/mailman/listinfo/xfs

_______________________________________________
xfs mailing list
xfs@xxxxxxxxxxx
http://oss.sgi.com/mailman/listinfo/xfs
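To make the EOF trimming described in the patch comment concrete, here is a minimal userspace sketch of the same arithmetic xfs_map_trim_size() performs. It is illustration only, not XFS code: plain integers stand in for the kernel types (sector_t, xfs_off_t, buffer_head), the kernel helper roundup_64() is open-coded as round-up-to-block arithmetic, and the block size, file size and extent geometry are made-up example values.

	/*
	 * Userspace sketch of the trimming arithmetic in xfs_map_trim_size().
	 * All values below are hypothetical example numbers, not kernel data.
	 */
	#include <limits.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const int	blkbits = 12;			/* assume 4096-byte blocks */
		const int64_t	blksize = 1LL << blkbits;

		/* hypothetical mapping request and extent */
		int64_t		isize = 10000;			/* i_size: EOF falls mid-block */
		int64_t		iblock = 1;			/* first block of the request */
		int64_t		offset = iblock << blkbits;	/* byte offset of the request */
		int64_t		size = 4 * blksize;		/* bytes the caller asked for */
		int64_t		ext_startoff = 0;		/* extent start, in blocks */
		int64_t		ext_blockcount = 8;		/* extent length, in blocks */

		/* bytes left in the extent from iblock to the end of the extent */
		int64_t		mapping_size =
			(ext_startoff + ext_blockcount - iblock) << blkbits;

		/* never hand back more than the caller asked for */
		if (mapping_size > size)
			mapping_size = size;

		/* if the mapping crosses EOF, stop at the block that contains EOF */
		if (offset < isize && offset + mapping_size >= isize)
			mapping_size = ((isize - offset + blksize - 1) / blksize)
					* blksize;	/* round up to a block boundary */

		/* b_size is a long-sized field, so clamp like the kernel code does */
		if (mapping_size > LONG_MAX)
			mapping_size = LONG_MAX;

		printf("trimmed mapping size: %lld bytes (%lld blocks)\n",
		       (long long)mapping_size,
		       (long long)(mapping_size / blksize));
		return 0;
	}

With a 4096-byte block size, a 10000-byte file and a four-block request starting at block 1, this prints a trimmed size of 8192 bytes: the two blocks up to and including the block that spans EOF, which is exactly the split the comment in the patch describes.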