First, change the sanity check in gfs2_stuffed_write_end to check for
the actual write size instead of the requested write size. Second, use
the existing teardown code in gfs2_write_end instead of duplicating it
in gfs2_stuffed_write_end. Third, make the page argument optional.

Signed-off-by: Andreas Gruenbacher <agruenba@xxxxxxxxxx>
---
 fs/gfs2/aops.c | 65 ++++++++++++++++++++++++++++------------------------------
 1 file changed, 31 insertions(+), 34 deletions(-)

diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 81dc28a0aba1..aa2031ff82c8 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -814,9 +814,8 @@ static void adjust_fs_space(struct inode *inode)
  * @inode: The inode
  * @dibh: The buffer_head containing the on-disk inode
  * @pos: The file position
- * @len: The length of the write
  * @copied: How much was actually copied by the VFS
- * @page: The page
+ * @page: The (optional) page
  *
  * This copies the data from the page into the inode block after
  * the inode data structure itself.
@@ -824,17 +823,23 @@ static void adjust_fs_space(struct inode *inode)
  * Returns: errno
  */
 static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
-				  loff_t pos, unsigned len, unsigned copied,
+				  loff_t pos, unsigned copied,
 				  struct page *page)
 {
 	struct gfs2_inode *ip = GFS2_I(inode);
-	struct gfs2_sbd *sdp = GFS2_SB(inode);
-	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
 	u64 to = pos + copied;
 	void *kaddr;
 	unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);
+	bool page_grabbed = false;
 
-	BUG_ON(pos + len > gfs2_max_stuffed_size(ip));
+	BUG_ON(pos + copied > gfs2_max_stuffed_size(ip));
+
+	if (!page) {
+		page = find_lock_page(inode->i_mapping, 0);
+		if (WARN_ON(!page))
+			return -ENOMEM;
+		page_grabbed = true;
+	}
 
 	kaddr = kmap_atomic(page);
 	memcpy(buf + pos, kaddr + pos, copied);
@@ -842,28 +847,17 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
 	kunmap_atomic(kaddr);
 
 	WARN_ON(!PageUptodate(page));
-	unlock_page(page);
-	put_page(page);
+
+	if (page_grabbed) {
+		unlock_page(page);
+		put_page(page);
+	}
 
 	if (copied) {
 		if (inode->i_size < to)
 			i_size_write(inode, to);
 		mark_inode_dirty(inode);
 	}
-
-	if (inode == sdp->sd_rindex) {
-		adjust_fs_space(inode);
-		sdp->sd_rindex_uptodate = 0;
-	}
-
-	brelse(dibh);
-	gfs2_trans_end(sdp);
-	if (inode == sdp->sd_rindex) {
-		gfs2_glock_dq(&m_ip->i_gh);
-		gfs2_holder_uninit(&m_ip->i_gh);
-	}
-	gfs2_glock_dq(&ip->i_gh);
-	gfs2_holder_uninit(&ip->i_gh);
 
 	return copied;
 }
 
@@ -877,9 +871,8 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
  * @page: The page that has been written
  * @fsdata: The fsdata (unused in GFS2)
  *
- * The main write_end function for GFS2. We have a separate one for
- * stuffed files as they are slightly different, otherwise we just
- * put our locking around the VFS provided functions.
+ * The main write_end function for GFS2. We just put our locking around the VFS
+ * provided functions.
  *
  * Returns: errno
  */
@@ -900,32 +893,36 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
 	BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);
 
 	ret = gfs2_meta_inode_buffer(ip, &dibh);
-	if (unlikely(ret)) {
-		unlock_page(page);
-		put_page(page);
-		goto failed;
-	}
+	if (unlikely(ret))
+		goto out;
 
-	if (gfs2_is_stuffed(ip))
-		return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);
+	if (gfs2_is_stuffed(ip)) {
+		ret = gfs2_stuffed_write_end(inode, dibh, pos, copied, page);
+		goto out2;
+	}
 
 	if (!gfs2_is_writeback(ip))
 		gfs2_page_add_databufs(ip, page, pos & ~PAGE_MASK, len);
 
 	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
+	page = NULL;
 
 	if (tr->tr_num_buf_new)
 		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
 	else
 		gfs2_trans_add_meta(ip->i_gl, dibh);
-
+out2:
 	if (inode == sdp->sd_rindex) {
 		adjust_fs_space(inode);
 		sdp->sd_rindex_uptodate = 0;
 	}
 
 	brelse(dibh);
-failed:
+out:
+	if (page) {
+		unlock_page(page);
+		put_page(page);
+	}
 	gfs2_trans_end(sdp);
 	gfs2_inplace_release(ip);
 	if (ip->i_qadata && ip->i_qadata->qa_qd_num)
-- 
2.14.3