From: Darrick J. Wong <djwong@xxxxxxxxxx>

This is a precursor to putting more static data in the btree ops structure.

Signed-off-by: Darrick J. Wong <djwong@xxxxxxxxxx>
---
 fs/xfs/libxfs/xfs_alloc_btree.c    | 11 +++++------
 fs/xfs/libxfs/xfs_bmap_btree.c     |  3 +--
 fs/xfs/libxfs/xfs_btree.h          |  2 ++
 fs/xfs/libxfs/xfs_ialloc_btree.c   | 10 ++++++----
 fs/xfs/libxfs/xfs_refcount_btree.c |  4 ++--
 fs/xfs/libxfs/xfs_rmap_btree.c     |  7 +++----
 fs/xfs/scrub/xfbtree.c             |  4 ++--
 7 files changed, 21 insertions(+), 20 deletions(-)

diff --git a/fs/xfs/libxfs/xfs_alloc_btree.c b/fs/xfs/libxfs/xfs_alloc_btree.c
index 90c7cb8c54ab..ec832e8291de 100644
--- a/fs/xfs/libxfs/xfs_alloc_btree.c
+++ b/fs/xfs/libxfs/xfs_alloc_btree.c
@@ -512,18 +512,17 @@ xfs_allocbt_init_common(
 
 	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);
 
-	cur = xfs_btree_alloc_cursor(mp, tp, btnum, mp->m_alloc_maxlevels,
-			xfs_allocbt_cur_cache);
-	cur->bc_ag.abt.active = false;
-
 	if (btnum == XFS_BTNUM_CNT) {
-		cur->bc_ops = &xfs_cntbt_ops;
+		cur = xfs_btree_alloc_cursor(mp, tp, btnum, &xfs_cntbt_ops,
+				mp->m_alloc_maxlevels, xfs_allocbt_cur_cache);
 		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
 		cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
 	} else {
-		cur->bc_ops = &xfs_bnobt_ops;
+		cur = xfs_btree_alloc_cursor(mp, tp, btnum, &xfs_bnobt_ops,
+				mp->m_alloc_maxlevels, xfs_allocbt_cur_cache);
 		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
 	}
+	cur->bc_ag.abt.active = false;
 
 	cur->bc_ag.pag = xfs_perag_hold(pag);
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
index 36a024102d06..f5e9b56870b4 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -548,11 +548,10 @@ xfs_bmbt_init_common(
 
 	ASSERT(whichfork != XFS_COW_FORK);
 
-	cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_BMAP,
+	cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_BMAP, &xfs_bmbt_ops,
 			mp->m_bm_maxlevels[whichfork], xfs_bmbt_cur_cache);
 	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2);
 
-	cur->bc_ops = &xfs_bmbt_ops;
 	cur->bc_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE;
 	if (xfs_has_crc(mp))
 		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h
index a1e7fb0e5806..92a938a7a30e 100644
--- a/fs/xfs/libxfs/xfs_btree.h
+++ b/fs/xfs/libxfs/xfs_btree.h
@@ -737,12 +737,14 @@ xfs_btree_alloc_cursor(
 	struct xfs_mount	*mp,
 	struct xfs_trans	*tp,
 	xfs_btnum_t		btnum,
+	const struct xfs_btree_ops *ops,
 	uint8_t			maxlevels,
 	struct kmem_cache	*cache)
 {
 	struct xfs_btree_cur	*cur;
 
 	cur = kmem_cache_zalloc(cache, GFP_NOFS | __GFP_NOFAIL);
+	cur->bc_ops = ops;
 	cur->bc_tp = tp;
 	cur->bc_mp = mp;
 	cur->bc_btnum = btnum;
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
index 5a945ae21b5d..3b37e231425b 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -455,14 +455,16 @@ xfs_inobt_init_common(
 	struct xfs_mount	*mp = pag->pag_mount;
 	struct xfs_btree_cur	*cur;
 
-	cur = xfs_btree_alloc_cursor(mp, tp, btnum,
-			M_IGEO(mp)->inobt_maxlevels, xfs_inobt_cur_cache);
 	if (btnum == XFS_BTNUM_INO) {
+		cur = xfs_btree_alloc_cursor(mp, tp, btnum, &xfs_inobt_ops,
+				M_IGEO(mp)->inobt_maxlevels,
+				xfs_inobt_cur_cache);
 		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_ibt_2);
-		cur->bc_ops = &xfs_inobt_ops;
 	} else {
+		cur = xfs_btree_alloc_cursor(mp, tp, btnum, &xfs_finobt_ops,
+				M_IGEO(mp)->inobt_maxlevels,
+				xfs_inobt_cur_cache);
 		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_fibt_2);
-		cur->bc_ops = &xfs_finobt_ops;
 	}
 
 	if (xfs_has_crc(mp))
diff --git a/fs/xfs/libxfs/xfs_refcount_btree.c b/fs/xfs/libxfs/xfs_refcount_btree.c
index c5b99f1322ba..dfda80bdcfcb 100644
--- a/fs/xfs/libxfs/xfs_refcount_btree.c
+++ b/fs/xfs/libxfs/xfs_refcount_btree.c
@@ -359,7 +359,8 @@ xfs_refcountbt_init_common(
 	ASSERT(pag->pag_agno < mp->m_sb.sb_agcount);
 
 	cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_REFC,
-			mp->m_refc_maxlevels, xfs_refcountbt_cur_cache);
+			&xfs_refcountbt_ops, mp->m_refc_maxlevels,
+			xfs_refcountbt_cur_cache);
 	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2);
 
 	cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
@@ -367,7 +368,6 @@ xfs_refcountbt_init_common(
 	cur->bc_ag.pag = xfs_perag_hold(pag);
 	cur->bc_ag.refc.nr_ops = 0;
 	cur->bc_ag.refc.shape_changes = 0;
-	cur->bc_ops = &xfs_refcountbt_ops;
 	return cur;
 }
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c
index d8b21b01f4b2..0bef2883e55b 100644
--- a/fs/xfs/libxfs/xfs_rmap_btree.c
+++ b/fs/xfs/libxfs/xfs_rmap_btree.c
@@ -517,11 +517,10 @@ xfs_rmapbt_init_common(
 	struct xfs_btree_cur	*cur;
 
 	/* Overlapping btree; 2 keys per pointer. */
-	cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RMAP,
+	cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RMAP, &xfs_rmapbt_ops,
 			mp->m_rmap_maxlevels, xfs_rmapbt_cur_cache);
 	cur->bc_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING;
 	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
-	cur->bc_ops = &xfs_rmapbt_ops;
 
 	cur->bc_ag.pag = xfs_perag_hold(pag);
 	return cur;
@@ -646,11 +645,11 @@ xfs_rmapbt_mem_cursor(
 
 	/* Overlapping btree; 2 keys per pointer. */
 	cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RMAP,
-			mp->m_rmap_maxlevels, xfs_rmapbt_cur_cache);
+			&xfs_rmapbt_mem_ops, mp->m_rmap_maxlevels,
+			xfs_rmapbt_cur_cache);
 	cur->bc_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING |
 			XFS_BTREE_IN_XFILE;
 	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
-	cur->bc_ops = &xfs_rmapbt_mem_ops;
 	cur->bc_mem.xfbtree = xfbtree;
 	cur->bc_mem.head_bp = head_bp;
 	cur->bc_nlevels = xfs_btree_mem_head_nlevels(head_bp);
diff --git a/fs/xfs/scrub/xfbtree.c b/fs/xfs/scrub/xfbtree.c
index 5cd03457091c..bfd8ff8872a5 100644
--- a/fs/xfs/scrub/xfbtree.c
+++ b/fs/xfs/scrub/xfbtree.c
@@ -258,11 +258,11 @@ xfbtree_dup_cursor(
 	ASSERT(cur->bc_flags & XFS_BTREE_IN_XFILE);
 
 	ncur = xfs_btree_alloc_cursor(cur->bc_mp, cur->bc_tp, cur->bc_btnum,
-			cur->bc_maxlevels, cur->bc_cache);
+			cur->bc_ops, cur->bc_maxlevels, cur->bc_cache);
 	ncur->bc_flags = cur->bc_flags;
 	ncur->bc_nlevels = cur->bc_nlevels;
 	ncur->bc_statoff = cur->bc_statoff;
-	ncur->bc_ops = cur->bc_ops;
+
 	memcpy(&ncur->bc_mem, &cur->bc_mem, sizeof(cur->bc_mem));
 	if (cur->bc_mem.pag)