On 4/23/21 6:10 AM, Brian Foster wrote:
Introduce an in-core counter to track the sum of all allocbt blocks
used by the filesystem. This value is currently tracked per-ag via
the ->agf_btreeblks field in the AGF, which also happens to include
rmapbt blocks. A global, in-core count of allocbt blocks is required
to identify the subset of global ->m_fdblocks that consists of
unavailable blocks currently used for allocation btrees. To support
this calculation at block reservation time, construct a similar
global counter for allocbt blocks, populate it on first read of each
AGF and update it as allocbt blocks are used and released.
Signed-off-by: Brian Foster <bfoster@xxxxxxxxxx>
OK, makes sense.
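If I follow, the idea is that a later patch can fold this counter into the
reservation set-aside so that free space sitting in the bnobt/cntbt doesn't
get handed out. Roughly like the sketch below (the helper name is mine, just
for illustration, not from this series):

	/*
	 * Sketch only: blocks backing the bnobt/cntbt still look free in
	 * ->m_fdblocks but cannot actually be allocated, so treat them as
	 * unavailable alongside the existing set-aside.
	 */
	static inline uint64_t
	xfs_fdblocks_unavailable(struct xfs_mount *mp)
	{
		return mp->m_alloc_set_aside +
		       atomic64_read(&mp->m_allocbt_blks);
	}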
Reviewed-by: Allison Henderson <allison.henderson@xxxxxxxxxx>
---
fs/xfs/libxfs/xfs_alloc.c | 12 ++++++++++++
fs/xfs/libxfs/xfs_alloc_btree.c | 2 ++
fs/xfs/xfs_mount.h | 6 ++++++
3 files changed, 20 insertions(+)
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index aaa19101bb2a..144e2d68245c 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -3036,6 +3036,7 @@ xfs_alloc_read_agf(
struct xfs_agf *agf; /* ag freelist header */
struct xfs_perag *pag; /* per allocation group data */
int error;
+ uint32_t allocbt_blks;
trace_xfs_alloc_read_agf(mp, agno);
@@ -3066,6 +3067,17 @@ xfs_alloc_read_agf(
pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
pag->pagf_init = 1;
pag->pagf_agflreset = xfs_agfl_needs_reset(mp, agf);
+
+ /*
+ * Update the global in-core allocbt block counter. Filter
+ * rmapbt blocks from the on-disk counter because those are
+ * managed by perag reservation.
+ */
+ if (pag->pagf_btreeblks > be32_to_cpu(agf->agf_rmap_blocks)) {
+ allocbt_blks = pag->pagf_btreeblks -
+ be32_to_cpu(agf->agf_rmap_blocks);
+ atomic64_add(allocbt_blks, &mp->m_allocbt_blks);
+ }
}
#ifdef DEBUG
else if (!XFS_FORCED_SHUTDOWN(mp)) {
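One subtlety worth spelling out here: ->agf_btreeblks counts bnobt, cntbt
and rmapbt blocks together, and the rmapbt portion is already covered by the
perag reservation, so only the difference belongs in the global counter.
With made-up numbers:

	pagf_btreeblks  = 12	(bnobt + cntbt + rmapbt, example only)
	agf_rmap_blocks =  4
	allocbt_blks    = 12 - 4 = 8	-> added to ->m_allocbt_blks

The > comparison also keeps the uint32_t subtraction from underflowing in
case a corrupt AGF ever reported more rmapbt blocks than total btree blocks.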
diff --git a/fs/xfs/libxfs/xfs_alloc_btree.c b/fs/xfs/libxfs/xfs_alloc_btree.c
index 8e01231b308e..9f5a45f7baed 100644
--- a/fs/xfs/libxfs/xfs_alloc_btree.c
+++ b/fs/xfs/libxfs/xfs_alloc_btree.c
@@ -71,6 +71,7 @@ xfs_allocbt_alloc_block(
return 0;
}
+ atomic64_inc(&cur->bc_mp->m_allocbt_blks);
xfs_extent_busy_reuse(cur->bc_mp, cur->bc_ag.agno, bno, 1, false);
xfs_trans_agbtree_delta(cur->bc_tp, 1);
@@ -95,6 +96,7 @@ xfs_allocbt_free_block(
if (error)
return error;
+ atomic64_dec(&cur->bc_mp->m_allocbt_blks);
xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
XFS_EXTENT_BUSY_SKIP_DISCARD);
xfs_trans_agbtree_delta(cur->bc_tp, -1);
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 81829d19596e..bb67274ee23f 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -170,6 +170,12 @@ typedef struct xfs_mount {
* extents or anything related to the rt device.
*/
struct percpu_counter m_delalloc_blks;
+ /*
+ * Global count of allocation btree blocks in use across all AGs. Only
+ * used when perag reservation is enabled. Helps prevent block
+ * reservation from attempting to reserve allocation btree blocks.
+ */
+ atomic64_t m_allocbt_blks;
struct radix_tree_root m_perag_tree; /* per-ag accounting info */
spinlock_t m_perag_lock; /* lock for m_perag_tree */
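The atomic64_t (rather than another percpu counter) seems like a reasonable
fit here: updates only happen when a bnobt/cntbt block is allocated or
freed, which is much colder than ->m_fdblocks, and a reader only needs an
approximately current value. I'd expect the consumer to get away with a
plain lockless read, e.g. (my sketch):

	/*
	 * Sketch only: a racy read is fine, since a concurrent btree
	 * block alloc/free only perturbs the result by a block or two
	 * and the set-aside just needs to be close.
	 */
	uint64_t allocbt_inuse = atomic64_read(&mp->m_allocbt_blks);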