From: Darrick J. Wong <darrick.wong@xxxxxxxxxx>

Add a summary variable to the bulkload structure so that we can track
the number of blocks that have been reserved for a particular (btree)
bulkload operation.  Doing so enables us to simplify the logic in
init_freespace_cursors that deals with figuring out how many more
blocks we need to fill the bnobt/cntbt properly.

Signed-off-by: Darrick J. Wong <darrick.wong@xxxxxxxxxx>
---
 repair/agbtree.c  |   33 +++++++++++++++++----------------
 repair/bulkload.c |    2 ++
 repair/bulkload.h |    3 +++
 3 files changed, 22 insertions(+), 16 deletions(-)

diff --git a/repair/agbtree.c b/repair/agbtree.c
index 339b1489..de8015ec 100644
--- a/repair/agbtree.c
+++ b/repair/agbtree.c
@@ -217,8 +217,6 @@ init_freespace_cursors(
 	struct bt_rebuild	*btr_bno,
 	struct bt_rebuild	*btr_cnt)
 {
-	unsigned int		bno_blocks;
-	unsigned int		cnt_blocks;
 	int			error;
 
 	init_rebuild(sc, &XFS_RMAP_OINFO_AG, free_space, btr_bno);
@@ -244,9 +242,7 @@ init_freespace_cursors(
 	 */
 	do {
 		unsigned int	num_freeblocks;
-
-		bno_blocks = btr_bno->bload.nr_blocks;
-		cnt_blocks = btr_cnt->bload.nr_blocks;
+		int		delta_bno, delta_cnt;
 
 		/* Compute how many bnobt blocks we'll need. */
 		error = -libxfs_btree_bload_compute_geometry(btr_bno->cur,
@@ -262,25 +258,30 @@ _("Unable to compute free space by block btree geometry, error %d.\n"), -error);
 			do_error(
_("Unable to compute free space by length btree geometry, error %d.\n"), -error);
 
+		/*
+		 * Compute the deficit between the number of blocks reserved
+		 * and the number of blocks we think we need for the btree.
+		 */
+		delta_bno = (int)btr_bno->newbt.nr_reserved -
+				  btr_bno->bload.nr_blocks;
+		delta_cnt = (int)btr_cnt->newbt.nr_reserved -
+				  btr_cnt->bload.nr_blocks;
+
 		/* We don't need any more blocks, so we're done. */
-		if (bno_blocks >= btr_bno->bload.nr_blocks &&
-		    cnt_blocks >= btr_cnt->bload.nr_blocks)
+		if (delta_bno >= 0 && delta_cnt >= 0) {
+			*extra_blocks = delta_bno + delta_cnt;
 			break;
+		}
 
 		/* Allocate however many more blocks we need this time. */
-		if (bno_blocks < btr_bno->bload.nr_blocks)
-			reserve_btblocks(sc->mp, agno, btr_bno,
-					btr_bno->bload.nr_blocks - bno_blocks);
-		if (cnt_blocks < btr_cnt->bload.nr_blocks)
-			reserve_btblocks(sc->mp, agno, btr_cnt,
-					btr_cnt->bload.nr_blocks - cnt_blocks);
+		if (delta_bno < 0)
+			reserve_btblocks(sc->mp, agno, btr_bno, -delta_bno);
+		if (delta_cnt < 0)
+			reserve_btblocks(sc->mp, agno, btr_cnt, -delta_cnt);
 
 		/* Ok, now how many free space records do we have? */
 		*nr_extents = count_bno_extents_blocks(agno, &num_freeblocks);
 	} while (1);
-
-	*extra_blocks = (bno_blocks - btr_bno->bload.nr_blocks) +
-			(cnt_blocks - btr_cnt->bload.nr_blocks);
 }
 
 /* Rebuild the free space btrees. */
diff --git a/repair/bulkload.c b/repair/bulkload.c
index 81d67e62..8dd0a0c3 100644
--- a/repair/bulkload.c
+++ b/repair/bulkload.c
@@ -40,6 +40,8 @@ bulkload_add_blocks(
 	resv->len = len;
 	resv->used = 0;
 	list_add_tail(&resv->list, &bkl->resv_list);
+	bkl->nr_reserved += len;
+
 	return 0;
 }
 
diff --git a/repair/bulkload.h b/repair/bulkload.h
index 01f67279..a84e99b8 100644
--- a/repair/bulkload.h
+++ b/repair/bulkload.h
@@ -41,6 +41,9 @@ struct bulkload {
 
 	/* The last reservation we allocated from. */
 	struct bulkload_resv	*last_resv;
+
+	/* Number of blocks reserved via resv_list. */
+	unsigned int		nr_reserved;
 };
 
 #define for_each_bulkload_reservation(bkl, resv, n)	\
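
As a minimal illustration of the deficit-tracking pattern this patch
introduces, here is a self-contained C sketch.  It is not xfsprogs code:
struct fake_bulkload, compute_geometry(), reserve_blocks(), and the
plateauing estimate are all invented stand-ins.  Only the shape of the
loop mirrors init_freespace_cursors() after this change: keep a running
nr_reserved as reservations are made, take a signed delta against the
latest geometry estimate, top up while the delta is negative, and report
the final surplus.

#include <stdio.h>

struct fake_bulkload {
	unsigned int	nr_reserved;	/* running count of reserved blocks */
};

/* Stand-in for the geometry estimate, which grows and then stabilizes. */
static unsigned int compute_geometry(unsigned int pass)
{
	return pass < 2 ? 10 + 2 * pass : 14;
}

/* Stand-in for reserve_btblocks(); the real one also records a resv. */
static void reserve_blocks(struct fake_bulkload *bkl, unsigned int len)
{
	bkl->nr_reserved += len;
}

int main(void)
{
	struct fake_bulkload	bkl = { .nr_reserved = 0 };
	unsigned int		pass = 0;
	int			delta;

	do {
		unsigned int	needed = compute_geometry(pass++);

		/* Deficit between blocks reserved and blocks needed. */
		delta = (int)bkl.nr_reserved - (int)needed;
		if (delta >= 0)
			break;	/* leftover becomes *extra_blocks */

		/* Top up by exactly the deficit and re-evaluate. */
		reserve_blocks(&bkl, -delta);
	} while (1);

	printf("reserved %u blocks, %d left over\n", bkl.nr_reserved, delta);
	return 0;
}

The running counter is what buys the simplification: the surplus at the
end is a single signed subtraction against nr_reserved, instead of being
reconstructed after the loop from per-iteration snapshots of
bload.nr_blocks.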