In a similar spirit as previous commits, factor out the routine to
truncate a bulk-checkin packfile when writing past the pack size limit.

Signed-off-by: Taylor Blau <me@xxxxxxxxxxxx>
---
 bulk-checkin.c | 27 +++++++++++++++++----------
 1 file changed, 17 insertions(+), 10 deletions(-)

diff --git a/bulk-checkin.c b/bulk-checkin.c
index c1f5450583..b92d7a6f5a 100644
--- a/bulk-checkin.c
+++ b/bulk-checkin.c
@@ -276,6 +276,22 @@ static void prepare_checkpoint(struct bulk_checkin_packfile *state,
 	}
 }
 
+static void truncate_checkpoint(struct bulk_checkin_packfile *state,
+				struct hashfile_checkpoint *checkpoint,
+				struct pack_idx_entry *idx)
+{
+	/*
+	 * Writing this object to the current pack will make
+	 * it too big; we need to truncate it, start a new
+	 * pack, and write into it.
+	 */
+	if (!idx)
+		BUG("should not happen");
+	hashfile_truncate(state->f, checkpoint);
+	state->offset = checkpoint->offset;
+	flush_bulk_checkin_packfile(state);
+}
+
 static int deflate_blob_to_pack(struct bulk_checkin_packfile *state,
 				struct object_id *result_oid,
 				int fd, size_t size,
@@ -304,16 +320,7 @@ static int deflate_blob_to_pack(struct bulk_checkin_packfile *state,
 		if (!stream_blob_to_pack(state, &ctx, &already_hashed_to,
 					 fd, size, path, flags))
 			break;
-		/*
-		 * Writing this object to the current pack will make
-		 * it too big; we need to truncate it, start a new
-		 * pack, and write into it.
-		 */
-		if (!idx)
-			BUG("should not happen");
-		hashfile_truncate(state->f, &checkpoint);
-		state->offset = checkpoint.offset;
-		flush_bulk_checkin_packfile(state);
+		truncate_checkpoint(state, &checkpoint, idx);
 		if (lseek(fd, seekback, SEEK_SET) == (off_t) -1)
 			return error("cannot seek back");
 	}
-- 
2.42.0.405.gdb2a2f287e
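
For readers unfamiliar with the bulk-checkin machinery, the sketch below
illustrates the general checkpoint-and-truncate pattern that the new helper
encapsulates, using only standard POSIX file I/O. It is not git code: the
names append_with_checkpoint() and pack_size_limit, and the 1 MiB cap, are
made up for illustration, and git's hashfile_truncate() additionally rewinds
the running hash state, which a plain ftruncate() does not.

	/*
	 * Illustration only: the checkpoint-and-truncate pattern used by
	 * bulk-checkin, shown with plain POSIX I/O instead of git's
	 * hashfile API.  All names here are hypothetical.
	 */
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <fcntl.h>
	#include <sys/types.h>

	static const off_t pack_size_limit = 1024 * 1024; /* hypothetical 1 MiB cap */

	/*
	 * Append `len` bytes to the file behind `fd`.  If that pushes the
	 * file past the size limit, roll it back to the checkpointed offset
	 * and report that the caller should start a new file.
	 */
	static int append_with_checkpoint(int fd, const void *buf, size_t len)
	{
		off_t checkpoint = lseek(fd, 0, SEEK_END); /* remember where we started */
		if (checkpoint < 0)
			return -1;

		if (write(fd, buf, len) != (ssize_t)len)
			return -1;

		if (checkpoint + (off_t)len > pack_size_limit) {
			/*
			 * Writing this object made the file too big: truncate
			 * back to the checkpoint, analogous to what
			 * truncate_checkpoint() does via hashfile_truncate()
			 * in the patch above.
			 */
			if (ftruncate(fd, checkpoint) < 0)
				return -1;
			return 1; /* caller should flush and open a new file */
		}
		return 0;
	}

	int main(void)
	{
		int fd = open("demo.pack", O_RDWR | O_CREAT | O_TRUNC, 0644);
		char blob[4096];

		if (fd < 0)
			return 1;
		memset(blob, 'x', sizeof(blob));

		switch (append_with_checkpoint(fd, blob, sizeof(blob))) {
		case 0:
			puts("object fits in the current file");
			break;
		case 1:
			puts("limit hit: file rolled back, start a new one");
			break;
		default:
			perror("append_with_checkpoint");
		}
		close(fd);
		return 0;
	}

The shape mirrors the loop in deflate_blob_to_pack(): take a checkpoint,
attempt the write, and on overflow discard the partial object and retry
against a fresh pack.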