Taylor Blau <me@xxxxxxxxxxxx> writes: > Now that we have factored out many of the common routines necessary to > index a new object into a pack created by the bulk-checkin machinery, we > can introduce a variant of `index_blob_bulk_checkin()` that acts on > blobs whose contents we can fit in memory. Hmph. Doesn't the duplication between the main loop of the new deflate_obj_contents_to_pack() and the existing deflate_blob_to_pack() bother you? A similar duplication in the previous round resulted in a nice refactoring of patches 5 and 6 in this round. Compared to that, the differences in the set-up code between the two functions may be much larger, but subtle and unnecessary differences between the code that was copied and pasted (e.g., we do not check the errors from the seek_to method here, but the original does) are a sign that over time a fix to one will need to be carried over to the other, adding unnecessary maintenance burden, aren't they? > +static int deflate_obj_contents_to_pack_incore(struct bulk_checkin_packfile *state, > + git_hash_ctx *ctx, > + struct hashfile_checkpoint *checkpoint, > + struct object_id *result_oid, > + const void *buf, size_t size, > + enum object_type type, > + const char *path, unsigned flags) > +{ > + struct pack_idx_entry *idx = NULL; > + off_t already_hashed_to = 0; > + struct bulk_checkin_source source = { > + .type = SOURCE_INCORE, > + .buf = buf, > + .size = size, > + .read = 0, > + .path = path, > + }; > + > + /* Note: idx is non-NULL when we are writing */ > + if (flags & HASH_WRITE_OBJECT) > + CALLOC_ARRAY(idx, 1); > + > + while (1) { > + prepare_checkpoint(state, checkpoint, idx, flags); > + > + if (!stream_obj_to_pack(state, ctx, &already_hashed_to, &source, > + type, flags)) > + break; > + truncate_checkpoint(state, checkpoint, idx); > + bulk_checkin_source_seek_to(&source, 0); > + } > + > + finalize_checkpoint(state, ctx, checkpoint, idx, result_oid); > + > + return 0; > +} > + > +static int deflate_blob_to_pack_incore(struct 
bulk_checkin_packfile *state, > + struct object_id *result_oid, > + const void *buf, size_t size, > + const char *path, unsigned flags) > +{ > + git_hash_ctx ctx; > + struct hashfile_checkpoint checkpoint = {0}; > + > + format_object_header_hash(the_hash_algo, &ctx, &checkpoint, OBJ_BLOB, > + size); > + > + return deflate_obj_contents_to_pack_incore(state, &ctx, &checkpoint, > + result_oid, buf, size, > + OBJ_BLOB, path, flags); > +} > + > static int deflate_blob_to_pack(struct bulk_checkin_packfile *state, > struct object_id *result_oid, > int fd, size_t size, > @@ -456,6 +509,17 @@ int index_blob_bulk_checkin(struct object_id *oid, > return status; > } > > +int index_blob_bulk_checkin_incore(struct object_id *oid, > + const void *buf, size_t size, > + const char *path, unsigned flags) > +{ > + int status = deflate_blob_to_pack_incore(&bulk_checkin_packfile, oid, > + buf, size, path, flags); > + if (!odb_transaction_nesting) > + flush_bulk_checkin_packfile(&bulk_checkin_packfile); > + return status; > +} > + > void begin_odb_transaction(void) > { > odb_transaction_nesting += 1; > diff --git a/bulk-checkin.h b/bulk-checkin.h > index aa7286a7b3..1b91daeaee 100644 > --- a/bulk-checkin.h > +++ b/bulk-checkin.h > @@ -13,6 +13,10 @@ int index_blob_bulk_checkin(struct object_id *oid, > int fd, size_t size, > const char *path, unsigned flags); > > +int index_blob_bulk_checkin_incore(struct object_id *oid, > + const void *buf, size_t size, > + const char *path, unsigned flags); > + > /* > * Tell the object database to optimize for adding > * multiple objects. end_odb_transaction must be called