From: Han Xin <hanxin.hx@xxxxxxxxxxxxxxx>

Read the input stream repeatedly in write_loose_object() until reaching
the end, so that we can divide the large blob write into many small
blocks.

Signed-off-by: Han Xin <hanxin.hx@xxxxxxxxxxxxxxx>
---
 object-file.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/object-file.c b/object-file.c
index 8393659f0d..e333448c54 100644
--- a/object-file.c
+++ b/object-file.c
@@ -1891,7 +1891,7 @@ static int write_loose_object(const struct object_id *oid, char *hdr,
 	static struct strbuf tmp_file = STRBUF_INIT;
 	static struct strbuf filename = STRBUF_INIT;
 	const char *buf;
-	unsigned long len;
+	int flush = 0;
 
 	if (is_null_oid(oid)) {
 		/* When oid is not determined, save tmp file to odb path. */
@@ -1927,12 +1927,16 @@ static int write_loose_object(const struct object_id *oid, char *hdr,
 	the_hash_algo->update_fn(&c, hdr, hdrlen);
 
 	/* Then the data itself.. */
-	buf = in_stream->read(in_stream->data, &len);
-	stream.next_in = (void *)buf;
-	stream.avail_in = len;
 	do {
 		unsigned char *in0 = stream.next_in;
-		ret = git_deflate(&stream, Z_FINISH);
+		if (!stream.avail_in) {
+			if ((buf = in_stream->read(in_stream->data, &stream.avail_in))) {
+				stream.next_in = (void *)buf;
+				in0 = (unsigned char *)buf;
+			} else
+				flush = Z_FINISH;
+		}
+		ret = git_deflate(&stream, flush);
 		the_hash_algo->update_fn(&c, in0, stream.next_in - in0);
 		if (!dry_run && write_buffer(fd, compressed, stream.next_out - compressed) < 0)
 			die(_("unable to write loose object file"));
--
2.33.1.44.g9344627884.agit.6.5.4
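
The loop above follows zlib's standard streaming pattern: refill avail_in
only once the previous chunk has been fully consumed, and switch the flush
argument to Z_FINISH when the reader signals end of input. For readers who
want to try the pattern outside of Git, here is a minimal standalone sketch;
it uses plain zlib's deflate() instead of Git's git_deflate() wrapper, and
the reader callback read_some() and the file-based driver are hypothetical
stand-ins for in_stream->read() and write_loose_object(), not part of the
patch itself.

#include <stdio.h>
#include <string.h>
#include <zlib.h>

/*
 * Hypothetical reader (stands in for in_stream->read() in the patch):
 * returns the next chunk of the file and its size via *len, or NULL at EOF.
 */
static const char *read_some(FILE *in, unsigned long *len)
{
	static char chunk[16384];
	size_t n = fread(chunk, 1, sizeof(chunk), in);

	if (!n)
		return NULL;
	*len = n;
	return chunk;
}

/*
 * Deflate an input stream chunk by chunk: refill avail_in only when the
 * previous chunk has been fully consumed, and switch to Z_FINISH once
 * the reader signals end of input.
 */
static int deflate_stream(FILE *in, FILE *out)
{
	z_stream stream;
	unsigned char compressed[4096];
	int flush = Z_NO_FLUSH;
	int ret;

	memset(&stream, 0, sizeof(stream));
	if (deflateInit(&stream, Z_DEFAULT_COMPRESSION) != Z_OK)
		return -1;

	do {
		if (!stream.avail_in && flush != Z_FINISH) {
			unsigned long len;
			const char *buf = read_some(in, &len);

			if (buf) {
				stream.next_in = (unsigned char *)buf;
				stream.avail_in = len;
			} else {
				flush = Z_FINISH;	/* no more input */
			}
		}
		stream.next_out = compressed;
		stream.avail_out = sizeof(compressed);
		ret = deflate(&stream, flush);
		if (fwrite(compressed, 1, sizeof(compressed) - stream.avail_out,
			   out) != sizeof(compressed) - stream.avail_out) {
			deflateEnd(&stream);
			return -1;
		}
	} while (ret != Z_STREAM_END);

	deflateEnd(&stream);
	return 0;
}

int main(void)
{
	return deflate_stream(stdin, stdout) ? 1 : 0;
}

This can be built with e.g. "cc -o stream-deflate stream-deflate.c -lz" and
fed arbitrary data on stdin; it is only meant to illustrate the
refill-then-flush structure the patch introduces, with a bounded amount of
input held in memory at any time.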