On 17.12.21 at 12:26, Han Xin wrote:
> From: Han Xin <hanxin.hx@xxxxxxxxxxxxxxx>
>
> We used to call "get_data()" in "unpack_non_delta_entry()" to read the
> entire contents of a blob object, no matter how big it is. This
> implementation may consume all the memory and cause OOM.
>
> This can be improved by feeding data to "stream_loose_object()" in a
> stream. The input stream is implemented as an interface.
>
> When streaming a large blob object to "write_loose_object()", we have no
> chance to run "write_object_file_prepare()" to calculate the oid in
> advance. So we need to handle undetermined oid in a new function called
> "stream_loose_object()".
>
> In "write_loose_object()", we know the oid and we can write the
> temporary file in the same directory as the final object, but for an
> object with an undetermined oid, we don't know the exact directory for
> the object, so we have to save the temporary file in ".git/objects/"
> directory instead.
>
> We will reuse "write_object_file_flags()" in "unpack_non_delta_entry()" to
> read the entire data contents in stream, so a new flag "HASH_STREAM" is
> added. When read in stream, we needn't prepare the "oid" before
> "write_loose_object()", only generate the header.
> "freshen_packed_object()" or "freshen_loose_object()" will be called
> inside "stream_loose_object()" after obtaining the "oid".
>
> Helped-by: Ævar Arnfjörð Bjarmason <avarab@xxxxxxxxx>
> Helped-by: Jiang Xin <zhiyou.jx@xxxxxxxxxxxxxxx>
> Signed-off-by: Han Xin <hanxin.hx@xxxxxxxxxxxxxxx>
> ---
>  cache.h        |  1 +
>  object-file.c  | 92 ++++++++++++++++++++++++++++++++++++++++++++++++++
>  object-store.h |  5 +++
>  3 files changed, 98 insertions(+)
>
> diff --git a/cache.h b/cache.h
> index cfba463aa9..6d68fd10a3 100644
> --- a/cache.h
> +++ b/cache.h
> @@ -898,6 +898,7 @@ int ie_modified(struct index_state *, const struct cache_entry *, struct stat *,
>  #define HASH_FORMAT_CHECK 2
>  #define HASH_RENORMALIZE 4
>  #define HASH_SILENT 8
> +#define HASH_STREAM 16
>  int index_fd(struct index_state *istate, struct object_id *oid, int fd, struct stat *st, enum object_type type, const char *path, unsigned flags);
>  int index_path(struct index_state *istate, struct object_id *oid, const char *path, struct stat *st, unsigned flags);
>
> diff --git a/object-file.c b/object-file.c
> index dd29e5372e..2ef1d4fb00 100644
> --- a/object-file.c
> +++ b/object-file.c
> @@ -1994,6 +1994,88 @@ static int freshen_packed_object(const struct object_id *oid)
>          return 1;
>  }
>
> +static int stream_loose_object(struct object_id *oid, char *hdr, int hdrlen,
> +                               const struct input_stream *in_stream,
> +                               unsigned long len, time_t mtime, unsigned flags)
> +{
> +        int fd, ret, err = 0, flush = 0;
> +        unsigned char compressed[4096];
> +        git_zstream stream;
> +        git_hash_ctx c;
> +        struct object_id parano_oid;
> +        static struct strbuf tmp_file = STRBUF_INIT;
> +        static struct strbuf filename = STRBUF_INIT;

Note these static strbufs.

> +        int dirlen;
> +
> +        /* When oid is not determined, save tmp file to odb path. */
> +        strbuf_addf(&filename, "%s/", get_object_directory());
> +
> +        fd = create_tmpfile(&tmp_file, filename.buf, flags);
> +        if (fd < 0) {
> +                err = -1;
> +                goto cleanup;
> +        }
> +
> +        /* Set it up and write header */
> +        setup_stream_and_header(&stream, compressed, sizeof(compressed),
> +                                &c, hdr, hdrlen);
> +
> +        /* Then the data itself.. */
> +        do {
> +                unsigned char *in0 = stream.next_in;
> +                if (!stream.avail_in) {
> +                        const void *in = in_stream->read(in_stream, &stream.avail_in);
> +                        stream.next_in = (void *)in;
> +                        in0 = (unsigned char *)in;
> +                        /* All data has been read. */
> +                        if (len + hdrlen == stream.total_in + stream.avail_in)
> +                                flush = Z_FINISH;
> +                }
> +                ret = git_deflate(&stream, flush);
> +                the_hash_algo->update_fn(&c, in0, stream.next_in - in0);
> +                if (write_buffer(fd, compressed, stream.next_out - compressed) < 0)
> +                        die(_("unable to write loose object file"));
> +                stream.next_out = compressed;
> +                stream.avail_out = sizeof(compressed);
> +        } while (ret == Z_OK || ret == Z_BUF_ERROR);
> +
> +        if (ret != Z_STREAM_END)
> +                die(_("unable to deflate new object streamingly (%d)"), ret);
> +        ret = git_deflate_end_gently(&stream);
> +        if (ret != Z_OK)
> +                die(_("deflateEnd on object streamingly failed (%d)"), ret);
> +        the_hash_algo->final_oid_fn(&parano_oid, &c);
> +
> +        close_loose_object(fd);
> +
> +        oidcpy(oid, &parano_oid);
> +
> +        if (freshen_packed_object(oid) || freshen_loose_object(oid)) {
> +                unlink_or_warn(tmp_file.buf);
> +                goto cleanup;
> +        }
> +
> +        loose_object_path(the_repository, &filename, oid);
> +
> +        /* We finally know the object path, and create the missing dir. */
> +        dirlen = directory_size(filename.buf);
> +        if (dirlen) {
> +                struct strbuf dir = STRBUF_INIT;
> +                strbuf_add(&dir, filename.buf, dirlen - 1);
> +
> +                if (mkdir_in_gitdir(dir.buf) < 0) {
> +                        err = -1;
> +                        goto cleanup;
> +                }
> +        }
> +
> +        err = finalize_object_file_with_mtime(tmp_file.buf, filename.buf, mtime, flags);
> +cleanup:
> +        strbuf_release(&tmp_file);
> +        strbuf_release(&filename);

The static strbufs are released here.  That combination is strange --
why keep the variable values between calls by making them static, but
throw away the allocated buffers instead of reusing them?  Given that
this function is only used for huge objects I think making the strbufs
non-static and releasing them is the best choice here.

> +        return err;
> +}
> +
>  int write_object_file_flags(const void *buf, unsigned long len,
>                              const char *type, struct object_id *oid,
>                              unsigned flags)
> @@ -2001,6 +2083,16 @@ int write_object_file_flags(const void *buf, unsigned long len,
>          char hdr[MAX_HEADER_LEN];
>          int hdrlen = sizeof(hdr);
>
> +        /* When streaming a large blob object (marked as HASH_STREAM),
> +         * we have no chance to run "write_object_file_prepare()" to
> +         * calculate the "oid" in advance. Call "stream_loose_object()"
> +         * to write loose object in stream.
> +         */
> +        if (flags & HASH_STREAM) {
> +                hdrlen = generate_object_header(hdr, hdrlen, type, len);
> +                return stream_loose_object(oid, hdr, hdrlen, buf, len, 0, flags);
> +        }

So stream_loose_object() is called by passing the flag HASH_STREAM to
write_object_file_flags() and passing a struct input_stream via its buf
pointer.  That's ... unconventional.  Certainly scary.  Why not export
stream_loose_object() and call it directly?  Demo patch below.

> +
>          /* Normally if we have it in the pack then we do not bother writing
>           * it out into .git/objects/??/?{38} file.
>           */
> diff --git a/object-store.h b/object-store.h
> index 952efb6a4b..4040e2c40a 100644
> --- a/object-store.h
> +++ b/object-store.h
> @@ -34,6 +34,11 @@ struct object_directory {
>          char *path;
>  };
>
> +struct input_stream {
> +        const void *(*read)(const struct input_stream *, unsigned long *len);
> +        void *data;
> +};
> +
>  KHASH_INIT(odb_path_map, const char * /* key: odb_path */,
>          struct object_directory *, 1, fspathhash, fspatheq)

diff --git a/builtin/unpack-objects.c b/builtin/unpack-objects.c
index 42e1033d85..07d186bd20 100644
--- a/builtin/unpack-objects.c
+++ b/builtin/unpack-objects.c
@@ -375,10 +375,8 @@ static void write_stream_blob(unsigned nr, unsigned long size)
         data.zstream = &zstream;
         git_inflate_init(&zstream);
 
-        if (write_object_file_flags(&in_stream, size,
-                                    type_name(OBJ_BLOB),
-                                    &obj_list[nr].oid,
-                                    HASH_STREAM))
+        if (stream_loose_object(&in_stream, size, type_name(OBJ_BLOB), 0, 0,
+                                &obj_list[nr].oid))
                 die(_("failed to write object in stream"));
 
         if (zstream.total_out != size || data.status != Z_STREAM_END)
diff --git a/object-file.c b/object-file.c
index 2ef1d4fb00..0a6b65ab26 100644
--- a/object-file.c
+++ b/object-file.c
@@ -1994,9 +1994,9 @@ static int freshen_packed_object(const struct object_id *oid)
         return 1;
 }
 
-static int stream_loose_object(struct object_id *oid, char *hdr, int hdrlen,
-                               const struct input_stream *in_stream,
-                               unsigned long len, time_t mtime, unsigned flags)
+int stream_loose_object(struct input_stream *in_stream, unsigned long len,
+                        const char *type, time_t mtime, unsigned flags,
+                        struct object_id *oid)
 {
         int fd, ret, err = 0, flush = 0;
         unsigned char compressed[4096];
@@ -2006,6 +2006,10 @@ static int stream_loose_object(struct object_id *oid, char *hdr, int hdrlen,
         static struct strbuf tmp_file = STRBUF_INIT;
         static struct strbuf filename = STRBUF_INIT;
         int dirlen;
+        char hdr[MAX_HEADER_LEN];
+        int hdrlen = sizeof(hdr);
+
+        hdrlen = generate_object_header(hdr, hdrlen, type, len);
 
         /* When oid is not determined, save tmp file to odb path. */
         strbuf_addf(&filename, "%s/", get_object_directory());
@@ -2083,16 +2087,6 @@ int write_object_file_flags(const void *buf, unsigned long len,
         char hdr[MAX_HEADER_LEN];
         int hdrlen = sizeof(hdr);
 
-        /* When streaming a large blob object (marked as HASH_STREAM),
-         * we have no chance to run "write_object_file_prepare()" to
-         * calculate the "oid" in advance. Call "stream_loose_object()"
-         * to write loose object in stream.
-         */
-        if (flags & HASH_STREAM) {
-                hdrlen = generate_object_header(hdr, hdrlen, type, len);
-                return stream_loose_object(oid, hdr, hdrlen, buf, len, 0, flags);
-        }
-
         /* Normally if we have it in the pack then we do not bother writing
          * it out into .git/objects/??/?{38} file.
          */
diff --git a/object-store.h b/object-store.h
index 4040e2c40a..786b6435b1 100644
--- a/object-store.h
+++ b/object-store.h
@@ -237,6 +237,10 @@ static inline int write_object_file(const void *buf, unsigned long len,
         return write_object_file_flags(buf, len, type, oid, 0);
 }
 
+int stream_loose_object(struct input_stream *in_stream, unsigned long len,
+                        const char *type, time_t mtime, unsigned flags,
+                        struct object_id *oid);
+
 int hash_object_file_literally(const void *buf, unsigned long len,
                                const char *type, struct object_id *oid,
                                unsigned flags);
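
For what it's worth, here is a rough sketch (not part of the series,
untested, all names made up) of how a caller-side input_stream could
look, just to illustrate how the read callback and the data pointer are
meant to interact with the exported stream_loose_object() from the demo
patch:

/*
 * Illustration only -- not from the series.  A trivial input_stream
 * that hands out a single in-memory buffer; the real caller in
 * unpack-objects feeds inflated pack data instead.
 */
struct buffer_stream {
        const void *buf;
        unsigned long len;
        int done;
};

static const void *buffer_stream_read(const struct input_stream *in_stream,
                                      unsigned long *len)
{
        struct buffer_stream *bs = in_stream->data;

        if (bs->done) {
                /* Report end of input on any later call. */
                *len = 0;
                return NULL;
        }
        bs->done = 1;
        *len = bs->len;
        return bs->buf;
}

/* Hypothetical caller using the exported function from the demo patch. */
static int write_blob_from_buffer(const void *buf, unsigned long size,
                                  struct object_id *oid)
{
        struct buffer_stream bs = { buf, size, 0 };
        struct input_stream in_stream = {
                .read = buffer_stream_read,
                .data = &bs,
        };

        return stream_loose_object(&in_stream, size, type_name(OBJ_BLOB),
                                   0, 0, oid);
}

The point being that with a plain exported function the caller decides
where the bytes come from, and object-file.c doesn't need a HASH_STREAM
special case at all.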