On Sat, Mar 6, 2021 at 11:35 AM Ævar Arnfjörð Bjarmason <avarab@xxxxxxxxx> wrote:
>
> Remove the "ce_stage(ce) == 1" and "Sort the cache entry" code from
> read_tree(), which allows us to remove the function entirely and move
> over to read_tree_recursive().

You are removing the ce_stage(ce) == 1 check and the use of
read_one_entry() as the read_tree_fn_t, keeping read_one_entry_quick()
as the read_tree_fn_t in all cases. This basically means you are
assuming there are no entries with ce_stage(ce) == 1 to start with;
what if the user calls `ls-files --with-tree` while in the middle of a
merge conflict? Will those entries be duplicated and/or overwritten?

> I don't think the "Sort the cached entry" code was needed here, see
> af3785dc5a7 (Optimize "diff --cached" performance., 2007-08-09) for
> the use-case it was intended for. The only user of this code is
> "ls-files --with-tree", which isn't the sort of use-case that needs to
> care about "ce_stage(ce) != 0" or sorting tree entries.

Could you spell this out more? I don't understand. The initial index
might be something like (using the syntax "filename stage"):

    file 0
    random 0
    some 0

and after read_tree_recursive() reads the tree into stage 1, wouldn't
it be something like

    file 0
    random 0
    some 0
    file 1
    some 1
    strange 1

when the ordering should be

    file 0
    file 1
    random 0
    some 0
    some 1
    strange 1

? I'm probably missing something here.
>
> Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@xxxxxxxxx>
> ---
>  builtin/ls-files.c | 76 +++++++--------------------------------------
>  1 file changed, 11 insertions(+), 65 deletions(-)
>
> diff --git a/builtin/ls-files.c b/builtin/ls-files.c
> index 74d572a3e4a..f5239437809 100644
> --- a/builtin/ls-files.c
> +++ b/builtin/ls-files.c
> @@ -12,7 +12,6 @@
>  #include "dir.h"
>  #include "builtin.h"
>  #include "tree.h"
> -#include "cache-tree.h"
>  #include "parse-options.h"
>  #include "resolve-undo.h"
>  #include "string-list.h"
> @@ -421,12 +420,15 @@ static int get_common_prefix_len(const char *common_prefix)
>  	return common_prefix_len;
>  }
>
> -static int read_one_entry_opt(struct index_state *istate,
> -			      const struct object_id *oid,
> -			      const char *base, int baselen,
> -			      const char *pathname,
> -			      unsigned mode, int stage, int opt)
> +static int read_one_entry_quick(const struct object_id *oid,
> +				struct strbuf *basebuf,
> +				const char *pathname,
> +				unsigned mode,
> +				int stage, void *context)
>  {
> +	struct index_state *istate = context;
> +	const char *base = basebuf->buf;
> +	const int baselen = basebuf->len;
>  	int len;
>  	struct cache_entry *ce;
>
> @@ -442,64 +444,7 @@ static int read_one_entry_opt(struct index_state *istate,
>  	memcpy(ce->name, base, baselen);
>  	memcpy(ce->name + baselen, pathname, len+1);
>  	oidcpy(&ce->oid, oid);
> -	return add_index_entry(istate, ce, opt);
> -}
> -
> -static int read_one_entry(const struct object_id *oid, struct strbuf *base,
> -			  const char *pathname, unsigned mode, int stage,
> -			  void *context)
> -{
> -	struct index_state *istate = context;
> -	return read_one_entry_opt(istate, oid, base->buf, base->len, pathname,
> -				  mode, stage,
> -				  ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
> -}
> -
> -/*
> - * This is used when the caller knows there is no existing entries at
> - * the stage that will conflict with the entry being added.
> - */
> -static int read_one_entry_quick(const struct object_id *oid, struct strbuf *base,
> -				const char *pathname, unsigned mode, int stage,
> -				void *context)
> -{
> -	struct index_state *istate = context;
> -	return read_one_entry_opt(istate, oid, base->buf, base->len, pathname,
> -				  mode, stage,
> -				  ADD_CACHE_JUST_APPEND);
> -}
> -
> -
> -static int read_tree(struct repository *r, struct tree *tree,
> -		     struct pathspec *match, struct index_state *istate)
> -{
> -	read_tree_fn_t fn = NULL;
> -	int i, err;
> -
> -
> -	/*
> -	 * See if we have cache entry at the stage. If so,
> -	 * do it the original slow way, otherwise, append and then
> -	 * sort at the end.
> -	 */
> -	for (i = 0; !fn && i < istate->cache_nr; i++) {
> -		const struct cache_entry *ce = istate->cache[i];
> -		if (ce_stage(ce) == 1)
> -			fn = read_one_entry;
> -	}
> -
> -	if (!fn)
> -		fn = read_one_entry_quick;
> -	err = read_tree_recursive(r, tree, "", 0, 1, match, fn, istate);
> -	if (fn == read_one_entry || err)
> -		return err;
> -
> -	/*
> -	 * Sort the cache entry -- we need to nuke the cache tree, though.
> -	 */
> -	cache_tree_free(&istate->cache_tree);
> -	QSORT(istate->cache, istate->cache_nr, cmp_cache_name_compare);
> -	return 0;
> +	return add_index_entry(istate, ce, ADD_CACHE_JUST_APPEND);
>  }
>
>  /*
> @@ -540,7 +485,8 @@ void overlay_tree_on_index(struct index_state *istate,
>  				     PATHSPEC_PREFER_CWD, prefix, matchbuf);
>  	} else
>  		memset(&pathspec, 0, sizeof(pathspec));
> -	if (read_tree(the_repository, tree, &pathspec, istate))
> +	if (read_tree_recursive(the_repository, tree, "", 0, 1,
> +				&pathspec, read_one_entry_quick, istate))
>  		die("unable to read tree entries %s", tree_name);
>
>  	for (i = 0; i < istate->cache_nr; i++) {
> --
> 2.31.0.rc0.126.g04f22c5b82