Patrick Steinhardt <ps@xxxxxx> writes:

> +static int backend_for(struct reftable_backend **out,
> +		       struct reftable_ref_store *store,
> +		       const char *refname,
> +		       const char **rewritten_ref,
> +		       int reload)
>  {
> +	struct reftable_backend *be;
>  	const char *wtname;
>  	int wtname_len;
>
> -	if (!refname)
> -		return &store->main_backend;
> +	if (!refname) {
> +		be = &store->main_backend;
> +		goto out;
> +	}
>
>  	switch (parse_worktree_ref(refname, &wtname, &wtname_len, rewritten_ref)) {
>  	case REF_WORKTREE_OTHER: {
>  		static struct strbuf wtname_buf = STRBUF_INIT;
>  		struct strbuf wt_dir = STRBUF_INIT;
> -		struct reftable_backend *be;
>
>  		/*
>  		 * We're using a static buffer here so that we don't need to
> @@ -162,7 +166,7 @@ static struct reftable_backend *backend_for(struct reftable_ref_store *store,
>  		}
>
>  		strbuf_release(&wt_dir);
> -		return be;
> +		goto out;

An interesting part of this function is not shown in the above
context, but we look up an existing backend from a strmap, and
allocate one if there isn't one.  In either case, "be" points at
the backend to use.  Now that "be" is no longer local to this
block, we can access it after jumping to the "out" label.

> +out:
> +	if (reload) {
> +		int ret = reftable_stack_reload(be->stack);
> +		if (ret)
> +			return ret;
> +	}
> +	*out = be;
> +
> +	return 0;
>  }

> @@ -828,17 +845,17 @@ static int reftable_be_read_raw_ref(struct ref_store *ref_store,
>  {
>  	struct reftable_ref_store *refs =
>  		reftable_be_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
> -	struct reftable_stack *stack = backend_for(refs, refname, &refname)->stack;
> +	struct reftable_backend *be;
>  	int ret;
>
>  	if (refs->err < 0)
>  		return refs->err;
>
> -	ret = reftable_stack_reload(stack);
> +	ret = backend_for(&be, refs, refname, &refname, 1);
>  	if (ret)
>  		return ret;

This one chooses to reload, so that the next call, i.e. the
"without-reload" one, would not read stale information?

> -	ret = read_ref_without_reload(refs, stack, refname, oid, referent, type);

The following bit is curious.

> +	ret = backend_for(&be, refs, update->refname, NULL, 0);
> +	if (ret)
> +		return ret;
> +

We locate a backend without reloading, and ...

>  	/*
>  	 * Search for a preexisting stack update. If there is one then we add
>  	 * the update to it, otherwise we set up a new stack update.
>  	 */
>  	for (i = 0; !arg && i < tx_data->args_nr; i++)
> -		if (tx_data->args[i].stack == stack)
> +		if (tx_data->args[i].be == be)
>  			arg = &tx_data->args[i];
>  	if (!arg) {

... only when we cannot reuse a preexisting one, ...

>  		struct reftable_addition *addition;
>
> -		ret = reftable_stack_reload(stack);
> +		ret = backend_for(&be, refs, update->refname, NULL, 1);
>  		if (ret)
>  			return ret;

... instead of directly reloading the instance we already have, we
call _for() again to locate one, this time with reload set to 1.
That looks like it is doing some redundant work?  I am confused.

> @@ -1048,7 +1070,11 @@ static int reftable_be_transaction_prepare(struct ref_store *ref_store,
>  		goto done;
>  	}
>
> -	ret = read_ref_without_reload(refs, backend_for(refs, "HEAD", NULL)->stack, "HEAD",
> +	ret = backend_for(&be, refs, "HEAD", NULL, 0);
> +	if (ret)
> +		goto done;
> +
> +	ret = read_ref_without_reload(refs, be->stack, "HEAD",
>  				      &head_oid, &head_referent, &head_type);

This now takes into account the possibility that backend_for()
might fail.  The original code would have segfaulted when that
happened, I guess.
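To make the confusing part above more concrete, here is what I
would have naively expected the prepare path to do instead.  This
is a sketch of mine, not code from the patch, and it assumes a
second lookup for the same refname would resolve to the same
backend anyway:

	ret = backend_for(&be, refs, update->refname, NULL, 0);
	if (ret)
		return ret;

	/*
	 * Search for a preexisting stack update, as before, ...
	 */
	for (i = 0; !arg && i < tx_data->args_nr; i++)
		if (tx_data->args[i].be == be)
			arg = &tx_data->args[i];
	if (!arg) {
		/*
		 * ... and reload the backend we already hold,
		 * instead of calling backend_for() a second time
		 * with reload set to 1 to find it again.
		 */
		ret = reftable_stack_reload(be->stack);
		if (ret)
			return ret;
		...
	}

If the two lookups can legitimately resolve to different backends,
that would deserve an in-code comment; otherwise reloading directly
would save the extra lookup.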
> @@ -1057,10 +1083,11 @@ static int reftable_be_transaction_prepare(struct ref_store *ref_store,
>  	for (i = 0; i < transaction->nr; i++) {
>  		struct ref_update *u = transaction->updates[i];
>  		struct object_id current_oid = {0};
> -		struct reftable_stack *stack;
>  		const char *rewritten_ref;
>
> -		stack = backend_for(refs, u->refname, &rewritten_ref)->stack;
> +		ret = backend_for(&be, refs, u->refname, &rewritten_ref, 0);
> +		if (ret)
> +			goto done;

Ditto; we would have segfaulted in the next hunk when "stack" ended
up being NULL here ...

> @@ -1116,7 +1143,7 @@ static int reftable_be_transaction_prepare(struct ref_store *ref_store,
>  			string_list_insert(&affected_refnames, new_update->refname);
>  		}
>
> -		ret = read_ref_without_reload(refs, stack, rewritten_ref,
> +		ret = read_ref_without_reload(refs, be->stack, rewritten_ref,
>  					      &current_oid, &referent, &u->type);

... here.

> @@ -1831,10 +1858,9 @@ static int reftable_be_copy_ref(struct ref_store *ref_store,
>  {
>  	struct reftable_ref_store *refs =
>  		reftable_be_downcast(ref_store, REF_STORE_WRITE, "copy_ref");
> -	struct reftable_stack *stack = backend_for(refs, newrefname, &newrefname)->stack;
> +	struct reftable_backend *be;
>  	struct write_copy_arg arg = {
>  		.refs = refs,
> -		.stack = stack,
>  		.oldname = oldrefname,
>  		.newname = newrefname,
>  		.logmsg = logmsg,
> @@ -1845,10 +1871,11 @@ static int reftable_be_copy_ref(struct ref_store *ref_store,
>  	if (ret < 0)
>  		goto done;
>
> -	ret = reftable_stack_reload(stack);
> +	ret = backend_for(&be, refs, newrefname, &newrefname, 1);
>  	if (ret)
>  		goto done;

We used to grab "stack" upfront and then call reload here; we now
call backend_for() and let it do the reload as well, so the two
should be equivalent.

> -	struct reftable_stack *stack = backend_for(refs, refname, &refname)->stack;
>  	struct reftable_log_record log = {0};
>  	struct reftable_iterator it = {0};
> +	struct reftable_backend *be;
>  	int ret;
>
>  	if (refs->err < 0)
>  		return refs->err;
>
> -	ret = reftable_stack_init_log_iterator(stack, &it);
> +	ret = backend_for(&be, refs, refname, &refname, 0);
> +	if (ret)
> +		goto done;
> +
> +	ret = reftable_stack_init_log_iterator(be->stack, &it);

Again, other than the fact that the new code carefully prepares for
the case where backend_for() fails to find "be", the versions of
the code with and without the patch are equivalent.

> @@ -2052,16 +2083,20 @@ static int reftable_be_for_each_reflog_ent(struct ref_store *ref_store,
>  {
>  	struct reftable_ref_store *refs =
>  		reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent");
> -	struct reftable_stack *stack = backend_for(refs, refname, &refname)->stack;
>  	struct reftable_log_record *logs = NULL;
>  	struct reftable_iterator it = {0};
> +	struct reftable_backend *be;
>  	size_t logs_alloc = 0, logs_nr = 0, i;
>  	int ret;
>
>  	if (refs->err < 0)
>  		return refs->err;
>
> -	ret = reftable_stack_init_log_iterator(stack, &it);
> +	ret = backend_for(&be, refs, refname, &refname, 0);
> +	if (ret)
> +		goto done;
> +
> +	ret = reftable_stack_init_log_iterator(be->stack, &it);

Ditto.
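Spelling out the shape that keeps repeating in these hunks (and in
the reflog_exists hunk quoted below); this is a schematic sketch of
mine, not a verbatim quote from the patch:

	/* before: the helper could not report a failed lookup */
	struct reftable_stack *stack = backend_for(refs, refname, &refname)->stack;
	ret = reftable_stack_init_log_iterator(stack, &it);

	/* after: lookup errors (and optional reload errors) propagate */
	struct reftable_backend *be;
	ret = backend_for(&be, refs, refname, &refname, 0 /* or 1 to reload */);
	if (ret)
		goto done;
	ret = reftable_stack_init_log_iterator(be->stack, &it);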
> @@ -2101,20 +2136,20 @@ static int reftable_be_reflog_exists(struct ref_store *ref_store,
>  {
>  	struct reftable_ref_store *refs =
>  		reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_exists");
> -	struct reftable_stack *stack = backend_for(refs, refname, &refname)->stack;
>  	struct reftable_log_record log = {0};
>  	struct reftable_iterator it = {0};
> +	struct reftable_backend *be;
>  	int ret;
>
>  	ret = refs->err;
>  	if (ret < 0)
>  		goto done;
>
> -	ret = reftable_stack_reload(stack);
> +	ret = backend_for(&be, refs, refname, &refname, 1);
>  	if (ret < 0)
>  		goto done;
>
> -	ret = reftable_stack_init_log_iterator(stack, &it);
> +	ret = reftable_stack_init_log_iterator(be->stack, &it);
>  	if (ret < 0)
>  		goto done;

Ditto.

Overall the two versions seem to be mostly equivalent, except that
the new code is a bit more careful about a failing backend_for().
One part of the code confused me (and I am still unsure about it),
but other than that it was a pleasant read.

Thanks.