Refactor `read_ref_without_reload()` to accept a `struct reftable_backend`
as input instead of accepting a `struct reftable_stack`. This allows us to
implement an additional caching layer when reading refs where we can reuse
reftable iterators.

Signed-off-by: Patrick Steinhardt <ps@xxxxxx>
---
 refs/reftable-backend.c   | 97 +++++++++++++++++++--------------------
 reftable/reftable-stack.h |  3 ++
 reftable/stack.c          |  5 ++
 3 files changed, 54 insertions(+), 51 deletions(-)

diff --git a/refs/reftable-backend.c b/refs/reftable-backend.c
index 93f3602faa..99caa9d5e6 100644
--- a/refs/reftable-backend.c
+++ b/refs/reftable-backend.c
@@ -49,6 +49,37 @@ static void reftable_backend_release(struct reftable_backend *be)
 	be->stack = NULL;
 }
 
+static int reftable_backend_read_ref(struct reftable_backend *be,
+				     const char *refname,
+				     struct object_id *oid,
+				     struct strbuf *referent,
+				     unsigned int *type)
+{
+	struct reftable_ref_record ref = {0};
+	int ret;
+
+	ret = reftable_stack_read_ref(be->stack, refname, &ref);
+	if (ret)
+		goto done;
+
+	if (ref.value_type == REFTABLE_REF_SYMREF) {
+		strbuf_reset(referent);
+		strbuf_addstr(referent, ref.value.symref);
+		*type |= REF_ISSYMREF;
+	} else if (reftable_ref_record_val1(&ref)) {
+		oidread(oid, reftable_ref_record_val1(&ref),
+			&hash_algos[hash_algo_by_id(reftable_stack_hash_id(be->stack))]);
+	} else {
+		/* We got a tombstone, which should not happen. */
+		BUG("unhandled reference value type %d", ref.value_type);
+	}
+
+done:
+	assert(ret != REFTABLE_API_ERROR);
+	reftable_ref_record_release(&ref);
+	return ret;
+}
+
 struct reftable_ref_store {
 	struct ref_store base;
 
@@ -241,38 +272,6 @@ static void fill_reftable_log_record(struct reftable_log_record *log, const stru
 	log->value.update.tz_offset = sign * atoi(tz_begin);
 }
 
-static int read_ref_without_reload(struct reftable_ref_store *refs,
-				   struct reftable_stack *stack,
-				   const char *refname,
-				   struct object_id *oid,
-				   struct strbuf *referent,
-				   unsigned int *type)
-{
-	struct reftable_ref_record ref = {0};
-	int ret;
-
-	ret = reftable_stack_read_ref(stack, refname, &ref);
-	if (ret)
-		goto done;
-
-	if (ref.value_type == REFTABLE_REF_SYMREF) {
-		strbuf_reset(referent);
-		strbuf_addstr(referent, ref.value.symref);
-		*type |= REF_ISSYMREF;
-	} else if (reftable_ref_record_val1(&ref)) {
-		oidread(oid, reftable_ref_record_val1(&ref),
-			refs->base.repo->hash_algo);
-	} else {
-		/* We got a tombstone, which should not happen. */
-		BUG("unhandled reference value type %d", ref.value_type);
-	}
-
-done:
-	assert(ret != REFTABLE_API_ERROR);
-	reftable_ref_record_release(&ref);
-	return ret;
-}
-
 static int reftable_be_config(const char *var, const char *value,
 			      const struct config_context *ctx,
 			      void *_opts)
@@ -838,7 +837,7 @@ static int reftable_be_read_raw_ref(struct ref_store *ref_store,
 	if (ret)
 		return ret;
 
-	ret = read_ref_without_reload(refs, be->stack, refname, oid, referent, type);
+	ret = reftable_backend_read_ref(be, refname, oid, referent, type);
 	if (ret < 0)
 		return ret;
 	if (ret > 0) {
@@ -1057,8 +1056,8 @@ static int reftable_be_transaction_prepare(struct ref_store *ref_store,
 	if (ret)
 		goto done;
 
-	ret = read_ref_without_reload(refs, be->stack, "HEAD",
-				      &head_oid, &head_referent, &head_type);
+	ret = reftable_backend_read_ref(be, "HEAD", &head_oid,
+					&head_referent, &head_type);
 	if (ret < 0)
 		goto done;
 	ret = 0;
@@ -1126,8 +1125,8 @@ static int reftable_be_transaction_prepare(struct ref_store *ref_store,
 			string_list_insert(&affected_refnames, new_update->refname);
 		}
 
-		ret = read_ref_without_reload(refs, be->stack, rewritten_ref,
-					      &current_oid, &referent, &u->type);
+		ret = reftable_backend_read_ref(be, rewritten_ref,
+						&current_oid, &referent, &u->type);
 		if (ret < 0)
 			goto done;
 		if (ret > 0 && !ref_update_expects_existing_old_ref(u)) {
@@ -1585,7 +1584,7 @@ struct write_create_symref_arg {
 
 struct write_copy_arg {
 	struct reftable_ref_store *refs;
-	struct reftable_stack *stack;
+	struct reftable_backend *be;
 	const char *oldname;
 	const char *newname;
 	const char *logmsg;
@@ -1610,7 +1609,7 @@ static int write_copy_table(struct reftable_writer *writer, void *cb_data)
 	if (split_ident_line(&committer_ident, committer_info, strlen(committer_info)))
 		BUG("failed splitting committer info");
 
-	if (reftable_stack_read_ref(arg->stack, arg->oldname, &old_ref)) {
+	if (reftable_stack_read_ref(arg->be->stack, arg->oldname, &old_ref)) {
 		ret = error(_("refname %s not found"), arg->oldname);
 		goto done;
 	}
@@ -1649,7 +1648,7 @@ static int write_copy_table(struct reftable_writer *writer, void *cb_data)
 	 * the old branch and the creation of the new branch, and we cannot do
 	 * two changes to a reflog in a single update.
 	 */
-	deletion_ts = creation_ts = reftable_stack_next_update_index(arg->stack);
+	deletion_ts = creation_ts = reftable_stack_next_update_index(arg->be->stack);
 	if (arg->delete_old)
 		creation_ts++;
 	reftable_writer_set_limits(writer, deletion_ts, creation_ts);
@@ -1692,8 +1691,8 @@ static int write_copy_table(struct reftable_writer *writer, void *cb_data)
 		memcpy(logs[logs_nr].value.update.old_hash, old_ref.value.val1, GIT_MAX_RAWSZ);
 		logs_nr++;
 
-		ret = read_ref_without_reload(arg->refs, arg->stack, "HEAD", &head_oid,
-					      &head_referent, &head_type);
+		ret = reftable_backend_read_ref(arg->be, "HEAD", &head_oid,
+						&head_referent, &head_type);
 		if (ret < 0)
 			goto done;
 		append_head_reflog = (head_type & REF_ISSYMREF) && !strcmp(head_referent.buf, arg->oldname);
@@ -1736,7 +1735,7 @@ static int write_copy_table(struct reftable_writer *writer, void *cb_data)
 	 * copy over all log entries from the old reflog. Last but not least,
 	 * when renaming we also have to delete all the old reflog entries.
 	 */
-	ret = reftable_stack_init_log_iterator(arg->stack, &it);
+	ret = reftable_stack_init_log_iterator(arg->be->stack, &it);
 	if (ret < 0)
 		goto done;
 
@@ -1809,7 +1808,6 @@ static int reftable_be_rename_ref(struct ref_store *ref_store,
 {
 	struct reftable_ref_store *refs =
 		reftable_be_downcast(ref_store, REF_STORE_WRITE, "rename_ref");
-	struct reftable_backend *be;
 	struct write_copy_arg arg = {
 		.refs = refs,
 		.oldname = oldrefname,
@@ -1823,11 +1821,10 @@ static int reftable_be_rename_ref(struct ref_store *ref_store,
 	if (ret < 0)
 		goto done;
 
-	ret = backend_for(&be, refs, newrefname, &newrefname, 1);
+	ret = backend_for(&arg.be, refs, newrefname, &newrefname, 1);
 	if (ret)
 		goto done;
-	arg.stack = be->stack;
-	ret = reftable_stack_add(be->stack, &write_copy_table, &arg);
+	ret = reftable_stack_add(arg.be->stack, &write_copy_table, &arg);
 
 done:
 	assert(ret != REFTABLE_API_ERROR);
@@ -1841,7 +1838,6 @@ static int reftable_be_copy_ref(struct ref_store *ref_store,
 {
 	struct reftable_ref_store *refs =
 		reftable_be_downcast(ref_store, REF_STORE_WRITE, "copy_ref");
-	struct reftable_backend *be;
 	struct write_copy_arg arg = {
 		.refs = refs,
 		.oldname = oldrefname,
@@ -1854,11 +1850,10 @@ static int reftable_be_copy_ref(struct ref_store *ref_store,
 	if (ret < 0)
 		goto done;
 
-	ret = backend_for(&be, refs, newrefname, &newrefname, 1);
+	ret = backend_for(&arg.be, refs, newrefname, &newrefname, 1);
 	if (ret)
 		goto done;
-	arg.stack = be->stack;
-	ret = reftable_stack_add(be->stack, &write_copy_table, &arg);
+	ret = reftable_stack_add(arg.be->stack, &write_copy_table, &arg);
 
 done:
 	assert(ret != REFTABLE_API_ERROR);
diff --git a/reftable/reftable-stack.h b/reftable/reftable-stack.h
index 54787f2ef5..6dfd22e3f5 100644
--- a/reftable/reftable-stack.h
+++ b/reftable/reftable-stack.h
@@ -149,4 +149,7 @@ struct reftable_compaction_stats {
 struct reftable_compaction_stats *
 reftable_stack_compaction_stats(struct reftable_stack *st);
 
+/* return the hash ID of the merged table. */
+uint32_t reftable_stack_hash_id(struct reftable_stack *st);
+
 #endif
diff --git a/reftable/stack.c b/reftable/stack.c
index c33979536e..530ba2d927 100644
--- a/reftable/stack.c
+++ b/reftable/stack.c
@@ -1790,3 +1790,8 @@ int reftable_stack_clean(struct reftable_stack *st)
 	reftable_addition_destroy(add);
 	return err;
 }
+
+uint32_t reftable_stack_hash_id(struct reftable_stack *st)
+{
+	return reftable_merged_table_hash_id(st->merged);
+}
-- 
2.47.0.229.g8f8d6eee53.dirty
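
Note (illustration, not part of the patch): the refactoring means callers no
longer pass the ref store together with its stack; they hand over the
`struct reftable_backend` and the helper derives the hash algorithm from the
stack itself instead of from `refs->base.repo->hash_algo`. The sketch below is
a minimal, hypothetical caller built on the structures from this series; the
helper name `peek_ref` and the final `reload` argument of `backend_for()`
being 0 are assumptions for illustration only.

/*
 * Illustrative sketch only. Assumes `struct reftable_backend`,
 * `backend_for()` and `reftable_backend_read_ref()` as introduced in
 * this series.
 */
static int peek_ref(struct reftable_ref_store *refs, const char *refname,
		    struct object_id *oid)
{
	struct reftable_backend *be;
	struct strbuf referent = STRBUF_INIT;
	unsigned int type = 0;
	int ret;

	/* Resolve which backend (main or worktree stack) owns the ref. */
	ret = backend_for(&be, refs, refname, &refname, 0);
	if (ret)
		goto out;

	/* Previously: read_ref_without_reload(refs, be->stack, ...). */
	ret = reftable_backend_read_ref(be, refname, oid, &referent, &type);

out:
	strbuf_release(&referent);
	return ret;
}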