From: Nguyễn Thái Ngọc Duy <pclouds@xxxxxxxxx>

Index format v4 requires some more computation to assemble a path
based on a previous one. The current code is not very efficient
because

 - it doubles the memory copy: we assemble the final path in a
   temporary buffer before copying it into the cache_entry

 - strbuf_remove() in expand_name_field() is not exactly a good fit
   for stripping a part at the end; _setlen() would do the same job
   and is much cheaper

 - the open-coded loop to find the end of the string in
   expand_name_field() can't beat an optimized strlen()

This patch avoids the temporary buffer and writes directly to the new
cache_entry, which addresses the first two points. The last point
could also be avoided if the total string length fits in the first 12
bits of ce_flags; if not, we fall back to strlen().

Running "test-tool read-cache 100" on webkit.git (275k files), reading
v2 only takes 4.226 seconds, while v4 takes 5.711 seconds, about 35%
more time. The patch reduces read time on v4 to 4.319 seconds.

Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@xxxxxxxxx>
Signed-off-by: Ben Peart <benpeart@xxxxxxxxxxxxx>
---
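For reviewers who have not looked at the v4 name compression recently,
here is a minimal standalone sketch of the reconstruction step this
patch streamlines. Each v4 on-disk entry stores a varint giving the
number of bytes to strip from the end of the previous path, followed by
the NUL-terminated suffix to append; the patch writes the result
directly into the new cache_entry instead of assembling it in a
temporary strbuf first. The helpers below (demo_decode_varint,
demo_expand_name) are simplified stand-ins written only for
illustration; they are not the functions touched by this patch, and the
demo varint decoder is a plain base-128 decoder rather than git's
offset encoding.

#include <stdio.h>
#include <string.h>

/* simplified stand-in for git's decode_varint() */
static size_t demo_decode_varint(const unsigned char **bufp)
{
	const unsigned char *buf = *bufp;
	unsigned char c = *buf++;
	size_t val = c & 0x7f;

	while (c & 0x80) {
		c = *buf++;
		val = (val << 7) + (c & 0x7f);
	}
	*bufp = buf;
	return val;
}

/*
 * Expand one compressed name into 'dst'. 'prev' is the previous
 * entry's expanded path, 'cp' points at <strip-varint><suffix><NUL>.
 * Returns the new name length, or -1 on a malformed field.
 */
static int demo_expand_name(char *dst, size_t dst_size,
			    const char *prev, size_t prev_len,
			    const unsigned char *cp)
{
	size_t strip_len = demo_decode_varint(&cp);
	size_t copy_len, suffix_len;

	if (strip_len > prev_len)
		return -1;	/* cannot strip more than we have */
	copy_len = prev_len - strip_len;
	suffix_len = strlen((const char *)cp);
	if (copy_len + suffix_len + 1 > dst_size)
		return -1;

	memcpy(dst, prev, copy_len);		    /* shared prefix */
	memcpy(dst + copy_len, cp, suffix_len + 1); /* suffix and NUL */
	return (int)(copy_len + suffix_len);
}

int main(void)
{
	/*
	 * Previous path is "t/helper/test-tool.c"; the next entry strips
	 * 6 bytes ("tool.c") and appends "trace.c".
	 */
	const char prev[] = "t/helper/test-tool.c";
	const unsigned char ondisk[] = { 6, 't', 'r', 'a', 'c', 'e', '.', 'c', 0 };
	char name[64];
	int len = demo_expand_name(name, sizeof(name), prev,
				   strlen(prev), ondisk);

	if (len < 0)
		return 1;
	printf("%s (%d)\n", name, len);	/* t/helper/test-trace.c (21) */
	return 0;
}

The real code additionally keeps the name length in the lower 12 bits
of ce_flags and only falls back to strlen() when that field saturates
at CE_NAMEMASK.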
 read-cache.c | 136 +++++++++++++++++++++++++++------------------------
 1 file changed, 71 insertions(+), 65 deletions(-)

diff --git a/read-cache.c b/read-cache.c
index c01d34a71d..d21ccb5e67 100644
--- a/read-cache.c
+++ b/read-cache.c
@@ -1721,33 +1721,6 @@ int read_index(struct index_state *istate)
 	return read_index_from(istate, get_index_file(), get_git_dir());
 }
 
-static struct cache_entry *cache_entry_from_ondisk(struct mem_pool *mem_pool,
-						   struct ondisk_cache_entry *ondisk,
-						   unsigned int flags,
-						   const char *name,
-						   size_t len)
-{
-	struct cache_entry *ce = mem_pool__ce_alloc(mem_pool, len);
-
-	ce->ce_stat_data.sd_ctime.sec = get_be32(&ondisk->ctime.sec);
-	ce->ce_stat_data.sd_mtime.sec = get_be32(&ondisk->mtime.sec);
-	ce->ce_stat_data.sd_ctime.nsec = get_be32(&ondisk->ctime.nsec);
-	ce->ce_stat_data.sd_mtime.nsec = get_be32(&ondisk->mtime.nsec);
-	ce->ce_stat_data.sd_dev = get_be32(&ondisk->dev);
-	ce->ce_stat_data.sd_ino = get_be32(&ondisk->ino);
-	ce->ce_mode = get_be32(&ondisk->mode);
-	ce->ce_stat_data.sd_uid = get_be32(&ondisk->uid);
-	ce->ce_stat_data.sd_gid = get_be32(&ondisk->gid);
-	ce->ce_stat_data.sd_size = get_be32(&ondisk->size);
-	ce->ce_flags = flags & ~CE_NAMEMASK;
-	ce->ce_namelen = len;
-	ce->index = 0;
-	hashcpy(ce->oid.hash, ondisk->sha1);
-	memcpy(ce->name, name, len);
-	ce->name[len] = '\0';
-	return ce;
-}
-
 /*
  * Adjacent cache entries tend to share the leading paths, so it makes
  * sense to only store the differences in later entries.  In the v4
@@ -1762,22 +1735,24 @@ static unsigned long expand_name_field(struct strbuf *name, const char *cp_)
 
 	if (name->len < len)
 		die("malformed name field in the index");
-	strbuf_remove(name, name->len - len, len);
-	for (ep = cp; *ep; ep++)
-		; /* find the end */
+	strbuf_setlen(name, name->len - len);
+	ep = cp + strlen((const char *)cp);
 	strbuf_add(name, cp, ep - cp);
 	return (const char *)ep + 1 - cp_;
 }
 
-static struct cache_entry *create_from_disk(struct mem_pool *mem_pool,
+static struct cache_entry *create_from_disk(struct mem_pool *ce_mem_pool,
+					    unsigned int version,
 					    struct ondisk_cache_entry *ondisk,
 					    unsigned long *ent_size,
-					    struct strbuf *previous_name)
+					    const struct cache_entry *previous_ce)
 {
 	struct cache_entry *ce;
 	size_t len;
 	const char *name;
 	unsigned int flags;
+	size_t copy_len;
+	int expand_name_field = version == 4;
 
 	/* On-disk flags are just 16 bits */
 	flags = get_be16(&ondisk->flags);
@@ -1797,21 +1772,54 @@ static struct cache_entry *create_from_disk(struct mem_pool *mem_pool,
 	else
 		name = ondisk->name;
 
-	if (!previous_name) {
-		/* v3 and earlier */
-		if (len == CE_NAMEMASK)
-			len = strlen(name);
-		ce = cache_entry_from_ondisk(mem_pool, ondisk, flags, name, len);
+	if (expand_name_field) {
+		const unsigned char *cp = (const unsigned char *)name;
+		size_t strip_len, previous_len;
 
-		*ent_size = ondisk_ce_size(ce);
-	} else {
-		unsigned long consumed;
-		consumed = expand_name_field(previous_name, name);
-		ce = cache_entry_from_ondisk(mem_pool, ondisk, flags,
-					     previous_name->buf,
-					     previous_name->len);
+		previous_len = previous_ce ? previous_ce->ce_namelen : 0;
+		strip_len = decode_varint(&cp);
+		if (previous_len < strip_len) {
+			if (previous_ce)
+				die(_("malformed name field in the index, near path '%s'"),
+				    previous_ce->name);
+			else
+				die(_("malformed name field in the index in the first path"));
+		}
+		copy_len = previous_len - strip_len;
+		name = (const char *)cp;
+	}
 
-		*ent_size = (name - ((char *)ondisk)) + consumed;
+	if (len == CE_NAMEMASK) {
+		len = strlen(name);
+		if (expand_name_field)
+			len += copy_len;
+	}
+
+	ce = mem_pool__ce_alloc(ce_mem_pool, len);
+
+	ce->ce_stat_data.sd_ctime.sec = get_be32(&ondisk->ctime.sec);
+	ce->ce_stat_data.sd_mtime.sec = get_be32(&ondisk->mtime.sec);
+	ce->ce_stat_data.sd_ctime.nsec = get_be32(&ondisk->ctime.nsec);
+	ce->ce_stat_data.sd_mtime.nsec = get_be32(&ondisk->mtime.nsec);
+	ce->ce_stat_data.sd_dev = get_be32(&ondisk->dev);
+	ce->ce_stat_data.sd_ino = get_be32(&ondisk->ino);
+	ce->ce_mode = get_be32(&ondisk->mode);
+	ce->ce_stat_data.sd_uid = get_be32(&ondisk->uid);
+	ce->ce_stat_data.sd_gid = get_be32(&ondisk->gid);
+	ce->ce_stat_data.sd_size = get_be32(&ondisk->size);
+	ce->ce_flags = flags & ~CE_NAMEMASK;
+	ce->ce_namelen = len;
+	ce->index = 0;
+	hashcpy(ce->oid.hash, ondisk->sha1);
+
+	if (expand_name_field) {
+		if (copy_len)
+			memcpy(ce->name, previous_ce->name, copy_len);
+		memcpy(ce->name + copy_len, name, len + 1 - copy_len);
+		*ent_size = (name - ((char *)ondisk)) + len + 1 - copy_len;
+	} else {
+		memcpy(ce->name, name, len + 1);
+		*ent_size = ondisk_ce_size(ce);
 	}
 	return ce;
 }
@@ -1948,7 +1956,7 @@ static void *load_index_extensions(void *_data)
  */
 static unsigned long load_cache_entry_block(struct index_state *istate,
 			struct mem_pool *ce_mem_pool, int offset, int nr, void *mmap,
-			unsigned long start_offset, struct strbuf *previous_name)
+			unsigned long start_offset, const struct cache_entry *previous_ce)
 {
 	int i;
 	unsigned long src_offset = start_offset;
@@ -1959,10 +1967,11 @@ static unsigned long load_cache_entry_block(struct index_state *istate,
 		unsigned long consumed;
 
 		disk_ce = (struct ondisk_cache_entry *)((char *)mmap + src_offset);
-		ce = create_from_disk(ce_mem_pool, disk_ce, &consumed, previous_name);
+		ce = create_from_disk(ce_mem_pool, istate->version, disk_ce, &consumed, previous_ce);
 		set_index_entry(istate, i, ce);
 		src_offset += consumed;
+		previous_ce = ce;
 	}
 	return src_offset - start_offset;
 }
@@ -1970,20 +1979,16 @@ static unsigned long load_cache_entry_block(struct index_state *istate,
 static unsigned long load_all_cache_entries(struct index_state *istate,
 			void *mmap, size_t mmap_size, unsigned long src_offset)
 {
-	struct strbuf previous_name_buf = STRBUF_INIT, *previous_name;
 	unsigned long consumed;
 
 	if (istate->version == 4) {
-		previous_name = &previous_name_buf;
 		mem_pool_init(&istate->ce_mem_pool, istate->cache_nr * (sizeof(struct cache_entry) + CACHE_ENTRY_PATH_LENGTH));
 	} else {
-		previous_name = NULL;
 		mem_pool_init(&istate->ce_mem_pool, estimate_cache_size(mmap_size, istate->cache_nr));
 	}
 
 	consumed = load_cache_entry_block(istate, istate->ce_mem_pool,
-					0, istate->cache_nr, mmap, src_offset, previous_name);
-	strbuf_release(&previous_name_buf);
+					0, istate->cache_nr, mmap, src_offset, NULL);
 	return consumed;
 }
@@ -2005,8 +2010,7 @@ struct load_cache_entries_thread_data
 	int offset, nr;
 	void *mmap;
 	unsigned long start_offset;
-	struct strbuf previous_name_buf;
-	struct strbuf *previous_name;
+	struct cache_entry *previous_ce;
 	unsigned long consumed;	/* return # of bytes in index file processed */
 };
 
@@ -2019,7 +2023,7 @@ static void *load_cache_entries_thread(void *_data)
 	struct load_cache_entries_thread_data *p = _data;
 
 	p->consumed += load_cache_entry_block(p->istate, p->ce_mem_pool,
-					p->offset, p->nr, p->mmap, p->start_offset, p->previous_name);
+					p->offset, p->nr, p->mmap, p->start_offset, p->previous_ce);
 	return NULL;
 }
 
@@ -2066,20 +2070,23 @@ static unsigned long load_cache_entries_threaded(int nr_threads, struct index_st
 		p->istate = istate;
 		p->offset = i;
 		p->nr = ce_per_thread < istate->cache_nr - i ? ce_per_thread : istate->cache_nr - i;
+		p->mmap = mmap;
+		p->start_offset = src_offset;
 
 		/* create a mem_pool for each thread */
-		if (istate->version == 4)
+		if (istate->version == 4) {
 			mem_pool_init(&p->ce_mem_pool,
 				estimate_cache_size_from_compressed(p->nr));
-		else
+
+			/* create a previous ce entry for this block of cache entries */
+			if (previous_name->len) {
+				p->previous_ce = mem_pool__ce_alloc(p->ce_mem_pool, previous_name->len);
+				p->previous_ce->ce_namelen = previous_name->len;
+				memcpy(p->previous_ce->name, previous_name->buf, previous_name->len);
+			}
+		} else {
 			mem_pool_init(&p->ce_mem_pool,
 				estimate_cache_size(mmap_size, p->nr));
-
-		p->mmap = mmap;
-		p->start_offset = src_offset;
-		if (previous_name) {
-			strbuf_addbuf(&p->previous_name_buf, previous_name);
-			p->previous_name = &p->previous_name_buf;
 		}
 
 		if (pthread_create(&p->pthread, NULL, load_cache_entries_thread, p))
@@ -2102,7 +2109,7 @@ static unsigned long load_cache_entries_threaded(int nr_threads, struct index_st
 		} else
 			name = ondisk->name;
 
-		if (!previous_name) {
+		if (istate->version != 4) {
 			size_t len;
 
 			/* v3 and earlier */
@@ -2121,7 +2128,6 @@ static unsigned long load_cache_entries_threaded(int nr_threads, struct index_st
 		if (pthread_join(p->pthread, NULL))
 			die("unable to join load_cache_entries_thread");
 		mem_pool_combine(istate->ce_mem_pool, p->ce_mem_pool);
-		strbuf_release(&p->previous_name_buf);
 		consumed += p->consumed;
 	}
 
-- 
2.18.0.windows.1