Introduce the mem_pool type and wrap the existing mp_block in this new
type. The new mem_pool type will allow the memory pool logic to be
reused outside of fast-import. This type will be moved into its own
file in a future commit.

Signed-off-by: Jameson Miller <jamill@xxxxxxxxxxxxx>
---
 fast-import.c | 108 +++++++++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 89 insertions(+), 19 deletions(-)

diff --git a/fast-import.c b/fast-import.c
index 6c3215d7c3..1262d9e6be 100644
--- a/fast-import.c
+++ b/fast-import.c
@@ -216,6 +216,19 @@ struct mp_block {
 	uintmax_t space[FLEX_ARRAY]; /* more */
 };
 
+struct mem_pool {
+	struct mp_block *mp_block;
+
+	/*
+	 * The amount of available memory to grow the pool by.
+	 * This size does not include the overhead for the mp_block.
+	 */
+	size_t block_alloc;
+
+	/* The total amount of memory allocated by the pool. */
+	size_t pool_alloc;
+};
+
 struct atom_str {
 	struct atom_str *next_atom;
 	unsigned short str_len;
@@ -304,9 +317,7 @@ static int global_argc;
 static const char **global_argv;
 
 /* Memory pools */
-static size_t mem_pool_alloc = 2*1024*1024 - sizeof(struct mp_block);
-static size_t total_allocd;
-static struct mp_block *mp_block_head;
+static struct mem_pool fi_mem_pool = {0, 2*1024*1024 - sizeof(struct mp_block), 0 };
 
 /* Atom management */
 static unsigned int atom_table_sz = 4451;
@@ -324,6 +335,7 @@ static off_t pack_size;
 /* Table of objects we've written. */
 static unsigned int object_entry_alloc = 5000;
 static struct object_entry_pool *blocks;
+static size_t total_allocd = 0;
 static struct object_entry *object_table[1 << 16];
 static struct mark_set *marks;
 static const char *export_marks_file;
@@ -634,6 +646,60 @@ static unsigned int hc_str(const char *s, size_t len)
 	return r;
 }
 
+static struct mp_block *pool_alloc_block(void)
+{
+	struct mp_block *p;
+
+	fi_mem_pool.pool_alloc += sizeof(struct mp_block) + fi_mem_pool.block_alloc;
+	p = xmalloc(st_add(sizeof(struct mp_block), fi_mem_pool.block_alloc));
+	p->next_block = fi_mem_pool.mp_block;
+	p->next_free = (char *)p->space;
+	p->end = p->next_free + fi_mem_pool.block_alloc;
+	fi_mem_pool.mp_block = p;
+
+	return p;
+}
+
+/*
+ * Allocates a block of memory with a specific size and
+ * appends it to the memory pool's list of blocks.
+ *
+ * This function is used to allocate blocks with sizes
+ * different than the default "block_alloc" size for the mem_pool.
+ *
+ * There are two use cases:
+ * 1) The initial block allocation for a memory pool.
+ *
+ * 2) "Large" blocks of a specific size, where the entire memory block
+ *    is going to be used. This means the block will not have any
+ *    available memory for future allocations. The block is placed at
+ *    the end of the list so that it will be the last block searched
+ *    for available space.
+ */
+static struct mp_block *pool_alloc_block_with_size(size_t block_alloc)
+{
+	struct mp_block *p, *block;
+
+	fi_mem_pool.pool_alloc += sizeof(struct mp_block) + block_alloc;
+	p = xmalloc(st_add(sizeof(struct mp_block), block_alloc));
+
+	block = fi_mem_pool.mp_block;
+	if (block) {
+		while (block->next_block)
+			block = block->next_block;
+
+		block->next_block = p;
+	} else {
+		fi_mem_pool.mp_block = p;
+	}
+
+	p->next_block = NULL;
+	p->next_free = (char *)p->space;
+	p->end = p->next_free + block_alloc;
+
+	return p;
+}
+
 static void *pool_alloc(size_t len)
 {
 	struct mp_block *p;
@@ -643,21 +709,25 @@ static void *pool_alloc(size_t len)
 	if (len & (sizeof(uintmax_t) - 1))
 		len += sizeof(uintmax_t) - (len & (sizeof(uintmax_t) - 1));
 
-	for (p = mp_block_head; p; p = p->next_block)
-		if ((p->end - p->next_free >= len))
-			break;
+	p = fi_mem_pool.mp_block;
+
+	/*
+	 * In performance profiling, there was a minor perf benefit to
+	 * checking for available memory in the head block via the if
+	 * statement, and only going through the loop when needed.
+	 */
+	if (p &&
+	    (p->end - p->next_free < len)) {
+		for (p = p->next_block; p; p = p->next_block)
+			if (p->end - p->next_free >= len)
+				break;
+	}
 
 	if (!p) {
-		if (len >= (mem_pool_alloc/2)) {
-			total_allocd += len;
-			return xmalloc(len);
-		}
-		total_allocd += sizeof(struct mp_block) + mem_pool_alloc;
-		p = xmalloc(st_add(sizeof(struct mp_block), mem_pool_alloc));
-		p->next_block = mp_block_head;
-		p->next_free = (char *) p->space;
-		p->end = p->next_free + mem_pool_alloc;
-		mp_block_head = p;
+		if (len >= (fi_mem_pool.block_alloc / 2))
+			p = pool_alloc_block_with_size(len);
+		else
+			p = pool_alloc_block();
 	}
 
 	r = p->next_free;
@@ -667,7 +737,7 @@ static void *pool_alloc(size_t len)
 static void *pool_calloc(size_t count, size_t size)
 {
-	size_t len = count * size;
+	size_t len = st_mult(count, size);
 	void *r = pool_alloc(len);
 	memset(r, 0, len);
 	return r;
 }
@@ -3541,8 +3611,8 @@ int cmd_main(int argc, const char **argv)
 		fprintf(stderr, "Total branches: %10lu (%10lu loads )\n", branch_count, branch_load_count);
 		fprintf(stderr, " marks: %10" PRIuMAX " (%10" PRIuMAX " unique )\n", (((uintmax_t)1) << marks->shift) * 1024, marks_set_count);
 		fprintf(stderr, " atoms: %10u\n", atom_cnt);
-		fprintf(stderr, "Memory total: %10" PRIuMAX " KiB\n", (total_allocd + alloc_count*sizeof(struct object_entry))/1024);
-		fprintf(stderr, " pools: %10lu KiB\n", (unsigned long)(total_allocd/1024));
+		fprintf(stderr, "Memory total: %10" PRIuMAX " KiB\n", (total_allocd + fi_mem_pool.pool_alloc + alloc_count*sizeof(struct object_entry))/1024);
+		fprintf(stderr, " pools: %10lu KiB\n", (unsigned long)((total_allocd + fi_mem_pool.pool_alloc) /1024));
 		fprintf(stderr, " objects: %10" PRIuMAX " KiB\n", (alloc_count*sizeof(struct object_entry))/1024);
 		fprintf(stderr, "---------------------------------------------------------------------\n");
 		pack_report();
-- 
2.14.3
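
P.S. Since the stated goal is to reuse the pool logic outside of fast-import,
here is a rough, self-contained sketch of the same technique as it might look
once the type lives in its own file. The demo_* names are made up for
illustration, and it uses plain malloc() with a NULL return on failure instead
of xmalloc()/st_add(), so it approximates the code above rather than being part
of this patch:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-ins for the mp_block/mem_pool pair introduced above. */
struct demo_block {
	struct demo_block *next_block;
	char *next_free;
	char *end;
	uintmax_t space[];		/* block memory follows the header */
};

struct demo_pool {
	struct demo_block *mp_block;	/* head of the block list */
	size_t block_alloc;		/* usable bytes added per new block */
	size_t pool_alloc;		/* total bytes requested so far */
};

/*
 * Hand out 'len' bytes from the pool, growing it by one block when no
 * existing block has room. Unlike the patch, this returns NULL on
 * allocation failure instead of dying.
 */
static void *demo_pool_alloc(struct demo_pool *pool, size_t len)
{
	struct demo_block *p;
	void *r;

	/* round up to uintmax_t alignment, as pool_alloc() does */
	if (len & (sizeof(uintmax_t) - 1))
		len += sizeof(uintmax_t) - (len & (sizeof(uintmax_t) - 1));

	for (p = pool->mp_block; p; p = p->next_block)
		if ((size_t)(p->end - p->next_free) >= len)
			break;

	if (!p) {
		size_t block = len > pool->block_alloc ? len : pool->block_alloc;

		p = malloc(sizeof(*p) + block);
		if (!p)
			return NULL;
		pool->pool_alloc += sizeof(*p) + block;
		p->next_block = pool->mp_block;
		p->next_free = (char *)p->space;
		p->end = p->next_free + block;
		pool->mp_block = p;
	}

	r = p->next_free;
	p->next_free += len;
	return r;
}

int main(void)
{
	struct demo_pool pool = { NULL, 1024, 0 };
	struct demo_block *p, *next;
	char *msg = demo_pool_alloc(&pool, 6);

	if (!msg)
		return 1;
	memcpy(msg, "hello", 6);
	printf("%s (pool requested %zu bytes)\n", msg, pool.pool_alloc);

	/* individual allocations are never freed; discard whole blocks */
	for (p = pool.mp_block; p; p = next) {
		next = p->next_block;
		free(p);
	}
	return 0;
}

The head-of-list insertion and the uintmax_t rounding mirror pool_alloc()
above; the point of the design is that callers free the pool as a whole, never
individual allocations.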