[PATCH v2 3/5] fast-import: update pool_* functions to work on local pool

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Update memory pool functions to work on a passed-in mem_pool instead of
the global fi_mem_pool instance. This is in preparation for making the
memory pool logic reusable.

Signed-off-by: Jameson Miller <jamill@xxxxxxxxxxxxx>
---
 fast-import.c | 52 ++++++++++++++++++++++++++--------------------------
 1 file changed, 26 insertions(+), 26 deletions(-)

diff --git a/fast-import.c b/fast-import.c
index 1262d9e6be..519e1cbd7f 100644
--- a/fast-import.c
+++ b/fast-import.c
@@ -646,16 +646,16 @@ static unsigned int hc_str(const char *s, size_t len)
 	return r;
 }
 
-static struct mp_block *pool_alloc_block()
+static struct mp_block *mem_pool_alloc_block(struct mem_pool *mem_pool)
 {
 	struct mp_block *p;
 
-	fi_mem_pool.pool_alloc += sizeof(struct mp_block) + fi_mem_pool.block_alloc;
-	p = xmalloc(st_add(sizeof(struct mp_block), fi_mem_pool.block_alloc));
-	p->next_block = fi_mem_pool.mp_block;
+	mem_pool->pool_alloc += sizeof(struct mp_block) + mem_pool->block_alloc;
+	p = xmalloc(st_add(sizeof(struct mp_block), mem_pool->block_alloc));
+	p->next_block = mem_pool->mp_block;
 	p->next_free = (char *)p->space;
-	p->end = p->next_free + fi_mem_pool.block_alloc;
-	fi_mem_pool.mp_block = p;
+	p->end = p->next_free + mem_pool->block_alloc;
+	mem_pool->mp_block = p;
 
 	return p;
 }
@@ -676,21 +676,21 @@ static struct mp_block *pool_alloc_block()
  *     the end of the list so that it will be the last block searched
  *     for available space.
  */
-static struct mp_block *pool_alloc_block_with_size(size_t block_alloc)
+static struct mp_block *mem_pool_alloc_block_with_size(struct mem_pool *mem_pool, size_t block_alloc)
 {
 	struct mp_block *p, *block;
 
-	fi_mem_pool.pool_alloc += sizeof(struct mp_block) + block_alloc;
+	mem_pool->pool_alloc += sizeof(struct mp_block) + block_alloc;
 	p = xmalloc(st_add(sizeof(struct mp_block), block_alloc));
 
-	block = fi_mem_pool.mp_block;
+	block = mem_pool->mp_block;
 	if (block) {
 		while (block->next_block)
 			block = block->next_block;
 
 		block->next_block = p;
 	} else {
-		fi_mem_pool.mp_block = p;
+		mem_pool->mp_block = p;
 	}
 
 	p->next_block = NULL;
@@ -700,7 +700,7 @@ static struct mp_block *pool_alloc_block_with_size(size_t block_alloc)
 	return p;
 }
 
-static void *pool_alloc(size_t len)
+static void *mem_pool_alloc(struct mem_pool *mem_pool, size_t len)
 {
 	struct mp_block *p;
 	void *r;
@@ -709,7 +709,7 @@ static void *pool_alloc(size_t len)
 	if (len & (sizeof(uintmax_t) - 1))
 		len += sizeof(uintmax_t) - (len & (sizeof(uintmax_t) - 1));
 
-	p = fi_mem_pool.mp_block;
+	p = mem_pool->mp_block;
 
 	/*
 	 * In performance profiling, there was a minor perf benefit to
@@ -724,10 +724,10 @@ static void *pool_alloc(size_t len)
 	}
 
 	if (!p) {
-		if (len >= (fi_mem_pool.block_alloc / 2))
-			p = pool_alloc_block_with_size(len);
+		if (len >= (mem_pool->block_alloc / 2))
+			p = mem_pool_alloc_block_with_size(mem_pool, len);
 		else
-			p = pool_alloc_block();
+			p = mem_pool_alloc_block(mem_pool);
 	}
 
 	r = p->next_free;
@@ -735,10 +735,10 @@ static void *pool_alloc(size_t len)
 	return r;
 }
 
-static void *pool_calloc(size_t count, size_t size)
+static void *mem_pool_calloc(struct mem_pool *mem_pool, size_t count, size_t size)
 {
 	size_t len = st_mult(count, size);
-	void *r = pool_alloc(len);
+	void *r = mem_pool_alloc(mem_pool, len);
 	memset(r, 0, len);
 	return r;
 }
@@ -746,7 +746,7 @@ static void *pool_calloc(size_t count, size_t size)
 static char *pool_strdup(const char *s)
 {
 	size_t len = strlen(s) + 1;
-	char *r = pool_alloc(len);
+	char *r = mem_pool_alloc(&fi_mem_pool, len);
 	memcpy(r, s, len);
 	return r;
 }
@@ -755,7 +755,7 @@ static void insert_mark(uintmax_t idnum, struct object_entry *oe)
 {
 	struct mark_set *s = marks;
 	while ((idnum >> s->shift) >= 1024) {
-		s = pool_calloc(1, sizeof(struct mark_set));
+		s = mem_pool_calloc(&fi_mem_pool, 1, sizeof(struct mark_set));
 		s->shift = marks->shift + 10;
 		s->data.sets[0] = marks;
 		marks = s;
@@ -764,7 +764,7 @@ static void insert_mark(uintmax_t idnum, struct object_entry *oe)
 		uintmax_t i = idnum >> s->shift;
 		idnum -= i << s->shift;
 		if (!s->data.sets[i]) {
-			s->data.sets[i] = pool_calloc(1, sizeof(struct mark_set));
+			s->data.sets[i] = mem_pool_calloc(&fi_mem_pool, 1, sizeof(struct mark_set));
 			s->data.sets[i]->shift = s->shift - 10;
 		}
 		s = s->data.sets[i];
@@ -802,7 +802,7 @@ static struct atom_str *to_atom(const char *s, unsigned short len)
 		if (c->str_len == len && !strncmp(s, c->str_dat, len))
 			return c;
 
-	c = pool_alloc(sizeof(struct atom_str) + len + 1);
+	c = mem_pool_alloc(&fi_mem_pool, sizeof(struct atom_str) + len + 1);
 	c->str_len = len;
 	memcpy(c->str_dat, s, len);
 	c->str_dat[len] = 0;
@@ -833,7 +833,7 @@ static struct branch *new_branch(const char *name)
 	if (check_refname_format(name, REFNAME_ALLOW_ONELEVEL))
 		die("Branch name doesn't conform to GIT standards: %s", name);
 
-	b = pool_calloc(1, sizeof(struct branch));
+	b = mem_pool_calloc(&fi_mem_pool, 1, sizeof(struct branch));
 	b->name = pool_strdup(name);
 	b->table_next_branch = branch_table[hc];
 	b->branch_tree.versions[0].mode = S_IFDIR;
@@ -869,7 +869,7 @@ static struct tree_content *new_tree_content(unsigned int cnt)
 			avail_tree_table[hc] = f->next_avail;
 	} else {
 		cnt = cnt & 7 ? ((cnt / 8) + 1) * 8 : cnt;
-		f = pool_alloc(sizeof(*t) + sizeof(t->entries[0]) * cnt);
+		f = mem_pool_alloc(&fi_mem_pool, sizeof(*t) + sizeof(t->entries[0]) * cnt);
 		f->entry_capacity = cnt;
 	}
 
@@ -2932,7 +2932,7 @@ static void parse_new_tag(const char *arg)
 	enum object_type type;
 	const char *v;
 
-	t = pool_alloc(sizeof(struct tag));
+	t = mem_pool_alloc(&fi_mem_pool, sizeof(struct tag));
 	memset(t, 0, sizeof(struct tag));
 	t->name = pool_strdup(arg);
 	if (last_tag)
@@ -3531,12 +3531,12 @@ int cmd_main(int argc, const char **argv)
 	atom_table = xcalloc(atom_table_sz, sizeof(struct atom_str*));
 	branch_table = xcalloc(branch_table_sz, sizeof(struct branch*));
 	avail_tree_table = xcalloc(avail_tree_table_sz, sizeof(struct avail_tree_content*));
-	marks = pool_calloc(1, sizeof(struct mark_set));
+	marks = mem_pool_calloc(&fi_mem_pool, 1, sizeof(struct mark_set));
 
 	global_argc = argc;
 	global_argv = argv;
 
-	rc_free = pool_alloc(cmd_save * sizeof(*rc_free));
+	rc_free = mem_pool_alloc(&fi_mem_pool, cmd_save * sizeof(*rc_free));
 	for (i = 0; i < (cmd_save - 1); i++)
 		rc_free[i].next = &rc_free[i + 1];
 	rc_free[cmd_save - 1].next = NULL;
-- 
2.14.3




[Index of Archives]     [Linux Kernel Development]     [Gcc Help]     [IETF Annouce]     [DCCP]     [Netdev]     [Networking]     [Security]     [V4L]     [Bugtraq]     [Yosemite]     [MIPS Linux]     [ARM Linux]     [Linux Security]     [Linux RAID]     [Linux SCSI]     [Fedora Users]

  Powered by Linux