[Toy PATCH] Avoid spilling collided entries in object hash table to the next slots

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



If a position in the object hash table is taken, we currently probe
the next one. This could potentially create long collision chains. We
could create linked lists instead and leave the next slot alone.

This patch relies on the fact that pointers are usually aligned to
more than one byte, and it uses the lowest bit as an "object vs
linked list" indicator. Architectures that do not support this can
fall back to the original implementation.

The saving is real, although not groundbreaking. I'm just not sure it
is worth the ugliness that comes with this patch. We could, however,
avoid the alignment problem by saving that bit in a separate bitmap.
"git rev-list --objects --all" on linux-2.6.git:

        before       after
real    0m33.407s    0m31.732s
user    0m32.926s    0m31.460s
sys     0m0.407s     0m0.202s

lookup_object() goes down from 30% to 27% in perf reports.

Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@xxxxxxxxx>
---
 object.c | 42 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 42 insertions(+)

diff --git a/object.c b/object.c
index bcfd2c6..7b013a0 100644
--- a/object.c
+++ b/object.c
@@ -8,6 +8,17 @@
 static struct object **obj_hash;
 static int nr_objs, obj_hash_size;
 
+#ifdef USE_LINKED_LIST
+struct obj_list {
+	struct object *obj;
+	struct obj_list *next;
+};
+
+#define IS_LST(x) ((intptr_t)(x) & 1)
+#define MAKE_LST(x) (struct object *)((intptr_t)(x) | 1)
+#define GET_LST(x) (struct obj_list *)((intptr_t)(x) & ~1)
+#endif
+
 unsigned int get_max_object_index(void)
 {
 	return obj_hash_size;
@@ -53,13 +64,30 @@ static unsigned int hash_obj(struct object *obj, unsigned int n)
 static void insert_obj_hash(struct object *obj, struct object **hash, unsigned int size)
 {
 	unsigned int j = hash_obj(obj, size);
+#ifdef USE_LINKED_LIST
+	struct obj_list *lst, *o;
+
+	if (!hash[j]) {
+		hash[j] = obj;
+		return;
+	}
 
+	o = xmalloc(sizeof(*o));
+	o->obj = obj;
+
+	if (IS_LST(hash[j]))
+		o->next = GET_LST(hash[j]);
+	else
+		o->next = NULL;
+	hash[j] = MAKE_LST(o);
+#else
 	while (hash[j]) {
 		j++;
 		if (j >= size)
 			j = 0;
 	}
 	hash[j] = obj;
+#endif
 }
 
 static unsigned int hashtable_index(const unsigned char *sha1)
@@ -78,6 +106,19 @@ struct object *lookup_object(const unsigned char *sha1)
 		return NULL;
 
 	i = hashtable_index(sha1);
+#ifdef USE_LINKED_LIST
+	if (IS_LST(obj_hash[i])) {
+		struct obj_list *lst;
+		for (lst = GET_LST(obj_hash[i]); lst; lst = lst->next) {
+			if (!hashcmp(lst->obj->sha1, sha1))
+				return lst->obj;
+		}
+		return NULL;
+	} else {
+		struct object *obj = obj_hash[i];
+		return obj && !hashcmp(sha1, obj->sha1) ? obj : NULL;
+	}
+#else
 	while ((obj = obj_hash[i]) != NULL) {
 		if (!hashcmp(sha1, obj->sha1))
 			break;
@@ -86,6 +127,7 @@ struct object *lookup_object(const unsigned char *sha1)
 			i = 0;
 	}
 	return obj;
+#endif
 }
 
 void grow_object_hash_to(unsigned long nr)
-- 
1.8.2.83.gc99314b

--
To unsubscribe from this list: send the line "unsubscribe git" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html




[Index of Archives]     [Linux Kernel Development]     [Gcc Help]     [IETF Annouce]     [DCCP]     [Netdev]     [Networking]     [Security]     [V4L]     [Bugtraq]     [Yosemite]     [MIPS Linux]     [ARM Linux]     [Linux Security]     [Linux RAID]     [Linux SCSI]     [Fedora Users]