hashtable_index() appears to be a close duplicate of hash_obj().  Keep
only the latter and make it usable for all cases.  Also remove the
modulus, as this is an expensive operation.  The size argument is
always a power of 2 anyway, so a simple mask operation provides the
same result.

On a 'git rev-list --all --objects' run, this decreased the time spent
in lookup_object from 27.5% to 24.1%.

Signed-off-by: Nicolas Pitre <nico@xxxxxxxxxxx>
---

I discovered this patch in my git work tree, dating from 2 years ago.

diff --git a/object.c b/object.c
index d8a4b1f..e2dae22 100644
--- a/object.c
+++ b/object.c
@@ -43,16 +43,16 @@ int type_from_string(const char *str)
 	die("invalid object type \"%s\"", str);
 }
 
-static unsigned int hash_obj(struct object *obj, unsigned int n)
+static unsigned int hash_obj(const unsigned char *sha1, unsigned int n)
 {
 	unsigned int hash;
-	memcpy(&hash, obj->sha1, sizeof(unsigned int));
-	return hash % n;
+	memcpy(&hash, sha1, sizeof(unsigned int));
+	return hash & (n - 1);
 }
 
 static void insert_obj_hash(struct object *obj, struct object **hash, unsigned int size)
 {
-	unsigned int j = hash_obj(obj, size);
+	unsigned int j = hash_obj(obj->sha1, size);
 
 	while (hash[j]) {
 		j++;
@@ -62,13 +62,6 @@ static void insert_obj_hash(struct object *obj, struct object **hash, unsigned i
 	hash[j] = obj;
 }
 
-static unsigned int hashtable_index(const unsigned char *sha1)
-{
-	unsigned int i;
-	memcpy(&i, sha1, sizeof(unsigned int));
-	return i % obj_hash_size;
-}
-
 struct object *lookup_object(const unsigned char *sha1)
 {
 	unsigned int i, first;
@@ -77,7 +70,7 @@ struct object *lookup_object(const unsigned char *sha1)
 	if (!obj_hash)
 		return NULL;
 
-	first = i = hashtable_index(sha1);
+	first = i = hash_obj(sha1, obj_hash_size);
 	while ((obj = obj_hash[i]) != NULL) {
 		if (!hashcmp(sha1, obj->sha1))
 			break;
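
[Note: not part of the patch.  A minimal standalone sketch, assuming
nothing beyond standard C, that checks the claim above: for any table
size n that is a power of 2, n - 1 has all the low bits set, so
hash & (n - 1) selects exactly the same slot as hash % n for every
unsigned hash value.  The loop bounds are arbitrary.]

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int n, hash;

	/* every power-of-2 table size up to 2^20, over a range of hashes */
	for (n = 1; n <= (1U << 20); n <<= 1)
		for (hash = 0; hash < 4096; hash++)
			assert((hash % n) == (hash & (n - 1)));

	printf("hash %% n == hash & (n - 1) for all power-of-2 n\n");
	return 0;
}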