Re: [PATCH 2/2] fsck: use oidset for skiplist

On 11.08.2018 at 22:59, René Scharfe wrote:
> If the current oidset implementation is so bad, why not replace it with
> one based on oid_array? ;-)
> 
> Intuitively I'd try a hashmap with no payload and open addressing via
> sha1hash(), which should reduce memory allocations quite a bit -- no
> need to store hash codes and next pointers, only an array of object IDs
> with a fill rate of 50% or so.  Deletions are a bit awkward with that
> scheme, though; they could perhaps be implemented as insertions into a
> second hashmap.

Here's roughly what I had in mind, only with a free/occupied bitmap (or
a one-bit payload, if you will).  I tried a variant that encoded empty
slots as null_oid first, which has lower memory usage, but isn't any
faster than the current code.

# in git.git
$ hyperfine "./git-cat-file --batch-all-objects --buffer --unordered --batch-check='%(objectname)'"

Before:
Benchmark #1: ./git-cat-file --batch-all-objects --buffer --unordered --batch-check='%(objectname)'
  Time (mean ± σ):     269.5 ms ±  26.7 ms    [User: 247.7 ms, System: 21.4 ms]
  Range (min … max):   240.3 ms … 339.3 ms

After:
Benchmark #1: ./git-cat-file --batch-all-objects --buffer --unordered --batch-check='%(objectname)'
  Time (mean ± σ):     224.2 ms ±  18.2 ms    [User: 201.7 ms, System: 22.1 ms]
  Range (min … max):   205.0 ms … 259.0 ms

So that's only slightly faster. :-|

---
 builtin/cat-file.c | 93 +++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 88 insertions(+), 5 deletions(-)

diff --git a/builtin/cat-file.c b/builtin/cat-file.c
index 45992c9be9..b197cca861 100644
--- a/builtin/cat-file.c
+++ b/builtin/cat-file.c
@@ -408,10 +408,93 @@ static void batch_one_object(const char *obj_name, struct batch_options *opt,
 	batch_object_write(obj_name, opt, data);
 }
 
+struct oidset2 {
+	size_t nr, alloc;
+	struct object_id *entries;
+	uint32_t *bitmap;
+};
+
+static int is_bit_set(const uint32_t *bitmap, size_t idx)
+{
+	uint32_t mask = 1 << (idx % bitsizeof(bitmap[0]));
+	return bitmap[idx / bitsizeof(bitmap[0])] & mask;
+}
+
+static void set_bit(uint32_t *bitmap, size_t idx)
+{
+	uint32_t mask = 1 << (idx % bitsizeof(bitmap[0]));
+	bitmap[idx / bitsizeof(bitmap[0])] |= mask;
+}
+
+static void oidset2_add(struct oidset2 *set, const struct object_id *oid)
+{
+	size_t idx;
+
+	for (idx = sha1hash(oid->hash) % set->alloc;;) {
+		if (!is_bit_set(set->bitmap, idx))
+			break;
+		if (!oidcmp(&set->entries[idx], oid))
+			return;
+		if (++idx >= set->alloc)
+			idx = 0;
+	}
+	oidcpy(&set->entries[idx], oid);
+	set_bit(set->bitmap, idx);
+	set->nr++;
+}
+
+static void oidset2_grow(struct oidset2 *set)
+{
+	struct oidset2 old_set = *set;
+	size_t idx;
+
+	set->alloc = (old_set.alloc + 1000) * 3 / 2;
+	set->nr = 0;
+	set->entries = xcalloc(set->alloc, sizeof(set->entries[0]));
+	set->bitmap = xcalloc(set->alloc / 32 + 1, sizeof(set->bitmap[0]));
+	for (idx = 0; idx < old_set.alloc; idx++) {
+		if (!is_bit_set(old_set.bitmap, idx))
+			continue;
+		oidset2_add(set, &old_set.entries[idx]);
+	}
+	free(old_set.entries);
+	free(old_set.bitmap);
+}
+
+static void oidset2_insert(struct oidset2 *set, const struct object_id *oid)
+{
+	if (set->nr + 1 > set->alloc * 2 / 3)
+		oidset2_grow(set);
+	oidset2_add(set, oid);
+}
+
+static int oidset2_contains(struct oidset2 *set, const struct object_id *oid)
+{
+	size_t idx;
+
+	if (!set->nr)
+		return 0;
+	for (idx = sha1hash(oid->hash) % set->alloc;;) {
+		if (!is_bit_set(set->bitmap, idx))
+			return 0;
+		if (!oidcmp(&set->entries[idx], oid))
+			return 1;
+		if (++idx >= set->alloc)
+			idx = 0;
+	}
+}
+
+static void oidset2_clear(struct oidset2 *set)
+{
+	FREE_AND_NULL(set->entries);
+	FREE_AND_NULL(set->bitmap);
+	set->alloc = set->nr = 0;
+}
+
 struct object_cb_data {
 	struct batch_options *opt;
 	struct expand_data *expand;
-	struct oidset *seen;
+	struct oidset2 *seen;
 };
 
 static int batch_object_cb(const struct object_id *oid, void *vdata)
@@ -443,9 +526,9 @@ static int batch_unordered_object(const struct object_id *oid, void *vdata)
 {
 	struct object_cb_data *data = vdata;
 
-	if (oidset_contains(data->seen, oid))
+	if (oidset2_contains(data->seen, oid))
 		return 0;
-	oidset_insert(data->seen, oid);
+	oidset2_insert(data->seen, oid);
 
 	return batch_object_cb(oid, data);
 }
@@ -510,7 +593,7 @@ static int batch_objects(struct batch_options *opt)
 		cb.expand = &data;
 
 		if (opt->unordered) {
-			struct oidset seen = OIDSET_INIT;
+			struct oidset2 seen = { 0 };
 
 			cb.seen = &seen;
 
@@ -518,7 +601,7 @@ static int batch_objects(struct batch_options *opt)
 			for_each_packed_object(batch_unordered_packed, &cb,
 					       FOR_EACH_OBJECT_PACK_ORDER);
 
-			oidset_clear(&seen);
+			oidset2_clear(&seen);
 		} else {
 			struct oid_array sa = OID_ARRAY_INIT;
--
2.18.0


