This patch adds benchmarks for comparing the performance of hashmap
lookups without the bloom filter vs. hashmap lookups with the bloom
filter.

Checking the bloom filter first for whether the element exists should
overall enable a higher throughput for hashmap lookups: if the element
does not exist in the bloom filter, we can avoid the costlier lookup in
the hashmap altogether.

The benchmark results (running on 8 threads on a machine with one NUMA
node, and taking the average of 3 runs) were as follows:

1 hash function -
	10k entries: 15% slower
	50k entries: 25% faster
	100k entries: 30% faster
	500k entries: 50% faster
	1 million entries: 55% faster
	5 million entries: 40% faster

2 hash functions -
	10k entries: 20% slower
	50k entries: 40% faster
	100k entries: 50% faster
	500k entries: 90% faster
	1 million entries: 115% faster
	5 million entries: 70% faster

3 hash functions -
	10k entries: 5% slower
	50k entries: 15% faster
	100k entries: 25% faster
	500k entries: 180% faster
	1 million entries: 200% faster
	5 million entries: 140% faster

4 hash functions -
	10k entries: 20% slower
	50k entries: 50% faster
	100k entries: 60% faster
	500k entries: 128% faster
	1 million entries: 150% faster
	5 million entries: 105% faster

5 hash functions -
	10k entries: 10% faster
	50k entries: 25% faster
	100k entries: 50% faster
	500k entries: 100% faster
	1 million entries: 120% faster
	5 million entries: 170% faster

6 hash functions -
	10k entries: 1.5% faster
	50k entries: 20% faster
	100k entries: 25% faster
	500k entries: 165% faster
	1 million entries: 170% faster
	5 million entries: 140% faster

7 hash functions -
	10k entries: 7% slower
	50k entries: 5% faster
	100k entries: 15% faster
	500k entries: 145% faster
	1 million entries: 155% faster
	5 million entries: 120% faster

8 hash functions -
	10k entries: 15% slower
	50k entries: 50% faster
	100k entries: 60% faster
	500k entries: 1305% faster
	1 million entries: 135% faster
	5 million entries: 105% faster

9 hash functions -
	10k entries: 25% slower
	50k entries: 35% faster
	100k entries: 60% faster
	500k entries: 105% faster
	1 million entries: 130% faster
	5 million entries: 90% faster

10 hash functions -
	10k entries: 10% faster
	50k entries: 30% faster
	100k entries: 45% faster
	500k entries: 90% faster
	1 million entries: 105% faster
	5 million entries: 140% faster

The benchmark comparisons indicate that, on average, using 3 hash
functions is one of the best-performing choices.
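For context, the lookup pattern being benchmarked boils down to the
sketch below. This is an illustration only, not the BPF program shipped
with this series: the map definitions, sizes, attach point, and counter
names are assumptions, and only prog_bloom_filter_hashmap_lookup and
hashmap_use_bloom_filter correspond to names the benchmark userspace
code references. It also assumes the bloom filter map is queried with
bpf_map_peek_elem(), which returns non-zero when the element is
definitely not present.

/* Illustrative sketch only -- names and sizes are assumptions. */
#include <linux/bpf.h>
#include <stdbool.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_BLOOM_FILTER);
	__uint(max_entries, 1000);
	__type(value, __u64);
} bloom_filter_map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1000);
	__type(key, __u64);
	__type(value, __u64);
} hashmap SEC(".maps");

bool hashmap_use_bloom_filter = true;

long hits = 0;
long drops = 0;

SEC("fentry/__x64_sys_getpgid")
int prog_bloom_filter_hashmap_lookup(void *ctx)
{
	__u64 key = bpf_get_prandom_u32();

	/* If the bloom filter reports the key as definitely absent,
	 * count a drop and skip the hashmap lookup entirely.
	 */
	if (hashmap_use_bloom_filter &&
	    bpf_map_peek_elem(&bloom_filter_map, &key) != 0) {
		__sync_add_and_fetch(&drops, 1);
		return 0;
	}

	if (bpf_map_lookup_elem(&hashmap, &key))
		__sync_add_and_fetch(&hits, 1);
	else
		__sync_add_and_fetch(&drops, 1);

	return 0;
}

char _license[] SEC("license") = "GPL";

Since a negative bloom filter check costs only the configured number of
hash computations, every lookup for a non-existent element that the
filter catches skips the hashmap walk entirely, which is where the
throughput gains above come from.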
Signed-off-by: Joanne Koong <joannekoong@xxxxxx>
---
 tools/testing/selftests/bpf/bench.c           | 22 ++++++++---
 .../bpf/benchs/bench_bloom_filter_map.c       | 39 +++++++++++++++++++
 .../bpf/benchs/run_bench_bloom_filter_map.sh  | 15 +++++++
 .../selftests/bpf/benchs/run_common.sh        | 12 ++++++
 4 files changed, 83 insertions(+), 5 deletions(-)

diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c
index 0bcbdb4405a3..7da1589a9fe0 100644
--- a/tools/testing/selftests/bpf/bench.c
+++ b/tools/testing/selftests/bpf/bench.c
@@ -92,20 +92,21 @@ void hits_drops_report_progress(int iter, struct bench_res *res, long delta_ns)
 	printf("Iter %3d (%7.3lfus): ",
 	       iter, (delta_ns - 1000000000) / 1000.0);
 
-	printf("hits %8.3lfM/s (%7.3lfM/prod), drops %8.3lfM/s\n",
-	       hits_per_sec, hits_per_prod, drops_per_sec);
+	printf("hits %8.3lfM/s (%7.3lfM/prod), drops %8.3lfM/s, total operations %8.3lfM/s\n",
+	       hits_per_sec, hits_per_prod, drops_per_sec, hits_per_sec + drops_per_sec);
 }
 
 void hits_drops_report_final(struct bench_res res[], int res_cnt)
 {
 	int i;
-	double hits_mean = 0.0, drops_mean = 0.0;
-	double hits_stddev = 0.0, drops_stddev = 0.0;
+	double hits_mean = 0.0, drops_mean = 0.0, total_ops_mean = 0.0;
+	double hits_stddev = 0.0, drops_stddev = 0.0, total_ops_stddev = 0.0;
 
 	for (i = 0; i < res_cnt; i++) {
 		hits_mean += res[i].hits / 1000000.0 / (0.0 + res_cnt);
 		drops_mean += res[i].drops / 1000000.0 / (0.0 + res_cnt);
 	}
+	total_ops_mean = hits_mean + drops_mean;
 
 	if (res_cnt > 1) {
 		for (i = 0; i < res_cnt; i++) {
@@ -115,14 +116,21 @@ void hits_drops_report_final(struct bench_res res[], int res_cnt)
 			drops_stddev += (drops_mean - res[i].drops / 1000000.0) *
 					(drops_mean - res[i].drops / 1000000.0) /
 					(res_cnt - 1.0);
+			total_ops_stddev += (total_ops_mean -
+					(res[i].hits + res[i].drops) / 1000000.0) *
+					(total_ops_mean - (res[i].hits + res[i].drops) / 1000000.0)
+					/ (res_cnt - 1.0);
 		}
 		hits_stddev = sqrt(hits_stddev);
 		drops_stddev = sqrt(drops_stddev);
+		total_ops_stddev = sqrt(total_ops_stddev);
 	}
 	printf("Summary: hits %8.3lf \u00B1 %5.3lfM/s (%7.3lfM/prod), ",
 	       hits_mean, hits_stddev, hits_mean / env.producer_cnt);
-	printf("drops %8.3lf \u00B1 %5.3lfM/s\n",
+	printf("drops %8.3lf \u00B1 %5.3lfM/s, ",
 	       drops_mean, drops_stddev);
+	printf("total operations %8.3lf \u00B1 %5.3lfM/s\n",
+	       total_ops_mean, total_ops_stddev);
 }
 
 const char *argp_program_version = "benchmark";
@@ -356,6 +364,8 @@ extern const struct bench bench_pb_libbpf;
 extern const struct bench bench_pb_custom;
 extern const struct bench bench_bloom_filter_map;
 extern const struct bench bench_bloom_filter_false_positive;
+extern const struct bench bench_hashmap_without_bloom_filter;
+extern const struct bench bench_hashmap_with_bloom_filter;
 
 static const struct bench *benchs[] = {
 	&bench_count_global,
@@ -379,6 +389,8 @@ static const struct bench *benchs[] = {
 	&bench_pb_custom,
 	&bench_bloom_filter_map,
 	&bench_bloom_filter_false_positive,
+	&bench_hashmap_without_bloom_filter,
+	&bench_hashmap_with_bloom_filter,
 };
 
 static void setup_benchmark()
diff --git a/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c b/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c
index dadca2a6e900..39b64116dfc3 100644
--- a/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c
+++ b/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c
@@ -241,6 +241,23 @@ static void hashmap_lookup_setup(void)
 	}
 }
 
+static void hashmap_no_bloom_filter_setup(void)
+{
+	struct bpf_link *link;
+
+	ctx.skel = setup_skeleton();
+
+	ctx.skel->data->hashmap_use_bloom_filter = false;
+
+	populate_maps();
+
+	link = bpf_program__attach(ctx.skel->progs.prog_bloom_filter_hashmap_lookup);
+	if (!link) {
+		fprintf(stderr, "failed to attach program!\n");
+		exit(1);
+	}
+}
+
 static void measure(struct bench_res *res)
 {
 	long total_hits = 0, total_drops = 0, total_false_hits = 0;
@@ -342,3 +359,25 @@ const struct bench bench_bloom_filter_false_positive = {
 	.report_progress = false_hits_report_progress,
 	.report_final = false_hits_report_final,
 };
+
+const struct bench bench_hashmap_without_bloom_filter = {
+	.name = "hashmap-without-bloom-filter",
+	.validate = validate,
+	.setup = hashmap_no_bloom_filter_setup,
+	.producer_thread = producer,
+	.consumer_thread = consumer,
+	.measure = measure,
+	.report_progress = hits_drops_report_progress,
+	.report_final = hits_drops_report_final,
+};
+
+const struct bench bench_hashmap_with_bloom_filter = {
+	.name = "hashmap-with-bloom-filter",
+	.validate = validate,
+	.setup = hashmap_lookup_setup,
+	.producer_thread = producer,
+	.consumer_thread = consumer,
+	.measure = measure,
+	.report_progress = hits_drops_report_progress,
+	.report_final = hits_drops_report_final,
+};
diff --git a/tools/testing/selftests/bpf/benchs/run_bench_bloom_filter_map.sh b/tools/testing/selftests/bpf/benchs/run_bench_bloom_filter_map.sh
index 8f2de6e39313..53c14da00a3b 100755
--- a/tools/testing/selftests/bpf/benchs/run_bench_bloom_filter_map.sh
+++ b/tools/testing/selftests/bpf/benchs/run_bench_bloom_filter_map.sh
@@ -26,3 +26,18 @@ header "Bloom filter map, multi-producer contention"
 for t in 1 2 3 4 8 12 16 20 24 28 32 36 40 44 48 52; do
 	summarize "$t threads - " "$($RUN_BENCH -p $t bloom-filter-map)"
 done
+
+header "Hashmap without bloom filter vs. hashmap with bloom filter (throughput, 8 threads)"
+for h in {1..10}; do
+subtitle "# hashes: $h"
+	for e in 10000 50000 75000 100000 250000 500000 750000 1000000 2500000 5000000; do
+		printf "%'d entries -\n" $e
+		printf "\t"
+		summarize_total "Hashmap without bloom filter: " \
+			"$($RUN_BENCH --nr_hashes $h --nr_entries $e -p 8 hashmap-without-bloom-filter)"
+		printf "\t"
+		summarize_total "Hashmap with bloom filter: " \
+			"$($RUN_BENCH --nr_hashes $h --nr_entries $e -p 8 hashmap-with-bloom-filter)"
+	done
+	printf "\n"
+done
diff --git a/tools/testing/selftests/bpf/benchs/run_common.sh b/tools/testing/selftests/bpf/benchs/run_common.sh
index 670f23b037c4..9a16be78b180 100644
--- a/tools/testing/selftests/bpf/benchs/run_common.sh
+++ b/tools/testing/selftests/bpf/benchs/run_common.sh
@@ -33,6 +33,11 @@ function percentage()
 {
 	echo "$*" | sed -E "s/.*Percentage\s=\s+([0-9]+\.[0-9]+).*/\1/"
 }
+function total()
+{
+	echo "$*" | sed -E "s/.*total operations\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+M\/s).*/\1/"
+}
+
 function summarize()
 {
 	bench="$1"
@@ -46,3 +51,10 @@ function summarize_percentage()
 {
 	summary=$(echo $2 | tail -n1)
 	printf "%-20s %s%%\n" "$bench" "$(percentage $summary)"
 }
+
+function summarize_total()
+{
+	bench="$1"
+	summary=$(echo $2 | tail -n1)
+	printf "%-20s %s\n" "$bench" "$(total $summary)"
+}
--
2.30.2