Optimize testing for a bit in an rbtree-based bitmap for the case
where the calling application is scanning through the bitmap
sequentially.  Previously, we optimized the case where the bits being
tested were inside an allocated extent, but we did not optimize the
case where a large number of bits after an allocated extent were not
in use.

    1111111111111110000000000000000000
        ^ optimized        ^ not optimized

In my tests of a roughly half-filled file system, the run time of
e2freefrag was halved, and the CPU time spent in userspace during
e2fsck's pass 5 was reduced by about 30%.

Signed-off-by: "Theodore Ts'o" <tytso@xxxxxxx>
---
 lib/ext2fs/blkmap64_rb.c | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)

diff --git a/lib/ext2fs/blkmap64_rb.c b/lib/ext2fs/blkmap64_rb.c
index a83f8ac..c9006f8 100644
--- a/lib/ext2fs/blkmap64_rb.c
+++ b/lib/ext2fs/blkmap64_rb.c
@@ -314,8 +314,8 @@ static errcode_t rb_resize_bmap(ext2fs_generic_bitmap bmap,
 inline static int
 rb_test_bit(struct ext2fs_rb_private *bp, __u64 bit)
 {
-	struct bmap_rb_extent *rcursor;
-	struct rb_node *parent = NULL;
+	struct bmap_rb_extent *rcursor, *next_ext;
+	struct rb_node *parent = NULL, *next;
 	struct rb_node **n = &bp->root.rb_node;
 	struct bmap_rb_extent *ext;
 
@@ -330,6 +330,18 @@ rb_test_bit(struct ext2fs_rb_private *bp, __u64 bit)
 		return 1;
 	}
 
+	next = ext2fs_rb_next(&rcursor->node);
+	if (next) {
+		next_ext = ext2fs_rb_entry(next, struct bmap_rb_extent, node);
+		if ((bit >= rcursor->start + rcursor->count) &&
+		    (bit < next_ext->start)) {
+#ifdef BMAP_STATS_OPS
+			bp->test_hit++;
+#endif
+			return 0;
+		}
+	}
+
 	rcursor = *bp->wcursor;
 	if (!rcursor)
 		goto search_tree;
-- 
1.7.12.rc0.22.gcdd159b
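
[Illustrative note, not part of the patch.]  Below is a minimal
standalone C sketch of the fast path the second hunk adds: if the bit
being tested falls in the gap between the end of the cached extent and
the start of the next extent, the bit is known to be clear and the
tree search can be skipped, which is what makes a sequential scan over
a long run of clear bits cheap.  The array-backed extent list, the
integer cursor, and the names extents/cursor/test_bit are hypothetical
stand-ins for the rbtree, rcursor, and the ext2fs_rb_next() /
ext2fs_rb_entry() helpers used in blkmap64_rb.c.

/*
 * Illustrative sketch only (not part of the patch).  A sorted array
 * and an integer cursor stand in for the rbtree and rcursor in
 * blkmap64_rb.c; all names here are made up for the example.
 */
#include <stdio.h>
#include <stdint.h>

struct extent {
	uint64_t start;
	uint64_t count;
};

/* Sorted, non-overlapping extents of set bits. */
static const struct extent extents[] = {
	{ 0, 15 },		/* bits 0..14 are set  */
	{ 40, 8 },		/* bits 40..47 are set */
};
static const int nr_extents = 2;

/* Index of the extent hit by the previous lookup (the "cursor"). */
static int cursor;

static int test_bit(uint64_t bit)
{
	const struct extent *cur = &extents[cursor];

	/* Fast path 1: bit inside the cached extent -> set. */
	if (bit >= cur->start && bit < cur->start + cur->count)
		return 1;

	/*
	 * Fast path 2 (the idea the patch adds): bit in the gap between
	 * the cached extent and the next one -> clear, no search needed.
	 */
	if (cursor + 1 < nr_extents) {
		const struct extent *next = &extents[cursor + 1];

		if (bit >= cur->start + cur->count && bit < next->start)
			return 0;
	}

	/* Slow path: linear scan standing in for the rbtree search. */
	for (int i = 0; i < nr_extents; i++) {
		if (bit >= extents[i].start &&
		    bit < extents[i].start + extents[i].count) {
			cursor = i;	/* remember the hit for next time */
			return 1;
		}
	}
	return 0;
}

int main(void)
{
	/* Sequential scan: bits 15..39 now hit fast path 2. */
	for (uint64_t bit = 0; bit < 50; bit++)
		printf("%llu:%d ", (unsigned long long) bit, test_bit(bit));
	printf("\n");
	return 0;
}

In a sequential scan of a sparsely used region, every clear bit after
the cached extent is answered by the gap check instead of a fresh
descent of the tree, which is where the e2freefrag and e2fsck pass 5
savings quoted above come from.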