On Fri, Feb 13 2015 at 8:27P -0500,
Mikulas Patocka <mpatocka@xxxxxxxxxx> wrote:

> Write requests are sorted in a red-black tree structure and are submitted
> in the sorted order.
>
> In theory the sorting should be performed by the underlying disk scheduler;
> however, in practice the disk scheduler accepts and sorts only 128 requests.
> In order to sort more requests, we need to implement our own sorting.
>
> In testing, it was shown that this patch slightly increases performance in
> some situations.
>
> Signed-off-by: Mikulas Patocka <mpatocka@xxxxxxxxxx>

FYI, I've folded this patch in to clean up rb_tree node access:

diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 954ba1f..e6a1460 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1175,9 +1175,13 @@ static void kcryptd_io_write(struct dm_crypt_io *io)
 	generic_make_request(clone);
 }
 
+#define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
+
 static int dmcrypt_write(void *data)
 {
 	struct crypt_config *cc = data;
+	struct dm_crypt_io *io;
+
 	while (1) {
 		struct rb_root write_tree;
 		struct blk_plug plug;
@@ -1221,8 +1225,7 @@ pop_from_list:
 		 */
 		blk_start_plug(&plug);
 		do {
-			struct dm_crypt_io *io = rb_entry(rb_first(&write_tree),
-							  struct dm_crypt_io, rb_node);
+			io = crypt_io_from_node(rb_first(&write_tree));
 			rb_erase(&io->rb_node, &write_tree);
 			kcryptd_io_write(io);
 		} while (!RB_EMPTY_ROOT(&write_tree));
@@ -1237,7 +1240,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
 	struct crypt_config *cc = io->cc;
 	unsigned long flags;
 	sector_t sector;
-	struct rb_node **p, *parent;
+	struct rb_node **rbp, *parent;
 
 	if (unlikely(io->error < 0)) {
 		crypt_free_buffer_pages(cc, clone);
@@ -1252,19 +1255,17 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
 	clone->bi_iter.bi_sector = cc->start + io->sector;
 
 	spin_lock_irqsave(&cc->write_thread_wait.lock, flags);
-	p = &cc->write_tree.rb_node;
+	rbp = &cc->write_tree.rb_node;
 	parent = NULL;
 	sector = io->sector;
-	while (*p) {
-		parent = *p;
-#define io_node rb_entry(parent, struct dm_crypt_io, rb_node)
-		if (sector < io_node->sector)
-			p = &io_node->rb_node.rb_left;
+	while (*rbp) {
+		parent = *rbp;
+		if (sector < crypt_io_from_node(parent)->sector)
+			rbp = &(*rbp)->rb_left;
 		else
-			p = &io_node->rb_node.rb_right;
-#undef io_node
+			rbp = &(*rbp)->rb_right;
 	}
-	rb_link_node(&io->rb_node, parent, p);
+	rb_link_node(&io->rb_node, parent, rbp);
 	rb_insert_color(&io->rb_node, &cc->write_tree);
 
 	wake_up_locked(&cc->write_thread_wait);
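
As an aside, here is a minimal userspace sketch of the pattern the patch
relies on: each pending write is inserted into a tree keyed by its starting
sector (the while (*rbp) walk above), and the writer thread then repeatedly
detaches the leftmost node, so writes reach the device in ascending sector
order. A plain unbalanced binary search tree stands in for the kernel's
self-balancing rb_tree (rb_link_node()/rb_insert_color()/rb_first()/
rb_erase()); struct pending_write, insert_sorted() and pop_first() below are
hypothetical names, not anything in dm-crypt.

/*
 * Userspace sketch of the insert-by-sector / drain-in-order pattern used by
 * kcryptd_crypt_write_io_submit() and dmcrypt_write().  A plain unbalanced
 * binary search tree stands in for the kernel's self-balancing rb_tree;
 * all names here are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct pending_write {
	uint64_t sector;		/* key: starting sector of the write */
	struct pending_write *left;
	struct pending_write *right;
};

/* Mirror of the while (*rbp) walk: descend by sector, duplicates go right. */
static void insert_sorted(struct pending_write **root, struct pending_write *io)
{
	struct pending_write **p = root;

	while (*p)
		p = (io->sector < (*p)->sector) ? &(*p)->left : &(*p)->right;
	*p = io;
}

/* Analogue of rb_first() + rb_erase(): detach and return the lowest sector. */
static struct pending_write *pop_first(struct pending_write **root)
{
	struct pending_write **p = root;
	struct pending_write *io;

	if (!*p)
		return NULL;
	while ((*p)->left)
		p = &(*p)->left;
	io = *p;
	*p = io->right;		/* splice out the leftmost node */
	return io;
}

int main(void)
{
	static const uint64_t sectors[] = { 4096, 8, 1024, 8192, 512 };
	struct pending_write *tree = NULL;
	struct pending_write *io;
	size_t i;

	/* Queue writes in arbitrary arrival order... */
	for (i = 0; i < sizeof(sectors) / sizeof(sectors[0]); i++) {
		io = calloc(1, sizeof(*io));
		if (!io)
			return 1;
		io->sector = sectors[i];
		insert_sorted(&tree, io);
	}

	/* ...then "submit" them lowest sector first, as the writer thread does. */
	while ((io = pop_first(&tree))) {
		printf("submit write at sector %llu\n",
		       (unsigned long long)io->sector);
		free(io);
	}
	return 0;
}

In the real code the insertion runs under cc->write_thread_wait.lock and
dmcrypt_write() drains a local write_tree snapshot inside a blk_plug, so the
sorting cost is paid at queue time and submission stays a simple
leftmost-first walk.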