Add an optional flag that ensures dm_bufio_get does not sleep. This
allows the dm-bufio cache to be queried from softirq (tasklet) context.

To ensure that dm-bufio does not sleep, dm-bufio must use a spinlock
instead of a mutex. Because the lock may be taken from a tasklet, the
spinlock is acquired with spin_lock_bh() so that a softirq cannot
deadlock against a holder on the same CPU; the saved-flags variants are
not needed (and per-client saved flags would race between CPUs).
Additionally, to avoid deadlocks, special care must be taken so that
dm-bufio does not sleep while holding the spinlock.

DM_BUFIO_GET_CANT_SLEEP is useful in some contexts, such as dm-verity,
so that we can query the dm-bufio cache in a tasklet. If the required
data is cached, processing can be handled immediately in the tasklet
instead of waiting for a work-queue job to be scheduled. This can reduce
latency when there is high CPU load and memory pressure.

Signed-off-by: Nathan Huckleberry <nhuck@xxxxxxxxxx>
---
 drivers/md/dm-bufio.c    | 25 +++++++++++++++++++++----
 include/linux/dm-bufio.h |  5 +++++
 2 files changed, 26 insertions(+), 4 deletions(-)

diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index ad5603eb12e3..3edeca7cfca6 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -81,6 +81,7 @@
  */
 struct dm_bufio_client {
 	struct mutex lock;
+	spinlock_t spinlock;
 
 	struct list_head lru[LIST_SIZE];
 	unsigned long n_buffers[LIST_SIZE];
@@ -90,6 +91,7 @@ struct dm_bufio_client {
 	s8 sectors_per_block_bits;
 	void (*alloc_callback)(struct dm_buffer *);
 	void (*write_callback)(struct dm_buffer *);
+	bool may_sleep;
 
 	struct kmem_cache *slab_buffer;
 	struct kmem_cache *slab_cache;
@@ -167,17 +169,26 @@ struct dm_buffer {
 
 static void dm_bufio_lock(struct dm_bufio_client *c)
 {
-	mutex_lock_nested(&c->lock, dm_bufio_in_request());
+	if (c->may_sleep)
+		mutex_lock_nested(&c->lock, dm_bufio_in_request());
+	else
+		spin_lock_bh(&c->spinlock);
 }
 
 static int dm_bufio_trylock(struct dm_bufio_client *c)
 {
-	return mutex_trylock(&c->lock);
+	if (c->may_sleep)
+		return mutex_trylock(&c->lock);
+	else
+		return spin_trylock_bh(&c->spinlock);
 }
 
 static void dm_bufio_unlock(struct dm_bufio_client *c)
 {
-	mutex_unlock(&c->lock);
+	if (c->may_sleep)
+		mutex_unlock(&c->lock);
+	else
+		spin_unlock_bh(&c->spinlock);
 }
 
 /*----------------------------------------------------------------*/
@@ -878,7 +889,7 @@ static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client
 	 * be allocated.
 	 */
 	while (1) {
-		if (dm_bufio_cache_size_latch != 1) {
+		if (dm_bufio_cache_size_latch != 1 && c->may_sleep) {
 			b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
 			if (b)
 				return b;
@@ -1041,6 +1052,7 @@ static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
 	if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
 		return NULL;
 
+	b->hold_count++;
 	__relink_lru(b, test_bit(B_DIRTY, &b->state) ||
 		     test_bit(B_WRITING, &b->state));
 	return b;
@@ -1748,12 +1760,17 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
 	c->alloc_callback = alloc_callback;
 	c->write_callback = write_callback;
 
+	c->may_sleep = true;
+	if (flags & DM_BUFIO_GET_CANT_SLEEP)
+		c->may_sleep = false;
+
 	for (i = 0; i < LIST_SIZE; i++) {
 		INIT_LIST_HEAD(&c->lru[i]);
 		c->n_buffers[i] = 0;
 	}
 
 	mutex_init(&c->lock);
+	spin_lock_init(&c->spinlock);
 	INIT_LIST_HEAD(&c->reserved_buffers);
 	c->need_reserved_buffers = reserved_buffers;
 
diff --git a/include/linux/dm-bufio.h b/include/linux/dm-bufio.h
index e21480715255..2a78f0cb8e71 100644
--- a/include/linux/dm-bufio.h
+++ b/include/linux/dm-bufio.h
@@ -17,6 +17,11 @@ struct dm_bufio_client;
 
 struct dm_buffer;
 
+/*
+ * Flags for dm_bufio_client_create
+ */
+#define DM_BUFIO_GET_CANT_SLEEP 0x1
+
 /*
  * Create a buffered IO cache on a given device
  */
-- 
2.37.1.359.gd136c6c3e2-goog

--
dm-devel mailing list
dm-devel@xxxxxxxxxx
https://listman.redhat.com/mailman/listinfo/dm-devel