Add KASAN support to the alloc_caches (apoll and netmsg_cache), so that
any access to an object while it sits unused in a cache raises a KASAN
warning.

An object is poisoned when it is put into the cache, and unpoisoned when
it is taken back out of the cache or when the cache is freed.

Signed-off-by: Breno Leitao <leitao@xxxxxxxxxx>
---
 io_uring/alloc_cache.h | 11 ++++++++---
 io_uring/io_uring.c    | 12 ++++++++++--
 io_uring/net.c         |  2 +-
 io_uring/poll.c        |  2 +-
 4 files changed, 20 insertions(+), 7 deletions(-)

diff --git a/io_uring/alloc_cache.h b/io_uring/alloc_cache.h
index 0d9ff9402a37..0d5cd2c0a0ba 100644
--- a/io_uring/alloc_cache.h
+++ b/io_uring/alloc_cache.h
@@ -16,12 +16,15 @@ static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
 	if (cache->nr_cached < IO_ALLOC_CACHE_MAX) {
 		cache->nr_cached++;
 		wq_stack_add_head(&entry->node, &cache->list);
+		/* KASAN poisons object */
+		kasan_slab_free_mempool(entry);
 		return true;
 	}
 	return false;
 }
 
-static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
+static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache,
+							size_t size)
 {
 	struct io_wq_work_node *node;
 	struct io_cache_entry *entry;
@@ -29,6 +32,7 @@ static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *c
 	if (cache->list.next) {
 		node = cache->list.next;
 		entry = container_of(node, struct io_cache_entry, node);
+		kasan_unpoison_range(entry, size);
 		cache->list.next = node->next;
 		return entry;
 	}
@@ -43,11 +47,12 @@ static inline void io_alloc_cache_init(struct io_alloc_cache *cache)
 }
 
 static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
-					void (*free)(struct io_cache_entry *))
+					void (*free)(struct io_cache_entry *),
+					size_t size)
 {
 	struct io_cache_entry *entry;
 
-	while ((entry = io_alloc_cache_get(cache))) {
+	while ((entry = io_alloc_cache_get(cache, size))) {
 		free(entry);
 	}
 
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 80b6204769e8..6a98902b8f62 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2766,6 +2766,15 @@ static void io_req_caches_free(struct io_ring_ctx *ctx)
 	mutex_unlock(&ctx->uring_lock);
 }
 
+static __cold void io_uring_acache_free(struct io_ring_ctx *ctx)
+{
+
+	io_alloc_cache_free(&ctx->apoll_cache, io_apoll_cache_free,
+			    sizeof(struct async_poll));
+	io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free,
+			    sizeof(struct io_async_msghdr));
+}
+
 static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 {
 	io_sq_thread_finish(ctx);
@@ -2781,8 +2790,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 	__io_sqe_files_unregister(ctx);
 	io_cqring_overflow_kill(ctx);
 	io_eventfd_unregister(ctx);
-	io_alloc_cache_free(&ctx->apoll_cache, io_apoll_cache_free);
-	io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
+	io_uring_acache_free(ctx);
 	mutex_unlock(&ctx->uring_lock);
 	io_destroy_buffers(ctx);
 	if (ctx->sq_creds)
diff --git a/io_uring/net.c b/io_uring/net.c
index fbc34a7c2743..8dc67b23b030 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -139,7 +139,7 @@ static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
 	struct io_async_msghdr *hdr;
 
 	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
-		entry = io_alloc_cache_get(&ctx->netmsg_cache);
+		entry = io_alloc_cache_get(&ctx->netmsg_cache, sizeof(struct io_async_msghdr));
 		if (entry) {
 			hdr = container_of(entry, struct io_async_msghdr, cache);
 			hdr->free_iov = NULL;
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 8339a92b4510..295d59875f00 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -661,7 +661,7 @@ static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
 		apoll = req->apoll;
 		kfree(apoll->double_poll);
 	} else if (!(issue_flags & IO_URING_F_UNLOCKED)) {
-		entry = io_alloc_cache_get(&ctx->apoll_cache);
+		entry = io_alloc_cache_get(&ctx->apoll_cache, sizeof(struct async_poll));
 		if (entry == NULL)
			goto alloc_apoll;
 		apoll = container_of(entry, struct async_poll, cache);
-- 
2.39.0
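For readers unfamiliar with the KASAN hooks used above, the sketch below shows the same poison-on-put / unpoison-on-get lifecycle in isolation. It is a minimal, hypothetical example and not code from this patch: struct demo_cache, demo_cache_put() and demo_cache_get() are invented names with an invented one-slot layout; only kasan_slab_free_mempool() and kasan_unpoison_range() are the actual KASAN helpers the patch relies on.

#include <linux/kasan.h>
#include <linux/types.h>

/* Hypothetical one-slot cache, used only to illustrate the pattern. */
struct demo_cache {
	void	*slot;
	size_t	obj_size;
};

static void demo_cache_put(struct demo_cache *cache, void *obj)
{
	cache->slot = obj;
	/*
	 * Poison the object while it is parked in the cache, so any
	 * access before the next get is reported by KASAN.
	 */
	kasan_slab_free_mempool(obj);
}

static void *demo_cache_get(struct demo_cache *cache)
{
	void *obj = cache->slot;

	if (!obj)
		return NULL;
	cache->slot = NULL;
	/* Make the object fully accessible again before handing it out. */
	kasan_unpoison_range(obj, cache->obj_size);
	return obj;
}

Note that the pointer lives in the cache structure itself, not inside the poisoned object, so the get path never reads poisoned memory before calling kasan_unpoison_range().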