From: Breno Leitao <leit@xxxxxx>

Add support for KASAN in the alloc_caches (apoll and netmsg_cache).
Thus, if something touches an object while it sits unused in a cache,
KASAN raises a warning.

The object is poisoned when it is put into the cache, and unpoisoned
when it is taken out of the cache or when the cache itself is freed.
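For illustration, the lifecycle the new hooks enforce looks roughly
like the sketch below. This is not part of the diff: "demo_entry" and
"demo_cache_cycle" are made-up names, and only the
io_alloc_cache_put()/io_alloc_cache_get() pair as modified here is
exercised:

  #include <linux/container_of.h>
  #include <linux/kasan.h>
  #include "alloc_cache.h"

  struct demo_entry {
          struct io_cache_entry cache;
          char payload[64];
  };

  static void demo_cache_cycle(struct io_alloc_cache *c,
                               struct demo_entry *obj)
  {
          struct io_cache_entry *entry;

          /*
           * A successful put hands the object to the cache and poisons
           * it: any access while it sits in the cache now raises a
           * KASAN report.
           */
          if (!io_alloc_cache_put(c, &obj->cache))
                  return; /* cache full, object was not poisoned */

          /*
           * A get unpoisons sizeof(struct demo_entry) bytes before
           * returning the entry, so the object is safe to use again.
           */
          entry = io_alloc_cache_get(c, sizeof(struct demo_entry));
          if (entry)
                  container_of(entry, struct demo_entry, cache)->payload[0] = 0;
  }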
Signed-off-by: Breno Leitao <leitao@xxxxxxxxxx>
---
 io_uring/alloc_cache.h | 11 ++++++++---
 io_uring/io_uring.c    | 14 ++++++++++++--
 io_uring/net.c         |  2 +-
 io_uring/net.h         |  4 ----
 io_uring/poll.c        |  2 +-
 5 files changed, 22 insertions(+), 11 deletions(-)

diff --git a/io_uring/alloc_cache.h b/io_uring/alloc_cache.h
index ae61eb383cae..6c6bdde6306b 100644
--- a/io_uring/alloc_cache.h
+++ b/io_uring/alloc_cache.h
@@ -16,16 +16,20 @@ static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
 	if (cache->nr_cached < IO_ALLOC_CACHE_MAX) {
 		cache->nr_cached++;
 		wq_stack_add_head(&entry->node, &cache->list);
+		/* KASAN poisons object */
+		kasan_slab_free_mempool(entry);
 		return true;
 	}
 	return false;
 }
 
-static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
+static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache,
+							size_t size)
 {
 	if (cache->list.next) {
 		struct io_cache_entry *entry;
 		entry = container_of(cache->list.next, struct io_cache_entry, node);
+		kasan_unpoison_range(entry, size);
 		cache->list.next = cache->list.next->next;
 		return entry;
 	}
@@ -40,10 +44,11 @@ static inline void io_alloc_cache_init(struct io_alloc_cache *cache)
 }
 
 static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
-					void (*free)(struct io_cache_entry *))
+					void (*free)(struct io_cache_entry *),
+					size_t size)
 {
 	while (1) {
-		struct io_cache_entry *entry = io_alloc_cache_get(cache);
+		struct io_cache_entry *entry = io_alloc_cache_get(cache, size);
 		if (!entry)
 			break;
 		free(entry);
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 80b6204769e8..01367145689b 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2766,6 +2766,17 @@ static void io_req_caches_free(struct io_ring_ctx *ctx)
 	mutex_unlock(&ctx->uring_lock);
 }
 
+static __cold void io_uring_acache_free(struct io_ring_ctx *ctx)
+{
+
+	io_alloc_cache_free(&ctx->apoll_cache, io_apoll_cache_free,
+			    sizeof(struct async_poll));
+#ifdef CONFIG_NET
+	io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free,
+			    sizeof(struct io_async_msghdr));
+#endif
+}
+
 static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 {
 	io_sq_thread_finish(ctx);
@@ -2781,8 +2792,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 		__io_sqe_files_unregister(ctx);
 	io_cqring_overflow_kill(ctx);
 	io_eventfd_unregister(ctx);
-	io_alloc_cache_free(&ctx->apoll_cache, io_apoll_cache_free);
-	io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
+	io_uring_acache_free(ctx);
 	mutex_unlock(&ctx->uring_lock);
 	io_destroy_buffers(ctx);
 	if (ctx->sq_creds)
diff --git a/io_uring/net.c b/io_uring/net.c
index fbc34a7c2743..8dc67b23b030 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -139,7 +139,7 @@ static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
 	struct io_async_msghdr *hdr;
 
 	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
-		entry = io_alloc_cache_get(&ctx->netmsg_cache);
+		entry = io_alloc_cache_get(&ctx->netmsg_cache, sizeof(struct io_async_msghdr));
 		if (entry) {
 			hdr = container_of(entry, struct io_async_msghdr, cache);
 			hdr->free_iov = NULL;
diff --git a/io_uring/net.h b/io_uring/net.h
index 5ffa11bf5d2e..d8359de84996 100644
--- a/io_uring/net.h
+++ b/io_uring/net.h
@@ -62,8 +62,4 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 void io_send_zc_cleanup(struct io_kiocb *req);
 
 void io_netmsg_cache_free(struct io_cache_entry *entry);
-#else
-static inline void io_netmsg_cache_free(struct io_cache_entry *entry)
-{
-}
 #endif
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 8339a92b4510..295d59875f00 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -661,7 +661,7 @@ static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
 		apoll = req->apoll;
 		kfree(apoll->double_poll);
 	} else if (!(issue_flags & IO_URING_F_UNLOCKED)) {
-		entry = io_alloc_cache_get(&ctx->apoll_cache);
+		entry = io_alloc_cache_get(&ctx->apoll_cache, sizeof(struct async_poll));
 		if (entry == NULL)
 			goto alloc_apoll;
 		apoll = container_of(entry, struct async_poll, cache);
-- 
2.30.2