From: Xin Yin <yinxin.x@xxxxxxxxxxxxx>
Subject: cachefiles: make on-demand request distribution fairer

[ Upstream commit 1122f40072731525c06b1371cfa30112b9b54d27 ]

Currently, enqueuing and dequeuing of on-demand requests both start
from index 0, which makes request distribution unfair: under heavy
concurrent I/O, requests stored at higher indices starve.

Searching for requests cyclically in cachefiles_ondemand_daemon_read()
makes the distribution fairer.

Fixes: c8383054506c ("cachefiles: notify the user daemon when looking up cookie")
Reported-by: Yongqing Li <liyongqing@xxxxxxxxxxxxx>
Signed-off-by: Xin Yin <yinxin.x@xxxxxxxxxxxxx>
Signed-off-by: David Howells <dhowells@xxxxxxxxxx>
Reviewed-by: Jeffle Xu <jefflexu@xxxxxxxxxxxxxxxxx>
Reviewed-by: Gao Xiang <hsiangkao@xxxxxxxxxxxxxxxxx>
Link: https://lore.kernel.org/r/20220817065200.11543-1-yinxin.x@xxxxxxxxxxxxx/ # v1
Link: https://lore.kernel.org/r/20220825020945.2293-1-yinxin.x@xxxxxxxxxxxxx/ # v2
Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>
---
 fs/cachefiles/internal.h |  1 +
 fs/cachefiles/ondemand.c | 12 +++++++++---
 2 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
index 6cba2c6de2f96..2ad58c4652084 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -111,6 +111,7 @@ struct cachefiles_cache {
 	char				*tag;		/* cache binding tag */
 	refcount_t			unbind_pincount;/* refcount to do daemon unbind */
 	struct xarray			reqs;		/* xarray of pending on-demand requests */
+	unsigned long			req_id_next;
 	struct xarray			ondemand_ids;	/* xarray for ondemand_id allocation */
 	u32				ondemand_id_next;
 };
diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
index 7e1586bd5cf34..0254ed39f68ce 100644
--- a/fs/cachefiles/ondemand.c
+++ b/fs/cachefiles/ondemand.c
@@ -242,14 +242,19 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
 	unsigned long id = 0;
 	size_t n;
 	int ret = 0;
-	XA_STATE(xas, &cache->reqs, 0);
+	XA_STATE(xas, &cache->reqs, cache->req_id_next);
 
 	/*
-	 * Search for a request that has not ever been processed, to prevent
-	 * requests from being processed repeatedly.
+	 * Cyclically search for a request that has not ever been processed,
+	 * to prevent requests from being processed repeatedly, and make
+	 * request distribution fair.
 	 */
 	xa_lock(&cache->reqs);
 	req = xas_find_marked(&xas, UINT_MAX, CACHEFILES_REQ_NEW);
+	if (!req && cache->req_id_next > 0) {
+		xas_set(&xas, 0);
+		req = xas_find_marked(&xas, cache->req_id_next - 1, CACHEFILES_REQ_NEW);
+	}
 	if (!req) {
 		xa_unlock(&cache->reqs);
 		return 0;
@@ -264,6 +269,7 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
 	}
 
 	xas_clear_mark(&xas, CACHEFILES_REQ_NEW);
+	cache->req_id_next = xas.xa_index + 1;
 	xa_unlock(&cache->reqs);
 
 	id = xas.xa_index;
-- 
2.35.1
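
For readers unfamiliar with the cursor pattern the patch introduces, below is a
minimal standalone userspace model of the cyclic scan. It is not kernel code: a
fixed-size array stands in for the xarray, and the names pick_request(),
req_new and next_id are invented for illustration. It shows how remembering
where the previous scan stopped, and wrapping to the front only when the tail
holds nothing new, keeps high-index entries from starving:

/* cyclic_scan.c - standalone sketch of the cyclic search, not kernel code */
#include <stdbool.h>
#include <stdio.h>

#define NR_REQS 8

static bool req_new[NR_REQS];	/* models the CACHEFILES_REQ_NEW mark */
static unsigned long next_id;	/* models cache->req_id_next */

/* Scan [next_id, NR_REQS) first; wrap to [0, next_id) only if that
 * range held nothing new, mirroring the two xas_find_marked() calls. */
static int pick_request(void)
{
	for (unsigned long i = next_id; i < NR_REQS; i++) {
		if (req_new[i]) {
			req_new[i] = false;	/* like xas_clear_mark() */
			next_id = i + 1;	/* resume after this slot */
			return (int)i;
		}
	}
	if (next_id > 0) {
		for (unsigned long i = 0; i < next_id; i++) {
			if (req_new[i]) {
				req_new[i] = false;
				next_id = i + 1;
				return (int)i;
			}
		}
	}
	return -1;	/* no pending request */
}

int main(void)
{
	/* Slots 1 and 6 keep refilling, as under sustained concurrent I/O. */
	for (int round = 0; round < 4; round++) {
		req_new[1] = req_new[6] = true;
		printf("picked %d\n", pick_request());
	}
	return 0;
}

A scan that restarted at index 0 every time would pick slot 1 on all four
rounds and never service slot 6; with the cursor, the picks alternate
1, 6, 1, 6, which is exactly the fairness the patch is after.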