Implement .demand_read() callback for cachefiles backend. The .demand_read() callback is responsible for notifying the user daemon of the pending request to process, and will get blocked until the user daemon has prepared the data and filled the hole. Signed-off-by: Jeffle Xu <jefflexu@xxxxxxxxxxxxxxxxx> --- fs/cachefiles/internal.h | 12 +++++++++ fs/cachefiles/io.c | 56 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 68 insertions(+) diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h index af8ac8107f77..eeb6ad7dcd49 100644 --- a/fs/cachefiles/internal.h +++ b/fs/cachefiles/internal.h @@ -61,6 +61,18 @@ struct cachefiles_object { #define CACHEFILES_OBJECT_USING_TMPFILE 0 /* Have an unlinked tmpfile */ }; +struct cachefiles_req_in { + uint64_t id; + uint64_t off; + uint64_t len; + char path[NAME_MAX]; +}; + +struct cachefiles_req { + struct completion done; + struct cachefiles_req_in req_in; +}; + /* * Cache files cache definition */ diff --git a/fs/cachefiles/io.c b/fs/cachefiles/io.c index 60b1eac2ce78..376603e5ed99 100644 --- a/fs/cachefiles/io.c +++ b/fs/cachefiles/io.c @@ -539,12 +539,68 @@ static void cachefiles_end_operation(struct netfs_cache_resources *cres) fscache_end_cookie_access(fscache_cres_cookie(cres), fscache_access_io_end); } +static struct cachefiles_req *cachefiles_alloc_req(struct cachefiles_object *object, + loff_t start_pos, + size_t len) +{ + struct cachefiles_req *req; + struct cachefiles_req_in *req_in; + + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return NULL; + + req_in = &req->req_in; + + req_in->off = start_pos; + req_in->len = len; + strncpy(req_in->path, object->d_name, sizeof(req_in->path)); + + init_completion(&req->done); + + return req; +} + +int cachefiles_demand_read(struct netfs_cache_resources *cres, + loff_t start_pos, size_t len) +{ + struct cachefiles_object *object; + struct cachefiles_cache *cache; + struct cachefiles_req *req; + int ret; + + object = cachefiles_cres_object(cres); + cache = 
object->volume->cache; + + req = cachefiles_alloc_req(object, start_pos, len); + if (!req) + return -ENOMEM; + + spin_lock(&cache->reqs_lock); + ret = idr_alloc(&cache->reqs, req, 0, 0, GFP_KERNEL); + if (ret >= 0) + req->req_in.id = ret; + spin_unlock(&cache->reqs_lock); + if (ret < 0) { + kfree(req); + return -ENOMEM; + } + + wake_up_all(&cache->daemon_pollwq); + + wait_for_completion(&req->done); + kfree(req); + + return 0; +} + static const struct netfs_cache_ops cachefiles_netfs_cache_ops = { .end_operation = cachefiles_end_operation, .read = cachefiles_read, .write = cachefiles_write, .prepare_read = cachefiles_prepare_read, .prepare_write = cachefiles_prepare_write, + .demand_read = cachefiles_demand_read, }; /* -- 2.27.0