Since all related structures have been renamed with the "fscache_" prefix,
move them to fscache.h as a final cleanup. Besides, make netfs.h include
fscache.h rather than the other way around. This is an intuitive change,
since libnetfs lives one layer above fscache and accesses backing files
through fscache.

This is a cleanup with no logic change.

Signed-off-by: Jingbo Xu <jefflexu@xxxxxxxxxxxxxxxxx>
---
 fs/afs/internal.h       |  2 +-
 fs/erofs/fscache.c      |  1 +
 fs/nfs/fscache.h        |  2 +-
 include/linux/fscache.h | 80 ++++++++++++++++++++++++++++++++++++++++-
 include/linux/netfs.h   | 80 +----------------------------------------
 5 files changed, 83 insertions(+), 82 deletions(-)

diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 723d162078a3..5d1314265e3d 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -14,7 +14,7 @@
 #include <linux/key.h>
 #include <linux/workqueue.h>
 #include <linux/sched.h>
-#include <linux/fscache.h>
+#include <linux/netfs.h>
 #include <linux/backing-dev.h>
 #include <linux/uuid.h>
 #include <linux/mm_types.h>
diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c
index e30a42a35ae7..69531be66b28 100644
--- a/fs/erofs/fscache.c
+++ b/fs/erofs/fscache.c
@@ -4,6 +4,7 @@
  * Copyright (C) 2022, Bytedance Inc. All rights reserved.
  */
 #include <linux/fscache.h>
+#include <linux/netfs.h>
 #include "internal.h"
 
 static DEFINE_MUTEX(erofs_domain_list_lock);
diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
index 2a37af880978..a0715f83a529 100644
--- a/fs/nfs/fscache.h
+++ b/fs/nfs/fscache.h
@@ -12,7 +12,7 @@
 #include <linux/nfs_fs.h>
 #include <linux/nfs_mount.h>
 #include <linux/nfs4_mount.h>
-#include <linux/fscache.h>
+#include <linux/netfs.h>
 #include <linux/iversion.h>
 
 #ifdef CONFIG_NFS_FSCACHE
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index 034d009c0de7..457226a396d2 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -15,7 +15,6 @@
 #define _LINUX_FSCACHE_H
 
 #include <linux/fs.h>
-#include <linux/netfs.h>
 #include <linux/writeback.h>
 #include <linux/pagemap.h>
 
@@ -151,6 +150,85 @@ struct fscache_cookie {
 #define FSCACHE_REQ_COPY_TO_CACHE	0	/* Set if should copy the data to the cache */
 #define FSCACHE_REQ_ONDEMAND		1	/* Set if it's from on-demand read mode */
 
+enum fscache_io_source {
+	FSCACHE_FILL_WITH_ZEROES,
+	FSCACHE_DOWNLOAD_FROM_SERVER,
+	FSCACHE_READ_FROM_CACHE,
+	FSCACHE_INVALID_READ,
+} __mode(byte);
+
+typedef void (*fscache_io_terminated_t)(void *priv, ssize_t transferred_or_error,
+					bool was_async);
+
+/*
+ * Resources required to do operations on a cache.
+ */
+struct fscache_resources {
+	const struct fscache_ops *ops;
+	void *cache_priv;
+	void *cache_priv2;
+	unsigned int debug_id;		/* Cookie debug ID */
+	unsigned int inval_counter;	/* object->inval_counter at begin_op */
+};
+
+/*
+ * How to handle reading from a hole.
+ */
+enum fscache_read_from_hole {
+	FSCACHE_READ_HOLE_IGNORE,
+	FSCACHE_READ_HOLE_CLEAR,
+	FSCACHE_READ_HOLE_FAIL,
+};
+
+/*
+ * Table of operations for access to a cache. This is obtained by
+ * rreq->ops->begin_cache_operation().
+ */
+struct fscache_ops {
+	/* End an operation */
+	void (*end_operation)(struct fscache_resources *cres);
+
+	/* Read data from the cache */
+	int (*read)(struct fscache_resources *cres,
+		    loff_t start_pos,
+		    struct iov_iter *iter,
+		    enum fscache_read_from_hole read_hole,
+		    fscache_io_terminated_t term_func,
+		    void *term_func_priv);
+
+	/* Write data to the cache */
+	int (*write)(struct fscache_resources *cres,
+		     loff_t start_pos,
+		     struct iov_iter *iter,
+		     fscache_io_terminated_t term_func,
+		     void *term_func_priv);
+
+	/* Expand readahead request */
+	void (*expand_readahead)(struct fscache_resources *cres,
+				 loff_t *_start, size_t *_len, loff_t i_size);
+
+	/* Prepare a read operation, shortening it to a cached/uncached
+	 * boundary as appropriate.
+	 */
+	enum fscache_io_source (*prepare_read)(struct fscache_resources *cres,
+					       loff_t *_start, size_t *_len,
+					       unsigned long *_flags, loff_t i_size);
+
+	/* Prepare a write operation, working out what part of the write we can
+	 * actually do.
+	 */
+	int (*prepare_write)(struct fscache_resources *cres,
+			     loff_t *_start, size_t *_len, loff_t i_size,
+			     bool no_space_allocated_yet);
+
+	/* Query the occupancy of the cache in a region, returning where the
+	 * next chunk of data starts and how long it is.
+	 */
+	int (*query_occupancy)(struct fscache_resources *cres,
+			       loff_t start, size_t len, size_t granularity,
+			       loff_t *_data_start, size_t *_data_len);
+};
+
 /*
  * slow-path functions for when there is actually caching available, and the
  * netfs does actually have a valid token
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index 2ad4e1e88106..1977f953633a 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -16,19 +16,10 @@
 
 #include <linux/workqueue.h>
 #include <linux/fs.h>
+#include <linux/fscache.h>
 
 enum netfs_sreq_ref_trace;
 
-enum fscache_io_source {
-	FSCACHE_FILL_WITH_ZEROES,
-	FSCACHE_DOWNLOAD_FROM_SERVER,
-	FSCACHE_READ_FROM_CACHE,
-	FSCACHE_INVALID_READ,
-} __mode(byte);
-
-typedef void (*fscache_io_terminated_t)(void *priv, ssize_t transferred_or_error,
-					bool was_async);
-
 /*
  * Per-inode context. This wraps the VFS inode.
  */
@@ -41,17 +32,6 @@ struct netfs_inode {
 	loff_t remote_i_size;	/* Size of the remote file */
 };
 
-/*
- * Resources required to do operations on a cache.
- */
-struct fscache_resources {
-	const struct fscache_ops *ops;
-	void *cache_priv;
-	void *cache_priv2;
-	unsigned int debug_id;		/* Cookie debug ID */
-	unsigned int inval_counter;	/* object->inval_counter at begin_op */
-};
-
 /*
  * Descriptor for a single component subrequest.
  */
@@ -128,64 +108,6 @@ struct netfs_request_ops {
 	void (*done)(struct netfs_io_request *rreq);
 };
 
-/*
- * How to handle reading from a hole.
- */
-enum fscache_read_from_hole {
-	FSCACHE_READ_HOLE_IGNORE,
-	FSCACHE_READ_HOLE_CLEAR,
-	FSCACHE_READ_HOLE_FAIL,
-};
-
-/*
- * Table of operations for access to a cache. This is obtained by
- * rreq->ops->begin_cache_operation().
- */ -struct fscache_ops { - /* End an operation */ - void (*end_operation)(struct fscache_resources *cres); - - /* Read data from the cache */ - int (*read)(struct fscache_resources *cres, - loff_t start_pos, - struct iov_iter *iter, - enum fscache_read_from_hole read_hole, - fscache_io_terminated_t term_func, - void *term_func_priv); - - /* Write data to the cache */ - int (*write)(struct fscache_resources *cres, - loff_t start_pos, - struct iov_iter *iter, - fscache_io_terminated_t term_func, - void *term_func_priv); - - /* Expand readahead request */ - void (*expand_readahead)(struct fscache_resources *cres, - loff_t *_start, size_t *_len, loff_t i_size); - - /* Prepare a read operation, shortening it to a cached/uncached - * boundary as appropriate. - */ - enum fscache_io_source (*prepare_read)(struct fscache_resources *cres, - loff_t *_start, size_t *_len, - unsigned long *_flags, loff_t i_size); - - /* Prepare a write operation, working out what part of the write we can - * actually do. - */ - int (*prepare_write)(struct fscache_resources *cres, - loff_t *_start, size_t *_len, loff_t i_size, - bool no_space_allocated_yet); - - /* Query the occupancy of the cache in a region, returning where the - * next chunk of data starts and how long it is. - */ - int (*query_occupancy)(struct fscache_resources *cres, - loff_t start, size_t len, size_t granularity, - loff_t *_data_start, size_t *_data_len); -}; - struct readahead_control; void netfs_readahead(struct readahead_control *); int netfs_read_folio(struct file *, struct folio *); -- 2.19.1.6.gb485710b
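As a quick illustration of the relocated API (not part of the patch): a
hypothetical caller that already holds a populated struct fscache_resources
could drive a synchronous cache read through the ops table roughly as
sketched below. The helper name, the -ENOBUFS fallback and the NULL
term_func (meaning synchronous completion) are illustrative assumptions,
not taken from this series.

#include <linux/netfs.h>	/* now also pulls in <linux/fscache.h> */

/* Hypothetical helper: read into @iter from the cache at @pos, clearing
 * any holes, or report -ENOBUFS if no cache ops table is attached. */
static int example_read_from_cache(struct fscache_resources *cres,
				   loff_t pos, struct iov_iter *iter)
{
	if (!cres->ops || !cres->ops->read)
		return -ENOBUFS;

	return cres->ops->read(cres, pos, iter, FSCACHE_READ_HOLE_CLEAR,
			       NULL, NULL);	/* no term_func: synchronous */
}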