Pass start and len to the rreq allocator.  This should ensure that the
fields are set so that ->init_request() can use them.

Also add a parameter to indicate the origin of the request.  Ceph can use
this to tell whether to get caps.

Changes
=======
ver #3)
 - Change the author to me as Jeff feels that most of the patch is my
   changes now.

ver #2)
 - Show the request origin in the netfs_rreq tracepoint.

Signed-off-by: Jeff Layton <jlayton@xxxxxxxxxx>
Co-developed-by: David Howells <dhowells@xxxxxxxxxx>
Signed-off-by: David Howells <dhowells@xxxxxxxxxx>
cc: linux-cachefs@xxxxxxxxxx
Link: https://lore.kernel.org/r/164622989020.3564931.17517006047854958747.stgit@xxxxxxxxxxxxxxxxxxxxxx/ # v1
Link: https://lore.kernel.org/r/164678208569.1200972.12153682697842916557.stgit@xxxxxxxxxxxxxxxxxxxxxx/ # v2
---
 fs/netfs/internal.h          |  7 +++++--
 fs/netfs/objects.c           | 13 ++++++++++---
 fs/netfs/read_helper.c       | 23 +++++++++++------------
 include/linux/netfs.h        |  7 +++++++
 include/trace/events/netfs.h | 11 ++++++++++-
 5 files changed, 43 insertions(+), 18 deletions(-)

diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h
index a0b7d1bf9f3d..89837e904fa7 100644
--- a/fs/netfs/internal.h
+++ b/fs/netfs/internal.h
@@ -17,9 +17,12 @@
 /*
  * objects.c
  */
-struct netfs_io_request *netfs_alloc_request(const struct netfs_request_ops *ops,
+struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
+					     struct file *file,
+					     const struct netfs_request_ops *ops,
 					     void *netfs_priv,
-					     struct file *file);
+					     loff_t start, size_t len,
+					     enum netfs_io_origin origin);
 void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
 void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async);
 void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c
index 39097893e847..986d7a9d25dd 100644
--- a/fs/netfs/objects.c
+++ b/fs/netfs/objects.c
@@ -11,17 +11,24 @@
 /*
  * Allocate an I/O request and initialise it.
  */
-struct netfs_io_request *netfs_alloc_request(
-	const struct netfs_request_ops *ops, void *netfs_priv,
-	struct file *file)
+struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
+					     struct file *file,
+					     const struct netfs_request_ops *ops,
+					     void *netfs_priv,
+					     loff_t start, size_t len,
+					     enum netfs_io_origin origin)
 {
 	static atomic_t debug_ids;
 	struct netfs_io_request *rreq;
 
 	rreq = kzalloc(sizeof(struct netfs_io_request), GFP_KERNEL);
 	if (rreq) {
+		rreq->start	= start;
+		rreq->len	= len;
+		rreq->origin	= origin;
 		rreq->netfs_ops	= ops;
 		rreq->netfs_priv = netfs_priv;
+		rreq->mapping	= mapping;
 		rreq->inode	= file_inode(file);
 		rreq->i_size	= i_size_read(rreq->inode);
 		rreq->debug_id	= atomic_inc_return(&debug_ids);
diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c
index 8f277da487b6..dea085715286 100644
--- a/fs/netfs/read_helper.c
+++ b/fs/netfs/read_helper.c
@@ -763,12 +763,13 @@ void netfs_readahead(struct readahead_control *ractl,
 	if (readahead_count(ractl) == 0)
 		goto cleanup;
 
-	rreq = netfs_alloc_request(ops, netfs_priv, ractl->file);
+	rreq = netfs_alloc_request(ractl->mapping, ractl->file,
+				   ops, netfs_priv,
+				   readahead_pos(ractl),
+				   readahead_length(ractl),
+				   NETFS_READAHEAD);
 	if (!rreq)
 		goto cleanup;
-	rreq->mapping	= ractl->mapping;
-	rreq->start	= readahead_pos(ractl);
-	rreq->len	= readahead_length(ractl);
 
 	if (ops->begin_cache_operation) {
 		ret = ops->begin_cache_operation(rreq);
@@ -838,16 +839,15 @@ int netfs_readpage(struct file *file,
 
 	_enter("%lx", folio_index(folio));
 
-	rreq = netfs_alloc_request(ops, netfs_priv, file);
+	rreq = netfs_alloc_request(folio->mapping, file, ops, netfs_priv,
+				   folio_file_pos(folio), folio_size(folio),
+				   NETFS_READPAGE);
 	if (!rreq) {
 		if (netfs_priv)
 			ops->cleanup(folio_file_mapping(folio), netfs_priv);
 		folio_unlock(folio);
 		return -ENOMEM;
 	}
-	rreq->mapping	= folio_file_mapping(folio);
-	rreq->start	= folio_file_pos(folio);
-	rreq->len	= folio_size(folio);
 
 	if (ops->begin_cache_operation) {
 		ret = ops->begin_cache_operation(rreq);
@@ -1008,12 +1008,11 @@ int netfs_write_begin(struct file *file, struct address_space *mapping,
 	}
 
 	ret = -ENOMEM;
-	rreq = netfs_alloc_request(ops, netfs_priv, file);
+	rreq = netfs_alloc_request(mapping, file, ops, netfs_priv,
+				   folio_file_pos(folio), folio_size(folio),
+				   NETFS_READ_FOR_WRITE);
 	if (!rreq)
 		goto error;
-	rreq->mapping	= folio_file_mapping(folio);
-	rreq->start	= folio_file_pos(folio);
-	rreq->len	= folio_size(folio);
 	rreq->no_unlock_folio	= folio_index(folio);
 	__set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
 	netfs_priv = NULL;
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index c702bd8ea8da..7dc741d9b21b 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -150,6 +150,12 @@ struct netfs_io_subrequest {
 #define NETFS_SREQ_NO_PROGRESS	4	/* Set if we didn't manage to read any data */
 };
 
+enum netfs_io_origin {
+	NETFS_READAHEAD,		/* This read was triggered by readahead */
+	NETFS_READPAGE,			/* This read is a synchronous read */
+	NETFS_READ_FOR_WRITE,		/* This read is to prepare a write */
+} __mode(byte);
+
 /*
  * Descriptor for an I/O helper request.  This is used to make multiple I/O
  * operations to a variety of data stores and then stitch the result together.
@@ -167,6 +173,7 @@ struct netfs_io_request {
 	size_t			submitted;	/* Amount submitted for I/O so far */
 	size_t			len;		/* Length of the request */
 	short			error;		/* 0 or error that occurred */
+	enum netfs_io_origin	origin;		/* Origin of the request */
 	loff_t			i_size;		/* Size of the file */
 	loff_t			start;		/* Start position */
 	pgoff_t			no_unlock_folio; /* Don't unlock this folio after read */
diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h
index 556859b0f107..f00e3e1821c8 100644
--- a/include/trace/events/netfs.h
+++ b/include/trace/events/netfs.h
@@ -21,6 +21,11 @@
 	EM(netfs_read_trace_readpage,		"READPAGE ")	\
 	E_(netfs_read_trace_write_begin,	"WRITEBEGN")
 
+#define netfs_rreq_origins					\
+	EM(NETFS_READAHEAD,			"RA")		\
+	EM(NETFS_READPAGE,			"RP")		\
+	E_(NETFS_READ_FOR_WRITE,		"RW")
+
 #define netfs_rreq_traces					\
 	EM(netfs_rreq_trace_assess,		"ASSESS ")	\
 	EM(netfs_rreq_trace_copy,		"COPY   ")	\
@@ -101,6 +106,7 @@ enum netfs_sreq_ref_trace { netfs_sreq_ref_traces } __mode(byte);
 #define E_(a, b) TRACE_DEFINE_ENUM(a);
 
 netfs_read_traces;
+netfs_rreq_origins;
 netfs_rreq_traces;
 netfs_sreq_sources;
 netfs_sreq_traces;
@@ -159,17 +165,20 @@ TRACE_EVENT(netfs_rreq,
 	    TP_STRUCT__entry(
 		    __field(unsigned int,		rreq		)
 		    __field(unsigned int,		flags		)
+		    __field(enum netfs_io_origin,	origin		)
 		    __field(enum netfs_rreq_trace,	what		)
 			     ),
 
 	    TP_fast_assign(
 		    __entry->rreq	= rreq->debug_id;
 		    __entry->flags	= rreq->flags;
+		    __entry->origin	= rreq->origin;
 		    __entry->what	= what;
 			   ),
 
-	    TP_printk("R=%08x %s f=%02x",
+	    TP_printk("R=%08x %s %s f=%02x",
 		      __entry->rreq,
+		      __print_symbolic(__entry->origin, netfs_rreq_origins),
 		      __print_symbolic(__entry->what, netfs_rreq_traces),
 		      __entry->flags)
 	    );
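
As a usage note, the point of setting start, len and origin before ->init_request()
runs is that the filesystem's hook can make decisions from them.  The sketch below
is illustrative only and not part of this patch: "myfs", the hook body and the
void-returning ->init_request() prototype are assumptions for the tree this applies
to (adjust if ->init_request() returns an error there), and it does not reproduce
Ceph's actual caps logic.

/*
 * Illustrative sketch: a netfs user deciding what to do based on the
 * request origin, which netfs_alloc_request() now fills in along with
 * rreq->start and rreq->len before ->init_request() is invoked.
 */
static void myfs_init_request(struct netfs_io_request *rreq, struct file *file)
{
	switch (rreq->origin) {
	case NETFS_READAHEAD:
		/* Speculative read: a filesystem might only proceed if it
		 * already holds the capabilities/leases it needs. */
		break;
	case NETFS_READPAGE:
	case NETFS_READ_FOR_WRITE:
		/* Synchronous read or read-to-prepare-a-write: the range
		 * [rreq->start, rreq->start + rreq->len) is known here, so
		 * the filesystem could obtain caps/locks covering it. */
		break;
	}
}

static const struct netfs_request_ops myfs_netfs_ops = {
	.init_request	= myfs_init_request,
	/* ... other ops ... */
};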