Okay, let's try something a little more drastic and see if we can at least get it booting to the point where we can read the tracelog. Could you apply the attached patch? It won't release any folio_queue structs or put the refs on any pages, so it will quickly run out of memory - but if you have sufficient memory, it might be enough to boot.

David
---
9p: [DEBUGGING] Don't release pages or folioq structs

diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index af46a598f4d7..702286484176 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -84,8 +84,8 @@ static size_t netfs_load_buffer_from_ra(struct netfs_io_request *rreq,
 		folioq->orders[i] = order;
 		size += PAGE_SIZE << order;
 
-		if (!folio_batch_add(put_batch, folio))
-			folio_batch_release(put_batch);
+		//if (!folio_batch_add(put_batch, folio))
+		//	folio_batch_release(put_batch);
 	}
 
 	for (int i = nr; i < folioq_nr_slots(folioq); i++)
diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c
index 63280791de3b..cec55b7eb5bc 100644
--- a/fs/netfs/misc.c
+++ b/fs/netfs/misc.c
@@ -88,7 +88,7 @@ struct folio_queue *netfs_delete_buffer_head(struct netfs_io_request *wreq)
 	if (next)
 		next->prev = NULL;
 	netfs_stat_d(&netfs_n_folioq);
-	kfree(head);
+	//kfree(head);
 	wreq->buffer = next;
 	return next;
 }
@@ -108,11 +108,11 @@ void netfs_clear_buffer(struct netfs_io_request *rreq)
 				continue;
 			if (folioq_is_marked(p, slot)) {
 				trace_netfs_folio(folio, netfs_folio_trace_put);
-				folio_put(folio);
+				//folio_put(folio);
 			}
 		}
 		netfs_stat_d(&netfs_n_folioq);
-		kfree(p);
+		//kfree(p);
 	}
 }
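
If it does boot far enough, here is a minimal userspace sketch for pulling the tracelog out - to be clear, this is an illustration and not part of the patch above. It assumes tracefs is mounted at /sys/kernel/tracing and that the netfs event group (which includes the netfs_folio tracepoint hit in the patched paths) is available:

/*
 * Illustrative helper only, not part of the patch above: enable the
 * netfs trace event group and drain the trace ring buffer to stdout.
 * Assumes tracefs is mounted at /sys/kernel/tracing.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) != (ssize_t)strlen(val)) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	/* Turn on every event in the netfs group, netfs_folio included. */
	if (write_str("/sys/kernel/tracing/events/netfs/enable", "1") < 0)
		return 1;

	/* trace_pipe blocks and consumes entries as they are read. */
	fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
	if (fd < 0)
		return 1;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		if (write(STDOUT_FILENO, buf, n) != n)
			break;
	close(fd);
	return 0;
}

The same thing from a shell, if that's easier: echo 1 > /sys/kernel/tracing/events/netfs/enable, then cat /sys/kernel/tracing/trace_pipe.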