On Fri, Apr 14, 2023 at 06:01:16PM -0700, Luis Chamberlain wrote:
> a) dynamically allocate those now
> b) do a cursory review of the users of that and prepare them
>    to grok buffer heads which are blocksize based rather than
>    PAGE_SIZE based. So we just try to kill MAX_BUF_PER_PAGE.
> 
> Without a) I think buffers after PAGE_SIZE won't get submit_bh() or lock for
> bs > PAGE_SIZE right now.

Worse, we'll overflow the array and corrupt the stack.  This one is a
simple fix ...

+++ b/fs/buffer.c
@@ -2282,7 +2282,7 @@ int block_read_full_folio(struct folio *folio, get_block_t *get_block)
 {
 	struct inode *inode = folio->mapping->host;
 	sector_t iblock, lblock;
-	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
+	struct buffer_head *bh, *head;
 	unsigned int blocksize, bbits;
 	int nr, i;
 	int fully_mapped = 1;
@@ -2335,7 +2335,6 @@ int block_read_full_folio(struct folio *folio, get_block_t *get_block)
 			if (buffer_uptodate(bh))
 				continue;
 		}
-		arr[nr++] = bh;
 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
 
 	if (fully_mapped)
@@ -2353,24 +2352,27 @@ int block_read_full_folio(struct folio *folio, get_block_t *get_block)
 	}
 
 	/* Stage two: lock the buffers */
-	for (i = 0; i < nr; i++) {
-		bh = arr[i];
+	bh = head;
+	do {
 		lock_buffer(bh);
 		mark_buffer_async_read(bh);
-	}
+		bh = bh->b_this_page;
+	} while (bh != head);
 
 	/*
 	 * Stage 3: start the IO.  Check for uptodateness
 	 * inside the buffer lock in case another process reading
 	 * the underlying blockdev brought it uptodate (the sct fix).
 	 */
-	for (i = 0; i < nr; i++) {
-		bh = arr[i];
+	bh = head;
+	do {
 		if (buffer_uptodate(bh))
 			end_buffer_async_read(bh, 1);
 		else
 			submit_bh(REQ_OP_READ, bh);
-	}
+		bh = bh->b_this_page;
+	} while (bh != head);
+
 	return 0;
 }
 EXPORT_SYMBOL(block_read_full_folio);
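
For anyone who wants to see the overflow arithmetic spelled out: MAX_BUF_PER_PAGE
is PAGE_SIZE / 512, i.e. 8 entries on a 4 KiB page, while a folio carries
folio_size / blocksize buffer heads.  Below is a rough user-space sketch of just
that arithmetic -- not kernel code, with a 4 KiB PAGE_SIZE assumed and the folio
and block sizes made up purely for illustration (TOY_* names are mine):

#include <stdio.h>

/*
 * Toy stand-ins for the kernel definitions.  A 4 KiB PAGE_SIZE is an
 * assumption; the folio/block sizes below are illustrative only.
 */
#define TOY_PAGE_SIZE		4096UL
#define TOY_MAX_BUF_PER_PAGE	(TOY_PAGE_SIZE / 512)	/* 8 slots in arr[] */

int main(void)
{
	unsigned long folio_size = 2UL * 1024 * 1024;	/* a 2 MiB folio */
	unsigned long blocksize = 64UL * 1024;		/* bs > PAGE_SIZE case */
	unsigned long nr_buffers = folio_size / blocksize;

	printf("arr[] slots on the stack:  %lu\n", TOY_MAX_BUF_PER_PAGE);
	printf("buffer heads in the folio: %lu\n", nr_buffers);
	if (nr_buffers > TOY_MAX_BUF_PER_PAGE)
		printf("arr[nr++] would write %lu entries past the end\n",
		       nr_buffers - TOY_MAX_BUF_PER_PAGE);
	return 0;
}

With small blocks the overrun is worse still; a 2 MiB folio of 512-byte blocks
would need 4096 entries.  Walking the circular b_this_page list, as the patch
above does, sidesteps the fixed-size array entirely.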