On 5/24/18 1:57 PM, Matthew Wilcox wrote:
> On Thu, May 24, 2018 at 12:43:06PM -0700, Andrew Morton wrote:
>> On Thu, 24 May 2018 10:02:52 -0600 Jens Axboe <axboe@xxxxxxxxx> wrote:
>>>  do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
>>>  		sector_t *last_block_in_bio, struct buffer_head *map_bh,
>>>  		unsigned long *first_logical_block, get_block_t get_block,
>>> -		gfp_t gfp)
>>> +		gfp_t gfp, bool is_readahead)
>>
>> That's a lot of arguments.
>
> 	struct mpage_args args = {
> 		.bio = NULL,
> 		.first_logical_block = 0,
> 		.last_block_in_bio = 0,
> 		.is_readahead = true,
> 		.map_bh = {
> 			.b_state = 0,
> 			.b_size = 0,
> 		},
> 		.get_block = get_block,
> 		.gfp = readahead_gfp_mask(mapping),
> 	};
>
> ...
> 		do_mpage_readpages(&args, page, nr_pages - page_idx);
>
> better than inlining?

It makes it smaller, at least.

diff --git a/fs/mpage.c b/fs/mpage.c
index 0a5474237f5e..82682f1e5187 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -133,6 +133,18 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
 	} while (page_bh != head);
 }
 
+struct mpage_args {
+	struct bio *bio;
+	struct page *page;
+	unsigned nr_pages;
+	sector_t last_block_in_bio;
+	struct buffer_head map_bh;
+	unsigned long first_logical_block;
+	get_block_t *get_block;
+	gfp_t gfp;
+	bool is_readahead;
+};
+
 /*
  * This is the worker routine which does all the work of mapping the disk
  * blocks and constructs largest possible bios, submits them for IO if the
@@ -142,12 +154,9 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
  * represent the validity of its disk mapping and to decide when to do the next
  * get_block() call.
  */
-static struct bio *
-do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
-		sector_t *last_block_in_bio, struct buffer_head *map_bh,
-		unsigned long *first_logical_block, get_block_t get_block,
-		gfp_t gfp, bool is_readahead)
+static struct bio *do_mpage_readpage(struct mpage_args *args)
 {
+	struct page *page = args->page;
 	struct inode *inode = page->mapping->host;
 	const unsigned blkbits = inode->i_blkbits;
 	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
@@ -161,7 +170,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 	struct block_device *bdev = NULL;
 	int length;
 	int fully_mapped = 1;
-	int op_flags = is_readahead ? REQ_RAHEAD : 0;
+	int op_flags = args->is_readahead ? REQ_RAHEAD : 0;
 	unsigned nblocks;
 	unsigned relative_block;
 
@@ -169,7 +178,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 		goto confused;
 
 	block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
-	last_block = block_in_file + nr_pages * blocks_per_page;
+	last_block = block_in_file + args->nr_pages * blocks_per_page;
 	last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
 	if (last_block > last_block_in_file)
 		last_block = last_block_in_file;
@@ -178,43 +187,43 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 	/*
 	 * Map blocks using the result from the previous get_blocks call first.
 	 */
-	nblocks = map_bh->b_size >> blkbits;
-	if (buffer_mapped(map_bh) && block_in_file > *first_logical_block &&
-			block_in_file < (*first_logical_block + nblocks)) {
-		unsigned map_offset = block_in_file - *first_logical_block;
+	nblocks = args->map_bh.b_size >> blkbits;
+	if (buffer_mapped(&args->map_bh) && block_in_file > args->first_logical_block &&
+			block_in_file < (args->first_logical_block + nblocks)) {
+		unsigned map_offset = block_in_file - args->first_logical_block;
 		unsigned last = nblocks - map_offset;
 
 		for (relative_block = 0; ; relative_block++) {
 			if (relative_block == last) {
-				clear_buffer_mapped(map_bh);
+				clear_buffer_mapped(&args->map_bh);
 				break;
 			}
 			if (page_block == blocks_per_page)
 				break;
-			blocks[page_block] = map_bh->b_blocknr + map_offset +
+			blocks[page_block] = args->map_bh.b_blocknr + map_offset +
 						relative_block;
 			page_block++;
 			block_in_file++;
 		}
-		bdev = map_bh->b_bdev;
+		bdev = args->map_bh.b_bdev;
 	}
 
 	/*
 	 * Then do more get_blocks calls until we are done with this page.
 	 */
-	map_bh->b_page = page;
+	args->map_bh.b_page = page;
 	while (page_block < blocks_per_page) {
-		map_bh->b_state = 0;
-		map_bh->b_size = 0;
+		args->map_bh.b_state = 0;
+		args->map_bh.b_size = 0;
 
 		if (block_in_file < last_block) {
-			map_bh->b_size = (last_block-block_in_file) << blkbits;
-			if (get_block(inode, block_in_file, map_bh, 0))
+			args->map_bh.b_size = (last_block-block_in_file) << blkbits;
+			if (args->get_block(inode, block_in_file, &args->map_bh, 0))
 				goto confused;
-			*first_logical_block = block_in_file;
+			args->first_logical_block = block_in_file;
 		}
 
-		if (!buffer_mapped(map_bh)) {
+		if (!buffer_mapped(&args->map_bh)) {
 			fully_mapped = 0;
 			if (first_hole == blocks_per_page)
 				first_hole = page_block;
@@ -229,8 +238,8 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 		 * we just collected from get_block into the page's buffers
 		 * so readpage doesn't have to repeat the get_block call
 		 */
-		if (buffer_uptodate(map_bh)) {
-			map_buffer_to_page(page, map_bh, page_block);
+		if (buffer_uptodate(&args->map_bh)) {
+			map_buffer_to_page(page, &args->map_bh, page_block);
 			goto confused;
 		}
 
@@ -238,20 +247,20 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 			goto confused;	/* hole -> non-hole */
 
 		/* Contiguous blocks? */
-		if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1)
+		if (page_block && blocks[page_block-1] != args->map_bh.b_blocknr-1)
 			goto confused;
-		nblocks = map_bh->b_size >> blkbits;
+		nblocks = args->map_bh.b_size >> blkbits;
 		for (relative_block = 0; ; relative_block++) {
 			if (relative_block == nblocks) {
-				clear_buffer_mapped(map_bh);
+				clear_buffer_mapped(&args->map_bh);
 				break;
 			} else if (page_block == blocks_per_page)
 				break;
-			blocks[page_block] = map_bh->b_blocknr+relative_block;
+			blocks[page_block] = args->map_bh.b_blocknr+relative_block;
 			page_block++;
 			block_in_file++;
 		}
-		bdev = map_bh->b_bdev;
+		bdev = args->map_bh.b_bdev;
 	}
 
 	if (first_hole != blocks_per_page) {
@@ -274,43 +283,43 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 	/*
 	 * This page will go to BIO. Do we need to send this BIO off first?
 	 */
-	if (bio && (*last_block_in_bio != blocks[0] - 1))
-		bio = mpage_bio_submit(REQ_OP_READ, op_flags, bio);
+	if (args->bio && (args->last_block_in_bio != blocks[0] - 1))
+		args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
 
 alloc_new:
-	if (bio == NULL) {
+	if (args->bio == NULL) {
 		if (first_hole == blocks_per_page) {
 			if (!bdev_read_page(bdev, blocks[0] << (blkbits - 9),
 								page))
 				goto out;
 		}
-		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
-				min_t(int, nr_pages, BIO_MAX_PAGES), gfp);
-		if (bio == NULL)
+		args->bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
+				min_t(int, args->nr_pages, BIO_MAX_PAGES), args->gfp);
+		if (args->bio == NULL)
 			goto confused;
 	}
 
 	length = first_hole << blkbits;
-	if (bio_add_page(bio, page, length, 0) < length) {
-		bio = mpage_bio_submit(REQ_OP_READ, op_flags, bio);
+	if (bio_add_page(args->bio, page, length, 0) < length) {
+		args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
 		goto alloc_new;
 	}
 
-	relative_block = block_in_file - *first_logical_block;
-	nblocks = map_bh->b_size >> blkbits;
-	if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
+	relative_block = block_in_file - args->first_logical_block;
+	nblocks = args->map_bh.b_size >> blkbits;
+	if ((buffer_boundary(&args->map_bh) && relative_block == nblocks) ||
 	    (first_hole != blocks_per_page))
-		bio = mpage_bio_submit(REQ_OP_READ, op_flags, bio);
+		args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
 	else
-		*last_block_in_bio = blocks[blocks_per_page - 1];
+		args->last_block_in_bio = blocks[blocks_per_page - 1];
 out:
-	return bio;
+	return args->bio;
 
 confused:
-	if (bio)
-		bio = mpage_bio_submit(REQ_OP_READ, op_flags, bio);
+	if (args->bio)
+		args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
 	if (!PageUptodate(page))
-		block_read_full_page(page, get_block);
+		block_read_full_page(page, args->get_block);
 	else
 		unlock_page(page);
 	goto out;
@@ -364,15 +373,13 @@ int
 mpage_readpages(struct address_space *mapping, struct list_head *pages,
 				unsigned nr_pages, get_block_t get_block)
 {
-	struct bio *bio = NULL;
+	struct mpage_args args = {
+		.get_block = get_block,
+		.gfp = readahead_gfp_mask(mapping),
+		.is_readahead = true,
+	};
 	unsigned page_idx;
-	sector_t last_block_in_bio = 0;
-	struct buffer_head map_bh;
-	unsigned long first_logical_block = 0;
-	gfp_t gfp = readahead_gfp_mask(mapping);
 
-	map_bh.b_state = 0;
-	map_bh.b_size = 0;
 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
 		struct page *page = lru_to_page(pages);
 
@@ -380,18 +387,16 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
 		list_del(&page->lru);
 		if (!add_to_page_cache_lru(page, mapping,
 					page->index,
-					gfp)) {
-			bio = do_mpage_readpage(bio, page,
-					nr_pages - page_idx,
-					&last_block_in_bio, &map_bh,
-					&first_logical_block,
-					get_block, gfp, true);
+					args.gfp)) {
+			args.page = page;
+			args.nr_pages = nr_pages - page_idx;
+			args.bio = do_mpage_readpage(&args);
 		}
 		put_page(page);
 	}
 	BUG_ON(!list_empty(pages));
-	if (bio)
-		mpage_bio_submit(REQ_OP_READ, REQ_RAHEAD, bio);
+	if (args.bio)
+		mpage_bio_submit(REQ_OP_READ, REQ_RAHEAD, args.bio);
 	return 0;
 }
 EXPORT_SYMBOL(mpage_readpages);
@@ -401,18 +406,16 @@ EXPORT_SYMBOL(mpage_readpages);
  */
 int mpage_readpage(struct page *page, get_block_t get_block)
 {
-	struct bio *bio = NULL;
-	sector_t last_block_in_bio = 0;
-	struct buffer_head map_bh;
-	unsigned long first_logical_block = 0;
-	gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
+	struct mpage_args args = {
+		.page = page,
+		.nr_pages = 1,
+		.get_block = get_block,
+		.gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL),
+	};
 
-	map_bh.b_state = 0;
-	map_bh.b_size = 0;
-	bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
-			&map_bh, &first_logical_block, get_block, gfp, false);
-	if (bio)
-		mpage_bio_submit(REQ_OP_READ, 0, bio);
+	args.bio = do_mpage_readpage(&args);
+	if (args.bio)
+		mpage_bio_submit(REQ_OP_READ, 0, args.bio);
 	return 0;
 }
 EXPORT_SYMBOL(mpage_readpage);

-- 
Jens Axboe
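
For readers following along: the change above is the classic "argument struct" refactoring that Matthew sketches. A group of values that always travel together is bundled into one struct and passed by pointer, which shrinks both the function signature and every call site, and lets designated initializers zero any field the caller leaves unset. Here is a minimal standalone sketch of the same idea in userspace C, with made-up names (greet, greet_args); this is not code from the patch:

	#include <stdio.h>

	/* Hypothetical example: bundle related parameters into one struct. */
	struct greet_args {
		const char *name;	/* who to greet */
		int times;		/* how many repetitions */
		int shout;		/* nonzero = uppercase greeting */
	};

	/*
	 * Before: greet(name, times, shout) -- three positional arguments,
	 * and every new option would touch every caller.  After: one
	 * pointer, and adding a member later disturbs no existing caller.
	 */
	static void greet(const struct greet_args *args)
	{
		int i;

		for (i = 0; i < args->times; i++)
			printf(args->shout ? "HELLO, %s!\n" : "hello, %s\n",
			       args->name);
	}

	int main(void)
	{
		struct greet_args args = {
			.name = "world",
			.times = 2,
			/* .shout not named, so it is zero-initialized */
		};

		greet(&args);
		return 0;
	}

As in the mpage patch, fields that must persist across repeated calls (args.bio, args.last_block_in_bio, args.map_bh) can simply live in the struct between invocations instead of being threaded through the signature as pointer parameters.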