The quilt patch titled
     Subject: readahead: simplify gotos in page_cache_sync_ra()
has been removed from the -mm tree.  Its filename was
     readahead-simplify-gotos-in-page_cache_sync_ra.patch

This patch was dropped because it was merged into the mm-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

------------------------------------------------------
From: Jan Kara <jack@xxxxxxx>
Subject: readahead: simplify gotos in page_cache_sync_ra()
Date: Tue, 25 Jun 2024 12:19:00 +0200

Unify all conditions for initial readahead to simplify goto logic in
page_cache_sync_ra().  No functional changes.

Link: https://lkml.kernel.org/r/20240625101909.12234-10-jack@xxxxxxx
Signed-off-by: Jan Kara <jack@xxxxxxx>
Reviewed-by: Josef Bacik <josef@xxxxxxxxxxxxxx>
Tested-by: Zhang Peng <zhangpengpeng0808@xxxxxxxxx>
Cc: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/readahead.c |   26 +++++++++-----------------
 1 file changed, 9 insertions(+), 17 deletions(-)

--- a/mm/readahead.c~readahead-simplify-gotos-in-page_cache_sync_ra
+++ a/mm/readahead.c
@@ -532,20 +532,19 @@ void page_cache_sync_ra(struct readahead
 	}
 
 	max_pages = ractl_max_pages(ractl, req_count);
+	prev_index = (unsigned long long)ra->prev_pos >> PAGE_SHIFT;
 	/*
-	 * start of file or oversized read
-	 */
-	if (!index || req_count > max_pages)
-		goto initial_readahead;
-
-	/*
-	 * sequential cache miss
+	 * A start of file, oversized read, or sequential cache miss:
 	 * trivial case: (index - prev_index) == 1
 	 * unaligned reads: (index - prev_index) == 0
 	 */
-	prev_index = (unsigned long long)ra->prev_pos >> PAGE_SHIFT;
-	if (index - prev_index <= 1UL)
-		goto initial_readahead;
+	if (!index || req_count > max_pages || index - prev_index <= 1UL) {
+		ra->start = index;
+		ra->size = get_init_ra_size(req_count, max_pages);
+		ra->async_size = ra->size > req_count ? ra->size - req_count :
+				 ra->size >> 1;
+		goto readit;
+	}
 
 	/*
 	 * Query the page cache and look for the traces(cached history pages)
@@ -572,13 +571,6 @@ void page_cache_sync_ra(struct readahead
 	ra->start = index;
 	ra->size = min(contig_count + req_count, max_pages);
 	ra->async_size = 1;
-	goto readit;
-
-initial_readahead:
-	ra->start = index;
-	ra->size = get_init_ra_size(req_count, max_pages);
-	ra->async_size = ra->size > req_count ? ra->size - req_count :
-			 ra->size >> 1;
 readit:
 	ractl->_index = ra->start;
 	page_cache_ra_order(ractl, ra, 0);
_

Patches currently in -mm which might be from jack@xxxxxxx are