This patch cleans up the shmem_find_get_pages_and_swap() interface: a) Pass struct pagevec* instead of ->pages b) Check if nr_pages is greater than PAGEVEC_SIZE inside the function c) Return the result via ->nr instead of using the return value Compile-tested only. Cc: Hugh Dickins <hughd@xxxxxxxxxx> Signed-off-by: WANG Cong <xiyou.wangcong@xxxxxxxxx> --- diff --git a/mm/shmem.c b/mm/shmem.c index 269d049..c4e08e2 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -312,15 +312,19 @@ static void shmem_delete_from_page_cache(struct page *page, void *radswap) /* * Like find_get_pages, but collecting swap entries as well as pages. */ -static unsigned shmem_find_get_pages_and_swap(struct address_space *mapping, +static void shmem_find_get_pages_and_swap(struct address_space *mapping, pgoff_t start, unsigned int nr_pages, - struct page **pages, pgoff_t *indices) + struct pagevec *pvec, pgoff_t *indices) { unsigned int i; unsigned int ret; unsigned int nr_found; + struct page **pages = pvec->pages; rcu_read_lock(); + + if (nr_pages > PAGEVEC_SIZE) + nr_pages = PAGEVEC_SIZE; restart: nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree, (void ***)pages, indices, start, nr_pages); @@ -357,7 +361,7 @@ export: if (unlikely(!ret && nr_found)) goto restart; rcu_read_unlock(); - return ret; + pvec->nr = ret; } /* @@ -409,8 +413,8 @@ void shmem_unlock_mapping(struct address_space *mapping) * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it * has finished, if it hits a row of PAGEVEC_SIZE swap entries. 
*/ - pvec.nr = shmem_find_get_pages_and_swap(mapping, index, - PAGEVEC_SIZE, pvec.pages, indices); + shmem_find_get_pages_and_swap(mapping, index, + PAGEVEC_SIZE, &pvec, indices); if (!pvec.nr) break; index = indices[pvec.nr - 1] + 1; @@ -442,9 +446,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) pagevec_init(&pvec, 0); index = start; while (index <= end) { - pvec.nr = shmem_find_get_pages_and_swap(mapping, index, - min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1, - pvec.pages, indices); + shmem_find_get_pages_and_swap(mapping, index, + end - index + 1, &pvec, indices); if (!pvec.nr) break; mem_cgroup_uncharge_start(); @@ -490,9 +493,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) index = start; for ( ; ; ) { cond_resched(); - pvec.nr = shmem_find_get_pages_and_swap(mapping, index, - min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1, - pvec.pages, indices); + shmem_find_get_pages_and_swap(mapping, index, + end - index + 1, &pvec, indices); if (!pvec.nr) { if (index == start) break; -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@xxxxxxxxx. For more info on Linux MM, see: http://www.linux-mm.org/ . Fight unfair telecom internet charges in Canada: sign http://stopthemeter.ca/ Don't email: <a href="mailto:dont@xxxxxxxxx">email@xxxxxxxxx</a>