There are two places doing page free related to struct mmu_gather_batch: 1 in tlb_flush_mmu_free, where pages gathered in mmu_gather_batch list are freed; 2 in tlb_finish_mmu, where pages for the mmu_gather_batch structure (let's call it the batch page) are freed. There will be yet another place in the parallel free worker thread introduced in the following patch to free both the pages pointed to by the mmu_gather_batch list and the batch pages themselves. To avoid code duplication, add a new function named tlb_flush_mmu_free_batches for this purpose. Another reason to add this function is that after the following patch, cond_resched will need to be added at places where more than 10K pages can be freed, i.e. in tlb_flush_mmu_free and the worker function. Instead of adding cond_resched at multiple places, use a single function to reduce code duplication. There should be no functionality change. Signed-off-by: Aaron Lu <aaron.lu@xxxxxxxxx> --- mm/memory.c | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 14fc0b40f0bb..cdb2a53f251f 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -250,14 +250,25 @@ static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) __tlb_reset_range(tlb); } -static void tlb_flush_mmu_free(struct mmu_gather *tlb) +static void tlb_flush_mmu_free_batches(struct mmu_gather_batch *batch_start, + bool free_batch_page) { - struct mmu_gather_batch *batch; + struct mmu_gather_batch *batch, *next; - for (batch = &tlb->local; batch && batch->nr; batch = batch->next) { - free_pages_and_swap_cache(batch->pages, batch->nr); - batch->nr = 0; + for (batch = batch_start; batch; batch = next) { + next = batch->next; + if (batch->nr) { + free_pages_and_swap_cache(batch->pages, batch->nr); + batch->nr = 0; + } + if (free_batch_page) + free_pages((unsigned long)batch, 0); } +} + +static void tlb_flush_mmu_free(struct mmu_gather *tlb) +{ + tlb_flush_mmu_free_batches(&tlb->local, 
false); tlb->active = &tlb->local; } @@ -273,17 +284,12 @@ void tlb_flush_mmu(struct mmu_gather *tlb) */ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) { - struct mmu_gather_batch *batch, *next; - tlb_flush_mmu(tlb); /* keep the page table cache within bounds */ check_pgt_cache(); - for (batch = tlb->local.next; batch; batch = next) { - next = batch->next; - free_pages((unsigned long)batch, 0); - } + tlb_flush_mmu_free_batches(tlb->local.next, true); tlb->local.next = NULL; } -- 2.7.4 -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@xxxxxxxxx. For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href=mailto:"dont@xxxxxxxxx"> email@xxxxxxxxx </a>