Ryan Roberts <ryan.roberts@xxxxxxx> writes:

[...]

> diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
> index 4f559f4ddd21..39725756e6bf 100644
> --- a/mm/mmu_gather.c
> +++ b/mm/mmu_gather.c
> @@ -47,6 +47,21 @@ static bool tlb_next_batch(struct mmu_gather *tlb)
>  	return true;
>  }
>  
> +unsigned int tlb_reserve_space(struct mmu_gather *tlb, unsigned int nr)
> +{
> +	struct mmu_gather_batch *batch = tlb->active;
> +	unsigned int nr_alloc = batch->max - batch->nr;
> +
> +	while (nr_alloc < nr) {
> +		if (!tlb_next_batch(tlb))
> +			break;
> +		nr_alloc += tlb->active->max;
> +	}
> +
> +	tlb->active = batch;
> +	return nr_alloc;
> +}

Agree this addresses my previous comment nicely, so you can add:

Reviewed-by: Alistair Popple <apopple@xxxxxxxxxx>

> +
>  #ifdef CONFIG_SMP
>  static void tlb_flush_rmap_batch(struct mmu_gather_batch *batch, struct vm_area_struct *vma)
>  {
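One note for anyone following along: as I read it, the return value is only a
hint about how much contiguous batch space could be made available, since
tlb_next_batch() can fail to allocate a new batch, so a caller still has to
cope with getting back less than it asked for. A rough sketch of the usage
pattern I have in mind, with a hypothetical process_batch() standing in for
the caller's real per-page work (this is not code from the series):

/*
 * Illustrative only: queue nr_pages worth of work, falling back to an
 * intermediate flush whenever the reservation comes back short.
 * process_batch() is a hypothetical placeholder for the per-page work.
 */
static void queue_pages_batched(struct mmu_gather *tlb, unsigned int nr_pages)
{
	while (nr_pages) {
		/* Ask how much batch space can be made available. */
		unsigned int nr = tlb_reserve_space(tlb, nr_pages);

		/* Only queue what the reservation actually covers. */
		nr = min(nr, nr_pages);
		process_batch(tlb, nr);
		nr_pages -= nr;

		/* Reservation fell short: flush to free batches, retry. */
		if (nr_pages)
			tlb_flush_mmu(tlb);
	}
}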