Hi all,

On Thu, 7 Mar 2024 12:36:19 +1100 Stephen Rothwell <sfr@xxxxxxxxxxxxxxxx> wrote:
> 
> Today's linux-next merge of the bpf-next tree got a conflict in:
> 
>   mm/vmalloc.c
> 
> between commit:
> 
>   8e1d743f2c26 ("mm: vmalloc: support multiple nodes in vmallocinfo")
> 
> from the mm-stable tree and commit:
> 
>   e6f798225a31 ("mm: Introduce VM_SPARSE kind and vm_area_[un]map_pages().")
> 
> from the bpf-next tree.
> 
> I fixed it up (I think - see below) and can carry the fix as necessary.
> This is now fixed as far as linux-next is concerned, but any non trivial
> conflicts should be mentioned to your upstream maintainer when your tree
> is submitted for merging.  You may also want to consider cooperating
> with the maintainer of the conflicting tree to minimise any particularly
> complex conflicts.
> 
> 
> diff --cc mm/vmalloc.c
> index 25a8df497255,e5b8c70950bc..000000000000
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@@ -4755,81 -4423,70 +4820,84 @@@ static void show_numa_info(struct seq_f
>   
>   static void show_purge_info(struct seq_file *m)
>   {
>  +	struct vmap_node *vn;
>   	struct vmap_area *va;
>  +	int i;
>   
>  -	spin_lock(&purge_vmap_area_lock);
>  -	list_for_each_entry(va, &purge_vmap_area_list, list) {
>  -		seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
>  -			(void *)va->va_start, (void *)va->va_end,
>  -			va->va_end - va->va_start);
>  -	}
>  -	spin_unlock(&purge_vmap_area_lock);
>  -}
>  +	for (i = 0; i < nr_vmap_nodes; i++) {
>  +		vn = &vmap_nodes[i];
>   
>  -static int s_show(struct seq_file *m, void *p)
>  -{
>  -	struct vmap_area *va;
>  -	struct vm_struct *v;
>  -
>  -	va = list_entry(p, struct vmap_area, list);
>  -
>  -	if (!va->vm) {
>  -		if (va->flags & VMAP_RAM)
>  -			seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
>  +		spin_lock(&vn->lazy.lock);
>  +		list_for_each_entry(va, &vn->lazy.head, list) {
>  +			seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
>   				(void *)va->va_start, (void *)va->va_end,
>   				va->va_end - va->va_start);
>  -
>  -		goto final;
>  +		}
>  +		spin_unlock(&vn->lazy.lock);
>   	}
>  +}
>   
>  -	v = va->vm;
>  +static int vmalloc_info_show(struct seq_file *m, void *p)
>  +{
>  +	struct vmap_node *vn;
>  +	struct vmap_area *va;
>  +	struct vm_struct *v;
>  +	int i;
>   
>  -	seq_printf(m, "0x%pK-0x%pK %7ld",
>  -		v->addr, v->addr + v->size, v->size);
>  +	for (i = 0; i < nr_vmap_nodes; i++) {
>  +		vn = &vmap_nodes[i];
>   
>  -	if (v->caller)
>  -		seq_printf(m, " %pS", v->caller);
>  +		spin_lock(&vn->busy.lock);
>  +		list_for_each_entry(va, &vn->busy.head, list) {
>  +			if (!va->vm) {
>  +				if (va->flags & VMAP_RAM)
>  +					seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
>  +						(void *)va->va_start, (void *)va->va_end,
>  +						va->va_end - va->va_start);
>   
>  -	if (v->nr_pages)
>  -		seq_printf(m, " pages=%d", v->nr_pages);
>  +				continue;
>  +			}
>   
>  -	if (v->phys_addr)
>  -		seq_printf(m, " phys=%pa", &v->phys_addr);
>  +			v = va->vm;
>   
>  -	if (v->flags & VM_IOREMAP)
>  -		seq_puts(m, " ioremap");
>  +			seq_printf(m, "0x%pK-0x%pK %7ld",
>  +				v->addr, v->addr + v->size, v->size);
>   
>  -	if (v->flags & VM_SPARSE)
>  -		seq_puts(m, " sparse");
>  +			if (v->caller)
>  +				seq_printf(m, " %pS", v->caller);
>   
>  -	if (v->flags & VM_ALLOC)
>  -		seq_puts(m, " vmalloc");
>  +			if (v->nr_pages)
>  +				seq_printf(m, " pages=%d", v->nr_pages);
>   
>  -	if (v->flags & VM_MAP)
>  -		seq_puts(m, " vmap");
>  +			if (v->phys_addr)
>  +				seq_printf(m, " phys=%pa", &v->phys_addr);
>   
>  -	if (v->flags & VM_USERMAP)
>  -		seq_puts(m, " user");
>  +			if (v->flags & VM_IOREMAP)
>  +				seq_puts(m, " ioremap");
>   
>  -	if (v->flags & VM_DMA_COHERENT)
>  -		seq_puts(m, " dma-coherent");
> ++			if (v->flags & VM_SPARSE)
> ++				seq_puts(m, " sparse");
>  +
>  -	if (is_vmalloc_addr(v->pages))
>  -		seq_puts(m, " vpages");
>  +			if (v->flags & VM_ALLOC)
>  +				seq_puts(m, " vmalloc");
>   
>  -	show_numa_info(m, v);
>  -	seq_putc(m, '\n');
>  +			if (v->flags & VM_MAP)
>  +				seq_puts(m, " vmap");
>  +
>  +			if (v->flags & VM_USERMAP)
>  +				seq_puts(m, " user");
>  +
>  +			if (v->flags & VM_DMA_COHERENT)
>  +				seq_puts(m, " dma-coherent");
>  +
>  +			if (is_vmalloc_addr(v->pages))
>  +				seq_puts(m, " vpages");
>  +
>  +			show_numa_info(m, v);
>  +			seq_putc(m, '\n');
>  +		}
>  +		spin_unlock(&vn->busy.lock);
>  +	}
>   
>   	/*
>   	 * As a final step, dump "unpurged" areas.

This is now a conflict between the net-next tree and the mm-stable tree.
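
For anyone who finds the combined diff above hard to follow, a consolidated
sketch of the resolved hunk is below.  It is reconstructed from the diff, not
copied from the final upstream tree: the per-node walk of the busy lists
(vmap_nodes, nr_vmap_nodes, vn->busy) comes from the mm-stable series, the
" sparse" report for the new VM_SPARSE kind comes from the bpf-next commit,
and the remaining flag checks are elided for brevity.

static int vmalloc_info_show(struct seq_file *m, void *p)
{
	struct vmap_node *vn;
	struct vmap_area *va;
	struct vm_struct *v;
	int i;

	/* mm-stable: vmallocinfo now walks the per-node busy lists. */
	for (i = 0; i < nr_vmap_nodes; i++) {
		vn = &vmap_nodes[i];

		spin_lock(&vn->busy.lock);
		list_for_each_entry(va, &vn->busy.head, list) {
			if (!va->vm)
				continue;	/* VMAP_RAM handling elided */

			v = va->vm;
			seq_printf(m, "0x%pK-0x%pK %7ld",
				v->addr, v->addr + v->size, v->size);

			if (v->flags & VM_IOREMAP)
				seq_puts(m, " ioremap");

			/* bpf-next: report the new VM_SPARSE kind here. */
			if (v->flags & VM_SPARSE)
				seq_puts(m, " sparse");

			if (v->flags & VM_ALLOC)
				seq_puts(m, " vmalloc");

			/* remaining flags, NUMA info and '\n' as in the diff */
		}
		spin_unlock(&vn->busy.lock);
	}

	/* dump of "unpurged" areas follows, as before */
	return 0;
}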

-- 
Cheers,
Stephen Rothwell