Re: linux-next: manual merge of the bpf-next tree with the mm-stable tree

> Hi all,
> 
> On Wed, 13 Mar 2024 10:51:17 +1100 Stephen Rothwell <sfr@xxxxxxxxxxxxxxxx> wrote:
> >
> > On Thu, 7 Mar 2024 12:36:19 +1100 Stephen Rothwell <sfr@xxxxxxxxxxxxxxxx> wrote:
> > > 
> > > Today's linux-next merge of the bpf-next tree got a conflict in:
> > > 
> > >   mm/vmalloc.c
> > > 
> > > between commit:
> > > 
> > >   8e1d743f2c26 ("mm: vmalloc: support multiple nodes in vmallocinfo")
> > > 
> > > from the mm-stable tree and commit:
> > > 
> > >   e6f798225a31 ("mm: Introduce VM_SPARSE kind and vm_area_[un]map_pages().")
> > > 
> > > from the bpf-next tree.
> > > 
> > > I fixed it up (I think - see below) and can carry the fix as necessary.
> > > This is now fixed as far as linux-next is concerned, but any non-trivial
> > > conflicts should be mentioned to your upstream maintainer when your tree
> > > is submitted for merging.  You may also want to consider cooperating
> > > with the maintainer of the conflicting tree to minimise any particularly
> > > complex conflicts.
> > > 
> > > 
> > > diff --cc mm/vmalloc.c
> > > index 25a8df497255,e5b8c70950bc..000000000000
> > > --- a/mm/vmalloc.c
> > > +++ b/mm/vmalloc.c
> > > @@@ -4755,81 -4423,70 +4820,84 @@@ static void show_numa_info(struct seq_f
> > >   
> > >   static void show_purge_info(struct seq_file *m)
> > >   {
> > >  +	struct vmap_node *vn;
> > >   	struct vmap_area *va;
> > >  +	int i;
> > >   
> > >  -	spin_lock(&purge_vmap_area_lock);
> > >  -	list_for_each_entry(va, &purge_vmap_area_list, list) {
> > >  -		seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
> > >  -			(void *)va->va_start, (void *)va->va_end,
> > >  -			va->va_end - va->va_start);
> > >  -	}
> > >  -	spin_unlock(&purge_vmap_area_lock);
> > >  -}
> > >  +	for (i = 0; i < nr_vmap_nodes; i++) {
> > >  +		vn = &vmap_nodes[i];
> > >   
> > >  -static int s_show(struct seq_file *m, void *p)
> > >  -{
> > >  -	struct vmap_area *va;
> > >  -	struct vm_struct *v;
> > >  -
> > >  -	va = list_entry(p, struct vmap_area, list);
> > >  -
> > >  -	if (!va->vm) {
> > >  -		if (va->flags & VMAP_RAM)
> > >  -			seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
> > >  +		spin_lock(&vn->lazy.lock);
> > >  +		list_for_each_entry(va, &vn->lazy.head, list) {
> > >  +			seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
> > >   				(void *)va->va_start, (void *)va->va_end,
> > >   				va->va_end - va->va_start);
> > >  -
> > >  -		goto final;
> > >  +		}
> > >  +		spin_unlock(&vn->lazy.lock);
> > >   	}
> > >  +}
> > >   
> > >  -	v = va->vm;
> > >  +static int vmalloc_info_show(struct seq_file *m, void *p)
> > >  +{
> > >  +	struct vmap_node *vn;
> > >  +	struct vmap_area *va;
> > >  +	struct vm_struct *v;
> > >  +	int i;
> > >   
> > >  -	seq_printf(m, "0x%pK-0x%pK %7ld",
> > >  -		v->addr, v->addr + v->size, v->size);
> > >  +	for (i = 0; i < nr_vmap_nodes; i++) {
> > >  +		vn = &vmap_nodes[i];
> > >   
> > >  -	if (v->caller)
> > >  -		seq_printf(m, " %pS", v->caller);
> > >  +		spin_lock(&vn->busy.lock);
> > >  +		list_for_each_entry(va, &vn->busy.head, list) {
> > >  +			if (!va->vm) {
> > >  +				if (va->flags & VMAP_RAM)
> > >  +					seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
> > >  +						(void *)va->va_start, (void *)va->va_end,
> > >  +						va->va_end - va->va_start);
> > >   
> > >  -	if (v->nr_pages)
> > >  -		seq_printf(m, " pages=%d", v->nr_pages);
> > >  +				continue;
> > >  +			}
> > >   
> > >  -	if (v->phys_addr)
> > >  -		seq_printf(m, " phys=%pa", &v->phys_addr);
> > >  +			v = va->vm;
> > >   
> > >  -	if (v->flags & VM_IOREMAP)
> > >  -		seq_puts(m, " ioremap");
> > >  +			seq_printf(m, "0x%pK-0x%pK %7ld",
> > >  +				v->addr, v->addr + v->size, v->size);
> > >   
> > >  -	if (v->flags & VM_SPARSE)
> > >  -		seq_puts(m, " sparse");
> > >  +			if (v->caller)
> > >  +				seq_printf(m, " %pS", v->caller);
> > >   
> > >  -	if (v->flags & VM_ALLOC)
> > >  -		seq_puts(m, " vmalloc");
> > >  +			if (v->nr_pages)
> > >  +				seq_printf(m, " pages=%d", v->nr_pages);
> > >   
> > >  -	if (v->flags & VM_MAP)
> > >  -		seq_puts(m, " vmap");
> > >  +			if (v->phys_addr)
> > >  +				seq_printf(m, " phys=%pa", &v->phys_addr);
> > >   
> > >  -	if (v->flags & VM_USERMAP)
> > >  -		seq_puts(m, " user");
> > >  +			if (v->flags & VM_IOREMAP)
> > >  +				seq_puts(m, " ioremap");
> > >   
> > >  -	if (v->flags & VM_DMA_COHERENT)
> > >  -		seq_puts(m, " dma-coherent");
> > > ++			if (v->flags & VM_SPARSE)
> > > ++				seq_puts(m, " sparse");
> > > + 
> > >  -	if (is_vmalloc_addr(v->pages))
> > >  -		seq_puts(m, " vpages");
> > >  +			if (v->flags & VM_ALLOC)
> > >  +				seq_puts(m, " vmalloc");
> > >   
> > >  -	show_numa_info(m, v);
> > >  -	seq_putc(m, '\n');
> > >  +			if (v->flags & VM_MAP)
> > >  +				seq_puts(m, " vmap");
> > >  +
> > >  +			if (v->flags & VM_USERMAP)
> > >  +				seq_puts(m, " user");
> > >  +
> > >  +			if (v->flags & VM_DMA_COHERENT)
> > >  +				seq_puts(m, " dma-coherent");
> > >  +
> > >  +			if (is_vmalloc_addr(v->pages))
> > >  +				seq_puts(m, " vpages");
> > >  +
> > >  +			show_numa_info(m, v);
> > >  +			seq_putc(m, '\n');
> > >  +		}
> > >  +		spin_unlock(&vn->busy.lock);
> > >  +	}
> > >   
> > >   	/*
> > >   	 * As a final step, dump "unpurged" areas.  
> > 
> > This is now a conflict between the net-next tree and the mm-stable tree.
> 
>  ... and now a conflict between the mm-stable tree and Linus' tree.
>
If you need some help with resolving the conflicts, I can help. To me,
the problem looks like this:

<snip>
commit d7bca9199a27b8690ae1c71dc11f825154af7234
Author: Alexei Starovoitov <ast@xxxxxxxxxx>
Date:   Fri Mar 8 09:12:54 2024 -0800

    mm: Introduce vmap_page_range() to map pages in PCI address space

commit e6f798225a31485e47a6e4f6aa07ee9fdf80c2cb
Author: Alexei Starovoitov <ast@xxxxxxxxxx>
Date:   Mon Mar 4 19:05:16 2024 -0800

    mm: Introduce VM_SPARSE kind and vm_area_[un]map_pages().

commit 3e49a866c9dcbd8173e4f3e491293619a9e81fa4
Author: Alexei Starovoitov <ast@xxxxxxxxxx>
Date:   Mon Mar 4 19:05:15 2024 -0800

    mm: Enforce VM_IOREMAP flag and range in ioremap_page_range.
<snip>

Those three patches were not based on linux-next and are currently in
Linus' tree (bypassing the mm tree?), whereas the work below:

mm: vmalloc: refactor vmalloc_dump_obj() function
mm: vmalloc: improve description of vmap node layer
mm: vmalloc: add a shrinker to drain vmap pools
mm: vmalloc: set nr_nodes based on CPUs in a system
mm: vmalloc: support multiple nodes in vmallocinfo
mm: vmalloc: support multiple nodes in vread_iter
mm: vmalloc: add a scan area of VA only once
mm: vmalloc: offload free_vmap_area_lock lock
mm: vmalloc: remove global purge_vmap_area_root rb-tree
mm/vmalloc: remove vmap_area_list
mm: vmalloc: remove global vmap_area_root rb-tree
mm: vmalloc: move vmap_init_free_space() down in vmalloc.c
mm: vmalloc: rename adjust_va_to_fit_type() function
mm: vmalloc: add va_alloc() helper

should now be rebased onto Alexei Starovoitov's base in order to
resolve the small conflict.
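
In case it helps, here is a rough sketch of one possible way to do such
a rebase. The remote and branch names (linus, mm-base, mm-vmalloc) are
only placeholders for illustration; the commit ids are the ones quoted
above:

<snip>
# Placeholder names for illustration only:
#   linus       - remote tracking Linus' tree, which already contains
#                 d7bca9199a27, e6f798225a31 and 3e49a866c9dc
#   mm-base     - the commit the vmalloc series was originally based on
#   mm-vmalloc  - local branch carrying the "mm: vmalloc: ..." series

git fetch linus
git rebase --onto linus/master mm-base mm-vmalloc

# Resolve the small conflict in mm/vmalloc.c (the VM_SPARSE hunk in
# vmalloc_info_show(), as in the resolution quoted above), then:
git add mm/vmalloc.c
git rebase --continue
<snip>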

But you know better how to proceed. Just in case you need some support,
please let me know; I can help with resolving the conflicts.

--
Uladzislau Rezki



