Re: [PATCH v4 1/2] mm: vmalloc: Remove a global vmap_blocks xarray

On 03/30/23 at 09:06pm, Uladzislau Rezki (Sony) wrote:
......
> -static DEFINE_XARRAY(vmap_blocks);
> +static struct xarray *
> +addr_to_vb_xarray(unsigned long addr)

I would call it addr_to_vb_xa(), since other parts already use that
style of abbreviation.
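I.e. something like this (same body as quoted above, only the shorter
name, with the callers updated to match):

static struct xarray *
addr_to_vb_xa(unsigned long addr)
{
	int index = (addr / VMAP_BLOCK_SIZE) % num_possible_cpus();

	return &per_cpu(vmap_block_queue, index).vmap_blocks;
}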

Other than this nit, this looks great to me.

Reviewed-by: Baoquan He <bhe@xxxxxxxxxx>

> +{
> +	int index = (addr / VMAP_BLOCK_SIZE) % num_possible_cpus();
> +
> +	return &per_cpu(vmap_block_queue, index).vmap_blocks;
> +}
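
One property maybe worth a comment in the code: every address inside a
given vmap block hashes to the same xarray, so vb_free() and
vmap_ram_vread() always look in the xarray that new_vmap_block()
inserted into, no matter which CPU they run on. A quick userspace
sketch of the hashing, with stand-in constants (VMAP_BLOCK_SIZE and the
CPU count are of course different in the real code):

#include <stdio.h>

/* Stand-in values, for illustration only. */
#define VMAP_BLOCK_SIZE		(64UL * 4096)	/* 64 pages of 4K */
#define NR_CPUS_POSSIBLE	4		/* num_possible_cpus() */

static unsigned long addr_to_xa_index(unsigned long addr)
{
	/* All addresses in one vmap block map to the same xarray. */
	return (addr / VMAP_BLOCK_SIZE) % NR_CPUS_POSSIBLE;
}

int main(void)
{
	unsigned long va = 0x7f0000000000UL;	/* block-aligned base */

	printf("%lu\n", addr_to_xa_index(va));			 /* 0 */
	printf("%lu\n", addr_to_xa_index(va + 4096));		 /* same block: 0 */
	printf("%lu\n", addr_to_xa_index(va + VMAP_BLOCK_SIZE)); /* next block: 1 */
	return 0;
}
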
>  
>  /*
>   * We should probably have a fallback mechanism to allocate virtual memory
> @@ -1970,6 +2014,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
>  	struct vmap_block_queue *vbq;
>  	struct vmap_block *vb;
>  	struct vmap_area *va;
> +	struct xarray *xa;
>  	unsigned long vb_idx;
>  	int node, err;
>  	void *vaddr;
> @@ -2003,8 +2048,9 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
>  	bitmap_set(vb->used_map, 0, (1UL << order));
>  	INIT_LIST_HEAD(&vb->free_list);
>  
> +	xa = addr_to_vb_xarray(va->va_start);
>  	vb_idx = addr_to_vb_idx(va->va_start);
> -	err = xa_insert(&vmap_blocks, vb_idx, vb, gfp_mask);
> +	err = xa_insert(xa, vb_idx, vb, gfp_mask);
>  	if (err) {
>  		kfree(vb);
>  		free_vmap_area(va);
> @@ -2022,8 +2068,10 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
>  static void free_vmap_block(struct vmap_block *vb)
>  {
>  	struct vmap_block *tmp;
> +	struct xarray *xa;
>  
> -	tmp = xa_erase(&vmap_blocks, addr_to_vb_idx(vb->va->va_start));
> +	xa = addr_to_vb_xarray(vb->va->va_start);
> +	tmp = xa_erase(xa, addr_to_vb_idx(vb->va->va_start));
>  	BUG_ON(tmp != vb);
>  
>  	spin_lock(&vmap_area_lock);
> @@ -2135,6 +2183,7 @@ static void vb_free(unsigned long addr, unsigned long size)
>  	unsigned long offset;
>  	unsigned int order;
>  	struct vmap_block *vb;
> +	struct xarray *xa;
>  
>  	BUG_ON(offset_in_page(size));
>  	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
> @@ -2143,7 +2192,10 @@ static void vb_free(unsigned long addr, unsigned long size)
>  
>  	order = get_order(size);
>  	offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
> -	vb = xa_load(&vmap_blocks, addr_to_vb_idx(addr));
> +
> +	xa = addr_to_vb_xarray(addr);
> +	vb = xa_load(xa, addr_to_vb_idx(addr));
> +
>  	spin_lock(&vb->lock);
>  	bitmap_clear(vb->used_map, offset, (1UL << order));
>  	spin_unlock(&vb->lock);
> @@ -3486,6 +3538,7 @@ static void vmap_ram_vread(char *buf, char *addr, int count, unsigned long flags
>  {
>  	char *start;
>  	struct vmap_block *vb;
> +	struct xarray *xa;
>  	unsigned long offset;
>  	unsigned int rs, re, n;
>  
> @@ -3503,7 +3556,8 @@ static void vmap_ram_vread(char *buf, char *addr, int count, unsigned long flags
>  	 * Area is split into regions and tracked with vmap_block, read out
>  	 * each region and zero fill the hole between regions.
>  	 */
> -	vb = xa_load(&vmap_blocks, addr_to_vb_idx((unsigned long)addr));
> +	xa = addr_to_vb_xarray((unsigned long) addr);
> +	vb = xa_load(xa, addr_to_vb_idx((unsigned long)addr));
>  	if (!vb)
>  		goto finished;
>  
> @@ -4272,6 +4326,7 @@ void __init vmalloc_init(void)
>  		p = &per_cpu(vfree_deferred, i);
>  		init_llist_head(&p->list);
>  		INIT_WORK(&p->wq, delayed_vfree_work);
> +		xa_init(&vbq->vmap_blocks);
>  	}
>  
>  	/* Import existing vmlist entries. */
> -- 
> 2.30.2
> 
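Also, just to restate the structure for the archives: after this patch
each per-CPU queue carries its own xarray, i.e. something along the
lines of below. The struct change itself is in the context snipped
above, so the non-xarray fields here are only my assumption about the
existing layout:

struct vmap_block_queue {
	spinlock_t lock;		/* assumed existing field */
	struct list_head free;		/* assumed existing field */
	struct xarray vmap_blocks;	/* new: replaces the global xarray */
};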
