Re: [RFC PATCH 7/7] mm: zswap: Use acomp virtual address interface

On (25/03/04 14:10), Herbert Xu wrote:
[..]
> +static void zs_map_object_sg(struct zs_pool *pool, unsigned long handle,
> +			     enum zs_mapmode mm, struct scatterlist sg[2])
> +{
> +	int handle_size = ZS_HANDLE_SIZE;
> +	struct zspage *zspage;
> +	struct zpdesc *zpdesc;
> +	unsigned long obj, off;
> +	unsigned int obj_idx;
> +
> +	struct size_class *class;
> +	struct zpdesc *zpdescs[2];
> +
> +	/* Guarantees we can safely get the zspage from the handle */
> +	read_lock(&pool->migrate_lock);
> +	obj = handle_to_obj(handle);
> +	obj_to_location(obj, &zpdesc, &obj_idx);
> +	zspage = get_zspage(zpdesc);
> +
> +	/*
> +	 * Migration cannot move any zpages in this zspage. Here, class->lock
> +	 * is too heavy since callers would take some time until they call
> +	 * the zs_unmap_object API, so delegate the locking from class to
> +	 * zspage, which is a smaller granularity.
> +	 */
> +	migrate_read_lock(zspage);
> +	read_unlock(&pool->migrate_lock);
> +
> +	class = zspage_class(pool, zspage);
> +	off = offset_in_page(class->size * obj_idx);
> +
> +	if (unlikely(ZsHugePage(zspage)))
> +		handle_size = 0;
> +
> +	if (off + class->size <= PAGE_SIZE) {
> +		/* this object is contained entirely within a page */
> +		sg_init_table(sg, 1);
> +		sg_set_page(sg, zpdesc_page(zpdesc), class->size - handle_size,
> +			    off + handle_size);
> +		return;
> +	}
> +
> +	/* this object spans two pages */
> +	zpdescs[0] = zpdesc;
> +	zpdescs[1] = get_next_zpdesc(zpdesc);
> +	BUG_ON(!zpdescs[1]);
> +
> +	sg_init_table(sg, 2);
> +	sg_set_page(sg, zpdesc_page(zpdescs[0]),
> +		    PAGE_SIZE - off - handle_size, off + handle_size);
> +	sg_set_page(&sg[1], zpdesc_page(zpdescs[1]),
> +		    class->size - (PAGE_SIZE - off - handle_size), 0);
> +}

[..]

>  static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
>  {
>  	struct zpool *zpool = entry->pool->zpool;
> -	struct scatterlist input, output;
>  	struct crypto_acomp_ctx *acomp_ctx;
> -	u8 *src;
> +	struct scatterlist input[2];
> +	struct scatterlist output;
>  
>  	acomp_ctx = acomp_ctx_get_cpu_lock(entry->pool);
> -	src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
> -	/*
> -	 * If zpool_map_handle is atomic, we cannot reliably utilize its mapped buffer
> -	 * to do crypto_acomp_decompress() which might sleep. In such cases, we must
> -	 * resort to copying the buffer to a temporary one.
> -	 * Meanwhile, zpool_map_handle() might return a non-linearly mapped buffer,
> -	 * such as a kmap address of high memory or even a vmap address.
> -	 * However, sg_init_one is only equipped to handle linearly mapped low memory.
> -	 * In such cases, we must also copy the buffer to a temporary, lowmem one.
> -	 */
> -	if ((acomp_ctx->is_sleepable && !zpool_can_sleep_mapped(zpool)) ||
> -	    !virt_addr_valid(src)) {
> -		memcpy(acomp_ctx->buffer, src, entry->length);
> -		src = acomp_ctx->buffer;
> -		zpool_unmap_handle(zpool, entry->handle);
> -	}
> -
> -	sg_init_one(&input, src, entry->length);
> +	zpool_map_sg(zpool, entry->handle, ZPOOL_MM_RO, input);
>  	sg_init_table(&output, 1);
>  	sg_set_folio(&output, folio, PAGE_SIZE, 0);
> -	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
> +	acomp_request_set_params(acomp_ctx->req, input, &output, entry->length, PAGE_SIZE);
>  	BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
>  	BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);

And at some point you do a memcpy() from the SG list to a local buffer?
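
Something like this is what I have in mind - a minimal sketch, assuming
the per-CPU acomp_ctx->buffer from the removed code path is still
available (zswap_sg_to_linear() is a made-up helper, not part of the
patch):

	#include <linux/scatterlist.h>

	/*
	 * Linearize the (at most two entry) input SG list into the
	 * per-CPU buffer - roughly what the old !virt_addr_valid()
	 * fallback did with an explicit memcpy().
	 */
	static void zswap_sg_to_linear(struct crypto_acomp_ctx *acomp_ctx,
				       struct scatterlist *input,
				       unsigned int len)
	{
		sg_copy_to_buffer(input, sg_nents(input),
				  acomp_ctx->buffer, len);
	}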

zsmalloc map() has a shortcut: for objects that fit in one physical
page (which includes huge incompressible PAGE_SIZE-ed objects),
zsmalloc kmaps the physical page in question and returns a pointer
into that mapping.
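
Roughly, paraphrasing the zs_map_object() fast path from memory (so the
details below may not match the current source exactly):

	if (off + class->size <= PAGE_SIZE) {
		/* object fits in one page: just kmap it, no copy */
		area->vm_addr = kmap_local_page(zpdesc_page(zpdesc));
		ret = area->vm_addr + off;
	} else {
		/* only objects spanning two pages are copied out */
		ret = __zs_map_object(area, zpdescs, off, class->size);
	}

With zpool_map_sg() the read side seems to always go through the SG
path; I assume the crypto layer's SG walk kmaps the pages itself, so
the question is mainly whether an extra copy sneaks in anywhere.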



