Re: [PATCH v8 13/14] mm: zswap: Allocate pool batching resources if the compressor supports batching.


On Mon, Mar 03, 2025 at 12:47:23AM -0800, Kanchana P Sridhar wrote:
> This patch adds support for the per-CPU acomp_ctx to track multiple
> compression/decompression requests and multiple compression destination
> buffers. The zswap_cpu_comp_prepare() CPU onlining code will query the
> maximum batch-size the compressor supports. If the compressor supports
> batching, it will allocate the necessary batching resources.
> 
> However, zswap does not use more than one request yet. Follow-up patches
> will actually utilize the multiple acomp_ctx requests/buffers for batch
> compression/decompression of multiple pages.
> 
> The newly added ZSWAP_MAX_BATCH_SIZE limits the amount of extra memory used
> for batching. There is a small extra memory overhead of allocating the
> "reqs" and "buffers" arrays for compressors that do not support batching.

That's two pointers per CPU (i.e. 16 bytes on x86_64), right? Please
call that out in the commit log.
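
For instance, a quick sketch of that arithmetic (assuming 8-byte
pointers), based on the two fields this patch adds to struct
crypto_acomp_ctx:

	struct acomp_req **reqs;	/* 8 bytes on x86_64 */
	u8 **buffers;			/* 8 bytes on x86_64 */
					/* = 16 bytes per CPU */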

> 
> Signed-off-by: Kanchana P Sridhar <kanchana.p.sridhar@xxxxxxxxx>
> ---
>  mm/zswap.c | 99 +++++++++++++++++++++++++++++++++++++-----------------
>  1 file changed, 69 insertions(+), 30 deletions(-)
> 
> diff --git a/mm/zswap.c b/mm/zswap.c
> index cff96df1df8b..fae59d6d5147 100644
> --- a/mm/zswap.c
> +++ b/mm/zswap.c
> @@ -78,6 +78,16 @@ static bool zswap_pool_reached_full;
>  
>  #define ZSWAP_PARAM_UNSET ""
>  
> +/*
> + * For compression batching of large folios:
> + * Maximum number of acomp compress requests that will be processed
> + * in a batch, iff the zswap compressor supports batching.
> + * This limit exists because we preallocate enough requests and buffers
> + * in the per-cpu acomp_ctx accordingly. Hence, a higher limit means higher
> + * memory usage.
> + */

That's too verbose. Let's do something like:

/* Limit the batch size to limit per-CPU memory usage for reqs and buffers */
#define ZSWAP_MAX_BATCH_SIZE 8U

> +#define ZSWAP_MAX_BATCH_SIZE 8U
> +
>  static int zswap_setup(void);
>  
>  /* Enable/disable zswap */
> @@ -143,8 +153,8 @@ bool zswap_never_enabled(void)
>  
>  struct crypto_acomp_ctx {
>  	struct crypto_acomp *acomp;
> -	struct acomp_req *req;
> -	u8 *buffer;
> +	struct acomp_req **reqs;
> +	u8 **buffers;
>  	u8 nr_reqs;
>  	struct crypto_wait wait;
>  	struct mutex mutex;
> @@ -251,13 +261,22 @@ static void __zswap_pool_empty(struct percpu_ref *ref);
>  static void acomp_ctx_dealloc(struct crypto_acomp_ctx *acomp_ctx)
>  {
>  	if (!IS_ERR_OR_NULL(acomp_ctx) && acomp_ctx->nr_reqs) {
> +		u8 i;
> +
> +		if (acomp_ctx->reqs) {
> +			for (i = 0; i < acomp_ctx->nr_reqs; ++i)
> +				if (!IS_ERR_OR_NULL(acomp_ctx->reqs[i]))

Hmm, I just realized we check IS_ERR_OR_NULL() here for the requests,
but only a NULL check in zswap_cpu_comp_prepare(). We also check
IS_ERR_OR_NULL() here for acomp, but only IS_ERR() in
zswap_cpu_comp_prepare().

This doesn't make sense. Would you be able to include a patch before
this one to make these consistent? I can also send a follow-up patch.
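
For instance, one (untested) option is to use IS_ERR_OR_NULL() on the
alloc side as well, so both sites tolerate the same set of states:

	acomp_ctx->acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0,
						   cpu_to_node(cpu));
	if (IS_ERR_OR_NULL(acomp_ctx->acomp)) {
		pr_err("could not alloc crypto acomp %s : %ld\n",
		       pool->tfm_name, PTR_ERR(acomp_ctx->acomp));
		ret = acomp_ctx->acomp ? PTR_ERR(acomp_ctx->acomp) : -ENOMEM;
		goto fail;
	}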

> +					acomp_request_free(acomp_ctx->reqs[i]);

Please add braces for the for loop here for readability, since the body
has more than one line, even if it's technically not required.
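
i.e.:

		if (acomp_ctx->reqs) {
			for (i = 0; i < acomp_ctx->nr_reqs; ++i) {
				if (!IS_ERR_OR_NULL(acomp_ctx->reqs[i]))
					acomp_request_free(acomp_ctx->reqs[i]);
			}
			kfree(acomp_ctx->reqs);
			acomp_ctx->reqs = NULL;
		}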

> +			kfree(acomp_ctx->reqs);
> +			acomp_ctx->reqs = NULL;
> +		}
>  
> -		if (!IS_ERR_OR_NULL(acomp_ctx->req))
> -			acomp_request_free(acomp_ctx->req);
> -		acomp_ctx->req = NULL;
> -
> -		kfree(acomp_ctx->buffer);
> -		acomp_ctx->buffer = NULL;
> +		if (acomp_ctx->buffers) {
> +			for (i = 0; i < acomp_ctx->nr_reqs; ++i)
> +				kfree(acomp_ctx->buffers[i]);
> +			kfree(acomp_ctx->buffers);
> +			acomp_ctx->buffers = NULL;
> +		}
>  
>  		if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
>  			crypto_free_acomp(acomp_ctx->acomp);
> @@ -271,6 +290,7 @@ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
>  	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
>  	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
>  	int ret = -ENOMEM;
> +	u8 i;
>  
>  	/*
>  	 * Just to be even more fail-safe against changes in assumptions and/or
> @@ -292,22 +312,41 @@ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
>  		goto fail;
>  	}
>  
> -	acomp_ctx->nr_reqs = 1;
> +	acomp_ctx->nr_reqs = min(ZSWAP_MAX_BATCH_SIZE,
> +				 crypto_acomp_batch_size(acomp_ctx->acomp));
>  
> -	acomp_ctx->req = acomp_request_alloc(acomp_ctx->acomp);
> -	if (!acomp_ctx->req) {
> -		pr_err("could not alloc crypto acomp_request %s\n",
> -		       pool->tfm_name);
> -		ret = -ENOMEM;
> +	acomp_ctx->reqs = kcalloc_node(acomp_ctx->nr_reqs, sizeof(struct acomp_req *),
> +				       GFP_KERNEL, cpu_to_node(cpu));
> +	if (!acomp_ctx->reqs)
>  		goto fail;
> +
> +	for (i = 0; i < acomp_ctx->nr_reqs; ++i) {
> +		acomp_ctx->reqs[i] = acomp_request_alloc(acomp_ctx->acomp);
> +		if (!acomp_ctx->reqs[i]) {
> +			pr_err("could not alloc crypto acomp_request reqs[%d] %s\n",
> +				i, pool->tfm_name);
> +			goto fail;
> +		}
>  	}
>  
> -	acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
> -	if (!acomp_ctx->buffer) {
> -		ret = -ENOMEM;
> +	acomp_ctx->buffers = kcalloc_node(acomp_ctx->nr_reqs, sizeof(u8 *),
> +					  GFP_KERNEL, cpu_to_node(cpu));
> +	if (!acomp_ctx->buffers)
>  		goto fail;
> +
> +	for (i = 0; i < acomp_ctx->nr_reqs; ++i) {
> +		acomp_ctx->buffers[i] = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL,
> +						     cpu_to_node(cpu));
> +		if (!acomp_ctx->buffers[i])
> +			goto fail;
>  	}
>  
> +	/*
> +	 * The crypto_wait is used only in fully synchronous mode, i.e., with
> +	 * scomp or the non-poll mode of acomp; hence there is only one "wait"
> +	 * per acomp_ctx, with the callback set on reqs[0], under the
> +	 * assumption that there is at least one request per acomp_ctx.
> +	 */

I am not sure I understand. Does this say that we assume that scomp or
non-poll acomp will never use batching, so having a single "wait" is
fine?

If so, this needs to be enforced at runtime, or at least warned about,
and not just mentioned in a comment, in case batching support is ever
added for these. Please clarify.
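
Maybe something like this (untested; I *think* acomp_is_async() is the
right predicate here):

	/*
	 * Only reqs[0] has its callback wired to the single wait, so a
	 * fully synchronous (wait-based) compressor must never batch.
	 */
	WARN_ON_ONCE(!acomp_is_async(acomp_ctx->acomp) &&
		     acomp_ctx->nr_reqs > 1);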

We should also probably merge the comments above crypto_init_wait() and
acomp_request_set_callback() now.
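
e.g.:

	/*
	 * The crypto_wait is used only in fully synchronous mode (scomp,
	 * or acomp without polling), so a single wait per acomp_ctx
	 * suffices: its callback is set on reqs[0], which always exists.
	 */
	crypto_init_wait(&acomp_ctx->wait);
	acomp_request_set_callback(acomp_ctx->reqs[0],
				   CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &acomp_ctx->wait);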

>  	crypto_init_wait(&acomp_ctx->wait);
>  
>  	/*



