On 2024/7/8 21:44, Takero Funaki wrote:
On Mon, Jul 8, 2024 at 12:56, Chengming Zhou <chengming.zhou@xxxxxxxxx> wrote:
comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
dlen = acomp_ctx->req->dlen;
- if (comp_ret)
+
+ /* coa_compress returns -EINVAL for errors including insufficient dlen */
+ if (comp_ret && comp_ret != -EINVAL)
goto unlock;
Seems we don't need to care about that? "comp_ret" is useless anymore.
Just:
if (comp_ret || dlen > PAGE_SIZE - 64)
dlen = PAGE_SIZE;
And remove the checks of comp_ret at the end.
We actually don't need to hold the mutex if we are just copying the folio.
Thanks.
Thanks for reviewing.
For comp_ret, can we treat all possible error codes as meaning the data is incompressible?
Maybe we still want these debug counters? I'm not sure.
With your proposal, I think we don't need to care about compression failures anymore; in all cases it's fine to just fall back to copying the folio.
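To be concrete, something like the below is what I mean. This is only a rough sketch, not code taken from your series: memcpy_from_folio() and the buf / acomp_ctx->buffer naming outside your quoted hunks are my assumptions about the surrounding zswap_compress() code.

	comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
	dlen = acomp_ctx->req->dlen;

	/* any failure or a poor ratio falls back to storing the page as-is */
	if (comp_ret || dlen > PAGE_SIZE - 64) {
		/* the compressed buffer is unused, so the mutex can be dropped early */
		mutex_unlock(&acomp_ctx->mutex);
		acomp_ctx = NULL;
		dlen = PAGE_SIZE;
	}

	/* ... zpool allocation and mapping of buf stay as they are ... */

	if (dlen == PAGE_SIZE)
		memcpy_from_folio(buf, folio, 0, PAGE_SIZE);	/* store as-is */
	else
		memcpy(buf, acomp_ctx->buffer, dlen);		/* store compressed */

So on the failure path the only decision left is to use PAGE_SIZE and copy from the folio instead of the acomp buffer.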
If we do not need to distinguish -EINVAL from the other error codes, the diff from v2 to v3 could look like this:
@@ -62,8 +62,6 @@ static u64 zswap_pool_limit_hit;
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
-/* Store failed due to compression algorithm failure */
-static u64 zswap_reject_compress_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Store failed because underlying allocator could not get memory */
@@ -1043,10 +1041,6 @@ static bool zswap_compress(struct folio *folio, struct zswap_entry *entry)
comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
dlen = acomp_ctx->req->dlen;
- /* coa_compress returns -EINVAL for errors including insufficient dlen */
- if (comp_ret && comp_ret != -EINVAL)
- goto unlock;
-
/*
* If the data cannot be compressed well, store the data as-is.
* Switching by a threshold at
@@ -1056,7 +1050,8 @@ static bool zswap_compress(struct folio *folio, struct zswap_entry *entry)
*/
if (comp_ret || dlen > PAGE_SIZE - 64) {
/* we do not use compressed result anymore */
- comp_ret = 0;
+ mutex_unlock(&acomp_ctx->mutex);
+ acomp_ctx = NULL;
dlen = PAGE_SIZE;
}
zpool = zswap_find_zpool(entry);
@@ -1083,12 +1078,11 @@ static bool zswap_compress(struct folio *folio, struct zswap_entry *entry)
unlock:
if (alloc_ret == -ENOSPC)
zswap_reject_compress_poor++;
- else if (comp_ret)
- zswap_reject_compress_fail++;
If you want to keep these debug counters, you can move these increments forward (see the sketch below).
else if (alloc_ret)
zswap_reject_alloc_fail++;
- mutex_unlock(&acomp_ctx->mutex);
+ if (acomp_ctx)
+ mutex_unlock(&acomp_ctx->mutex);
return comp_ret == 0 && alloc_ret == 0;
And here we don't need to care about comp_ret anymore.
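To illustrate moving the counter forward, here is a rough sketch only, assuming zswap_reject_compress_fail and its debugfs entry stay as they are in v2:

	if (comp_ret || dlen > PAGE_SIZE - 64) {
		if (comp_ret)
			zswap_reject_compress_fail++;	/* counted while comp_ret still matters */
		mutex_unlock(&acomp_ctx->mutex);
		acomp_ctx = NULL;
		dlen = PAGE_SIZE;
	}

	/* ... zpool allocation and store as in the diff above ... */

unlock:
	if (alloc_ret == -ENOSPC)
		zswap_reject_compress_poor++;
	else if (alloc_ret)
		zswap_reject_alloc_fail++;
	if (acomp_ctx)
		mutex_unlock(&acomp_ctx->mutex);
	return alloc_ret == 0;	/* comp_ret no longer decides success */

Thanks.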
}
@@ -1886,8 +1880,6 @@ static int zswap_debugfs_init(void)
zswap_debugfs_root, &zswap_reject_alloc_fail);
debugfs_create_u64("reject_kmemcache_fail", 0444,
zswap_debugfs_root, &zswap_reject_kmemcache_fail);
- debugfs_create_u64("reject_compress_fail", 0444,
- zswap_debugfs_root, &zswap_reject_compress_fail);
debugfs_create_u64("reject_compress_poor", 0444,
zswap_debugfs_root, &zswap_reject_compress_poor);
debugfs_create_u64("written_back_pages", 0444,