[PATCH] crypto: skcipher - Use restrict rather than hand-rolling accesses

Rather than accessing 'alg' directly to work around the aliasing
issue that leads to unnecessary reloads, use the __restrict keyword
to tell the compiler explicitly that there is no aliasing.

This generates equivalent, if not superior, code on x86 with gcc 12.
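
As a rough illustration (toy types, not from the patch): helpers such
as crypto_skcipher_blocksize() re-derive the algorithm pointer on every
call, and since the kernel builds with -fno-strict-aliasing, without
__restrict each store through 'walk' could alias that pointer and force
a reload.  Something like:

struct alg  { unsigned int blocksize, ivsize; };
struct tfm  { struct alg *alg; };
struct walk { unsigned int blocksize, ivsize; };

static inline struct alg *tfm_alg(struct tfm *tfm)
{
	return tfm->alg;	/* one pointer load per call */
}

static void walk_init(struct walk *__restrict walk, struct tfm *tfm)
{
	/*
	 * Without __restrict the store to walk->blocksize may alias
	 * tfm->alg, so the second tfm_alg() call must reload the
	 * pointer.  With __restrict on 'walk' the compiler may merge
	 * the two loads into one.
	 */
	walk->blocksize = tfm_alg(tfm)->blocksize;
	walk->ivsize = tfm_alg(tfm)->ivsize;
}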

Note that in skcipher_walk_virt the alg assignment is moved after
might_sleep_if, because that call acts as a compiler barrier and
would otherwise force a reload.
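
A rough sketch of the barrier effect, reusing the toy types above
(opaque_call() is a hypothetical stand-in for might_sleep_if(), which
the compiler cannot see through):

extern void opaque_call(void);	/* memory clobber, like might_sleep_if */

static void walk_init2(struct walk *__restrict walk, struct tfm *tfm)
{
	struct alg *alg;

	/*
	 * Had 'alg' been loaded before opaque_call(), it would have
	 * to be spilled across the call or reloaded after it; loading
	 * it afterwards avoids both.
	 */
	opaque_call();

	alg = tfm_alg(tfm);
	walk->blocksize = alg->blocksize;
	walk->ivsize = alg->ivsize;
}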

Signed-off-by: Herbert Xu <herbert@xxxxxxxxxxxxxxxxxxx>

diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index cdf0f11c7eaa..561aa5cc4ffa 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -306,14 +306,16 @@ static int skcipher_walk_first(struct skcipher_walk *walk)
 	return skcipher_walk_next(walk);
 }
 
-int skcipher_walk_virt(struct skcipher_walk *walk,
-		       struct skcipher_request *req, bool atomic)
+int skcipher_walk_virt(struct skcipher_walk *__restrict walk,
+		       struct skcipher_request *__restrict req, bool atomic)
 {
-	const struct skcipher_alg *alg =
-		crypto_skcipher_alg(crypto_skcipher_reqtfm(req));
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct skcipher_alg *alg;
 
 	might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
 
+	alg = crypto_skcipher_alg(tfm);
+
 	walk->total = req->cryptlen;
 	walk->nbytes = 0;
 	walk->iv = req->iv;
@@ -329,14 +331,9 @@ int skcipher_walk_virt(struct skcipher_walk *walk,
 	scatterwalk_start(&walk->in, req->src);
 	scatterwalk_start(&walk->out, req->dst);
 
-	/*
-	 * Accessing 'alg' directly generates better code than using the
-	 * crypto_skcipher_blocksize() and similar helper functions here, as it
-	 * prevents the algorithm pointer from being repeatedly reloaded.
-	 */
-	walk->blocksize = alg->base.cra_blocksize;
-	walk->ivsize = alg->co.ivsize;
-	walk->alignmask = alg->base.cra_alignmask;
+	walk->blocksize = crypto_skcipher_blocksize(tfm);
+	walk->ivsize = crypto_skcipher_ivsize(tfm);
+	walk->alignmask = crypto_skcipher_alignmask(tfm);
 
 	if (alg->co.base.cra_type != &crypto_skcipher_type)
 		walk->stride = alg->co.chunksize;
@@ -347,10 +344,11 @@ int skcipher_walk_virt(struct skcipher_walk *walk,
 }
 EXPORT_SYMBOL_GPL(skcipher_walk_virt);
 
-static int skcipher_walk_aead_common(struct skcipher_walk *walk,
-				     struct aead_request *req, bool atomic)
+static int skcipher_walk_aead_common(struct skcipher_walk *__restrict walk,
+				     struct aead_request *__restrict req,
+				     bool atomic)
 {
-	const struct aead_alg *alg = crypto_aead_alg(crypto_aead_reqtfm(req));
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 
 	walk->nbytes = 0;
 	walk->iv = req->iv;
@@ -372,21 +370,17 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
 	scatterwalk_done(&walk->in, 0, walk->total);
 	scatterwalk_done(&walk->out, 0, walk->total);
 
-	/*
-	 * Accessing 'alg' directly generates better code than using the
-	 * crypto_aead_blocksize() and similar helper functions here, as it
-	 * prevents the algorithm pointer from being repeatedly reloaded.
-	 */
-	walk->blocksize = alg->base.cra_blocksize;
-	walk->stride = alg->chunksize;
-	walk->ivsize = alg->ivsize;
-	walk->alignmask = alg->base.cra_alignmask;
+	walk->blocksize = crypto_aead_blocksize(tfm);
+	walk->stride = crypto_aead_chunksize(tfm);
+	walk->ivsize = crypto_aead_ivsize(tfm);
+	walk->alignmask = crypto_aead_alignmask(tfm);
 
 	return skcipher_walk_first(walk);
 }
 
-int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
-			       struct aead_request *req, bool atomic)
+int skcipher_walk_aead_encrypt(struct skcipher_walk *__restrict walk,
+			       struct aead_request *__restrict req,
+			       bool atomic)
 {
 	walk->total = req->cryptlen;
 
@@ -394,8 +388,9 @@ int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
 }
 EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);
 
-int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
-			       struct aead_request *req, bool atomic)
+int skcipher_walk_aead_decrypt(struct skcipher_walk *__restrict walk,
+			       struct aead_request *__restrict req,
+			       bool atomic)
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index 4f49621d3eb6..d6ae7a86fed2 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -197,13 +197,15 @@ int lskcipher_register_instance(struct crypto_template *tmpl,
 				struct lskcipher_instance *inst);
 
 int skcipher_walk_done(struct skcipher_walk *walk, int res);
-int skcipher_walk_virt(struct skcipher_walk *walk,
-		       struct skcipher_request *req,
+int skcipher_walk_virt(struct skcipher_walk *__restrict walk,
+		       struct skcipher_request *__restrict req,
 		       bool atomic);
-int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
-			       struct aead_request *req, bool atomic);
-int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
-			       struct aead_request *req, bool atomic);
+int skcipher_walk_aead_encrypt(struct skcipher_walk *__restrict walk,
+			       struct aead_request *__restrict req,
+			       bool atomic);
+int skcipher_walk_aead_decrypt(struct skcipher_walk *__restrict walk,
+			       struct aead_request *__restrict req,
+			       bool atomic);
 
 static inline void skcipher_walk_abort(struct skcipher_walk *walk)
 {
-- 
Email: Herbert Xu <herbert@xxxxxxxxxxxxxxxxxxx>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt