[PATCH v2 1/5] crypto: Multi-buffer encryption infrastructure support

This patch adds the infrastructure needed to support a multi-buffer
encryption implementation:

a) Enhance the mcryptd daemon to support blkcipher requests (a usage
sketch of the new helpers appears after the paper link below).

b) Update the crypto configuration to include multi-buffer encryption
build support.

c) Add support for a crypto scatterwalk that can sleep during the
encryption operation.  Buffers for jobs in the data lanes may be left
half-finished while we wait for additional jobs to fill the empty lanes
before encryption starts again, so the crypto walk needs the option to
map data buffers non-atomically.  Only algorithms run from the crypto
daemon use this option, since the daemon knows it is safe to sleep and
can save and restore FPU state in the correct context (see the sketch
below).
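
A minimal sketch (not part of this patch) of how an inner multi-buffer
algorithm running from the crypto daemon might opt into the sleeping
scatterlist walk.  Only CRYPTO_TFM_SGWALK_MAY_SLEEP and the blkcipher
walk helpers come from this series; the function name and the lane
submission step are hypothetical:

#include <linux/crypto.h>
#include <crypto/algapi.h>

/* hypothetical encrypt routine; runs in the mcryptd daemon's context */
static int example_mb_encrypt(struct blkcipher_desc *desc,
			      struct scatterlist *dst,
			      struct scatterlist *src,
			      unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	/*
	 * We run in process context and the daemon saves/restores FPU
	 * state for us, so ask the walk to map pages with kmap() rather
	 * than kmap_atomic().
	 */
	desc->flags |= CRYPTO_TFM_SGWALK_MAY_SLEEP;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while (walk.nbytes) {
		/*
		 * Submit walk.src.virt.addr/walk.dst.virt.addr to a
		 * multi-buffer lane; this may sleep until enough lanes
		 * are filled or a flush fires.
		 */
		err = blkcipher_walk_done(desc, &walk, 0 /* bytes left */);
	}
	return err;
}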

For an introduction to the multi-buffer implementation, please see
http://www.intel.com/content/www/us/en/communications/communications-ia-multi-buffer-paper.html
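
For reference, a hedged sketch of how a module providing an outer
multi-buffer algorithm might use the new mcryptd ablkcipher helpers.
The inner algorithm name "__cbc-aes-example-mb" and the use of
CRYPTO_ALG_INTERNAL for type/mask are illustrative assumptions; only
mcryptd_alloc_ablkcipher(), mcryptd_ablkcipher_child() and
mcryptd_free_ablkcipher() come from this patch:

#include <linux/err.h>
#include <linux/module.h>
#include <crypto/mcryptd.h>

static struct mcryptd_ablkcipher *mcryptd_tfm;

static int __init example_mb_mod_init(void)
{
	/* ask mcryptd to wrap a (hypothetical) internal multi-buffer cipher */
	mcryptd_tfm = mcryptd_alloc_ablkcipher("__cbc-aes-example-mb",
					       CRYPTO_ALG_INTERNAL,
					       CRYPTO_ALG_INTERNAL);
	if (IS_ERR(mcryptd_tfm))
		return PTR_ERR(mcryptd_tfm);

	/*
	 * mcryptd_ablkcipher_child() would hand back the synchronous
	 * blkcipher transform that actually does the multi-buffer work.
	 */
	return 0;
}

static void __exit example_mb_mod_exit(void)
{
	mcryptd_free_ablkcipher(mcryptd_tfm);
}

module_init(example_mb_mod_init);
module_exit(example_mb_mod_exit);
MODULE_LICENSE("GPL");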

Originally-by: Chandramouli Narayanan <mouli_7982@xxxxxxxxx>
Signed-off-by: Tim Chen <tim.c.chen@xxxxxxxxxxxxxxx>
---
 crypto/Kconfig               |  16 +++
 crypto/blkcipher.c           |  29 ++++-
 crypto/mcryptd.c             | 256 ++++++++++++++++++++++++++++++++++++++++++-
 crypto/scatterwalk.c         |   7 ++
 include/crypto/algapi.h      |   1 +
 include/crypto/mcryptd.h     |  36 ++++++
 include/crypto/scatterwalk.h |   6 +
 include/linux/crypto.h       |   1 +
 8 files changed, 347 insertions(+), 5 deletions(-)

diff --git a/crypto/Kconfig b/crypto/Kconfig
index 7240821..6b51084 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -888,6 +888,22 @@ config CRYPTO_AES_NI_INTEL
 	  ECB, CBC, LRW, PCBC, XTS. The 64 bit version has additional
 	  acceleration for CTR.
 
+config CRYPTO_AES_CBC_MB
+	tristate "AES CBC algorithm (x86_64 Multi-Buffer, Experimental)"
+	depends on X86 && 64BIT
+	select CRYPTO_ABLK_HELPER
+	select CRYPTO_MCRYPTD
+	help
+	  AES CBC encryption implemented using multi-buffer technique.
+	  This algorithm computes on multiple data lanes concurrently with
+	  SIMD instructions for better throughput.  It should only be
+	  used when there is significant work to generate many separate
+	  crypto requests that keep all the data lanes filled to get
+	  the performance benefit.  If the data lanes are unfilled, a
+	  flush operation will be initiated after some delay to process
+	  the existing crypto jobs, adding some extra latency in the
+	  low-load case.
+
 config CRYPTO_AES_SPARC64
 	tristate "AES cipher algorithms (SPARC64)"
 	depends on SPARC64
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 11b9814..9fd4028 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -35,6 +35,9 @@ enum {
 	BLKCIPHER_WALK_SLOW = 1 << 1,
 	BLKCIPHER_WALK_COPY = 1 << 2,
 	BLKCIPHER_WALK_DIFF = 1 << 3,
+	/* deal with scenarios where we can sleep during the sg walk
+	 * when we process part of a request */
+	BLKCIPHER_WALK_MAY_SLEEP = 1 << 4,
 };
 
 static int blkcipher_walk_next(struct blkcipher_desc *desc,
@@ -44,22 +47,38 @@ static int blkcipher_walk_first(struct blkcipher_desc *desc,
 
 static inline void blkcipher_map_src(struct blkcipher_walk *walk)
 {
-	walk->src.virt.addr = scatterwalk_map(&walk->in);
+	/* add support for asynchronous requests which need no atomic map */
+	if (walk->flags & BLKCIPHER_WALK_MAY_SLEEP)
+		walk->src.virt.addr = scatterwalk_map_nonatomic(&walk->in);
+	else
+		walk->src.virt.addr = scatterwalk_map(&walk->in);
 }
 
 static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
 {
-	walk->dst.virt.addr = scatterwalk_map(&walk->out);
+	/* add support for asynchronous requests which need no atomic map */
+	if (walk->flags & BLKCIPHER_WALK_MAY_SLEEP)
+		walk->dst.virt.addr = scatterwalk_map_nonatomic(&walk->out);
+	else
+		walk->dst.virt.addr = scatterwalk_map(&walk->out);
 }
 
 static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
 {
-	scatterwalk_unmap(walk->src.virt.addr);
+	/* add support for asynchronous requests which need no atomic map */
+	if (walk->flags & BLKCIPHER_WALK_MAY_SLEEP)
+		scatterwalk_unmap_nonatomic(walk->src.virt.addr);
+	else
+		scatterwalk_unmap(walk->src.virt.addr);
 }
 
 static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
 {
-	scatterwalk_unmap(walk->dst.virt.addr);
+	/* add support for asynchronous requests which need no atomic map */
+	if (walk->flags & BLKCIPHER_WALK_MAY_SLEEP)
+		scatterwalk_unmap_nonatomic(walk->dst.virt.addr);
+	else
+		scatterwalk_unmap(walk->dst.virt.addr);
 }
 
 /* Get a spot of the specified length that does not straddle a page.
@@ -299,6 +318,8 @@ static inline int blkcipher_copy_iv(struct blkcipher_walk *walk)
 int blkcipher_walk_virt(struct blkcipher_desc *desc,
 			struct blkcipher_walk *walk)
 {
+	if (desc->flags & CRYPTO_TFM_SGWALK_MAY_SLEEP)
+		walk->flags |= BLKCIPHER_WALK_MAY_SLEEP;
 	walk->flags &= ~BLKCIPHER_WALK_PHYS;
 	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
 	walk->cipher_blocksize = walk->walk_blocksize;
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index fe5b495a..01f747c 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -116,8 +116,28 @@ static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
 	return err;
 }
 
+static int mcryptd_enqueue_blkcipher_request(struct mcryptd_queue *queue,
+				  struct crypto_async_request *request,
+				  struct mcryptd_blkcipher_request_ctx *rctx)
+{
+	int cpu, err;
+	struct mcryptd_cpu_queue *cpu_queue;
+
+	cpu = get_cpu();
+	cpu_queue = this_cpu_ptr(queue->cpu_queue);
+	rctx->tag.cpu = cpu;
+
+	err = crypto_enqueue_request(&cpu_queue->queue, request);
+	pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
+		 cpu, cpu_queue, request);
+	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
+	put_cpu();
+
+	return err;
+}
+
 /*
- * Try to opportunisticlly flush the partially completed jobs if
+ * Try to opportunistically flush the partially completed jobs if
  * crypto daemon is the only task running.
  */
 static void mcryptd_opportunistic_flush(void)
@@ -225,6 +245,130 @@ static inline struct mcryptd_queue *mcryptd_get_queue(struct crypto_tfm *tfm)
 	return ictx->queue;
 }
 
+static int mcryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
+				   const u8 *key, unsigned int keylen)
+{
+	struct mcryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
+	struct crypto_blkcipher *child = ctx->child;
+	int err;
+
+	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
+					  CRYPTO_TFM_REQ_MASK);
+	err = crypto_blkcipher_setkey(child, key, keylen);
+	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
+					    CRYPTO_TFM_RES_MASK);
+	return err;
+}
+
+static void mcryptd_blkcipher_crypt(struct ablkcipher_request *req,
+				   struct crypto_blkcipher *child,
+				   int err,
+				   int (*crypt)(struct blkcipher_desc *desc,
+						struct scatterlist *dst,
+						struct scatterlist *src,
+						unsigned int len))
+{
+	struct mcryptd_blkcipher_request_ctx *rctx;
+	struct blkcipher_desc desc;
+
+	rctx = ablkcipher_request_ctx(req);
+
+	if (unlikely(err == -EINPROGRESS))
+		goto out;
+
+	/* set up the blkcipher request to work on */
+	desc.tfm = child;
+	desc.info = req->info;
+	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+	rctx->desc = desc;
+
+	/*
+	 * pass addr of descriptor stored in the request context
+	 * so that the callee can get to the request context
+	 */
+	err = crypt(&rctx->desc, req->dst, req->src, req->nbytes);
+	if (err) {
+		req->base.complete = rctx->complete;
+		goto out;
+	}
+	return;
+
+out:
+	local_bh_disable();
+	rctx->complete(&req->base, err);
+	local_bh_enable();
+
+}
+
+static void mcryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
+{
+	struct mcryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
+	struct crypto_blkcipher *child = ctx->child;
+
+	mcryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
+			       crypto_blkcipher_crt(child)->encrypt);
+}
+
+static void mcryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
+{
+	struct mcryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
+	struct crypto_blkcipher *child = ctx->child;
+
+	mcryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
+			       crypto_blkcipher_crt(child)->decrypt);
+}
+
+static int mcryptd_blkcipher_enqueue(struct ablkcipher_request *req,
+				    crypto_completion_t complete)
+{
+	struct mcryptd_blkcipher_request_ctx *rctx =
+			ablkcipher_request_ctx(req);
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct mcryptd_queue *queue;
+
+	queue = mcryptd_get_queue(crypto_ablkcipher_tfm(tfm));
+	rctx->complete = req->base.complete;
+	req->base.complete = complete;
+
+	return mcryptd_enqueue_blkcipher_request(queue, &req->base, rctx);
+}
+
+static int mcryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
+{
+	return mcryptd_blkcipher_enqueue(req, mcryptd_blkcipher_encrypt);
+}
+
+static int mcryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
+{
+	return mcryptd_blkcipher_enqueue(req, mcryptd_blkcipher_decrypt);
+}
+
+static int mcryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
+{
+	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
+	struct mcryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
+	struct crypto_spawn *spawn = &ictx->spawn;
+	struct mcryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_blkcipher *cipher;
+
+	cipher = crypto_spawn_blkcipher(spawn);
+	if (IS_ERR(cipher))
+		return PTR_ERR(cipher);
+
+	ctx->child = cipher;
+	tfm->crt_ablkcipher.reqsize =
+		sizeof(struct mcryptd_blkcipher_request_ctx);
+	return 0;
+}
+
+static void mcryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
+{
+	struct mcryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_blkcipher(ctx->child);
+}
+
 static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
 				   unsigned int tail)
 {
@@ -272,6 +416,70 @@ static inline void mcryptd_check_internal(struct rtattr **tb, u32 *type,
 		*mask |= CRYPTO_ALG_INTERNAL;
 }
 
+static int mcryptd_create_blkcipher(struct crypto_template *tmpl,
+				   struct rtattr **tb,
+				   struct mcryptd_queue *queue)
+{
+	struct mcryptd_instance_ctx *ctx;
+	struct crypto_instance *inst;
+	struct crypto_alg *alg;
+	u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
+	u32 mask = CRYPTO_ALG_TYPE_MASK;
+	int err;
+
+	mcryptd_check_internal(tb, &type, &mask);
+
+	alg = crypto_get_attr_alg(tb, type, mask);
+	if (IS_ERR(alg))
+		return PTR_ERR(alg);
+
+	pr_debug("crypto: mcryptd crypto alg: %s\n", alg->cra_name);
+	inst = mcryptd_alloc_instance(alg, 0, sizeof(*ctx));
+	err = PTR_ERR(inst);
+	if (IS_ERR(inst))
+		goto out_put_alg;
+
+	ctx = crypto_instance_ctx(inst);
+	ctx->queue = queue;
+
+	err = crypto_init_spawn(&ctx->spawn, alg, inst,
+				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
+	if (err)
+		goto out_free_inst;
+
+	type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
+	if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
+		type |= CRYPTO_ALG_INTERNAL;
+	inst->alg.cra_flags = type;
+	inst->alg.cra_type = &crypto_ablkcipher_type;
+
+	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
+	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
+	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
+
+	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;
+
+	inst->alg.cra_ctxsize = sizeof(struct mcryptd_blkcipher_ctx);
+
+	inst->alg.cra_init = mcryptd_blkcipher_init_tfm;
+	inst->alg.cra_exit = mcryptd_blkcipher_exit_tfm;
+
+	inst->alg.cra_ablkcipher.setkey = mcryptd_blkcipher_setkey;
+	inst->alg.cra_ablkcipher.encrypt = mcryptd_blkcipher_encrypt_enqueue;
+	inst->alg.cra_ablkcipher.decrypt = mcryptd_blkcipher_decrypt_enqueue;
+
+	err = crypto_register_instance(tmpl, inst);
+	if (err) {
+		crypto_drop_spawn(&ctx->spawn);
+out_free_inst:
+		kfree(inst);
+	}
+
+out_put_alg:
+	crypto_mod_put(alg);
+	return err;
+}
+
 static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
 {
 	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
@@ -563,6 +771,8 @@ static int mcryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
 		return PTR_ERR(algt);
 
 	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
+	case CRYPTO_ALG_TYPE_BLKCIPHER:
+		return mcryptd_create_blkcipher(tmpl, tb, &mqueue);
 	case CRYPTO_ALG_TYPE_DIGEST:
 		return mcryptd_create_hash(tmpl, tb, &mqueue);
 	break;
@@ -577,6 +787,10 @@ static void mcryptd_free(struct crypto_instance *inst)
 	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
 
 	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
+	case CRYPTO_ALG_TYPE_BLKCIPHER:
+		crypto_drop_spawn(&ctx->spawn);
+		kfree(inst);
+		return;
 	case CRYPTO_ALG_TYPE_AHASH:
 		crypto_drop_shash(&hctx->spawn);
 		kfree(ahash_instance(inst));
@@ -594,6 +808,46 @@ static struct crypto_template mcryptd_tmpl = {
 	.module = THIS_MODULE,
 };
 
+struct mcryptd_ablkcipher *mcryptd_alloc_ablkcipher(const char *alg_name,
+						  u32 type, u32 mask)
+{
+	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
+	struct crypto_tfm *tfm;
+
+	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
+		     "mcryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
+		return ERR_PTR(-EINVAL);
+	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
+	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
+	mask &= ~CRYPTO_ALG_TYPE_MASK;
+	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
+	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
+	if (IS_ERR(tfm))
+		return ERR_CAST(tfm);
+	if (tfm->__crt_alg->cra_module != THIS_MODULE) {
+		crypto_free_tfm(tfm);
+		return ERR_PTR(-EINVAL);
+	}
+
+	return __mcryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
+}
+EXPORT_SYMBOL_GPL(mcryptd_alloc_ablkcipher);
+
+struct crypto_blkcipher *mcryptd_ablkcipher_child(
+		struct mcryptd_ablkcipher *tfm)
+{
+	struct mcryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
+
+	return ctx->child;
+}
+EXPORT_SYMBOL_GPL(mcryptd_ablkcipher_child);
+
+void mcryptd_free_ablkcipher(struct mcryptd_ablkcipher *tfm)
+{
+	crypto_free_ablkcipher(&tfm->base);
+}
+EXPORT_SYMBOL_GPL(mcryptd_free_ablkcipher);
+
 struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
 					u32 type, u32 mask)
 {
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index ea5815c..edb9ac7 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -47,6 +47,13 @@ void *scatterwalk_map(struct scatter_walk *walk)
 }
 EXPORT_SYMBOL_GPL(scatterwalk_map);
 
+void *scatterwalk_map_nonatomic(struct scatter_walk *walk)
+{
+	return kmap(scatterwalk_page(walk)) +
+	       offset_in_page(walk->offset);
+}
+EXPORT_SYMBOL_GPL(scatterwalk_map_nonatomic);
+
 static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
 				 unsigned int more)
 {
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index c9fe145..4c5633f 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -301,6 +301,7 @@ static inline void blkcipher_walk_init(struct blkcipher_walk *walk,
 	walk->in.sg = src;
 	walk->out.sg = dst;
 	walk->total = nbytes;
+	walk->flags = 0;
 }
 
 static inline void ablkcipher_walk_init(struct ablkcipher_walk *walk,
diff --git a/include/crypto/mcryptd.h b/include/crypto/mcryptd.h
index c23ee1f..29f85bd 100644
--- a/include/crypto/mcryptd.h
+++ b/include/crypto/mcryptd.h
@@ -13,6 +13,7 @@
 #include <linux/crypto.h>
 #include <linux/kernel.h>
 #include <crypto/hash.h>
+#include <crypto/b128ops.h>
 
 struct mcryptd_ahash {
 	struct crypto_ahash base;
@@ -95,6 +96,41 @@ struct mcryptd_alg_state {
 	unsigned long (*flusher)(struct mcryptd_alg_cstate *cstate);
 };
 
+struct mcryptd_ablkcipher {
+	struct crypto_ablkcipher base;
+};
+
+static inline struct mcryptd_ablkcipher *__mcryptd_ablkcipher_cast(
+	struct crypto_ablkcipher *tfm)
+{
+	return (struct mcryptd_ablkcipher *)tfm;
+}
+
+/* alg_name should be the algorithm to be wrapped by mcryptd */
+struct mcryptd_ablkcipher *mcryptd_alloc_ablkcipher(const char *alg_name,
+						  u32 type, u32 mask);
+struct crypto_blkcipher *mcryptd_ablkcipher_child(
+	struct mcryptd_ablkcipher *tfm);
+void mcryptd_free_ablkcipher(struct mcryptd_ablkcipher *tfm);
+
+struct mcryptd_blkcipher_ctx {
+	struct crypto_blkcipher *child;
+	struct mcryptd_alg_state *alg_state;
+};
+
+struct mcryptd_blkcipher_request_ctx {
+	struct list_head waiter;
+	crypto_completion_t complete;
+	struct mcryptd_tag tag;
+	struct blkcipher_walk walk;
+	u8 flag;
+	int nbytes;
+	int error;
+	struct blkcipher_desc desc;
+	void *job;
+	u128 seq_iv;	/* running iv of a sequence */
+};
+
 /* return delay in jiffies from current time */
 static inline unsigned long get_delay(unsigned long t)
 {
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 35f99b6..9e66ecf 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -83,10 +83,16 @@ static inline void scatterwalk_unmap(void *vaddr)
 	kunmap_atomic(vaddr);
 }
 
+static inline void scatterwalk_unmap_nonatomic(void *vaddr)
+{
+	kunmap(vaddr);
+}
+
 void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg);
 void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
 			    size_t nbytes, int out);
 void *scatterwalk_map(struct scatter_walk *walk);
+void *scatterwalk_map_nonatomic(struct scatter_walk *walk);
 void scatterwalk_done(struct scatter_walk *walk, int out, int more);
 
 void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index e71cb70..deea374 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -115,6 +115,7 @@
 #define CRYPTO_TFM_RES_BAD_KEY_SCHED 	0x00400000
 #define CRYPTO_TFM_RES_BAD_BLOCK_LEN 	0x00800000
 #define CRYPTO_TFM_RES_BAD_FLAGS 	0x01000000
+#define CRYPTO_TFM_SGWALK_MAY_SLEEP	0x02000000
 
 /*
  * Miscellaneous stuff.
-- 
1.7.11.7


