From: Md Sadre Alam <quic_mdalam@xxxxxxxxxxx>

The BAM driver now supports command descriptor locking. Add helper
functions that perform the dummy writes and acquire/release the lock,
and use them across the supported algos.

With this, if multiple execution environments (e.g. a trusted app and
Linux) try to access the same crypto engine, we can serialize their
accesses.

Signed-off-by: Md Sadre Alam <quic_mdalam@xxxxxxxxxxx>
[Bartosz: rework the coding style, naming convention, commit message
and ifdef logic]
Co-developed-by: Bartosz Golaszewski <bartosz.golaszewski@xxxxxxxxxx>
Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@xxxxxxxxxx>
---
 drivers/crypto/qce/aead.c     |  4 ++++
 drivers/crypto/qce/common.c   | 30 ++++++++++++++++++++++++++++++
 drivers/crypto/qce/core.h     |  3 +++
 drivers/crypto/qce/dma.c      |  4 ++++
 drivers/crypto/qce/dma.h      |  2 ++
 drivers/crypto/qce/sha.c      |  4 ++++
 drivers/crypto/qce/skcipher.c |  4 ++++
 7 files changed, 51 insertions(+)

diff --git a/drivers/crypto/qce/aead.c b/drivers/crypto/qce/aead.c
index 11cec08544c9..5d45841c029e 100644
--- a/drivers/crypto/qce/aead.c
+++ b/drivers/crypto/qce/aead.c
@@ -63,6 +63,8 @@ static void qce_aead_done(void *data)
 		sg_free_table(&rctx->dst_tbl);
 	}
 
+	qce_bam_unlock(qce);
+
 	error = qce_check_status(qce, &status);
 	if (error < 0 && (error != -EBADMSG))
 		dev_err(qce->dev, "aead operation error (%x)\n", status);
@@ -433,6 +435,8 @@ qce_aead_async_req_handle(struct crypto_async_request *async_req)
 	else
 		rctx->assoclen = req->assoclen;
 
+	qce_bam_lock(qce);
+
 	diff_dst = (req->src != req->dst) ? true : false;
 	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
 	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index 80984e853454..251bf3cb1dd5 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -565,6 +565,36 @@ int qce_start(struct crypto_async_request *async_req, u32 type)
 #define STATUS_ERRORS	\
 		(BIT(SW_ERR_SHIFT) | BIT(AXI_ERR_SHIFT) | BIT(HSD_ERR_SHIFT))
 
+void qce_bam_lock(struct qce_device *qce)
+{
+	int ret;
+
+	qce_clear_bam_transaction(qce);
+
+	/* This is just a dummy write to acquire the lock on the BAM pipe. */
+	qce_write(qce, REG_AUTH_SEG_CFG, 0);
+
+	ret = qce_submit_cmd_desc(qce, QCE_DMA_DESC_FLAG_LOCK);
+	if (ret)
+		dev_err(qce->dev,
+			"Failed to lock the command descriptor: %d\n", ret);
+}
+
+void qce_bam_unlock(struct qce_device *qce)
+{
+	int ret;
+
+	qce_clear_bam_transaction(qce);
+
+	/* This is just a dummy write to release the lock on the BAM pipe. */
+	qce_write(qce, REG_AUTH_SEG_CFG, 0);
+
+	ret = qce_submit_cmd_desc(qce, QCE_DMA_DESC_FLAG_UNLOCK);
+	if (ret)
+		dev_err(qce->dev,
+			"Failed to unlock the command descriptor: %d\n", ret);
+}
+
 int qce_check_status(struct qce_device *qce, u32 *status)
 {
 	int ret = 0;
diff --git a/drivers/crypto/qce/core.h b/drivers/crypto/qce/core.h
index b86caf8b926d..3341571991a4 100644
--- a/drivers/crypto/qce/core.h
+++ b/drivers/crypto/qce/core.h
@@ -65,4 +65,7 @@ struct qce_algo_ops {
 	int (*async_req_handle)(struct crypto_async_request *async_req);
 };
 
+void qce_bam_lock(struct qce_device *qce);
+void qce_bam_unlock(struct qce_device *qce);
+
 #endif /* _CORE_H_ */
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
index b8b305fc1b6a..f3178144fa94 100644
--- a/drivers/crypto/qce/dma.c
+++ b/drivers/crypto/qce/dma.c
@@ -80,6 +80,10 @@ int qce_submit_cmd_desc(struct qce_device *qce, unsigned long flags)
 	int ret = 0;
 
 	desc_flags = DMA_PREP_CMD;
+	if (flags & QCE_DMA_DESC_FLAG_LOCK)
+		desc_flags |= DMA_PREP_LOCK;
+	else if (flags & QCE_DMA_DESC_FLAG_UNLOCK)
+		desc_flags |= DMA_PREP_UNLOCK;
 
 	/*
 	 * The HPG recommends always using the consumer pipe for command
diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h
index 7d9d58b414ed..c98dcab1dc62 100644
--- a/drivers/crypto/qce/dma.h
+++ b/drivers/crypto/qce/dma.h
@@ -21,6 +21,8 @@ struct qce_device;
 #define QCE_BAM_CMD_ELEMENT_SIZE	64
 #define QCE_DMA_DESC_FLAG_BAM_NWD	(0x0004)
 #define QCE_MAX_REG_READ		8
+#define QCE_DMA_DESC_FLAG_LOCK		(0x0002)
+#define QCE_DMA_DESC_FLAG_UNLOCK	(0x0001)
 
 
 struct qce_result_dump {
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 0c7aab711b7b..4c701fca16f2 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -60,6 +60,8 @@ static void qce_ahash_done(void *data)
 	rctx->byte_count[0] = cpu_to_be32(result->auth_byte_count[0]);
 	rctx->byte_count[1] = cpu_to_be32(result->auth_byte_count[1]);
 
+	qce_bam_unlock(qce);
+
 	error = qce_check_status(qce, &status);
 	if (error < 0)
 		dev_dbg(qce->dev, "ahash operation error (%x)\n", status);
@@ -90,6 +92,8 @@ static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
 		rctx->authklen = AES_KEYSIZE_128;
 	}
 
+	qce_bam_lock(qce);
+
 	rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
 	if (rctx->src_nents < 0) {
 		dev_err(qce->dev, "Invalid numbers of src SG.\n");
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index cab796cd7e43..42414fe9b787 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -52,6 +52,8 @@ static void qce_skcipher_done(void *data)
 
 	sg_free_table(&rctx->dst_tbl);
 
+	qce_bam_unlock(qce);
+
 	error = qce_check_status(qce, &status);
 	if (error < 0)
 		dev_dbg(qce->dev, "skcipher operation error (%x)\n", status);
@@ -82,6 +84,8 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
 	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
 	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
 
+	qce_bam_lock(qce);
+
 	rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen);
 	if (diff_dst)
 		rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
-- 
2.45.2