Re: [RFC PATCH 2/3] crypto: hisilicon hacv1 driver

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Am Dienstag, 30. Januar 2018, 16:29:52 CET schrieb Jonathan Cameron:

Hi Jonathan,

> +	/* Special path for single element SGLs with small packets. */
> +	if (sg_is_last(sgl) && sgl->length <= SEC_SMALL_PACKET_SIZE) {

This looks strangely familiar. Is this code affected by a similar issue fixed 
in https://patchwork.kernel.org/patch/10173981?

> +static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
> +                                  const u8 *key, unsigned int keylen,
> +                                  enum sec_cipher_alg alg)
> +{
> +       struct sec_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
> +       struct device *dev;
> +
> +       spin_lock(&ctx->lock);
> +       if (ctx->enc_key) {
> +               /* rekeying */
> +               dev = SEC_Q_DEV(ctx->queue);
> +               memset(ctx->enc_key, 0, SEC_MAX_CIPHER_KEY);
> +               memset(ctx->dec_key, 0, SEC_MAX_CIPHER_KEY);
> +               memset(&ctx->enc_req, 0, sizeof(ctx->enc_req));
> +               memset(&ctx->dec_req, 0, sizeof(ctx->dec_req));
> +       } else {
> +               /* new key */
> +               dev = SEC_Q_DEV(ctx->queue);
> +               ctx->enc_key = dma_zalloc_coherent(dev, SEC_MAX_CIPHER_KEY,
> +                                                  &ctx->enc_pkey, GFP_ATOMIC);
> +               if (!ctx->enc_key) {
> +                       spin_unlock(&ctx->lock);
> +                       return -ENOMEM;
> +               }
> +               ctx->dec_key = dma_zalloc_coherent(dev, SEC_MAX_CIPHER_KEY,
> +                                                  &ctx->dec_pkey, GFP_ATOMIC);
> +               if (!ctx->dec_key) {
> +                       spin_unlock(&ctx->lock);
> +                       goto out_free_enc;
> +               }
> +       }
> +       spin_unlock(&ctx->lock);
> +       if (sec_alg_skcipher_init_context(tfm, key, keylen, alg))
> +               goto out_free_all;
> +
> +       return 0;
> +
> +out_free_all:
> +       memset(ctx->dec_key, 0, SEC_MAX_CIPHER_KEY);
> +       dma_free_coherent(dev, SEC_MAX_CIPHER_KEY,
> +                         ctx->dec_key, ctx->dec_pkey);
> +       ctx->dec_key = NULL;
> +out_free_enc:
> +       memset(ctx->enc_key, 0, SEC_MAX_CIPHER_KEY);
> +       dma_free_coherent(dev, SEC_MAX_CIPHER_KEY,
> +                         ctx->enc_key, ctx->enc_pkey);
> +       ctx->enc_key = NULL;

Please use memzero_explicit.
> +
> +       return -ENOMEM;
> +}

> +static int sec_alg_skcipher_setkey_aes_xts(struct crypto_skcipher *tfm,
> +                                          const u8 *key, unsigned int keylen)
> +{
> +       enum sec_cipher_alg alg;
> +
> +       switch (keylen) {
> +       case AES_KEYSIZE_128 * 2:
> +               alg = SEC_AES_XTS_128;
> +               break;
> +       case AES_KEYSIZE_256 * 2:
> +               alg = SEC_AES_XTS_256;
> +               break;
> +       default:
> +               return -EINVAL;
> +       }

Please add xts_check_key()
> +
> +       return sec_alg_skcipher_setkey(tfm, key, keylen, alg);

> +static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm,
> +                                           const u8 *key, unsigned int keylen)
> +{
> +       if (keylen != DES_KEY_SIZE * 3)
> +               return -EINVAL;
> +
> +       return sec_alg_skcipher_setkey(tfm, key, keylen,
> +                                      SEC_3DES_ECB_192_3KEY);
> +}
> +
> +static int sec_alg_skcipher_setkey_3des_cbc(struct crypto_skcipher *tfm,
> +                                           const u8 *key, unsigned int keylen)
> +{
> +       if (keylen != DES3_EDE_KEY_SIZE)
> +               return -EINVAL;
> +
> +       return sec_alg_skcipher_setkey(tfm, key, keylen,
> +                                      SEC_3DES_CBC_192_3KEY);
> +}

Please use __des3_ede_setkey

> +static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
> +				      struct skcipher_request *skreq)
> +{
> +	struct sec_crypto_request *sec_req = skcipher_request_ctx(skreq);
> +	struct sec_alg_skcipher_ctx *ctx = sec_req->skcipher_ctx;
> +	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
> +	struct crypto_async_request *nextrequest;
> +	struct sec_crypto_request *nextsec_req;
> +	struct skcipher_request *nextskreq;
> +	int icv_or_skey_en;
> +	int err = 0;
> +
> +	icv_or_skey_en = (sec_resp->w0 & SEC_BD_W0_ICV_OR_SKEY_EN_M) >>
> +		SEC_BD_W0_ICV_OR_SKEY_EN_S;
> +	if (sec_resp->w1 & SEC_BD_W1_BD_INVALID || icv_or_skey_en == 3) {
> +		dev_err(ctx->queue->dev_info->dev,
> +			"Got an invalid answer %lu %d\n",
> +			sec_resp->w1 & SEC_BD_W1_BD_INVALID,
> +			icv_or_skey_en);
> +		err = -EINVAL;
> +		/*
> +		 * We need to muddle on to avoid getting stuck with elements
> +		 * on the queue. Error will be reported so userspace should
> +		 * know a mess has occurred.
> +		 */
> +	}
> +
> +	spin_lock(&ctx->queue->queuelock);
> +	sec_free_opdata(ctx->queue, skreq->src, skreq->dst, skreq);
> +	/* Put the IV in place for chained cases */
> +	switch (ctx->cipher_alg) {
> +	case SEC_AES_CBC_128:
> +	case SEC_AES_CBC_192:
> +	case SEC_AES_CBC_256:
> +		if (sec_req->req.w0 & SEC_BD_W0_DE)
> +			sg_pcopy_to_buffer(skreq->dst, sec_req->len_out,
> +					   skreq->iv,
> +					   crypto_skcipher_ivsize(atfm),
> +					   skreq->cryptlen -
> +					   crypto_skcipher_ivsize(atfm));
> +		else
> +			sg_pcopy_to_buffer(skreq->src, sec_req->len_in,
> +					   skreq->iv,
> +					   crypto_skcipher_ivsize(atfm),
> +					   skreq->cryptlen - 16);
> +		break;
> +	case SEC_AES_CTR_128:
> +	case SEC_AES_CTR_192:
> +	case SEC_AES_CTR_256:
> +		crypto_inc(skreq->iv, 16);
> +		break;
> +	default:
> +		/* Do not update */
> +		break;
> +	}
> +
> +	if (ctx->queue->havesoftqueue &&
> +	    !list_empty(&ctx->queue->softqueue.list) &&
> +	    sec_queue_empty(ctx->queue)) {
> +		nextrequest = crypto_dequeue_request(&ctx->queue->softqueue);
> +		nextskreq = container_of(nextrequest, struct skcipher_request,
> +					 base);
> +		nextsec_req = skcipher_request_ctx(nextskreq);
> +		/* We know there is space so this cannot fail */
> +		sec_queue_send(ctx->queue, &nextsec_req->req, nextskreq);


Looking at that code and considering what you said that only for CTR and CBC 
you need to apply proper IV dependency handling, I am wondering why XTS is not 
covered (there is an IV). What about the DES/3DES ciphers?

> +	/*
> +	 * Add to hardware queue only under following circumstances
> +	 * 1) Software and hardware queue empty so no chain dependencies
> +	 * 2) No dependencies as new IV - (check software queue empty
> +	 *    to maintain order)
> +	 * 3) No dependencies because the mode does no chaining.
> +	 *
> +	 * In other cases first insert onto the software queue which is then
> +	 * emptied as requests complete
> +	 */
> +	if (!ctx->queue->havesoftqueue ||
> +	    (list_empty(&ctx->queue->softqueue.list) &&
> +	     (sec_queue_empty(ctx->queue) ||
> +	      ctx->prev_iv_address != skreq->iv))) {

Maybe you want to rely on the flag given by the upper layer that I will 
propose shortly.

> +
> +static void sec_alg_skcipher_exit(struct crypto_skcipher *tfm)
> +{
> +	struct sec_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
> +	struct device *dev = SEC_Q_DEV(ctx->queue);
> +
> +	if (ctx->enc_key) {
> +		memset(ctx->enc_key, 0, SEC_MAX_CIPHER_KEY);
> +		dma_free_coherent(dev, SEC_MAX_CIPHER_KEY,
> +				  ctx->enc_key, ctx->enc_pkey);
> +	}
> +	if (ctx->dec_key) {
> +		memset(ctx->dec_key, 0, SEC_MAX_CIPHER_KEY);
> +		dma_free_coherent(dev, SEC_MAX_CIPHER_KEY,
> +				  ctx->dec_key, ctx->dec_pkey);

Please use memzero_explicit.

> +
> +	/* Get the first idle queue in SEC device */
> +	for (i = 0; i < SEC_Q_NUM; i++)

I think you should use curly braces here too. Consider what checkpatch.pl 
thinks.

> +		if (!sec_queue_in_use_get(&info->queues[i])) {
> +			sec_queue_in_use(&info->queues[i], 1);
> +			spin_unlock(&info->dev_lock);
> +			*q = &info->queues[i];
> +			return 0;
> +		}
> +	spin_unlock(&info->dev_lock);
> +
> +	return -ENODEV;
> +}
> +
> +static int sec_count_queues_in_use(struct sec_dev_info *info)
> +{
> +	int i, count = 0;
> +
> +	spin_lock(&info->dev_lock);
> +	for (i = 0; i < SEC_Q_NUM; i++)

Ditto — the same comment as above applies: this multi-line loop body should also get curly braces.

Ciao
Stephan




[Index of Archives]     [Kernel]     [Gnu Classpath]     [Gnu Crypto]     [DM Crypt]     [Netfilter]     [Bugtraq]

  Powered by Linux