[bug report] crypto: hisilicon - SEC security accelerator driver

Hello Jonathan Cameron,

The patch 915e4e8413da: "crypto: hisilicon - SEC security accelerator
driver" from Jul 23, 2018, leads to the following Smatch static
checker warnings:

drivers/crypto/hisilicon/sec/sec_algs.c:389 sec_send_request() warn: sleeping in atomic context
drivers/crypto/hisilicon/sec/sec_algs.c:494 sec_skcipher_alg_callback() warn: sleeping in atomic context
drivers/crypto/hisilicon/sec/sec_algs.c:506 sec_skcipher_alg_callback() warn: sleeping in atomic context
drivers/crypto/hisilicon/sec/sec_algs.c:824 sec_alg_skcipher_crypto() warn: sleeping in atomic context
drivers/crypto/hisilicon/sec/sec_drv.c:864 sec_queue_send() warn: sleeping in atomic context

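For reference, "sleeping in atomic context" means a call that can sleep
(mutex_lock(), kmalloc(GFP_KERNEL), etc.) is reachable while preemption or
bottom halves are disabled, for example under a spinlock.  A minimal
illustration of the pattern Smatch is flagging (invented names, not taken
from this driver):

#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {				/* hypothetical, for illustration only */
	spinlock_t lock;
	void *buf;
};

static void example(struct foo *f)
{
	spin_lock_bh(&f->lock);			/* atomic context starts here */
	f->buf = kmalloc(64, GFP_KERNEL);	/* may sleep -> "sleeping in atomic context" */
	spin_unlock_bh(&f->lock);
}

In this driver the atomic section is the region between
spin_lock_bh(&ctx->queue->queuelock) and the matching unlock in
sec_skcipher_alg_callback(), as shown below:
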
drivers/crypto/hisilicon/sec/sec_algs.c
    421 static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
    422                                       struct crypto_async_request *req_base)
    423 {
    424         struct skcipher_request *skreq = container_of(req_base,
    425                                                       struct skcipher_request,
    426                                                       base);
    427         struct sec_request *sec_req = skcipher_request_ctx(skreq);
    428         struct sec_request *backlog_req;
    429         struct sec_request_el *sec_req_el, *nextrequest;
    430         struct sec_alg_tfm_ctx *ctx = sec_req->tfm_ctx;
    431         struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
    432         struct device *dev = ctx->queue->dev_info->dev;
    433         int icv_or_skey_en, ret;
    434         bool done;
    435 
    436         sec_req_el = list_first_entry(&sec_req->elements, struct sec_request_el,
    437                                       head);
    438         icv_or_skey_en = (sec_resp->w0 & SEC_BD_W0_ICV_OR_SKEY_EN_M) >>
    439                 SEC_BD_W0_ICV_OR_SKEY_EN_S;
    440         if (sec_resp->w1 & SEC_BD_W1_BD_INVALID || icv_or_skey_en == 3) {
    441                 dev_err(dev, "Got an invalid answer %lu %d\n",
    442                         sec_resp->w1 & SEC_BD_W1_BD_INVALID,
    443                         icv_or_skey_en);
    444                 sec_req->err = -EINVAL;
    445                 /*
    446                  * We need to muddle on to avoid getting stuck with elements
    447                  * on the queue. The error will be reported to the requester
    448                  * so it should be able to handle it appropriately.
    449                  */
    450         }
    451 
    452         spin_lock_bh(&ctx->queue->queuelock);
                ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Here we take the queue spinlock, so everything up to the unlock at the end
of the function runs in atomic context and must not sleep.

    453         /* Put the IV in place for chained cases */
    454         switch (ctx->cipher_alg) {
    455         case SEC_C_AES_CBC_128:
    456         case SEC_C_AES_CBC_192:
    457         case SEC_C_AES_CBC_256:
    458                 if (sec_req_el->req.w0 & SEC_BD_W0_DE)
    459                         sg_pcopy_to_buffer(sec_req_el->sgl_out,
    460                                            sg_nents(sec_req_el->sgl_out),
    461                                            skreq->iv,
    462                                            crypto_skcipher_ivsize(atfm),
    463                                            sec_req_el->el_length -
    464                                            crypto_skcipher_ivsize(atfm));
    465                 else
    466                         sg_pcopy_to_buffer(sec_req_el->sgl_in,
    467                                            sg_nents(sec_req_el->sgl_in),
    468                                            skreq->iv,
    469                                            crypto_skcipher_ivsize(atfm),
    470                                            sec_req_el->el_length -
    471                                            crypto_skcipher_ivsize(atfm));
    472                 /* No need to sync to the device as coherent DMA */
    473                 break;
    474         case SEC_C_AES_CTR_128:
    475         case SEC_C_AES_CTR_192:
    476         case SEC_C_AES_CTR_256:
    477                 crypto_inc(skreq->iv, 16);
    478                 break;
    479         default:
    480                 /* Do not update */
    481                 break;
    482         }
    483 
    484         if (ctx->queue->havesoftqueue &&
    485             !kfifo_is_empty(&ctx->queue->softqueue) &&
    486             sec_queue_empty(ctx->queue)) {
    487                 ret = kfifo_get(&ctx->queue->softqueue, &nextrequest);
    488                 if (ret <= 0)
    489                         dev_err(dev,
    490                                 "Error getting next element from kfifo %d\n",
    491                                 ret);
    492                 else
    493                         /* We know there is space so this cannot fail */
--> 494                         sec_queue_send(ctx->queue, &nextrequest->req,
                                ^^^^^^^^^^^^^^^

sec_queue_send() can sleep (see the sec_drv.c:864 warning above), but we
are still holding queuelock here.

    495                                        nextrequest->sec_req);
    496         } else if (!list_empty(&ctx->backlog)) {
    497                 /* Need to verify there is room first */
    498                 backlog_req = list_first_entry(&ctx->backlog,
    499                                                typeof(*backlog_req),
    500                                                backlog_head);
    501                 if (sec_queue_can_enqueue(ctx->queue,
    502                     backlog_req->num_elements) ||
    503                     (ctx->queue->havesoftqueue &&
    504                      kfifo_avail(&ctx->queue->softqueue) >
    505                      backlog_req->num_elements)) {
    506                         sec_send_request(backlog_req, ctx->queue);
                                ^^^^^^^^^^^^^^^^
sec_send_request() also sleeps (the sec_algs.c:389 warning above), again
under queuelock.

    507                         backlog_req->req_base->complete(backlog_req->req_base,
    508                                                         -EINPROGRESS);
    509                         list_del(&backlog_req->backlog_head);
    510                 }
    511         }
    512         spin_unlock_bh(&ctx->queue->queuelock);
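
The flagged calls (sec_queue_send(), sec_send_request()) therefore run
between the spin_lock_bh()/spin_unlock_bh() pair above.  If they cannot be
made non-sleeping on this path (for instance by converting an internal
mutex to a spinlock or using GFP_ATOMIC for allocations), one common way
out is to defer the resubmission to process context.  A rough sketch of
that idea only, with invented names and fields that are not part of this
driver:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

/* Hypothetical context -- not the driver's sec_alg_tfm_ctx. */
struct resubmit_ctx {
	spinlock_t lock;
	struct list_head backlog;
	struct work_struct backlog_work;
};

static void resubmit_worker(struct work_struct *work)
{
	struct resubmit_ctx *ctx =
		container_of(work, struct resubmit_ctx, backlog_work);
	LIST_HEAD(todo);

	/* Move the backlog out from under the spinlock... */
	spin_lock_bh(&ctx->lock);
	list_splice_init(&ctx->backlog, &todo);
	spin_unlock_bh(&ctx->lock);

	/* ...then do the (possibly sleeping) submission in process
	 * context, walking 'todo' here. */
}

/* Called from the atomic completion path instead of sending directly. */
static void on_completion(struct resubmit_ctx *ctx)
{
	schedule_work(&ctx->backlog_work);
}

Whether deferring like this, or simply making the send path non-sleeping,
is the right fix is of course up to the driver authors.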

regards,
dan carpenter


