On 1/22/2020 12:46 PM, Corentin Labbe wrote: > Some bikeshedding is unnecessary since a workqueue can only be executed > one by one. > This behaviour is documented in: > - kernel/kthread.c: comment of kthread_worker_fn() > - Documentation/core-api/workqueue.rst: the functions associated with the work items one after the other [...] > @@ -73,16 +73,6 @@ static void crypto_pump_requests(struct crypto_engine *engine, > > spin_lock_irqsave(&engine->queue_lock, flags); > > - /* Make sure we are not already running a request */ > - if (engine->cur_req) > - goto out; > - This check is here for a good reason, namely because the crypto engine cannot currently handle multiple crypto requests being in "flight" in parallel. More precisely, if this check is removed the following sequence could occur: crypto_pump_work() -> crypto_pump_requests() -> .do_one_request(areq1) crypto_pump_work() -> crypto_pump_requests() -> .do_one_request(areq2) crypto_finalize_request(areq1) crypto_finalize_request(areq2) This isn't handled correctly in crypto_finalize_request(), since .unprepare_request will be called only for areq2.
/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request to be finalized
 * @err: error number to report to the request's completion callback
 *
 * Calls the transform's .unprepare_request hook (if any) when @req is the
 * engine's current in-flight request, clears the engine's current-request
 * tracking, completes @req, and kicks the pump worker to pick up the next
 * queued request.
 */
static void crypto_finalize_request(struct crypto_engine *engine,
				    struct crypto_async_request *req, int err)
{
	unsigned long flags;
	bool finalize_cur_req = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	/*
	 * engine->cur_req is protected by queue_lock; take a snapshot of
	 * whether @req is the request currently tracked by the engine.
	 */
	spin_lock_irqsave(&engine->queue_lock, flags);
	if (engine->cur_req == req)
		finalize_cur_req = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (finalize_cur_req) {
		enginectx = crypto_tfm_ctx(req->tfm);
		/*
		 * Undo .prepare_request only if it actually ran
		 * (cur_req_prepared) and the tfm provides the hook.
		 * NOTE(review): this runs unlocked, so if two requests
		 * were ever in flight, only the one matching cur_req
		 * would be unprepared - see discussion above.
		 */
		if (engine->cur_req_prepared &&
		    enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine, req);
			if (ret)
				dev_err(engine->dev, "failed to unprepare request\n");
		}
		/* Retake the lock to clear the in-flight request state. */
		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->cur_req = NULL;
		engine->cur_req_prepared = false;
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	/* Report completion to the request's owner. */
	req->complete(req, err);

	/* Schedule the pump to process the next queued request. */
	kthread_queue_work(engine->kworker, &engine->pump_requests);
}

Horia