On 19/03/20 12:54 pm, Baolin Wang wrote:
> There is an unusual case that the card is busy when trying to send a
> command, and we cannot poll the card status in interrupt context
> by using request_atomic() to dispatch requests.
>
> Thus we should queue a work to try again in the non-atomic context
> in case the host releases the busy signal later.

I think this should be part of patch 1.

>
> Signed-off-by: Baolin Wang <baolin.wang7@xxxxxxxxx>

Sorry for the slow reply.

> ---
>  drivers/mmc/host/mmc_hsq.c | 37 ++++++++++++++++++++++++++++++++++++-
>  drivers/mmc/host/mmc_hsq.h |  1 +
>  2 files changed, 37 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/mmc/host/mmc_hsq.c b/drivers/mmc/host/mmc_hsq.c
> index fdbaa98..3edad11 100644
> --- a/drivers/mmc/host/mmc_hsq.c
> +++ b/drivers/mmc/host/mmc_hsq.c
> @@ -15,11 +15,33 @@
>  #define HSQ_NUM_SLOTS	64
>  #define HSQ_INVALID_TAG	HSQ_NUM_SLOTS
>
> +static void mmc_hsq_retry_handler(struct work_struct *work)
> +{
> +	struct mmc_hsq *hsq = container_of(work, struct mmc_hsq, retry_work);
> +	struct mmc_host *mmc = hsq->mmc;
> +	struct mmc_request *mrq = hsq->mrq;
> +	struct mmc_data *data = mrq->data;
> +
> +	if (mmc->ops->request) {

->request() is not an optional mmc operation, so checking it is not necessary.

> +		mmc->ops->request(mmc, mrq);
> +		return;
> +	}
> +
> +	/*
> +	 * If host does not supply the callback in normal context to
> +	 * handle request, just finish this request.
> +	 */
> +	data->error = -EBUSY;
> +	data->bytes_xfered = 0;
> +	mmc_hsq_finalize_request(mmc, mrq);
> +}
> +
>  static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
>  {
>  	struct mmc_host *mmc = hsq->mmc;
>  	struct hsq_slot *slot;
>  	unsigned long flags;
> +	int ret = 0;
>
>  	spin_lock_irqsave(&hsq->lock, flags);
>
> @@ -42,9 +64,21 @@ static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
>  	spin_unlock_irqrestore(&hsq->lock, flags);
>
>  	if (mmc->ops->request_atomic)
> -		mmc->ops->request_atomic(mmc, hsq->mrq);
> +		ret = mmc->ops->request_atomic(mmc, hsq->mrq);
>  	else
>  		mmc->ops->request(mmc, hsq->mrq);
> +
> +	/*
> +	 * If returning BUSY from request_atomic(), which means the card
> +	 * may be busy now, and we should change to non-atomic context to
> +	 * try again for this unusual case, to avoid time-consuming operations
> +	 * in the atomic context.
> +	 *
> +	 * Note: we can ignore other error cases, since the host driver
> +	 * will handle them.
> +	 */
> +	if (ret == -EBUSY)
> +		schedule_work(&hsq->retry_work);

Let's add a warning for unexpected return values, i.e. WARN_ON_ONCE(ret && ret != -EBUSY);

>  }
>
>  static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
> @@ -327,6 +361,7 @@ int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc)
>  	hsq->mmc->cqe_private = hsq;
>  	mmc->cqe_ops = &mmc_hsq_ops;
>
> +	INIT_WORK(&hsq->retry_work, mmc_hsq_retry_handler);
>  	spin_lock_init(&hsq->lock);
>  	init_waitqueue_head(&hsq->wait_queue);
>
> diff --git a/drivers/mmc/host/mmc_hsq.h b/drivers/mmc/host/mmc_hsq.h
> index d51beb7..81f6c4f 100644
> --- a/drivers/mmc/host/mmc_hsq.h
> +++ b/drivers/mmc/host/mmc_hsq.h
> @@ -12,6 +12,7 @@ struct mmc_hsq {
>  	wait_queue_head_t wait_queue;
>  	struct hsq_slot *slot;
>  	spinlock_t lock;
> +	struct work_struct retry_work;
>
>  	int next_tag;
>  	int num_slots;
>
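
To make both comments above concrete, here is an untested sketch of what I
have in mind (assuming hsq->mrq is still valid when the work runs). Since
->request() always exists, the retry handler can just resend the request:

static void mmc_hsq_retry_handler(struct work_struct *work)
{
	struct mmc_hsq *hsq = container_of(work, struct mmc_hsq, retry_work);
	struct mmc_host *mmc = hsq->mmc;

	/* ->request() is mandatory, so no need to check it here */
	mmc->ops->request(mmc, hsq->mrq);
}

and in mmc_hsq_pump_requests(), the warning would sit next to the -EBUSY
check:

	/*
	 * Only -EBUSY is expected from request_atomic(); other errors are
	 * handled by the host driver itself, so warn if we see one.
	 */
	WARN_ON_ONCE(ret && ret != -EBUSY);

	if (ret == -EBUSY)
		schedule_work(&hsq->retry_work);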