On Fri, Mar 05, 2021 at 06:02:23PM +0100, Loic Poulain wrote: > This change ensures that PM reference is always taken during packet > queueing and released either after queuing completion (RX) or once > the buffer has been consumed (TX). This guarantees proper update for > underlying MHI controller runtime status (e.g. last_busy timestamp) > and prevents suspend from being triggered while TX packets are flying, > or before we have completed updating the RX ring. > Any reason why you didn't wait for RX completion also? Thanks, Mani > Signed-off-by: Loic Poulain <loic.poulain@xxxxxxxxxx> > --- > drivers/bus/mhi/core/main.c | 20 ++++++++++++++++---- > 1 file changed, 16 insertions(+), 4 deletions(-) > > diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c > index c780234..16b9640 100644 > --- a/drivers/bus/mhi/core/main.c > +++ b/drivers/bus/mhi/core/main.c > @@ -584,8 +584,11 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl, > /* notify client */ > mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); > > - if (mhi_chan->dir == DMA_TO_DEVICE) > + if (mhi_chan->dir == DMA_TO_DEVICE) { > atomic_dec(&mhi_cntrl->pending_pkts); > + /* Release the reference got from mhi_queue() */ > + mhi_cntrl->runtime_put(mhi_cntrl); > + } > > /* > * Recycle the buffer if buffer is pre-allocated, > @@ -1021,9 +1024,11 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info, > if (unlikely(ret)) > goto exit_unlock; > > - /* trigger M3 exit if necessary */ > - if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) > - mhi_trigger_resume(mhi_cntrl); > + /* Packet is queued, take a usage ref to exit M3 if necessary > + * for host->device buffer, balanced put is done on buffer completion > + * for device->host buffer, balanced put is after ringing the DB > + */ > + mhi_cntrl->runtime_get(mhi_cntrl); > > /* Assert dev_wake (to exit/prevent M1/M2)*/ > mhi_cntrl->wake_toggle(mhi_cntrl); > @@ -1034,6 +1039,9 @@ static int mhi_queue(struct mhi_device *mhi_dev, 
struct mhi_buf_info *buf_info, > if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) > mhi_ring_chan_db(mhi_cntrl, mhi_chan); > > + if (dir == DMA_FROM_DEVICE) > + mhi_cntrl->runtime_put(mhi_cntrl); > + > exit_unlock: > read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); > > @@ -1431,6 +1439,10 @@ static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl, > result.buf_addr = buf_info->cb_buf; > mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); > } > + > + /* Release the reference got from mhi_queue() */ > + if (mhi_chan->dir == DMA_TO_DEVICE) > + mhi_cntrl->runtime_put(mhi_cntrl); > } > } > > -- > 2.7.4 >