Add support for queueing SKBs to the host over the transfer ring of the
relevant channel. The mhi_ep_queue_skb() API will be used by the client
networking drivers to queue SKBs to the host over the MHI bus.

The host will add ring elements to the transfer ring periodically for
the device, and the device will write the SKBs to those ring elements.
If a single SKB doesn't fit in a ring element (TRE), it will be placed
in multiple ring elements and the OVERFLOW event will be sent for all
ring elements except the last one. For the last ring element, the EOT
event will be sent, indicating the packet boundary.

Reviewed-by: Alex Elder <elder@xxxxxxxxxx>
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@xxxxxxxxxx>
---
 drivers/bus/mhi/ep/main.c | 82 +++++++++++++++++++++++++++++++++++++++
 include/linux/mhi_ep.h    |  9 +++++
 2 files changed, 91 insertions(+)

diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
index e2ed10b4a9d2..660d1e9791d3 100644
--- a/drivers/bus/mhi/ep/main.c
+++ b/drivers/bus/mhi/ep/main.c
@@ -434,6 +434,88 @@ static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_elem
 	return 0;
 }
 
+/* TODO: Handle partially formed TDs */
+int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
+{
+	struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan;
+	struct device *dev = &mhi_chan->mhi_dev->dev;
+	struct mhi_ring_element *el;
+	u32 buf_left, read_offset;
+	struct mhi_ep_ring *ring;
+	enum mhi_ev_ccs code;
+	void *read_addr;
+	u64 write_addr;
+	size_t tr_len;
+	u32 tre_len;
+	int ret;
+
+	buf_left = skb->len;
+	ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
+
+	mutex_lock(&mhi_chan->lock);
+
+	do {
+		/* Don't process the transfer ring if the channel is not in RUNNING state */
+		if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
+			dev_err(dev, "Channel not available\n");
+			ret = -ENODEV;
+			goto err_exit;
+		}
+
+		if (mhi_ep_queue_is_empty(mhi_dev, DMA_FROM_DEVICE)) {
+			dev_err(dev, "TRE not available!\n");
+			ret = -ENOSPC;
+			goto err_exit;
+		}
+
+		el = &ring->ring_cache[ring->rd_offset];
+		tre_len = MHI_TRE_DATA_GET_LEN(el);
+
+		tr_len = min(buf_left, tre_len);
+		read_offset = skb->len - buf_left;
+		read_addr = skb->data + read_offset;
+		write_addr = MHI_TRE_DATA_GET_PTR(el);
+
+		dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
+		ret = mhi_cntrl->write_to_host(mhi_cntrl, read_addr, write_addr, tr_len);
+		if (ret < 0) {
+			dev_err(dev, "Error writing to the channel\n");
+			goto err_exit;
+		}
+
+		buf_left -= tr_len;
+		/*
+		 * For all TREs queued by the host for the DL channel, only the EOT flag will
+		 * be set. If the packet doesn't fit into a single TRE, send the OVERFLOW event
+		 * to the host so that the host can adjust the packet boundary to the next TREs.
+		 * Otherwise, send the EOT event to the host indicating the packet boundary.
+		 */
+		if (buf_left)
+			code = MHI_EV_CC_OVERFLOW;
+		else
+			code = MHI_EV_CC_EOT;
+
+		ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, tr_len, code);
+		if (ret) {
+			dev_err(dev, "Error sending transfer completion event\n");
+			goto err_exit;
+		}
+
+		mhi_ep_ring_inc_index(ring);
+	} while (buf_left);
+
+	mutex_unlock(&mhi_chan->lock);
+
+	return 0;
+
+err_exit:
+	mutex_unlock(&mhi_chan->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_ep_queue_skb);
+
 static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
 {
 	size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size;

diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h
index eecc8f35d630..478aece17046 100644
--- a/include/linux/mhi_ep.h
+++ b/include/linux/mhi_ep.h
@@ -265,4 +265,13 @@ void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl);
  */
 bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir);
 
+/**
+ * mhi_ep_queue_skb - Send SKBs to host over MHI Endpoint
+ * @mhi_dev: Device associated with the DL channel
+ * @skb: SKB to be queued
+ *
+ * Return: 0 if the SKB has been sent successfully, a negative error code otherwise.
+ */
+int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb);
+
 #endif
-- 
2.25.1
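
For reference, a minimal usage sketch of the API from the perspective of a
client networking driver. This is not part of the patch: the mhi_net_ep_*
names and the private-data layout are hypothetical; only mhi_ep_queue_skb()
and the standard netdev helpers are real. Since mhi_ep_queue_skb() copies
the payload to the host before returning, the SKB can be freed as soon as
the call succeeds.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/mhi_ep.h>

/* Hypothetical private data for an MHI endpoint netdev */
struct mhi_net_ep_priv {
	struct mhi_ep_device *mdev;	/* device bound to the DL channel */
};

static netdev_tx_t mhi_net_ep_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct mhi_net_ep_priv *priv = netdev_priv(ndev);
	int ret;

	ret = mhi_ep_queue_skb(priv->mdev, skb);
	if (ret == -ENOSPC) {
		/* No TREs queued by the host yet: pause the queue and let
		 * the stack requeue this SKB later.
		 */
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	} else if (ret) {
		/* Channel not running or the write failed: drop the packet */
		ndev->stats.tx_errors++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;
	dev_kfree_skb_any(skb);	/* payload already copied to the host */

	return NETDEV_TX_OK;
}

Returning NETDEV_TX_BUSY on -ENOSPC lets the core requeue the SKB once the
host replenishes the transfer ring and the driver wakes the queue.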
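
To make the completion-event sequencing concrete, here is a standalone
illustration of the chunking done by the do/while loop above: every chunk
except the last completes with OVERFLOW, and the last completes with EOT.
The fixed TRE size is an assumption made only for this example; real TREs
carry their own length in the ring element.

#include <stdio.h>

/* Mirror of the loop's arithmetic: split skb_len across TREs of tre_len
 * bytes and print the completion code reported for each ring element.
 */
static void show_completion_events(unsigned int skb_len, unsigned int tre_len)
{
	unsigned int buf_left = skb_len;

	while (buf_left) {
		unsigned int tr_len = buf_left < tre_len ? buf_left : tre_len;

		buf_left -= tr_len;
		printf("TRE: %u bytes -> %s\n", tr_len,
		       buf_left ? "MHI_EV_CC_OVERFLOW" : "MHI_EV_CC_EOT");
	}
}

int main(void)
{
	/* A 5000 byte SKB over 2048 byte TREs: OVERFLOW, OVERFLOW, EOT */
	show_completion_events(5000, 2048);
	return 0;
}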