On Mon, Feb 28, 2022 at 10:45:15AM -0600, Alex Elder wrote:
> On 2/28/22 6:43 AM, Manivannan Sadhasivam wrote:
> > Add support for processing MHI endpoint interrupts such as control
> > interrupt, command interrupt and channel interrupt from the host.
> > 
> > The interrupts will be generated in the endpoint device whenever host
> > writes to the corresponding doorbell registers. The doorbell logic
> > is handled inside the hardware internally.
> > 
> > Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@xxxxxxxxxx>
> 
> One suggestion for future work, but otherwise this looks good.
> 
> Reviewed-by: Alex Elder <elder@xxxxxxxxxx>
> 
> > ---
> >  drivers/bus/mhi/ep/main.c | 123 +++++++++++++++++++++++++++++++++++++-
> >  include/linux/mhi_ep.h | 4 ++
> >  2 files changed, 125 insertions(+), 2 deletions(-)
> > 
> > diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
> > index 7a29543586d0..ce690b1aeace 100644
> > --- a/drivers/bus/mhi/ep/main.c
> > +++ b/drivers/bus/mhi/ep/main.c
> > @@ -143,6 +143,112 @@ static void mhi_ep_state_worker(struct work_struct *work)
> >          }
> >  }
> > +static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned long ch_int,
> > +                                    u32 ch_idx)
> > +{
> > +        struct mhi_ep_ring_item *item;
> > +        struct mhi_ep_ring *ring;
> > +        bool work = !!ch_int;
> > +        LIST_HEAD(head);
> > +        u32 i;
> > +
> > +        /* First add the ring items to a local list */
> > +        for_each_set_bit(i, &ch_int, 32) {
> > +                /* Channel index varies for each register: 0, 32, 64, 96 */
> > +                u32 ch_id = ch_idx + i;
> > +
> > +                ring = &mhi_cntrl->mhi_chan[ch_id].ring;
> > +                item = kzalloc(sizeof(*item), GFP_ATOMIC);
> 
> It looks like this will be used a lot, so I suggest you
> consider creating a slab cache of ring items to allocate
> from. I haven't suggested that elsewhere, but it's
> possible there are other frequently-allocated structures
> that would warrant that.
> 

Sure. Thanks, Mani

> > +                if (!item)
> > +                        return;
> > +
> > +                item->ring = ring;
> > +                list_add_tail(&item->node, &head);
> > +        }
> > +
> > +        /* Now, splice the local list into ch_db_list and queue the work item */
> > +        if (work) {
> > +                spin_lock(&mhi_cntrl->list_lock);
> > +                list_splice_tail_init(&head, &mhi_cntrl->ch_db_list);
> > +                spin_unlock(&mhi_cntrl->list_lock);
> > +        }
> > +}
> > +
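For the record, here is roughly what the slab cache idea could look like. This
is only a sketch to capture the suggestion for later: the cache name, the
helper names and where the cache gets created/destroyed (module init vs.
controller registration) are placeholders, not part of this patch.

#include <linux/slab.h>

static struct kmem_cache *mhi_ep_ring_item_cache;

/* Created once, e.g. from module init (or controller registration) */
static int mhi_ep_ring_item_cache_init(void)
{
        mhi_ep_ring_item_cache = kmem_cache_create("mhi_ep_ring_item",
                                                   sizeof(struct mhi_ep_ring_item),
                                                   0, 0, NULL);
        return mhi_ep_ring_item_cache ? 0 : -ENOMEM;
}

/* Matching teardown */
static void mhi_ep_ring_item_cache_exit(void)
{
        kmem_cache_destroy(mhi_ep_ring_item_cache);
}

The kzalloc() above would then become
kmem_cache_zalloc(mhi_ep_ring_item_cache, GFP_ATOMIC), with the consumer
freeing items through kmem_cache_free() instead of kfree().
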
> > +/*
> > + * Channel interrupt statuses are contained in 4 registers each of 32bit length.
> > + * For checking all interrupts, we need to loop through each registers and then
> > + * check for bits set.
> > + */
> > +static void mhi_ep_check_channel_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
> > +{
> > +        u32 ch_int, ch_idx, i;
> > +
> > +        /* Bail out if there is no channel doorbell interrupt */
> > +        if (!mhi_ep_mmio_read_chdb_status_interrupts(mhi_cntrl))
> > +                return;
> > +
> > +        for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) {
> > +                ch_idx = i * MHI_MASK_CH_EV_LEN;
> > +
> > +                /* Only process channel interrupt if the mask is enabled */
> > +                ch_int = mhi_cntrl->chdb[i].status & mhi_cntrl->chdb[i].mask;
> > +                if (ch_int) {
> > +                        mhi_ep_queue_channel_db(mhi_cntrl, ch_int, ch_idx);
> > +                        mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i),
> > +                                          mhi_cntrl->chdb[i].status);
> > +                }
> > +        }
> > +}
> > +
> > +static void mhi_ep_process_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl,
> > +                                          enum mhi_state state)
> > +{
> > +        struct mhi_ep_state_transition *item;
> > +
> > +        item = kzalloc(sizeof(*item), GFP_ATOMIC);
> > +        if (!item)
> > +                return;
> > +
> > +        item->state = state;
> > +        spin_lock(&mhi_cntrl->list_lock);
> > +        list_add_tail(&item->node, &mhi_cntrl->st_transition_list);
> > +        spin_unlock(&mhi_cntrl->list_lock);
> > +
> > +        queue_work(mhi_cntrl->wq, &mhi_cntrl->state_work);
> > +}
> > +
> > +/*
> > + * Interrupt handler that services interrupts raised by the host writing to
> > + * MHICTRL and Command ring doorbell (CRDB) registers for state change and
> > + * channel interrupts.
> > + */
> > +static irqreturn_t mhi_ep_irq(int irq, void *data)
> > +{
> > +        struct mhi_ep_cntrl *mhi_cntrl = data;
> > +        struct device *dev = &mhi_cntrl->mhi_dev->dev;
> > +        enum mhi_state state;
> > +        u32 int_value;
> > +
> > +        /* Acknowledge the ctrl interrupt */
> > +        int_value = mhi_ep_mmio_read(mhi_cntrl, MHI_CTRL_INT_STATUS);
> > +        mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR, int_value);
> > +
> > +        /* Check for ctrl interrupt */
> > +        if (FIELD_GET(MHI_CTRL_INT_STATUS_MSK, int_value)) {
> > +                dev_dbg(dev, "Processing ctrl interrupt\n");
> > +                mhi_ep_process_ctrl_interrupt(mhi_cntrl, state);
> > +        }
> > +
> > +        /* Check for command doorbell interrupt */
> > +        if (FIELD_GET(MHI_CTRL_INT_STATUS_CRDB_MSK, int_value))
> > +                dev_dbg(dev, "Processing command doorbell interrupt\n");
> > +
> > +        /* Check for channel interrupts */
> > +        mhi_ep_check_channel_interrupt(mhi_cntrl);
> > +
> > +        return IRQ_HANDLED;
> > +}
> > +
> >  static void mhi_ep_release_device(struct device *dev)
> >  {
> >          struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
> > @@ -339,7 +445,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
> >          struct mhi_ep_device *mhi_dev;
> >          int ret;
> > -        if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio)
> > +        if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq)
> >                  return -EINVAL;
> >          ret = mhi_ep_chan_init(mhi_cntrl, config);
> > @@ -361,6 +467,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
> >          }
> >          INIT_LIST_HEAD(&mhi_cntrl->st_transition_list);
> > +        INIT_LIST_HEAD(&mhi_cntrl->ch_db_list);
> >          spin_lock_init(&mhi_cntrl->state_lock);
> >          spin_lock_init(&mhi_cntrl->list_lock);
> >          mutex_init(&mhi_cntrl->event_lock);
> > @@ -376,12 +483,20 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
> >                  goto err_destroy_wq;
> >          }
> > +        irq_set_status_flags(mhi_cntrl->irq, IRQ_NOAUTOEN);
> > +        ret = request_irq(mhi_cntrl->irq, mhi_ep_irq, IRQF_TRIGGER_HIGH,
> > +                          "doorbell_irq", mhi_cntrl);
> > +        if (ret) {
> > +                dev_err(mhi_cntrl->cntrl_dev, "Failed to request Doorbell IRQ\n");
> > +                goto err_ida_free;
> > +        }
> > +
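One note on IRQ_NOAUTOEN above: since the doorbell IRQ is requested in a
disabled state, a later point in the stack still has to pair it with
enable_irq()/disable_irq() once the MMIO state and rings are ready. The
function names below are hypothetical placeholders just to show where those
calls would sit; they are not added by this patch.

#include <linux/interrupt.h>

/* Hypothetical bring-up path: enable doorbells only once the stack is ready */
int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl)
{
        /* ... MMIO init, ring and event setup ... */
        enable_irq(mhi_cntrl->irq);     /* host doorbells may fire from here on */
        return 0;
}

/* Hypothetical teardown path: quiesce the IRQ before tearing the rings down */
void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl)
{
        disable_irq(mhi_cntrl->irq);
        /* ... ring and state teardown ... */
}
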
> >          /* Allocate the controller device */
> >          mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_CONTROLLER);
> >          if (IS_ERR(mhi_dev)) {
> >                  dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate controller device\n");
> >                  ret = PTR_ERR(mhi_dev);
> > -                goto err_ida_free;
> > +                goto err_free_irq;
> >          }
> >          dev_set_name(&mhi_dev->dev, "mhi_ep%u", mhi_cntrl->index);
> > @@ -398,6 +513,8 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
> >  err_put_dev:
> >          put_device(&mhi_dev->dev);
> > +err_free_irq:
> > +        free_irq(mhi_cntrl->irq, mhi_cntrl);
> >  err_ida_free:
> >          ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
> >  err_destroy_wq:
> > @@ -417,6 +534,8 @@ void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl)
> >          destroy_workqueue(mhi_cntrl->wq);
> > +        free_irq(mhi_cntrl->irq, mhi_cntrl);
> > +
> >          kfree(mhi_cntrl->mhi_cmd);
> >          kfree(mhi_cntrl->mhi_chan);
> > diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h
> > index dc27a5de7d3c..43aa9b133db4 100644
> > --- a/include/linux/mhi_ep.h
> > +++ b/include/linux/mhi_ep.h
> > @@ -70,6 +70,7 @@ struct mhi_ep_db_info {
> >   * @list_lock: Lock for protecting state transition and channel doorbell lists
> >   * @state_lock: Lock for protecting state transitions
> >   * @st_transition_list: List of state transitions
> > + * @ch_db_list: List of queued channel doorbells
> >   * @wq: Dedicated workqueue for handling rings and state changes
> >   * @state_work: State transition worker
> >   * @raise_irq: CB function for raising IRQ to the host
> > @@ -87,6 +88,7 @@ struct mhi_ep_db_info {
> >   * @chdb_offset: Channel doorbell offset set by the host
> >   * @erdb_offset: Event ring doorbell offset set by the host
> >   * @index: MHI Endpoint controller index
> > + * @irq: IRQ used by the endpoint controller
> >   */
> >  struct mhi_ep_cntrl {
> >          struct device *cntrl_dev;
> > @@ -111,6 +113,7 @@ struct mhi_ep_cntrl {
> >          spinlock_t state_lock;
> >          struct list_head st_transition_list;
> > +        struct list_head ch_db_list;
> >          struct workqueue_struct *wq;
> >          struct work_struct state_work;
> > @@ -137,6 +140,7 @@ struct mhi_ep_cntrl {
> >          u32 chdb_offset;
> >          u32 erdb_offset;
> >          u32 index;
> > +        int irq;
> >  };
> >  /**
> 
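As a usage note for the new @irq field: a controller driver is expected to
fill it in (along with @mmio) before calling mhi_ep_register_controller(),
which now returns -EINVAL otherwise. A minimal, purely illustrative sketch,
assuming a platform device exposing a "doorbell" interrupt (the function and
config names below are made up for the example, not part of this series):

#include <linux/mhi_ep.h>
#include <linux/platform_device.h>

/* Channel/event ring configuration, elided for brevity */
static const struct mhi_ep_cntrl_config epf_mhi_config;

static int epf_mhi_probe(struct platform_device *pdev)
{
        struct mhi_ep_cntrl *mhi_cntrl;
        int irq;

        irq = platform_get_irq_byname(pdev, "doorbell");
        if (irq < 0)
                return irq;

        mhi_cntrl = devm_kzalloc(&pdev->dev, sizeof(*mhi_cntrl), GFP_KERNEL);
        if (!mhi_cntrl)
                return -ENOMEM;

        mhi_cntrl->cntrl_dev = &pdev->dev;
        mhi_cntrl->irq = irq;
        /* mhi_cntrl->mmio, callbacks, etc. are set up elsewhere */

        return mhi_ep_register_controller(mhi_cntrl, &epf_mhi_config);
}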