On Mon, Apr 11, 2016 at 10:21:11AM -0400, Sinan Kaya wrote:
> + * HIDMA is not aware of IOMMU presence since it follows the DMA API. All
> + * IOMMU latency will be built into the data movement time. By the time
> + * interrupt happens, IOMMU lookups + data movement has already taken place.

Do you mean the dmaengine API or the dma mapping API here? Where is your
IOMMU located wrt the dma controller?

> + *
> + * While the first read in a typical PCI endpoint ISR flushes all outstanding
> + * requests traditionally to the destination, this concept does not apply
> + * here for this HW.
> + */
> +static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev)
> +{
> +	u32 status;
> +	u32 enable;
> +	u32 cause;
> +
> +	/*
> +	 * Fine tuned for this HW...
> +	 *
> +	 * This ISR has been designed for this particular hardware. Relaxed
> +	 * read and write accessors are used for performance reasons due to
> +	 * interrupt delivery guarantees. Do not copy this code blindly and
> +	 * expect that to work.
> +	 */
> +	status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
> +	enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
> +	cause = status & enable;
> +
> +	while (cause) {
> +		if ((cause & BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS)) ||
> +		    (cause & BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS)) ||
> +		    (cause & BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS)) ||
> +		    (cause & BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS)) ||
> +		    (cause & BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS))) {

Switch please (or see the mask sketch further down in this reply).

> +			u8 err_code = HIDMA_EVRE_STATUS_ERROR;
> +			u8 err_info = 0xFF;
> +
> +			/* Clear out pending interrupts */
> +			writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
> +
> +			dev_err(lldev->dev, "error 0x%x, resetting...\n",
> +				cause);
> +
> +			hidma_cleanup_pending_tre(lldev, err_info, err_code);
> +
> +			/* reset the channel for recovery */
> +			if (hidma_ll_setup(lldev)) {

Should this be done in the ISR?

> +int hidma_ll_resume(struct hidma_lldev *lldev)
> +{
> +	return hidma_ll_enable(lldev);
> +}

Why do we need this wrapper? Use hidma_ll_enable directly.

> +bool hidma_ll_isenabled(struct hidma_lldev *lldev)
> +{
> +	u32 val;
> +
> +	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
> +	lldev->trch_state = HIDMA_CH_STATE(val);
> +	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
> +	lldev->evch_state = HIDMA_CH_STATE(val);
> +
> +	/* both channels have to be enabled before calling this function */
> +	if (((lldev->trch_state == HIDMA_CH_ENABLED) ||
> +	     (lldev->trch_state == HIDMA_CH_RUNNING)) &&
> +	    ((lldev->evch_state == HIDMA_CH_ENABLED) ||
> +	     (lldev->evch_state == HIDMA_CH_RUNNING)))
> +		return true;

Hmm, this is hard to read. Why not do:

static bool is_chan_enabled(int state)
{
	switch (state) {
	case HIDMA_CH_ENABLED:
	case HIDMA_CH_RUNNING:
		return true;
	default:
		return false;
	}
}

and then:

	if (is_chan_enabled(lldev->trch_state) &&
	    is_chan_enabled(lldev->evch_state))

> +void hidma_ll_start(struct hidma_lldev *lldev)
> +{
> +	hidma_ll_hw_start(lldev);
> +}

Another dummy :(

> +/*
> + * Note that even though we stop this channel
> + * if there is a pending transaction in flight
> + * it will complete and follow the callback.
> + * This request will prevent further requests
> + * to be made.

Why the odd formatting?
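Coming back to the error handling in the ISR above: if a switch does not
map cleanly onto those bit tests, folding the error bits into a single
mask would also read better. Rough sketch only, untested; HIDMA_ERR_INT_MASK
is a new name here and the bit positions are the ones from your patch:

/* all of these error causes take the same cleanup/reset path */
#define HIDMA_ERR_INT_MASK	(BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS) | \
				 BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS) | \
				 BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS) | \
				 BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS) | \
				 BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS))

	while (cause) {
		if (cause & HIDMA_ERR_INT_MASK) {

That also keeps the list of fatal error sources in one place if bits are
ever added or removed.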
> +int hidma_ll_uninit(struct hidma_lldev *lldev)
> +{
> +	int rc = 0;
> +	u32 val;
> +
> +	if (!lldev)
> +		return -ENODEV;
> +
> +	if (lldev->initialized) {
> +		u32 required_bytes;
> +
> +		lldev->initialized = 0;
> +
> +		required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres;
> +		tasklet_kill(&lldev->task);
> +		memset(lldev->trepool, 0, required_bytes);
> +		lldev->trepool = NULL;
> +		lldev->pending_tre_count = 0;
> +		lldev->tre_write_offset = 0;
> +
> +		rc = hidma_ll_reset(lldev);
> +
> +		/*
> +		 * Clear all pending interrupts again.
> +		 * Otherwise, we observe reset complete interrupts.
> +		 */
> +		val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
> +		writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
> +		hidma_ll_enable_irq(lldev, 0);

uninit enables irq?

-- 
~Vinod