Always call dpu_hw_intr_clear_intr_status_nolock() from
dpu_hw_intr_dispatch_irq(). This simplifies the callback function and
enforces clearing of the HW interrupt status.

Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@xxxxxxxxxx>
---
 drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c  |  9 -----
 .../gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c | 39 +++++++++----------
 .../gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h |  9 -----
 3 files changed, 18 insertions(+), 39 deletions(-)

diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
index c977e8484174..dadb4103b0eb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
@@ -41,15 +41,6 @@ static void dpu_core_irq_callback_handler(void *arg, int irq_idx)
 		if (cb->func)
 			cb->func(cb->arg, irq_idx);
 	spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
-
-	/*
-	 * Clear pending interrupt status in HW.
-	 * NOTE: dpu_core_irq_callback_handler is protected by top-level
-	 * spinlock, so it is safe to clear any interrupt status here.
-	 */
-	dpu_kms->hw_intr->ops.clear_intr_status_nolock(
-			dpu_kms->hw_intr,
-			irq_idx);
 }
 
 int dpu_core_irq_idx_lookup(struct dpu_kms *dpu_kms,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
index a8d463a8e8fe..3d48ad69c901 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -771,6 +771,22 @@ static int dpu_hw_intr_irqidx_lookup(enum dpu_intr_type intr_type,
 	return -EINVAL;
 }
 
+static void dpu_hw_intr_clear_intr_status_nolock(struct dpu_hw_intr *intr,
+		int irq_idx)
+{
+	int reg_idx;
+
+	if (!intr)
+		return;
+
+	reg_idx = dpu_irq_map[irq_idx].reg_idx;
+	DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
+			dpu_irq_map[irq_idx].irq_mask);
+
+	/* ensure register writes go through */
+	wmb();
+}
+
 static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
 		void (*cbfunc)(void *, int),
 		void *arg)
@@ -837,9 +853,8 @@ static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
 				 */
 				if (cbfunc)
 					cbfunc(arg, irq_idx);
-				else
-					intr->ops.clear_intr_status_nolock(
-							intr, irq_idx);
+
+				dpu_hw_intr_clear_intr_status_nolock(intr, irq_idx);
 
 				/*
 				 * When callback finish, clear the irq_status
@@ -1004,23 +1019,6 @@ static int dpu_hw_intr_disable_irqs(struct dpu_hw_intr *intr)
 
 	return 0;
 }
-
-static void dpu_hw_intr_clear_intr_status_nolock(struct dpu_hw_intr *intr,
-		int irq_idx)
-{
-	int reg_idx;
-
-	if (!intr)
-		return;
-
-	reg_idx = dpu_irq_map[irq_idx].reg_idx;
-	DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
-			dpu_irq_map[irq_idx].irq_mask);
-
-	/* ensure register writes go through */
-	wmb();
-}
-
 static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr,
 		int irq_idx, bool clear)
 {
@@ -1062,7 +1060,6 @@ static void __setup_intr_ops(struct dpu_hw_intr_ops *ops)
 	ops->dispatch_irqs = dpu_hw_intr_dispatch_irq;
 	ops->clear_all_irqs = dpu_hw_intr_clear_irqs;
 	ops->disable_all_irqs = dpu_hw_intr_disable_irqs;
-	ops->clear_intr_status_nolock = dpu_hw_intr_clear_intr_status_nolock;
 	ops->get_interrupt_status = dpu_hw_intr_get_interrupt_status;
 }
 
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
index d8b9d5fe6b8c..8d005687b265 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
@@ -141,15 +141,6 @@ struct dpu_hw_intr_ops {
 			void (*cbfunc)(void *arg, int irq_idx),
 			void *arg);
 
-	/**
-	 * clear_intr_status_nolock() - clears the HW interrupts without lock
-	 * @intr: HW interrupt handle
-	 * @irq_idx: Lookup irq index return from irq_idx_lookup
-	 */
-	void (*clear_intr_status_nolock)(
-			struct dpu_hw_intr *intr,
-			int irq_idx);
-
 	/**
 	 * get_interrupt_status - Gets HW interrupt status, and clear if set,
 	 * based on given lookup IRQ index.
-- 
2.30.2
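For readers skimming the diff, below is a minimal standalone sketch of the resulting control flow. It uses mock types and names (mock_intr, mock_dispatch, mock_clear_status are placeholders, not the driver code): after this change the dispatcher always acks the interrupt status in hardware itself, whether or not a callback is registered, so the registered callback no longer has to clear it.

/*
 * Illustrative sketch only (mock types, not the DPU driver code):
 * the dispatcher clears the HW status unconditionally after invoking
 * any registered callback, instead of only when no callback exists.
 */
#include <stdio.h>

struct mock_intr { int cleared; };

/* stands in for dpu_hw_intr_clear_intr_status_nolock() */
static void mock_clear_status(struct mock_intr *intr, int irq_idx)
{
	intr->cleared = 1;
	printf("irq %d: status cleared by dispatcher\n", irq_idx);
}

/* stands in for the relevant part of dpu_hw_intr_dispatch_irq() */
static void mock_dispatch(struct mock_intr *intr,
			  void (*cbfunc)(void *, int), void *arg, int irq_idx)
{
	if (cbfunc)
		cbfunc(arg, irq_idx);

	/* unconditional clear; previously done only when cbfunc was NULL */
	mock_clear_status(intr, irq_idx);
}

static void mock_cb(void *arg, int irq_idx)
{
	(void)arg;
	printf("irq %d: callback ran, no clearing needed here\n", irq_idx);
}

int main(void)
{
	struct mock_intr intr = { 0 };

	mock_dispatch(&intr, mock_cb, NULL, 42);
	return intr.cleared ? 0 : 1;
}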