1. For multi-core jpegdec, all of the HWs may run at the same time. If
one HW finishes decoding first, mtk_jpeg_dec_stop_streaming() is called
while the other HWs are still decoding their input buffers; the buffers
still owned by those HWs are then marked as done, which causes errors.
2. Add a counter to track how many buffers are being decoded. It drops
to 0 only once all buffers have been decoded, and only then is
mtk_jpeg_dec_stop_streaming() allowed to continue.

Signed-off-by: kyrie.wu <kyrie.wu@xxxxxxxxxxxx>
---
 .../media/platform/mediatek/jpeg/mtk_jpeg_core.c  | 16 ++++++++++++++++
 .../media/platform/mediatek/jpeg/mtk_jpeg_core.h  |  2 ++
 .../platform/mediatek/jpeg/mtk_jpeg_dec_hw.c      | 12 ++++++++++++
 .../platform/mediatek/jpeg/mtk_jpeg_enc_hw.c      | 11 ++++++++++-
 4 files changed, 40 insertions(+), 1 deletion(-)

diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
index 3082d0fcf974..9556accacfa0 100644
--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
@@ -856,8 +856,12 @@ static struct vb2_v4l2_buffer *mtk_jpeg_buf_remove(struct mtk_jpeg_ctx *ctx,
 static void mtk_jpeg_enc_stop_streaming(struct vb2_queue *q)
 {
 	struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(q);
+	struct mtk_jpeg_dev *jpeg = ctx->jpeg;
 	struct vb2_v4l2_buffer *vb;
 
+	if (jpeg->variant->multi_core)
+		wait_event(jpeg->hw_wq, (atomic_read(&ctx->buf_list_cnt) == 0));
+
 	while ((vb = mtk_jpeg_buf_remove(ctx, q->type)))
 		v4l2_m2m_buf_done(vb, VB2_BUF_STATE_ERROR);
 }
@@ -865,6 +869,7 @@ static void mtk_jpeg_enc_stop_streaming(struct vb2_queue *q)
 static void mtk_jpeg_dec_stop_streaming(struct vb2_queue *q)
 {
 	struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(q);
+	struct mtk_jpeg_dev *jpeg = ctx->jpeg;
 	struct vb2_v4l2_buffer *vb;
 
 	/*
@@ -872,6 +877,9 @@ static void mtk_jpeg_dec_stop_streaming(struct vb2_queue *q)
 	 * Before STREAMOFF, we still have to return the old resolution and
 	 * subsampling. Update capture queue when the stream is off.
	 */
+	if (jpeg->variant->multi_core)
+		wait_event(jpeg->hw_wq, (atomic_read(&ctx->buf_list_cnt) == 0));
+
 	if (ctx->state == MTK_JPEG_SOURCE_CHANGE &&
 	    V4L2_TYPE_IS_CAPTURE(q->type)) {
 		struct mtk_jpeg_src_buf *src_buf;
 
@@ -1181,6 +1189,7 @@ static int mtk_jpeg_open(struct file *file)
 	v4l2_fh_init(&ctx->fh, vfd);
 	file->private_data = &ctx->fh;
 	v4l2_fh_add(&ctx->fh);
+	atomic_set(&ctx->buf_list_cnt, 0);
 
 	ctx->jpeg = jpeg;
 	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(jpeg->m2m_dev, ctx,
@@ -1563,6 +1572,11 @@ static int mtk_jpegdec_set_hw_param(struct mtk_jpeg_ctx *ctx,
 	return 0;
 }
 
+static void jpeg_buf_queue_inc(struct mtk_jpeg_ctx *ctx)
+{
+	atomic_inc(&ctx->buf_list_cnt);
+}
+
 static irqreturn_t mtk_jpeg_enc_done(struct mtk_jpeg_dev *jpeg)
 {
 	struct mtk_jpeg_ctx *ctx;
@@ -1674,6 +1688,7 @@ static void mtk_jpegenc_worker(struct work_struct *work)
 			      &src_buf->vb2_buf);
 	mtk_jpeg_set_enc_params(ctx, comp_jpeg[hw_id]->reg_base);
 	mtk_jpeg_enc_start(comp_jpeg[hw_id]->reg_base);
+	jpeg_buf_queue_inc(ctx);
 	v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
 	spin_unlock_irqrestore(&comp_jpeg[hw_id]->hw_lock, flags);
 
@@ -1792,6 +1807,7 @@ static void mtk_jpegdec_worker(struct work_struct *work)
 			      &bs,
 			      &fb);
 	mtk_jpeg_dec_start(comp_jpeg[hw_id]->reg_base);
+	jpeg_buf_queue_inc(ctx);
 	v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
 	spin_unlock_irqrestore(&comp_jpeg[hw_id]->hw_lock, flags);
 
diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.h b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.h
index ce9f14e711f5..ce884c190ce5 100644
--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.h
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.h
@@ -299,6 +299,7 @@ struct mtk_jpeg_q_data {
  * @dst_done_queue: encoded frame buffer queue
  * @done_queue_lock: encoded frame operation spinlock
  * @last_done_frame_num: the last encoded frame number
+ * @buf_list_cnt: the frame buffer count own by jpeg driver
  */
 struct mtk_jpeg_ctx {
 	struct mtk_jpeg_dev *jpeg;
@@ -317,6 +318,7 @@ struct mtk_jpeg_ctx {
 	/* spinlock protecting the encode done buffer */
 	spinlock_t done_queue_lock;
 	u32 last_done_frame_num;
+	atomic_t buf_list_cnt;
 };
 
 #endif /* _MTK_JPEG_CORE_H */
diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c
index fadfc4b5e366..37c015236524 100644
--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c
@@ -550,6 +550,11 @@ static void mtk_jpegdec_put_buf(struct mtk_jpegdec_comp_dev *jpeg)
 	spin_unlock_irqrestore(&ctx->done_queue_lock, flags);
 }
 
+static void jpeg_buf_queue_dec(struct mtk_jpeg_ctx *ctx)
+{
+	atomic_dec(&ctx->buf_list_cnt);
+}
+
 static void mtk_jpegdec_timeout_work(struct work_struct *work)
 {
 	enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR;
@@ -558,9 +563,11 @@ static void mtk_jpegdec_timeout_work(struct work_struct *work)
 					    job_timeout_work.work);
 	struct mtk_jpeg_dev *master_jpeg = cjpeg->master_dev;
 	struct vb2_v4l2_buffer *src_buf, *dst_buf;
+	struct mtk_jpeg_ctx *ctx;
 
 	src_buf = cjpeg->hw_param.src_buffer;
 	dst_buf = cjpeg->hw_param.dst_buffer;
+	ctx = cjpeg->hw_param.curr_ctx;
 	v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, true);
 
 	mtk_jpeg_dec_reset(cjpeg->reg_base);
@@ -571,6 +578,7 @@ static void mtk_jpegdec_timeout_work(struct work_struct *work)
 	wake_up(&master_jpeg->hw_wq);
 	v4l2_m2m_buf_done(src_buf, buf_state);
 	mtk_jpegdec_put_buf(cjpeg);
+	jpeg_buf_queue_dec(ctx);
 }
 
 static irqreturn_t mtk_jpegdec_hw_irq_handler(int irq, void *priv)
@@ -578,12 +586,14 @@ static irqreturn_t mtk_jpegdec_hw_irq_handler(int irq, void *priv)
 	struct vb2_v4l2_buffer *src_buf, *dst_buf;
 	struct mtk_jpeg_src_buf *jpeg_src_buf;
 	enum vb2_buffer_state buf_state;
+	struct mtk_jpeg_ctx *ctx;
 	u32 dec_irq_ret;
 	u32 irq_status;
 	int i;
 
 	struct mtk_jpegdec_comp_dev *jpeg = priv;
 	struct mtk_jpeg_dev *master_jpeg = jpeg->master_dev;
+	ctx = jpeg->hw_param.curr_ctx;
 
 	cancel_delayed_work(&jpeg->job_timeout_work);
 
@@ -609,6 +619,8 @@ static irqreturn_t mtk_jpegdec_hw_irq_handler(int irq, void *priv)
 	buf_state = VB2_BUF_STATE_DONE;
 	v4l2_m2m_buf_done(src_buf, buf_state);
 	mtk_jpegdec_put_buf(jpeg);
+	jpeg_buf_queue_dec(ctx);
+
 	pm_runtime_put(jpeg->dev);
 	clk_disable_unprepare(jpeg->jdec_clk.clks->clk);
 
diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
index fc7676d13adb..c7d98f8aabfa 100644
--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
@@ -290,6 +290,11 @@ static void mtk_jpegenc_put_buf(struct mtk_jpegenc_comp_dev *jpeg)
 	spin_unlock_irqrestore(&ctx->done_queue_lock, flags);
 }
 
+static void jpeg_buf_queue_dec(struct mtk_jpeg_ctx *ctx)
+{
+	atomic_dec(&ctx->buf_list_cnt);
+}
+
 static void mtk_jpegenc_timeout_work(struct work_struct *work)
 {
 	struct delayed_work *dly_work = to_delayed_work(work);
@@ -300,9 +305,11 @@ static void mtk_jpegenc_timeout_work(struct work_struct *work)
 	struct mtk_jpeg_dev *master_jpeg = cjpeg->master_dev;
 	enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR;
 	struct vb2_v4l2_buffer *src_buf, *dst_buf;
+	struct mtk_jpeg_ctx *ctx;
 
 	src_buf = cjpeg->hw_param.src_buffer;
 	dst_buf = cjpeg->hw_param.dst_buffer;
+	ctx = cjpeg->hw_param.curr_ctx;
 	v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, true);
 
 	mtk_jpeg_enc_reset(cjpeg->reg_base);
@@ -313,6 +320,7 @@ static void mtk_jpegenc_timeout_work(struct work_struct *work)
 	wake_up(&master_jpeg->hw_wq);
 	v4l2_m2m_buf_done(src_buf, buf_state);
 	mtk_jpegenc_put_buf(cjpeg);
+	jpeg_buf_queue_dec(ctx);
 }
 
 static irqreturn_t mtk_jpegenc_hw_irq_handler(int irq, void *priv)
@@ -346,7 +354,8 @@ static irqreturn_t mtk_jpegenc_hw_irq_handler(int irq, void *priv)
 	buf_state = VB2_BUF_STATE_DONE;
 	v4l2_m2m_buf_done(src_buf, buf_state);
 	mtk_jpegenc_put_buf(jpeg);
-	pm_runtime_put(ctx->jpeg->dev);
+	jpeg_buf_queue_dec(ctx);
+	pm_runtime_put(jpeg->dev);
 	clk_disable_unprepare(jpeg->venc_clk.clks->clk);
 
 	jpeg->hw_state = MTK_JPEG_HW_IDLE;
-- 
2.46.0
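
The core of the fix is a per-context in-flight counter paired with the driver's
existing hw_wq wait queue: each worker bumps the counter when it hands a buffer
to a decode/encode core, the IRQ and timeout paths drop it when the buffer comes
back, and the stop_streaming callbacks sleep until the counter reaches zero.
Below is a minimal userspace sketch of that pattern, using C11 atomics and a
pthread condition variable in place of atomic_t and wait_event()/wake_up(); the
names in it (hw_worker, queue_buffer_to_hw, stop_streaming) are hypothetical and
only illustrate the scheme, they are not part of the driver.

/*
 * Minimal userspace sketch of the counter + wait-queue scheme used by the
 * patch. Hypothetical names; C11 atomics and a pthread condition variable
 * stand in for atomic_t and wait_event()/wake_up(). Build with -pthread.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int buf_list_cnt;		/* buffers currently owned by the "cores" */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t hw_wq = PTHREAD_COND_INITIALIZER;	/* plays the role of jpeg->hw_wq */

/* A core finishing one buffer: drop the count and wake any waiter. */
static void *hw_worker(void *arg)
{
	(void)arg;
	usleep(10000);				/* pretend to decode */
	pthread_mutex_lock(&lock);
	atomic_fetch_sub(&buf_list_cnt, 1);	/* like jpeg_buf_queue_dec() in the IRQ path */
	pthread_cond_broadcast(&hw_wq);		/* like wake_up(&master_jpeg->hw_wq) */
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Queuing side: take the reference before the core starts running. */
static void queue_buffer_to_hw(pthread_t *core)
{
	atomic_fetch_add(&buf_list_cnt, 1);	/* like jpeg_buf_queue_inc() in the worker */
	pthread_create(core, NULL, hw_worker, NULL);
}

/* Stop-streaming path: block until no core still owns a buffer. */
static void stop_streaming(void)
{
	pthread_mutex_lock(&lock);
	while (atomic_load(&buf_list_cnt) != 0)	/* like wait_event(hw_wq, cnt == 0) */
		pthread_cond_wait(&hw_wq, &lock);
	pthread_mutex_unlock(&lock);
	printf("all in-flight buffers returned; remaining buffers can be marked ERROR\n");
}

int main(void)
{
	pthread_t cores[3];
	int i;

	for (i = 0; i < 3; i++)
		queue_buffer_to_hw(&cores[i]);
	stop_streaming();
	for (i = 0; i < 3; i++)
		pthread_join(cores[i], NULL);
	return 0;
}

The important property in both the sketch and the patch is that the waiter
re-checks the counter after every wakeup, so a wakeup from one core finishing
cannot let the stop_streaming path proceed while another core still owns a
buffer.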