Re: [PATCH] drm/omap: Fix release of refill engine

On Thu, Oct 11, 2012 at 11:08 PM, Andy Gross <andy.gross@xxxxxx> wrote:
> During asynchronous refills, we don't wait for the refill to
> finish.  However, we cannot release the engine back to the idle
> list until it has actually completed the refill operation.  The
> engine release will now be done in the IRQ handler, but only
> for asynchronous refill operations.
>
> Synchronous refills will continue to release the engine after they
> unblock from waiting on the refill.
>
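(To make sure I'm reading the intent right, the flow is roughly the following; this is just a pseudo-C summary of the patch below, not new code:

	dmm_txn_commit(txn, wait):
		engine->async = wait ? 0 : 1;
		/* kick refill */
		if (wait)
			wait_event_interruptible_timeout(engine->wait_for_refill, ...);
		/* sync path, or any error path, releases the engine here */
		if (ret || wait)
			release_engine(engine);

	omap_dmm_irq_handler():
		wake_up_interruptible(&engine->wait_for_refill);
		if (engine->async)
			release_engine(engine);		/* async path releases here */

i.e. the intent is that exactly one of the two paths hands the engine back.)
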
> Signed-off-by: Andy Gross <andy.gross@xxxxxx>
> ---
>  drivers/staging/omapdrm/omap_dmm_priv.h  |    5 ++-
>  drivers/staging/omapdrm/omap_dmm_tiler.c |   77 ++++++++++++++++++++---------
>  2 files changed, 57 insertions(+), 25 deletions(-)
>
> diff --git a/drivers/staging/omapdrm/omap_dmm_priv.h b/drivers/staging/omapdrm/omap_dmm_priv.h
> index 09ebc50..5ea73305 100644
> --- a/drivers/staging/omapdrm/omap_dmm_priv.h
> +++ b/drivers/staging/omapdrm/omap_dmm_priv.h
> @@ -141,6 +141,8 @@ struct refill_engine {
>         /* only one trans per engine for now */
>         struct dmm_txn txn;
>
> +       unsigned int async;
> +
>         wait_queue_head_t wait_for_refill;
>
>         struct list_head idle_node;
> @@ -158,10 +160,11 @@ struct dmm {
>         dma_addr_t refill_pa;
>
>         /* refill engines */
> -       struct semaphore engine_sem;
> +       wait_queue_head_t engine_queue;
>         struct list_head idle_head;
>         struct refill_engine *engines;
>         int num_engines;
> +       atomic_t engine_counter;
>
>         /* container information */
>         int container_width;
> diff --git a/drivers/staging/omapdrm/omap_dmm_tiler.c b/drivers/staging/omapdrm/omap_dmm_tiler.c
> index 7c19c5c..2fc9218 100644
> --- a/drivers/staging/omapdrm/omap_dmm_tiler.c
> +++ b/drivers/staging/omapdrm/omap_dmm_tiler.c
> @@ -29,7 +29,6 @@
>  #include <linux/mm.h>
>  #include <linux/time.h>
>  #include <linux/list.h>
> -#include <linux/semaphore.h>
>
>  #include "omap_dmm_tiler.h"
>  #include "omap_dmm_priv.h"
> @@ -120,6 +119,18 @@ static int wait_status(struct refill_engine *engine, uint32_t wait_mask)
>         return 0;
>  }
>
> +static void release_engine(struct refill_engine *engine)
> +{
> +       unsigned long flags;
> +
> +       spin_lock_irqsave(&list_lock, flags);
> +       list_add(&engine->idle_node, &omap_dmm->idle_head);
> +       spin_unlock_irqrestore(&list_lock, flags);
> +
> +       atomic_inc(&omap_dmm->engine_counter);
> +       wake_up_interruptible(&omap_dmm->engine_queue);
> +}
> +
>  static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
>  {
>         struct dmm *dmm = arg;
> @@ -130,9 +141,13 @@ static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
>         writel(status, dmm->base + DMM_PAT_IRQSTATUS);
>
>         for (i = 0; i < dmm->num_engines; i++) {
> -               if (status & DMM_IRQSTAT_LST)
> +               if (status & DMM_IRQSTAT_LST) {
>                         wake_up_interruptible(&dmm->engines[i].wait_for_refill);
>
> +                       if (&dmm->engines[i].async)

Are you sure about that '&'?  That looks like a typo rather than what you want.
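
Presumably what was intended is just testing the flag, i.e. something like (untested):

		if (status & DMM_IRQSTAT_LST) {
			wake_up_interruptible(&dmm->engines[i].wait_for_refill);

			if (dmm->engines[i].async)
				release_engine(&dmm->engines[i]);
		}

As written, the address of the member is always non-NULL, so the condition is always true and the engine would get released even for synchronous refills (and then released a second time after the wait unblocks).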

> +                               release_engine(&dmm->engines[i]);
> +               }
> +
>                 status >>= 8;
>         }
>
> @@ -146,17 +161,24 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
>  {
>         struct dmm_txn *txn = NULL;
>         struct refill_engine *engine = NULL;
> +       int ret;
> +       unsigned long flags;
> +
>
> -       down(&dmm->engine_sem);
> +       /* wait until an engine is available */
> +       ret = wait_event_interruptible(omap_dmm->engine_queue,
> +               atomic_add_unless(&omap_dmm->engine_counter, -1, 0));
> +       if (ret)
> +               return ERR_PTR(ret);
>
>         /* grab an idle engine */
> -       spin_lock(&list_lock);
> +       spin_lock_irqsave(&list_lock, flags);
>         if (!list_empty(&dmm->idle_head)) {
>                 engine = list_entry(dmm->idle_head.next, struct refill_engine,
>                                         idle_node);
>                 list_del(&engine->idle_node);
>         }
> -       spin_unlock(&list_lock);
> +       spin_unlock_irqrestore(&list_lock, flags);
>
>         BUG_ON(!engine);
>
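
(Side note: the engine_counter/engine_queue pair is basically the counting semaphore it replaces -- roughly:

	/* "down": block until a slot is free, then take it */
	wait_event_interruptible(omap_dmm->engine_queue,
		atomic_add_unless(&omap_dmm->engine_counter, -1, 0));

	/* "up", in release_engine() */
	atomic_inc(&omap_dmm->engine_counter);
	wake_up_interruptible(&omap_dmm->engine_queue);

with the difference that the wait is now interruptible, hence the new ERR_PTR return.)
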
> @@ -174,7 +196,7 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
>   * Add region to DMM transaction.  If pages or pages[i] is NULL, then the
>   * corresponding slot is cleared (ie. dummy_pa is programmed)
>   */
> -static int dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
> +static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
>                 struct page **pages, uint32_t npages, uint32_t roll)
>  {
>         dma_addr_t pat_pa = 0;
> @@ -208,7 +230,7 @@ static int dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
>
>         txn->last_pat = pat;
>
> -       return 0;
> +       return;
>  }
>
>  /**
> @@ -238,6 +260,9 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
>                 goto cleanup;
>         }
>
> +       /* mark whether it is async to denote list management in IRQ handler */
> +       engine->async = wait ? 0 : 1;

why not just make 'async' a bool?
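i.e. just (sketch):

	/* in struct refill_engine */
	bool async;

	/* in dmm_txn_commit() */
	engine->async = !wait;

which reads more naturally than the 0/1 ternary.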

> +
>         /* kick reload */
>         writel(engine->refill_pa,
>                 dmm->base + reg[PAT_DESCR][engine->id]);
> @@ -252,11 +277,10 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
>         }
>
>  cleanup:
> -       spin_lock(&list_lock);
> -       list_add(&engine->idle_node, &dmm->idle_head);
> -       spin_unlock(&list_lock);
> +       /* only place engine back on list if we are done with it */
> +       if (ret || wait)
> +               release_engine(engine);
>
> -       up(&omap_dmm->engine_sem);
>         return ret;
>  }
>
> @@ -270,6 +294,7 @@ static int fill(struct tcm_area *area, struct page **pages,
>         struct tcm_area slice, area_s;
>         struct dmm_txn *txn;
>
> +

minor nit, but I guess you can get rid of this empty line addition

Anyways, with those minor fixes you can add my:

Signed-off-by: Rob Clark <rob@xxxxxx>


>         txn = dmm_txn_init(omap_dmm, area->tcm);
>         if (IS_ERR_OR_NULL(txn))
>                 return PTR_ERR(txn);
> @@ -280,16 +305,13 @@ static int fill(struct tcm_area *area, struct page **pages,
>                                 .x1 = slice.p1.x,  .y1 = slice.p1.y,
>                 };
>
> -               ret = dmm_txn_append(txn, &p_area, pages, npages, roll);
> -               if (ret)
> -                       goto fail;
> +               dmm_txn_append(txn, &p_area, pages, npages, roll);
>
>                 roll += tcm_sizeof(slice);
>         }
>
>         ret = dmm_txn_commit(txn, wait);
>
> -fail:
>         return ret;
>  }
>
> @@ -326,6 +348,7 @@ struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
>         struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
>         u32 min_align = 128;
>         int ret;
> +       unsigned long flags;
>
>         BUG_ON(!validfmt(fmt));
>
> @@ -347,9 +370,9 @@ struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
>         }
>
>         /* add to allocation list */
> -       spin_lock(&list_lock);
> +       spin_lock_irqsave(&list_lock, flags);
>         list_add(&block->alloc_node, &omap_dmm->alloc_head);
> -       spin_unlock(&list_lock);
> +       spin_unlock_irqrestore(&list_lock, flags);
>
>         return block;
>  }
> @@ -358,6 +381,7 @@ struct tiler_block *tiler_reserve_1d(size_t size)
>  {
>         struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
>         int num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
> +       unsigned long flags;
>
>         if (!block)
>                 return ERR_PTR(-ENOMEM);
> @@ -370,9 +394,9 @@ struct tiler_block *tiler_reserve_1d(size_t size)
>                 return ERR_PTR(-ENOMEM);
>         }
>
> -       spin_lock(&list_lock);
> +       spin_lock_irqsave(&list_lock, flags);
>         list_add(&block->alloc_node, &omap_dmm->alloc_head);
> -       spin_unlock(&list_lock);
> +       spin_unlock_irqrestore(&list_lock, flags);
>
>         return block;
>  }
> @@ -381,13 +405,14 @@ struct tiler_block *tiler_reserve_1d(size_t size)
>  int tiler_release(struct tiler_block *block)
>  {
>         int ret = tcm_free(&block->area);
> +       unsigned long flags;
>
>         if (block->area.tcm)
>                 dev_err(omap_dmm->dev, "failed to release block\n");
>
> -       spin_lock(&list_lock);
> +       spin_lock_irqsave(&list_lock, flags);
>         list_del(&block->alloc_node);
> -       spin_unlock(&list_lock);
> +       spin_unlock_irqrestore(&list_lock, flags);
>
>         kfree(block);
>         return ret;
> @@ -507,16 +532,17 @@ static int omap_dmm_remove(struct platform_device *dev)
>  {
>         struct tiler_block *block, *_block;
>         int i;
> +       unsigned long flags;
>
>         if (omap_dmm) {
>                 /* free all area regions */
> -               spin_lock(&list_lock);
> +               spin_lock_irqsave(&list_lock, flags);
>                 list_for_each_entry_safe(block, _block, &omap_dmm->alloc_head,
>                                         alloc_node) {
>                         list_del(&block->alloc_node);
>                         kfree(block);
>                 }
> -               spin_unlock(&list_lock);
> +               spin_unlock_irqrestore(&list_lock, flags);
>
>                 for (i = 0; i < omap_dmm->num_lut; i++)
>                         if (omap_dmm->tcm && omap_dmm->tcm[i])
> @@ -560,6 +586,8 @@ static int omap_dmm_probe(struct platform_device *dev)
>         INIT_LIST_HEAD(&omap_dmm->alloc_head);
>         INIT_LIST_HEAD(&omap_dmm->idle_head);
>
> +       init_waitqueue_head(&omap_dmm->engine_queue);
> +
>         /* lookup hwmod data - base address and irq */
>         mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
>         if (!mem) {
> @@ -588,6 +616,8 @@ static int omap_dmm_probe(struct platform_device *dev)
>         omap_dmm->container_width = 256;
>         omap_dmm->container_height = 128;
>
> +       atomic_set(&omap_dmm->engine_counter, omap_dmm->num_engines);
> +
>         /* read out actual LUT width and height */
>         pat_geom = readl(omap_dmm->base + DMM_PAT_GEOMETRY);
>         omap_dmm->lut_width = ((pat_geom >> 16) & 0xF) << 5;
> @@ -651,7 +681,6 @@ static int omap_dmm_probe(struct platform_device *dev)
>                 goto fail;
>         }
>
> -       sema_init(&omap_dmm->engine_sem, omap_dmm->num_engines);
>         for (i = 0; i < omap_dmm->num_engines; i++) {
>                 omap_dmm->engines[i].id = i;
>                 omap_dmm->engines[i].dmm = omap_dmm;
> --
> 1.7.5.4
>
_______________________________________________
dri-devel mailing list
dri-devel@xxxxxxxxxxxxxxxxxxxxx
http://lists.freedesktop.org/mailman/listinfo/dri-devel

