Re: [PULL] http://mercurial.intuxication.org/hg/v4l-dvb-commits

On Fri, 2009-02-27 at 21:05 +0200, Igor M. Liplianin wrote:
> On 27 February 2009, "Igor M. Liplianin" <liplianin@xxxxxx> wrote:
> > On Fri, 27 Feb 2009, Igor M. Liplianin wrote:
> > > 01/02: dm1105: not demuxing from interrupt context.
> > > http://mercurial.intuxication.org/hg/v4l-dvb-commits?cmd=changeset;node=6faf0753950b
> >
> > I'm not sure if you considered this, but the default work queue is
> > multi-threaded, with a kernel thread for each CPU.  This means that if the
> > IRQ handler calls schedule_work() while your work function is running, then
> > you can get a second copy of your work function running at the same time.
> > It doesn't look like your work function uses atomic_t or locking, so I'm
> > guessing it's not safe to run concurrently.
> >
> > For the pluto2 patch, I created a single threaded work queue.  This avoids
> > the concurrency problem and it's not like the demuxer can run in parallel
> > anyway.  Having your own work queue also means that you don't have to worry
> > about latency from whatever other tasks might be in the default work queue.
> >
> > Also consider that if the work function is queued multiple times before it
> > runs, it will only run once.  I.e., queuing a work func that's already in
> > the queue does nothing (once the function starts to run, it's no longer in
> > the queue and can be added again before it's finished).  The pluto2 patch I
> > posted didn't take this into account, but I've since fixed it.
> 
> I'll return to this :)
> But it complicates things more and more :(


Yes it does complicate things.  Here are some excerpts from what I had
to do for cx18.  Perhaps it will help you.  (Or perhaps it will not help
at all.  The CX23418 chip put a lot of requirements on me that drove my
solution.)
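
For reference, the simpler pattern from the quoted mail (one dedicated,
single-threaded work queue and a single work item per device) would look
roughly like the sketch below.  The names are made up; this is not dm1105
or pluto2 code, and it uses the 2.6.20+ INIT_WORK() signature.

#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct foo_dev {
        struct workqueue_struct *wq;    /* dedicated queue, one worker thread */
        struct work_struct demux_work;
        /* ... DMA ring buffer, dvb demux, etc. ... */
};

static void foo_demux_work(struct work_struct *work)
{
        struct foo_dev *dev = container_of(work, struct foo_dev, demux_work);

        /*
         * Runs in process context, and never concurrently with itself,
         * because the dedicated queue has only one worker thread.
         */
        /* dvb_dmx_swfilter_packets(&dev->demux, buf, count); */
}

static irqreturn_t foo_irq_handler(int irq, void *dev_id)
{
        struct foo_dev *dev = dev_id;

        /* note which hardware buffer completed, then defer the demuxing */
        queue_work(dev->wq, &dev->demux_work);
        return IRQ_HANDLED;
}

static int foo_setup(struct foo_dev *dev)
{
        dev->wq = create_singlethread_workqueue("foo_demux");
        if (dev->wq == NULL)
                return -ENOMEM;
        INIT_WORK(&dev->demux_work, foo_demux_work);
        return 0;
}

static void foo_teardown(struct foo_dev *dev)
{
        /* shut off the interrupt source first, so nothing requeues the work */
        cancel_work_sync(&dev->demux_work);     /* 2.6.22 and later */
        destroy_workqueue(dev->wq);
}

The cx18 code below has to go further than that, because more than one
mailbox command can be outstanding at a time, so it keeps a small pool of
work orders instead of a single work item.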


-------------
cx18-driver.h:
-------------

struct cx18_epu_work_order {
        struct work_struct work;
        atomic_t pending;
        struct cx18 *cx;
        unsigned long flags;
        int rpu;
        struct cx18_mailbox mb;
   	...
};

struct cx18 {
	...
        struct workqueue_struct *work_queue;
        struct cx18_epu_work_order epu_work_order[CX18_MAX_EPU_WORK_ORDERS];
	...
};



-------------
cx18-driver.c:
-------------

static int __devinit cx18_init_struct1(struct cx18 *cx)
{
        int i;
 	...
        cx->work_queue = create_singlethread_workqueue(cx->v4l2_dev.name);
        if (cx->work_queue == NULL) {
                CX18_ERR("Unable to create work hander thread\n");
                return -ENOMEM;
        }

        for (i = 0; i < CX18_MAX_EPU_WORK_ORDERS; i++) {
                cx->epu_work_order[i].cx = cx;
                ...
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
                INIT_WORK(&cx->epu_work_order[i].work, cx18_epu_work_handler);
#else
                INIT_WORK(&cx->epu_work_order[i].work, cx18_epu_work_handler,
                          &cx->epu_work_order[i].work);
#endif
        }
	...
}


#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
static void cx18_cancel_epu_work_orders(struct cx18 *cx)
{
        int i;
        for (i = 0; i < CX18_MAX_EPU_WORK_ORDERS; i++)
                cancel_work_sync(&cx->epu_work_order[i].work);
}

#endif

static void cx18_remove(struct pci_dev *pci_dev)
{
	...
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
        cx18_sw2_irq_disable(cx, IRQ_CPU_TO_EPU_ACK | IRQ_APU_TO_EPU_ACK);

        cx18_halt_firmware(cx);

        cx18_cancel_epu_work_orders(cx);
#else

        flush_workqueue(cx->work_queue);

        cx18_sw2_irq_disable(cx, IRQ_CPU_TO_EPU_ACK | IRQ_APU_TO_EPU_ACK);

        cx18_halt_firmware(cx);
#endif

        destroy_workqueue(cx->work_queue);
	...
}



----------
cx18-irq.c:
----------

static void epu_cmd(struct cx18 *cx, u32 sw1)
{
        if (sw1 & IRQ_CPU_TO_EPU)
                cx18_api_epu_cmd_irq(cx, CPU);
        if (sw1 & IRQ_APU_TO_EPU)
                cx18_api_epu_cmd_irq(cx, APU);
}

irqreturn_t cx18_irq_handler(int irq, void *dev_id)
{
        struct cx18 *cx = (struct cx18 *)dev_id;
        u32 sw1, sw2, hw2;

        sw1 = cx18_read_reg(cx, SW1_INT_STATUS) & cx->sw1_irq_mask;
	...
	if (sw1)
                cx18_write_reg_expect(cx, sw1, SW1_INT_STATUS, ~sw1, sw1);
	...
        if (sw1)
                epu_cmd(cx, sw1);
	...
}


--------------
cx18-mailbox.c:
--------------

static inline
struct cx18_epu_work_order *alloc_epu_work_order_irq(struct cx18 *cx)
{
        int i;
        struct cx18_epu_work_order *order = NULL;

        for (i = 0; i < CX18_MAX_EPU_WORK_ORDERS; i++) {
                /*
                 * We only need "pending" atomic to inspect its contents,
                 * and need not do a check and set because:
                 * 1. Any work handler thread only clears "pending" and only
                 * on one, particular work order at a time, per handler thread.
                 * 2. "pending" is only set here, and we're serialized because
                 * we're called in an IRQ handler context.
                 */
                if (atomic_read(&cx->epu_work_order[i].pending) == 0) {
                        order = &cx->epu_work_order[i];
                        atomic_set(&order->pending, 1);
                        break;
                }
        }
        return order;
}

void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
{
	...
	int submit;
	...
        order = alloc_epu_work_order_irq(cx);
        if (order == NULL) {
                CX18_WARN("Unable to find blank work order form to schedule "
                          "incoming mailbox command processing\n");
                return;
        }
	...
	[setup work order to indicate the deferrable work that must be done]
	[acknowledge the mailbox]
	...
        if (submit > 0) {
                queue_work(cx->work_queue, &order->work);
        }
	return;
}


static
void free_epu_work_order(struct cx18 *cx, struct cx18_epu_work_order *order)
{
        atomic_set(&order->pending, 0);
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
void cx18_epu_work_handler(struct work_struct *work)
{
        struct cx18_epu_work_order *order =
                        container_of(work, struct cx18_epu_work_order, work);
#else
void cx18_epu_work_handler(void *arg)
{
        struct cx18_epu_work_order *order = arg;
#endif
        struct cx18 *cx = order->cx;
        [ do deferrable work here ]
        free_epu_work_order(cx, order);
}
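
Getting back to the point in the quoted mail about a work function being
queued multiple times before it runs: for a single-work-item driver, one way
to handle it (again just a sketch, reusing the hypothetical names from the
sketch near the top of this mail, with an added atomic_t buffers_ready in
struct foo_dev, initialized to 0) is to count what the IRQ handler produced
and have the work function loop until it has drained everything, including
buffers that completed after it started running:

static irqreturn_t foo_irq_handler(int irq, void *dev_id)
{
        struct foo_dev *dev = dev_id;

        atomic_inc(&dev->buffers_ready);
        /* a no-op if the work item is still sitting in the queue */
        queue_work(dev->wq, &dev->demux_work);
        return IRQ_HANDLED;
}

static void foo_demux_work(struct work_struct *work)
{
        struct foo_dev *dev = container_of(work, struct foo_dev, demux_work);

        /*
         * Consume everything the IRQ handler produced, not just one buffer
         * per invocation, so coalesced queue_work() calls are not lost.
         * A spurious extra run simply finds the count already at 0.
         */
        while (atomic_read(&dev->buffers_ready) > 0) {
                /* demux one buffer here */
                atomic_dec(&dev->buffers_ready);
        }
}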

