not demuxing from interrupt context

Here's an attempt to fix the pluto2 driver to not do this.  I don't have
the hardware, or documentation for it, so I'm not sure the patch is
right.  It's not a major undertaking; it would have been easier if the
driver had already been double buffering like it should have been.
# HG changeset patch
# User Trent Piepho <xyzzy@xxxxxxxxxxxxx>
# Date 1234955305 28800
# Node ID a5aea1a8b5bc1866d3559294a4caff90f7847ee3
# Parent  960985ba30c69c03fb030edd451bb26846ca75a0
pluto2: Demux packets from a work queue

From: Trent Piepho <xyzzy@xxxxxxxxxxxxx>

This driver was demuxing the transport stream from its interrupt handler.
This is problematic because demuxing must be locked against demuxer access
from process context.  If demuxing is done from interrupt context, local
IRQs must be disabled whenever the demuxer lock is held.  However, demuxing
is too lengthy to run with IRQs off.
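
For reference, the fix follows the usual deferred-work shape: the interrupt
handler only hands the filled buffer off, and the heavy lifting runs later
from a work function in process context.  A minimal sketch (hypothetical
names, not the actual pluto2 code):

#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct my_dev {
	struct work_struct demux_work;
	/* ... DMA buffers, packet counts, struct dvb_demux ... */
};

static irqreturn_t my_irq(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;

	/* ack the hardware and hand the buffer off; no demuxing here */
	schedule_work(&dev->demux_work);
	return IRQ_HANDLED;
}

static void my_demux_work(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, demux_work);

	/* runs in process context, so the demuxer lock can be taken
	 * without disabling local interrupts, e.g.
	 * dvb_dmx_swfilter_packets(&dev->demux, buf, npackets); */
}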

Some problems are exposed with the simple way this driver does DMA.  It has
a single DMA buffer.  It gets an interrupt after 8 TS packets, processes
(demuxes) those, and then resets the device's DMA pointer back to the start
of the buffer.

Processing the packets may take a while, e.g., when the demuxer lock is
held by a process.  If new packets are received before processing is
finished and the DMA pointer is reset, something will have to break.
Maybe the card will buffer the packets until the interrupt is acked and
lose them when its internal buffer overflows?  Maybe it will keep DMAing
past the end of the DMA buffer?

I've given the driver two DMA buffers that it switches between, though the
number can easily be increased.  When an interrupt is received, the driver
switches to the next buffer and queues a work function to deal with the old
one.  The work function allows the buffer to be demuxed from process
context.
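
The hand-off uses a fill counter advanced by the IRQ handler and a demux
counter advanced by the work function, each reduced modulo the number of
buffers when indexing.  Roughly, as a standalone sketch with hypothetical
names (the real code is pluto_dma_end() and pluto_dmx_buffer() in the
patch below):

#include <linux/workqueue.h>

#define NBUFS	2

struct my_dev {
	unsigned int fill_seq;		/* advanced by the IRQ handler */
	unsigned int dmx_seq;		/* advanced by the work function */
	struct workqueue_struct *wq;
	struct work_struct work;
	/* plus NBUFS DMA buffers and their mapped addresses */
};

/* called from the interrupt handler when a buffer has been filled */
static void my_dma_end(struct my_dev *dev)
{
	unsigned int filled = dev->fill_seq++ % NBUFS;
	unsigned int next = dev->fill_seq % NBUFS;

	/* point the card's DMA address register at buffer 'next', then
	 * queue demuxing of buffer 'filled' for process context */
	queue_work(dev->wq, &dev->work);
}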

The driver was requesting the IRQ before it had finished setting up.  This
is a bad idea: with shared IRQs the interrupt handler can be called as soon
as the IRQ is requested, even if the device's own interrupts haven't been
enabled yet.  It's very easy to put code in the interrupt handler that will
crash if the driver hasn't been properly set up yet.
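
The safe ordering makes request_irq() the last step of probe (and free_irq()
the first step of remove), which is what the patch does.  A minimal sketch,
again with hypothetical names (my_dev and my_irq as in the sketch above):

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct my_dev *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	/* map BARs, create the work queue, program the hardware with its
	 * interrupt sources still masked ... */

	/* last: with IRQF_SHARED the handler may run as soon as this
	 * returns, so everything it touches must already be valid */
	ret = request_irq(pdev->irq, my_irq, IRQF_SHARED, "mydrv", dev);
	if (ret)
		goto err_free;

	return 0;

err_free:
	kfree(dev);
	return ret;
}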

There was some code that attempted to count the number of TS packets in
the buffer by looking for start bytes (0x47).  It didn't take the buffer
size into account and would keep going past the end of the buffer as long
as it happened to find start bytes.  Unlikely, but possible.
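
The bounded version stops at the end of the buffer as well as at the first
non-sync byte, which is what the patch now does in pluto_dma_end().  As a
standalone sketch (count_ts_packets() is a hypothetical helper, not part of
the patch):

#include <linux/types.h>

/* Count leading TS packets (0x47 sync byte every 188 bytes) without ever
 * reading past the end of the buffer. */
static unsigned int count_ts_packets(const u8 *buf, size_t len)
{
	unsigned int n = 0;

	while ((n + 1) * 188 <= len && buf[n * 188] == 0x47)
		n++;

	return n;
}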

Priority: normal

Signed-off-by: Trent Piepho <xyzzy@xxxxxxxxxxxxx>

diff -r 960985ba30c6 -r a5aea1a8b5bc linux/drivers/media/dvb/pluto2/pluto2.c
--- a/linux/drivers/media/dvb/pluto2/pluto2.c	Tue Feb 10 21:31:59 2009 +0100
+++ b/linux/drivers/media/dvb/pluto2/pluto2.c	Wed Feb 18 03:08:25 2009 -0800
@@ -88,12 +88,21 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr
 
 #define TS_DMA_PACKETS		(8)
 #define TS_DMA_BYTES		(188 * TS_DMA_PACKETS)
+#define TS_DMA_BUFFERS		(2)
+#if TS_DMA_BYTES + 188 > PAGE_SIZE
+#error "DMA buffer larger than one page"
+#endif
 
 #define I2C_ADDR_TDA10046	0x10
 #define I2C_ADDR_TUA6034	0xc2
 #define NHWFILTERS		8
 
 struct pluto {
+	/* dma buffer, at start so it's aligned.  Each buf gets its own
+	 * page, even though they are smaller.  */
+	u8 dma_buf[TS_DMA_BUFFERS][PAGE_SIZE];
+	struct work_struct work;
+
 	/* pci */
 	struct pci_dev *pdev;
 	u8 __iomem *io_mem;
@@ -118,9 +127,12 @@ struct pluto {
 	unsigned int overflow;
 
 	/* dma */
-	dma_addr_t dma_addr;
-	u8 dma_buf[TS_DMA_BYTES];
-	u8 dummy[4096];
+	dma_addr_t dma_addr[TS_DMA_BUFFERS];
+	atomic_t npackets[TS_DMA_BUFFERS];
+	unsigned int dma_buf_num, dmx_buf_num;
+
+	struct workqueue_struct *dmaq;
+	char wqname[16];
 };
 
 static inline struct pluto *feed_to_pluto(struct dvb_demux_feed *feed)
@@ -232,26 +244,36 @@ static void pluto_reset_ts(struct pluto 
 	}
 }
 
-static void pluto_set_dma_addr(struct pluto *pluto)
-{
-	pluto_writereg(pluto, REG_PCAR, pluto->dma_addr);
+static void pluto_set_dma_addr(struct pluto *pluto, unsigned int num)
+{
+	pluto_writereg(pluto, REG_PCAR, pluto->dma_addr[num]);
 }
 
 static int __devinit pluto_dma_map(struct pluto *pluto)
 {
-	pluto->dma_addr = pci_map_single(pluto->pdev, pluto->dma_buf,
+	int r;
+
+	pluto->dma_addr[0] = pci_map_single(pluto->pdev, pluto->dma_buf[0],
 			TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
-	return pci_dma_mapping_error(pluto->dma_addr);
-#else
-	return pci_dma_mapping_error(pluto->pdev, pluto->dma_addr);
-#endif
+	r = pci_dma_mapping_error(pluto->pdev, pluto->dma_addr[0]);
+	if (r)
+		return r;
+
+	pluto->dma_addr[1] = pci_map_single(pluto->pdev, pluto->dma_buf[1],
+			TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
+	r = pci_dma_mapping_error(pluto->pdev, pluto->dma_addr[1]);
+	if (r)
+		pci_unmap_single(pluto->pdev, pluto->dma_addr[0],
+				 TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
+
+	return r;
 }
 
 static void pluto_dma_unmap(struct pluto *pluto)
 {
-	pci_unmap_single(pluto->pdev, pluto->dma_addr,
+	pci_unmap_single(pluto->pdev, pluto->dma_addr[0],
+			TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
+	pci_unmap_single(pluto->pdev, pluto->dma_addr[1],
 			TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
 }
 
@@ -289,9 +311,12 @@ static int pluto_stop_feed(struct dvb_de
 
 static void pluto_dma_end(struct pluto *pluto, unsigned int nbpackets)
 {
+	unsigned int bnum = pluto->dma_buf_num % TS_DMA_BUFFERS;
+	unsigned int newbnum;
+
 	/* synchronize the DMA transfer with the CPU
 	 * first so that we see updated contents. */
-	pci_dma_sync_single_for_cpu(pluto->pdev, pluto->dma_addr,
+	pci_dma_sync_single_for_cpu(pluto->pdev, pluto->dma_addr[bnum],
 			TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
 
 	/* Workaround for broken hardware:
@@ -305,28 +330,73 @@ static void pluto_dma_end(struct pluto *
 	 *     has been transfered. Only a reset seems to solve this
 	 */
 	if ((nbpackets == 0) || (nbpackets > TS_DMA_PACKETS)) {
-		unsigned int i = 0;
-		while (pluto->dma_buf[i] == 0x47)
-			i += 188;
-		nbpackets = i / 188;
-		if (i == 0) {
+		unsigned int i;
+
+		for (i = 0, nbpackets = 0;
+		     i < sizeof(pluto->dma_buf[bnum]) && pluto->dma_buf[bnum][i] == 0x47;
+		     i += 188, nbpackets++)
+			;
+
+		if (nbpackets == 0) {
 			pluto_reset_ts(pluto, 1);
-			dev_printk(KERN_DEBUG, &pluto->pdev->dev, "resetting TS because of invalid packet counter\n");
+			dev_dbg(&pluto->pdev->dev,
+				"resetting TS because of invalid packet counter\n");
 		}
 	}
 
-	dvb_dmx_swfilter_packets(&pluto->demux, pluto->dma_buf, nbpackets);
+	/* Mark buffer as used */
+	atomic_set(&pluto->npackets[bnum], nbpackets);
+
+	if (nbpackets) {
+		/* Switch to the next buffer */
+		pluto->dma_buf_num++;
+
+		/* queue sending buffer to demuxer */
+		queue_work(pluto->dmaq, &pluto->work);
+	}
+	newbnum = pluto->dma_buf_num % TS_DMA_BUFFERS;
+
+	if (atomic_read(&pluto->npackets[newbnum]))
+		dev_warn(&pluto->pdev->dev, "Overflow, buffer not ready");
+		/* should probably do something here */
+
+	/* Sync the new buffer and give it back to the card */
+	pci_dma_sync_single_for_device(pluto->pdev, pluto->dma_addr[newbnum],
+				       TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
+
+	/* reset the dma address */
+	pluto_set_dma_addr(pluto, newbnum);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+static void pluto_dmx_buffer(void *_pluto)
+#else
+static void pluto_dmx_buffer(struct work_struct *work)
+#endif
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+	struct pluto *pluto = _pluto;
+#else
+	struct pluto *pluto = container_of(work, struct pluto, work);
+#endif
+	unsigned int bnum = pluto->dmx_buf_num % TS_DMA_BUFFERS;
+	unsigned int npackets = atomic_read(&pluto->npackets[bnum]);
+	unsigned int i;
+
+	if (!npackets)
+		return;
+
+	dvb_dmx_swfilter_packets(&pluto->demux, pluto->dma_buf[bnum], npackets);
 
 	/* clear the dma buffer. this is needed to be able to identify
 	 * new valid ts packets above */
-	memset(pluto->dma_buf, 0, nbpackets * 188);
-
-	/* reset the dma address */
-	pluto_set_dma_addr(pluto);
-
-	/* sync the buffer and give it back to the card */
-	pci_dma_sync_single_for_device(pluto->pdev, pluto->dma_addr,
-			TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
+	for (i = 0, npackets++; npackets; npackets--, i += 188)
+		pluto->dma_buf[bnum][i] = 0;
+
+	/* Mark buffer as empty */
+	atomic_set(&pluto->npackets[bnum], 0);
+
+	pluto->dmx_buf_num++;
 }
 
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
@@ -349,20 +419,19 @@ static irqreturn_t pluto_irq(int irq, vo
 		return IRQ_HANDLED;
 	}
 
+	/* overflow interrupt */
+	if (tscr & TSCR_OVR)
+		pluto->overflow++;
+
 	/* dma end interrupt */
 	if (tscr & TSCR_DE) {
 		pluto_dma_end(pluto, (tscr & TSCR_NBPACKETS) >> 24);
-		/* overflow interrupt */
-		if (tscr & TSCR_OVR)
-			pluto->overflow++;
 		if (pluto->overflow) {
 			dev_err(&pluto->pdev->dev, "overflow irq (%d)\n",
 					pluto->overflow);
 			pluto_reset_ts(pluto, 1);
 			pluto->overflow = 0;
 		}
-	} else if (tscr & TSCR_OVR) {
-		pluto->overflow++;
 	}
 
 	/* ACK the interrupt */
@@ -412,7 +481,7 @@ static int __devinit pluto_hw_init(struc
 #endif
 	/* map DMA and set address */
 	pluto_dma_map(pluto);
-	pluto_set_dma_addr(pluto);
+	pluto_set_dma_addr(pluto, 0);
 
 	/* enable interrupts */
 	pluto_enable_irqs(pluto);
@@ -610,7 +679,7 @@ static int __devinit pluto2_probe(struct
 
 	pluto = kzalloc(sizeof(struct pluto), GFP_KERNEL);
 	if (!pluto)
-		goto out;
+		return -ENOMEM;
 
 	pluto->pdev = pdev;
 
@@ -639,13 +708,9 @@ static int __devinit pluto2_probe(struct
 
 	pci_set_drvdata(pdev, pluto);
 
-	ret = request_irq(pdev->irq, pluto_irq, IRQF_SHARED, DRIVER_NAME, pluto);
+	ret = pluto_hw_init(pluto);
 	if (ret < 0)
 		goto err_pci_iounmap;
-
-	ret = pluto_hw_init(pluto);
-	if (ret < 0)
-		goto err_free_irq;
 
 	/* i2c */
 	i2c_set_adapdata(&pluto->i2c_adap, pluto);
@@ -721,9 +786,27 @@ static int __devinit pluto2_probe(struct
 		goto err_disconnect_frontend;
 
 	dvb_net_init(dvb_adapter, &pluto->dvbnet, dmx);
-out:
-	return ret;
-
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+	INIT_WORK(&pluto->work, pluto_dmx_buffer, pluto);
+#else
+	INIT_WORK(&pluto->work, pluto_dmx_buffer);
+#endif
+	sprintf(pluto->wqname, "%s[%d] dvb", dvb_adapter->name, dvb_adapter->num);
+	pluto->dmaq = create_singlethread_workqueue(pluto->wqname);
+	if (!pluto->dmaq)
+		goto err_dvb_net;
+
+	ret = request_irq(pdev->irq, pluto_irq, IRQF_SHARED, DRIVER_NAME, pluto);
+	if (ret < 0)
+		goto err_workqueue;
+
+	return 0;
+
+err_workqueue:
+	destroy_workqueue(pluto->dmaq);
+err_dvb_net:
+	dvb_net_release(&pluto->dvbnet);
 err_disconnect_frontend:
 	dmx->disconnect_frontend(dmx);
 err_remove_mem_frontend:
@@ -740,8 +823,6 @@ err_i2c_del_adapter:
 	i2c_del_adapter(&pluto->i2c_adap);
 err_pluto_hw_exit:
 	pluto_hw_exit(pluto);
-err_free_irq:
-	free_irq(pdev->irq, pluto);
 err_pci_iounmap:
 	pci_iounmap(pdev, pluto->io_mem);
 err_pci_release_regions:
@@ -751,7 +832,7 @@ err_kfree:
 err_kfree:
 	pci_set_drvdata(pdev, NULL);
 	kfree(pluto);
-	goto out;
+	return ret;
 }
 
 static void __devexit pluto2_remove(struct pci_dev *pdev)
diff -r 960985ba30c6 -r a5aea1a8b5bc v4l/compat.h
--- a/v4l/compat.h	Tue Feb 10 21:31:59 2009 +0100
+++ b/v4l/compat.h	Wed Feb 18 03:08:25 2009 -0800
@@ -421,4 +421,8 @@ static inline int usb_endpoint_type(cons
 	} while (0)
 #endif
 
-#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
+#define pci_dma_mapping_error(dev, addr)	pci_dma_mapping_error(addr)
+#endif
+
+#endif
