[+cc Marek]

On Mon, Jun 3, 2013 at 6:44 AM, Michal Simek <michal.simek@xxxxxxxxxx> wrote:
> Check that dma_ops are initialized correctly.
>
> Signed-off-by: Michal Simek <michal.simek@xxxxxxxxxx>
> ---
> Functions dma_mmap_attrs(), dma_get_sgtable_attrs()
> already have this checking.
>
> ---
>  include/asm-generic/dma-mapping-common.h | 12 ++++++++++++
>  1 file changed, 12 insertions(+)
>
> diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
> index de8bf89..d430cab 100644
> --- a/include/asm-generic/dma-mapping-common.h
> +++ b/include/asm-generic/dma-mapping-common.h
> @@ -16,6 +16,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
>  	dma_addr_t addr;
>
>  	kmemcheck_mark_initialized(ptr, size);
> +	BUG_ON(!ops);

Does this actually help anything?  I expected that if "ops" is NULL, we
would just oops anyway when we attempted to call "ops->map_page()"
because we already trap null pointer dereferences.

At least, when I tried leaving a pci_bus.ops pointer NULL, I got a nice
panic and backtrace even without adding an explicit BUG_ON().

I cc'd Marek, who added the similar BUG_ON()s in dma_mmap_attrs() and
dma_get_sgtable_attrs() with d2b7428eb0 and 64ccc9c033.

>  	BUG_ON(!valid_dma_direction(dir));
>  	addr = ops->map_page(dev, virt_to_page(ptr),
>  			     (unsigned long)ptr & ~PAGE_MASK, size,
> @@ -33,6 +34,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
>  {
>  	struct dma_map_ops *ops = get_dma_ops(dev);
>
> +	BUG_ON(!ops);
>  	BUG_ON(!valid_dma_direction(dir));
>  	if (ops->unmap_page)
>  		ops->unmap_page(dev, addr, size, dir, attrs);
> @@ -49,6 +51,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
>
>  	for_each_sg(sg, s, nents, i)
>  		kmemcheck_mark_initialized(sg_virt(s), s->length);
> +	BUG_ON(!ops);
>  	BUG_ON(!valid_dma_direction(dir));
>  	ents = ops->map_sg(dev, sg, nents, dir, attrs);
>  	debug_dma_map_sg(dev, sg, nents, ents, dir);
> @@ -62,6 +65,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
>  {
>  	struct dma_map_ops *ops = get_dma_ops(dev);
>
> +	BUG_ON(!ops);
>  	BUG_ON(!valid_dma_direction(dir));
>  	debug_dma_unmap_sg(dev, sg, nents, dir);
>  	if (ops->unmap_sg)
> @@ -76,6 +80,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
>  	dma_addr_t addr;
>
>  	kmemcheck_mark_initialized(page_address(page) + offset, size);
> +	BUG_ON(!ops);
>  	BUG_ON(!valid_dma_direction(dir));
>  	addr = ops->map_page(dev, page, offset, size, dir, NULL);
>  	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
> @@ -88,6 +93,7 @@ static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
>  {
>  	struct dma_map_ops *ops = get_dma_ops(dev);
>
> +	BUG_ON(!ops);
>  	BUG_ON(!valid_dma_direction(dir));
>  	if (ops->unmap_page)
>  		ops->unmap_page(dev, addr, size, dir, NULL);
> @@ -100,6 +106,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
>  {
>  	struct dma_map_ops *ops = get_dma_ops(dev);
>
> +	BUG_ON(!ops);
>  	BUG_ON(!valid_dma_direction(dir));
>  	if (ops->sync_single_for_cpu)
>  		ops->sync_single_for_cpu(dev, addr, size, dir);
> @@ -112,6 +119,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
>  {
>  	struct dma_map_ops *ops = get_dma_ops(dev);
>
> +	BUG_ON(!ops);
>  	BUG_ON(!valid_dma_direction(dir));
>  	if (ops->sync_single_for_device)
>  		ops->sync_single_for_device(dev, addr, size, dir);
> @@ -126,6 +134,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
>  {
>  	const struct dma_map_ops *ops = get_dma_ops(dev);
>
> +	BUG_ON(!ops);
>  	BUG_ON(!valid_dma_direction(dir));
>  	if (ops->sync_single_for_cpu)
>  		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
> @@ -140,6 +149,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
>  {
>  	const struct dma_map_ops *ops = get_dma_ops(dev);
>
> +	BUG_ON(!ops);
>  	BUG_ON(!valid_dma_direction(dir));
>  	if (ops->sync_single_for_device)
>  		ops->sync_single_for_device(dev, addr + offset, size, dir);
> @@ -152,6 +162,7 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
>  {
>  	struct dma_map_ops *ops = get_dma_ops(dev);
>
> +	BUG_ON(!ops);
>  	BUG_ON(!valid_dma_direction(dir));
>  	if (ops->sync_sg_for_cpu)
>  		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
> @@ -164,6 +175,7 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
>  {
>  	struct dma_map_ops *ops = get_dma_ops(dev);
>
> +	BUG_ON(!ops);
>  	BUG_ON(!valid_dma_direction(dir));
>  	if (ops->sync_sg_for_device)
>  		ops->sync_sg_for_device(dev, sg, nelems, dir);
> --
> 1.8.2.3
>
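To illustrate the question above, here is a rough, hypothetical test-module
sketch (not part of this patch; the module, parameter, and function names
are made up) comparing the two failure modes: hitting an explicit
BUG_ON(!ops) versus simply reading through a NULL ops pointer and relying
on the NULL-pointer fault:

/*
 * Hypothetical test module, only to sketch the comparison; the names
 * (null_ops_test, use_bug_on) are invented for illustration.  Load with
 * use_bug_on=1 to hit the explicit BUG_ON(), or with use_bug_on=0 to take
 * the plain NULL pointer dereference instead.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/dma-mapping.h>

static bool use_bug_on;
module_param(use_bug_on, bool, 0444);

/* Stands in for a device whose dma_ops was never set up. */
static struct dma_map_ops *null_ops;

static int __init null_ops_test_init(void)
{
        struct dma_map_ops *ops = null_ops;     /* NULL, as with uninitialized dma_ops */

        if (use_bug_on)
                BUG_ON(!ops);   /* the panic report points at this line */

        /*
         * Without the BUG_ON(), the load of ops->map_page is a read from
         * an address near 0, which faults and also produces a panic plus
         * a backtrace into this caller, like the pci_bus.ops case above.
         */
        pr_info("map_page = %p\n", ops->map_page);
        return 0;
}
module_init(null_ops_test_init);

MODULE_LICENSE("GPL");

Assuming the fault is trapped as described above, the output differs only
in whether the report points at the BUG_ON() line or at the faulting load
inside the caller, which is why I'm wondering what the extra check buys us.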