On Wed, Oct 07, 2020 at 01:04:50PM +0100, Christoph Hellwig wrote:
> > -static void setup_dma_device(struct ib_device *device)
> > +static void setup_dma_device(struct ib_device *device,
> > +			     struct device *dma_device)
> >  {
> > +	WARN_ON(!IS_ENABLED(CONFIG_DMA_VIRT_OPS) && !dma_device);
> > +	if (IS_ENABLED(CONFIG_DMA_VIRT_OPS) && !dma_device) {
> >  		/*
> > +		 * If the caller does not provide a DMA capable device then the
> > +		 * IB device will be used. In this case the caller should fully
> > +		 * setup the ibdev for DMA. This usually means using
> > +		 * dma_virt_ops.
> >  		 */
> > +		device->dev.dma_ops = &dma_virt_ops;
> >
> > +	rdi->ibdev.dev.dma_ops = &dma_virt_ops;
> > +	rdi->ibdev.dev.dma_parms = rdi->ibdev.dev.parent->dma_parms;
> > +	rdi->ibdev.dev.coherent_dma_mask =
> > +		rdi->ibdev.dev.parent->coherent_dma_mask;
> >
> >  	dev->dev.dma_ops = &dma_virt_ops;
> >
> > @@ -384,8 +384,9 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
> >  	base_dev->dev.parent = parent;
> >  	base_dev->dev.dma_ops = &dma_virt_ops;
> >  	base_dev->dev.dma_parms = &sdev->dma_parms;
> > -	sdev->dma_parms = (struct device_dma_parameters)
> > -		{ .max_segment_size = SZ_2G };
> > +	dma_set_max_seg_size(&base_dev->dev, UINT_MAX);
> > +	dma_coerce_mask_and_coherent(&base_dev->dev,
> > +				     dma_get_required_mask(&base_dev->dev));
>
> This still keeps the duplicate dma_virt_ops assignments in the driver.
>
> The dma_coerce_mask_and_coherent in siw also doesn't make any sense to
> me.

Sorry for this.
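
To be concrete, the respin would drop both of those from siw_device_create(),
roughly like this (untested sketch; it assumes setup_dma_device() keeps
assigning dma_virt_ops when the driver passes a NULL dma_device, so only the
dma_parms / segment-size setup stays in the driver):

@@ static struct siw_device *siw_device_create(struct net_device *netdev)
 	base_dev->dev.parent = parent;
-	base_dev->dev.dma_ops = &dma_virt_ops;
 	base_dev->dev.dma_parms = &sdev->dma_parms;
 	dma_set_max_seg_size(&base_dev->dev, UINT_MAX);
-	dma_coerce_mask_and_coherent(&base_dev->dev,
-				     dma_get_required_mask(&base_dev->dev));

The rvt and rxe dma_virt_ops assignments quoted above would go the same way.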