Re: [PATCH v3] dmaengine: zxdma: Support ZTE ZX296702 dma

On Tue, Apr 21, 2015 at 10:50:59AM +0800, Jun Nie wrote:
> +static int zx_dma_probe(struct platform_device *op)
> +{
> +	struct zx_dma_dev *d;
> +	const struct of_device_id *of_id;
> +	struct resource *iores;
> +	int i, ret = 0, irq = 0;
> +
> +	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
> +	if (!iores)
> +		return -EINVAL;
> +
> +	d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
> +	if (!d)
> +		return -ENOMEM;
> +
> +	d->base = devm_ioremap_resource(&op->dev, iores);
> +	if (IS_ERR(d->base)) {
> +		ret = PTR_ERR(d->base);
> +		goto dev_mem;
> +	}
> +
> +	of_id = of_match_device(zx6702_dma_dt_ids, &op->dev);

You're not using of_id anymore, so both the variable and the
of_match_device() call can go.
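
With that gone, a local device_node pointer keeps the property reads
short; something like this (np is just a suggested local, untested):

	struct device_node *np = op->dev.of_node;

	of_property_read_u32(np, "dma-channels", &d->dma_channels);
	of_property_read_u32(np, "dma-requests", &d->dma_requests);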

> +	of_property_read_u32((&op->dev)->of_node,
> +			     "dma-channels", &d->dma_channels);
> +	of_property_read_u32((&op->dev)->of_node,
> +			     "dma-requests", &d->dma_requests);
> +	if (!d->dma_requests || !d->dma_channels) {
> +		ret = -EINVAL;
> +		goto dev_mem;
> +	}
> +
> +	d->clk = devm_clk_get(&op->dev, NULL);
> +	if (IS_ERR(d->clk)) {
> +		dev_err(&op->dev, "no dma clk\n");
> +		ret = PTR_ERR(d->clk);
> +		goto dev_mem;
> +	}
> +
> +	irq = platform_get_irq(op, 0);
> +	ret = devm_request_irq(&op->dev, irq, zx_dma_int_handler,
> +			       0, DRIVER_NAME, d);
> +	if (ret)
> +		goto dev_mem;
> +
> +	/* A DMA memory pool for LLIs, align on 32-byte boundary */
> +	d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
> +			LLI_BLOCK_SIZE, 32, 0);
> +	if (!d->pool) {
> +		ret = -ENOMEM;
> +		goto dev_mem;
> +	}
> +
> +	/* init phy channel */
> +	d->phy = devm_kzalloc(&op->dev,
> +		d->dma_channels * sizeof(struct zx_dma_phy), GFP_KERNEL);
> +	if (!d->phy) {
> +		ret = -ENOMEM;
> +		goto free_pool;
> +	}
> +
> +	for (i = 0; i < d->dma_channels; i++) {
> +		struct zx_dma_phy *p = &d->phy[i];
> +
> +		p->idx = i;
> +		p->base = d->base + i * 0x40;
> +	}
> +
> +	INIT_LIST_HEAD(&d->slave.channels);
> +	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
> +	dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
> +	dma_cap_set(DMA_PRIVATE, d->slave.cap_mask);
> +	d->slave.dev = &op->dev;
> +	d->slave.device_free_chan_resources = zx_dma_free_chan_resources;
> +	d->slave.device_tx_status = zx_dma_tx_status;
> +	d->slave.device_prep_dma_memcpy = zx_dma_prep_memcpy;
> +	d->slave.device_prep_slave_sg = zx_dma_prep_slave_sg;
> +	d->slave.device_issue_pending = zx_dma_issue_pending;
> +	d->slave.device_config = zx_dma_config;
> +	d->slave.device_terminate_all = zx_dma_terminate_all;
> +	d->slave.copy_align = DMA_ALIGN;
> +	d->slave.src_addr_widths = ZX_DMA_BUSWIDTHS;
> +	d->slave.dst_addr_widths = ZX_DMA_BUSWIDTHS;
> +	d->slave.directions = BIT(DMA_MEM_TO_MEM) | BIT(DMA_MEM_TO_DEV)
> +			| BIT(DMA_DEV_TO_MEM);
> +	d->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
> +
> +	/* init virtual channel */
> +	d->chans = devm_kzalloc(&op->dev,
> +		d->dma_requests * sizeof(struct zx_dma_chan), GFP_KERNEL);
> +	if (!d->chans) {
> +		ret = -ENOMEM;
> +		goto free_phy;
> +	}
> +
> +	for (i = 0; i < d->dma_requests; i++) {
> +		struct zx_dma_chan *c = &d->chans[i];
> +
> +		c->status = DMA_IN_PROGRESS;
> +		INIT_LIST_HEAD(&c->node);
> +		c->vc.desc_free = zx_dma_free_desc;
> +		vchan_init(&c->vc, &d->slave);
> +	}
> +
> +	/* Enable clock before accessing registers */
> +	ret = clk_prepare_enable(d->clk);
> +	if (ret < 0) {
> +		dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
> +		goto free_vc;
> +	}
> +
> +	zx_dma_init_state(d);
> +
> +	spin_lock_init(&d->lock);
> +	INIT_LIST_HEAD(&d->chan_pending);
> +	platform_set_drvdata(op, d);
> +
> +	ret = dma_async_device_register(&d->slave);
> +	if (ret)
> +		goto clk_dis;
> +
> +	ret = of_dma_controller_register((&op->dev)->of_node,
> +					 zx_of_dma_simple_xlate, d);
> +	if (ret)
> +		goto of_dma_register_fail;
> +
> +	dev_info(&op->dev, "initialized\n");
> +	return 0;
> +
> +of_dma_register_fail:
> +	dma_async_device_unregister(&d->slave);
> +clk_dis:
> +	clk_disable_unprepare(d->clk);
> +free_vc:
> +	devm_kfree(&op->dev, d->chans);
> +free_phy:
> +	devm_kfree(&op->dev, d->phy);
> +free_pool:
> +	dmam_pool_destroy(d->pool);
> +dev_mem:
> +	devm_kfree(&op->dev, d);

The point of having all those devm_ functions is precisely to remove the
need for calls like these, so I think they're redundant.
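
Everything allocated with devm_kzalloc()/dmam_pool_create() is released
automatically when probe fails or the device is unbound, so the early
error paths can simply return the error code, and only the state set up
after clk_prepare_enable() and dma_async_device_register() needs explicit
unwinding. Roughly (untested), the tail of probe could become:

	ret = of_dma_controller_register(op->dev.of_node,
					 zx_of_dma_simple_xlate, d);
	if (ret)
		goto of_dma_register_fail;

	dev_info(&op->dev, "initialized\n");
	return 0;

of_dma_register_fail:
	dma_async_device_unregister(&d->slave);
clk_dis:
	clk_disable_unprepare(d->clk);
	return ret;
}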

> +	return ret;
> +}
> +
> +static int zx_dma_remove(struct platform_device *op)
> +{
> +	struct zx_dma_chan *c, *cn;
> +	struct zx_dma_dev *d = platform_get_drvdata(op);
> +
> +	dma_async_device_unregister(&d->slave);
> +	of_dma_controller_free((&op->dev)->of_node);
> +
> +	list_for_each_entry_safe(c, cn, &d->slave.channels,
> +				 vc.chan.device_node) {
> +		list_del(&c->vc.chan.device_node);
> +	}
> +	clk_disable_unprepare(d->clk);
> +	dmam_pool_destroy(d->pool);

And here as well.
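
Same reasoning: the pool comes from dmam_pool_create(), so it is freed
automatically on driver detach and the explicit dmam_pool_destroy() can be
dropped. Untested sketch of the trimmed remove() (assuming it just returns
0 at the end, which the quote cuts off):

static int zx_dma_remove(struct platform_device *op)
{
	struct zx_dma_chan *c, *cn;
	struct zx_dma_dev *d = platform_get_drvdata(op);

	dma_async_device_unregister(&d->slave);
	of_dma_controller_free(op->dev.of_node);

	list_for_each_entry_safe(c, cn, &d->slave.channels,
				 vc.chan.device_node)
		list_del(&c->vc.chan.device_node);
	clk_disable_unprepare(d->clk);

	return 0;
}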

It looks fine otherwise. Once you have fixed those minor issues, you can add my
Reviewed-by: Maxime Ripard <maxime.ripard@xxxxxxxxxxxxxxxxxx>

Thanks!
Maxime

-- 
Maxime Ripard, Free Electrons
Embedded Linux, Kernel and Android engineering
http://free-electrons.com


