Re: [PATCH v2 3/4] dmaengine: ti: k3-udma-glue: Add function to request TX channel by ID

On 12/12/2023 13:10, Siddharth Vadapalli wrote:
> The existing function k3_udma_glue_request_tx_chn() supports requesting
> a TX DMA channel by its name. Add support for requesting a TX channel
> by ID in the form of a new function, k3_udma_glue_request_tx_chn_by_id(),
> and export it for use by drivers which are probed by alternate
> (non-device-tree) methods but still wish to use the existing DMA APIs.
> Such drivers could be informed of the TX channel to use via RPMsg, for
> example.
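
To make the intended usage concrete: a sketch only, not part of this
series, of roughly how a driver that is told the PSI-L thread ID over
RPMsg could call the new API. The my_setup_tx() wrapper, ring sizes and
swdata size below are made up for illustration.

/* needs <linux/dma/k3-udma-glue.h>, <linux/soc/ti/k3-ringacc.h>, <linux/err.h> */
static int my_setup_tx(struct device *dev, struct device_node *udmax_np,
                       u32 thread_id)
{
        struct k3_udma_glue_tx_channel_cfg cfg = { };
        struct k3_udma_glue_tx_channel *tx_chn;

        cfg.swdata_size = sizeof(void *);
        cfg.tx_cfg.size = 128;
        cfg.tx_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
        cfg.tx_cfg.mode = K3_RINGACC_RING_MODE_RING;
        cfg.txcq_cfg = cfg.tx_cfg;

        /* thread_id comes from RPMsg instead of a dmas/dma-names lookup */
        tx_chn = k3_udma_glue_request_tx_chn_by_id(dev, &cfg, udmax_np,
                                                   thread_id);
        if (IS_ERR(tx_chn))
                return PTR_ERR(tx_chn);

        /* descriptor pool setup, k3_udma_glue_tx_get_irq(), etc. go here */
        return 0;
}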
> 
> Since the implementation of k3_udma_glue_request_tx_chn_by_id() reuses
> most of the code in k3_udma_glue_request_tx_chn(), create a new function
> for the common code, named k3_udma_glue_request_tx_chn_common().
> 
> Signed-off-by: Siddharth Vadapalli <s-vadapalli@xxxxxx>
> ---
> Changes since v1:
> - Updated commit message indicating the use-case for which the patch is
>   being added.
> 
>  drivers/dma/ti/k3-udma-glue.c    | 101 +++++++++++++++++++++++--------
>  include/linux/dma/k3-udma-glue.h |   4 ++
>  2 files changed, 79 insertions(+), 26 deletions(-)
> 
> diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
> index eff1ae3d3efe..ea5119a5e8eb 100644
> --- a/drivers/dma/ti/k3-udma-glue.c
> +++ b/drivers/dma/ti/k3-udma-glue.c
> @@ -274,29 +274,13 @@ static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
>  	return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
>  }
>  
> -struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
> -		const char *name, struct k3_udma_glue_tx_channel_cfg *cfg)
> +static int
> +k3_udma_glue_request_tx_chn_common(struct device *dev,
> +				   struct k3_udma_glue_tx_channel *tx_chn,
> +				   struct k3_udma_glue_tx_channel_cfg *cfg)
>  {
> -	struct k3_udma_glue_tx_channel *tx_chn;
>  	int ret;
>  
> -	tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
> -	if (!tx_chn)
> -		return ERR_PTR(-ENOMEM);
> -
> -	tx_chn->common.dev = dev;
> -	tx_chn->common.swdata_size = cfg->swdata_size;
> -	tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
> -	tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
> -	tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
> -	tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;
> -
> -	/* parse of udmap channel */
> -	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
> -					&tx_chn->common, true);
> -	if (ret)
> -		goto err;
> -
>  	tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib,
>  						tx_chn->common.psdata_size,
>  						tx_chn->common.swdata_size);
> @@ -312,7 +296,7 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
>  	if (IS_ERR(tx_chn->udma_tchanx)) {
>  		ret = PTR_ERR(tx_chn->udma_tchanx);
>  		dev_err(dev, "UDMAX tchanx get err %d\n", ret);
> -		goto err;
> +		return ret;
>  	}
>  	tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);
>  
> @@ -325,7 +309,7 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
>  		dev_err(dev, "Channel Device registration failed %d\n", ret);
>  		put_device(&tx_chn->common.chan_dev);
>  		tx_chn->common.chan_dev.parent = NULL;
> -		goto err;
> +		return ret;
>  	}
>  
>  	if (xudma_is_pktdma(tx_chn->common.udmax)) {
> @@ -349,7 +333,7 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
>  					     &tx_chn->ringtxcq);
>  	if (ret) {
>  		dev_err(dev, "Failed to get TX/TXCQ rings %d\n", ret);
> -		goto err;
> +		return ret;
>  	}
>  
>  	/* Set the dma_dev for the rings to be configured */
> @@ -365,13 +349,13 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
>  	ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
>  	if (ret) {
>  		dev_err(dev, "Failed to cfg ringtx %d\n", ret);
> -		goto err;
> +		return ret;
>  	}
>  
>  	ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg);
>  	if (ret) {
>  		dev_err(dev, "Failed to cfg ringtx %d\n", ret);
> -		goto err;
> +		return ret;
>  	}
>  
>  	/* request and cfg psi-l */
> @@ -382,11 +366,42 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
>  	ret = k3_udma_glue_cfg_tx_chn(tx_chn);
>  	if (ret) {
>  		dev_err(dev, "Failed to cfg tchan %d\n", ret);
> -		goto err;
> +		return ret;
>  	}
>  
>  	k3_udma_glue_dump_tx_chn(tx_chn);
>  
> +	return 0;
> +}
> +
> +struct k3_udma_glue_tx_channel *
> +k3_udma_glue_request_tx_chn(struct device *dev, const char *name,
> +			    struct k3_udma_glue_tx_channel_cfg *cfg)
> +{
> +	struct k3_udma_glue_tx_channel *tx_chn;
> +	int ret;
> +
> +	tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
> +	if (!tx_chn)
> +		return ERR_PTR(-ENOMEM);
> +
> +	tx_chn->common.dev = dev;
> +	tx_chn->common.swdata_size = cfg->swdata_size;
> +	tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
> +	tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
> +	tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
> +	tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;
> +
> +	/* parse of udmap channel */
> +	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
> +					&tx_chn->common, true);
> +	if (ret)
> +		goto err;
> +
> +	ret = k3_udma_glue_request_tx_chn_common(dev, tx_chn, cfg);
> +	if (ret)
> +		goto err;
> +
>  	return tx_chn;
>  
>  err:
> @@ -395,6 +410,40 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
>  }
>  EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn);
>  
> +struct k3_udma_glue_tx_channel *
> +k3_udma_glue_request_tx_chn_by_id(struct device *dev, struct k3_udma_glue_tx_channel_cfg *cfg,
> +				  struct device_node *udmax_np, u32 thread_id)

Is udmax_np not dev->of_node?

> +{
> +	struct k3_udma_glue_tx_channel *tx_chn;
> +	int ret;
> +
> +	tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
> +	if (!tx_chn)
> +		return ERR_PTR(-ENOMEM);
> +
> +	tx_chn->common.dev = dev;
> +	tx_chn->common.swdata_size = cfg->swdata_size;
> +	tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
> +	tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
> +	tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
> +	tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;
> +
> +	ret = of_k3_udma_glue_parse_chn_by_id(udmax_np, &tx_chn->common, true, thread_id);
> +	if (ret)
> +		goto err;
> +
> +	ret = k3_udma_glue_request_tx_chn_common(dev, tx_chn, cfg);
> +	if (ret)
> +		goto err;
> +
> +	return tx_chn;
> +
> +err:
> +	k3_udma_glue_release_tx_chn(tx_chn);
> +	return ERR_PTR(ret);
> +}
> +EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn_by_id);
> +
>  void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
>  {
>  	if (tx_chn->psil_paired) {
> diff --git a/include/linux/dma/k3-udma-glue.h b/include/linux/dma/k3-udma-glue.h
> index e443be4d3b4b..6205d84430ca 100644
> --- a/include/linux/dma/k3-udma-glue.h
> +++ b/include/linux/dma/k3-udma-glue.h
> @@ -26,6 +26,10 @@ struct k3_udma_glue_tx_channel;
>  struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
>  		const char *name, struct k3_udma_glue_tx_channel_cfg *cfg);
>  
> +struct k3_udma_glue_tx_channel *
> +k3_udma_glue_request_tx_chn_by_id(struct device *dev, struct k3_udma_glue_tx_channel_cfg *cfg,

I know it is going to be longer, but can the function be named more
precisely, e.g. k3_udma_glue_request_tx_chn_by_thread_id()?

For TX/RX a channel is always tied to a single thread, right? So
probably k3_udma_glue_request_tx_chn_for_thread_id()?
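
The declaration would then look something like this (sketch only, to
illustrate the suggested name; the parameters are the ones from this
patch):

struct k3_udma_glue_tx_channel *
k3_udma_glue_request_tx_chn_for_thread_id(struct device *dev,
                                          struct k3_udma_glue_tx_channel_cfg *cfg,
                                          struct device_node *udmax_np,
                                          u32 thread_id);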

Other than that, the series looks OK.


> +				  struct device_node *udmax_np, u32 thread_id);
> +
>  void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn);
>  int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
>  			     struct cppi5_host_desc_t *desc_tx,

-- 
Péter



