Re: [PATCH net-next v2 2/6] tsnep: Add XDP TX support

On Thu, Dec 08, 2022 at 06:40:41AM +0100, Gerhard Engleder wrote:
> Implement ndo_xdp_xmit() for XDP TX support. Support for fragmented XDP
> frames is included.
> 
> Signed-off-by: Gerhard Engleder <gerhard@xxxxxxxxxxxxxxxxxxxxx>
> ---
>  drivers/net/ethernet/engleder/tsnep.h      |  12 +-
>  drivers/net/ethernet/engleder/tsnep_main.c | 204 ++++++++++++++++++++-
>  2 files changed, 207 insertions(+), 9 deletions(-)
> 
> diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
> index f72c0c4da1a9..29b04127f529 100644
> --- a/drivers/net/ethernet/engleder/tsnep.h
> +++ b/drivers/net/ethernet/engleder/tsnep.h
> @@ -57,6 +57,12 @@ struct tsnep_rxnfc_rule {
>  	int location;
>  };
>  
> +enum tsnep_tx_type {
> +	TSNEP_TX_TYPE_SKB,
> +	TSNEP_TX_TYPE_XDP_TX,
> +	TSNEP_TX_TYPE_XDP_NDO,
> +};
> +
>  struct tsnep_tx_entry {
>  	struct tsnep_tx_desc *desc;
>  	struct tsnep_tx_desc_wb *desc_wb;
> @@ -65,7 +71,11 @@ struct tsnep_tx_entry {
>  
>  	u32 properties;
>  
> -	struct sk_buff *skb;
> +	enum tsnep_tx_type type;
> +	union {
> +		struct sk_buff *skb;
> +		struct xdp_frame *xdpf;
> +	};
>  	size_t len;
>  	DEFINE_DMA_UNMAP_ADDR(dma);
>  };
> diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
> index a28fde9fb060..b97cfd5fa1fa 100644
> --- a/drivers/net/ethernet/engleder/tsnep_main.c
> +++ b/drivers/net/ethernet/engleder/tsnep_main.c
> @@ -310,10 +310,11 @@ static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length,
>  	struct tsnep_tx_entry *entry = &tx->entry[index];
>  
>  	entry->properties = 0;
> -	if (entry->skb) {
> +	if (entry->skb || entry->xdpf) {

I think this change is redundant; you could keep a single check, since the skb and xdpf pointers share the same memory in the union, but I guess this makes it more obvious.
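
IOW, something like this should work just as well (untested, just to illustrate the union aliasing):

-	if (entry->skb || entry->xdpf) {
+	if (entry->skb) {	/* also non-NULL for XDP entries, same union slot */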

>  		entry->properties = length & TSNEP_DESC_LENGTH_MASK;
>  		entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
> -		if (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS)
> +		if (entry->type == TSNEP_TX_TYPE_SKB &&
> +		    skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS)
>  			entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;
>  
>  		/* toggle user flag to prevent false acknowledge
> @@ -400,6 +401,8 @@ static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
>  
>  		entry->desc->tx = __cpu_to_le64(dma);
>  
> +		entry->type = TSNEP_TX_TYPE_SKB;
> +
>  		map_len += len;
>  	}
>  
> @@ -417,12 +420,13 @@ static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
>  		entry = &tx->entry[(index + i) % TSNEP_RING_SIZE];
>  
>  		if (entry->len) {
> -			if (i == 0)
> +			if (i == 0 && entry->type == TSNEP_TX_TYPE_SKB)
>  				dma_unmap_single(dmadev,
>  						 dma_unmap_addr(entry, dma),
>  						 dma_unmap_len(entry, len),
>  						 DMA_TO_DEVICE);
> -			else
> +			else if (entry->type == TSNEP_TX_TYPE_SKB ||
> +				 entry->type == TSNEP_TX_TYPE_XDP_NDO)
>  				dma_unmap_page(dmadev,
>  					       dma_unmap_addr(entry, dma),
>  					       dma_unmap_len(entry, len),
> @@ -505,6 +509,122 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
>  	return NETDEV_TX_OK;
>  }
>  
> +static int tsnep_xdp_tx_map(struct xdp_frame *xdpf, struct tsnep_tx *tx,
> +			    struct skb_shared_info *shinfo, int count,
> +			    bool dma_map)
> +{
> +	struct device *dmadev = tx->adapter->dmadev;
> +	skb_frag_t *frag;
> +	unsigned int len;
> +	struct tsnep_tx_entry *entry;
> +	void *data;
> +	struct page *page;
> +	dma_addr_t dma;
> +	int map_len = 0;
> +	int i;
> +
> +	frag = NULL;
> +	len = xdpf->len;
> +	for (i = 0; i < count; i++) {
> +		entry = &tx->entry[(tx->write + i) % TSNEP_RING_SIZE];
> +		if (dma_map) {
> +			data = unlikely(frag) ? skb_frag_address(frag) :
> +						xdpf->data;
> +			dma = dma_map_single(dmadev, data, len, DMA_TO_DEVICE);
> +			if (dma_mapping_error(dmadev, dma))
> +				return -ENOMEM;
> +
> +			entry->type = TSNEP_TX_TYPE_XDP_NDO;
> +		} else {
> +			page = unlikely(frag) ? skb_frag_page(frag) :
> +						virt_to_page(xdpf->data);
> +			dma = page_pool_get_dma_addr(page);
> +			if (unlikely(frag))
> +				dma += skb_frag_off(frag);
> +			else
> +				dma += sizeof(*xdpf) + xdpf->headroom;
> +			dma_sync_single_for_device(dmadev, dma, len,
> +						   DMA_BIDIRECTIONAL);
> +
> +			entry->type = TSNEP_TX_TYPE_XDP_TX;
> +		}
> +
> +		entry->len = len;
> +		dma_unmap_addr_set(entry, dma, dma);
> +
> +		entry->desc->tx = __cpu_to_le64(dma);
> +
> +		map_len += len;
> +
> +		if ((i + 1) < count) {
> +			frag = &shinfo->frags[i];
> +			len = skb_frag_size(frag);
> +		}
> +	}
> +
> +	return map_len;
> +}
> +
> +/* This function requires __netif_tx_lock is held by the caller. */
> +static int tsnep_xdp_xmit_frame_ring(struct xdp_frame *xdpf,
> +				     struct tsnep_tx *tx, bool dma_map)
> +{
> +	struct skb_shared_info *shinfo = xdp_get_shared_info_from_frame(xdpf);
> +	unsigned long flags;
> +	int count = 1;
> +	struct tsnep_tx_entry *entry;
> +	int length;
> +	int i;
> +	int retval;
> +
> +	if (unlikely(xdp_frame_has_frags(xdpf)))
> +		count += shinfo->nr_frags;
> +
> +	spin_lock_irqsave(&tx->lock, flags);
> +
> +	if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1 + count)) {

Wouldn't count + 1 be sufficient to check against the available descriptors?
If there are frags, you have already accounted for them in the count
variable, so MAX_SKB_FRAGS seems redundant to me.
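
i.e. something along these lines (untested, assuming no extra descriptors need to be reserved for the regular skb TX path here):

-	if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1 + count)) {
+	if (tsnep_tx_desc_available(tx) < (count + 1)) {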

> +		/* prevent full TX ring due to XDP */
> +		spin_unlock_irqrestore(&tx->lock, flags);
> +
> +		return -EBUSY;
> +	}
> +
> +	entry = &tx->entry[tx->write];
> +	entry->xdpf = xdpf;
> +
> +	retval = tsnep_xdp_tx_map(xdpf, tx, shinfo, count, dma_map);
> +	if (retval < 0) {
> +		tsnep_tx_unmap(tx, tx->write, count);
> +		entry->xdpf = NULL;
> +
> +		tx->dropped++;
> +
> +		spin_unlock_irqrestore(&tx->lock, flags);
> +
> +		netdev_err(tx->adapter->netdev, "XDP TX DMA map failed\n");
> +
> +		return retval;
> +	}
> +	length = retval;
> +
> +	for (i = 0; i < count; i++)
> +		tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE, length,
> +				  i == (count - 1));
> +	tx->write = (tx->write + count) % TSNEP_RING_SIZE;
> +
> +	/* descriptor properties shall be valid before hardware is notified */
> +	dma_wmb();
> +
> +	spin_unlock_irqrestore(&tx->lock, flags);
> +
> +	return 0;
> +}
> +

(...)
