This is a note to let you know that I've just added the patch titled

    net: mvneta: make tx buffer array agnostic

to the 5.4-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     net-mvneta-make-tx-buffer-array-agnostic.patch
and it can be found in the queue-5.4 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.



commit b2c818682cb6e0ec6ccbe6e6c7a89f1ea07d0083
Author: Lorenzo Bianconi <lorenzo@xxxxxxxxxx>
Date:   Sat Oct 19 10:13:26 2019 +0200

    net: mvneta: make tx buffer array agnostic

    [ Upstream commit 9e58c8b410650b5a6eb5b8fad8474bd8425a4023 ]

    Allow tx buffer array to contain both skb and xdp buffers in order to
    enable xdp frame recycling adding XDP_TX verdict support

    Signed-off-by: Lorenzo Bianconi <lorenzo@xxxxxxxxxx>
    Signed-off-by: David S. Miller <davem@xxxxxxxxxxxxx>
    Stable-dep-of: 2960a2d33b02 ("net: mvneta: fix potential double-frees in mvneta_txq_sw_deinit()")
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>

diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 2c1ee32684988..977c2961aa2c2 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -549,6 +549,20 @@ struct mvneta_rx_desc {
 };
 #endif
 
+enum mvneta_tx_buf_type {
+	MVNETA_TYPE_SKB,
+	MVNETA_TYPE_XDP_TX,
+	MVNETA_TYPE_XDP_NDO,
+};
+
+struct mvneta_tx_buf {
+	enum mvneta_tx_buf_type type;
+	union {
+		struct xdp_frame *xdpf;
+		struct sk_buff *skb;
+	};
+};
+
 struct mvneta_tx_queue {
 	/* Number of this TX queue, in the range 0-7 */
 	u8 id;
@@ -564,8 +578,8 @@ struct mvneta_tx_queue {
 	int tx_stop_threshold;
 	int tx_wake_threshold;
 
-	/* Array of transmitted skb */
-	struct sk_buff **tx_skb;
+	/* Array of transmitted buffers */
+	struct mvneta_tx_buf *buf;
 
 	/* Index of last TX DMA descriptor that was inserted */
 	int txq_put_index;
@@ -1774,14 +1788,9 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
 	int i;
 
 	for (i = 0; i < num; i++) {
+		struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index];
 		struct mvneta_tx_desc *tx_desc = txq->descs +
 			txq->txq_get_index;
-		struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
-
-		if (skb) {
-			bytes_compl += skb->len;
-			pkts_compl++;
-		}
 
 		mvneta_txq_inc_get(txq);
 
@@ -1789,9 +1798,12 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
 			dma_unmap_single(pp->dev->dev.parent,
 					 tx_desc->buf_phys_addr,
 					 tx_desc->data_size, DMA_TO_DEVICE);
-		if (!skb)
+		if (!buf->skb)
 			continue;
-		dev_kfree_skb_any(skb);
+
+		bytes_compl += buf->skb->len;
+		pkts_compl++;
+		dev_kfree_skb_any(buf->skb);
 	}
 
 	netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
@@ -2242,16 +2254,19 @@ static inline void
 mvneta_tso_put_hdr(struct sk_buff *skb,
 		   struct mvneta_port *pp, struct mvneta_tx_queue *txq)
 {
-	struct mvneta_tx_desc *tx_desc;
 	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+	struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
+	struct mvneta_tx_desc *tx_desc;
 
-	txq->tx_skb[txq->txq_put_index] = NULL;
 	tx_desc = mvneta_txq_next_desc_get(txq);
 	tx_desc->data_size = hdr_len;
 	tx_desc->command = mvneta_skb_tx_csum(pp, skb);
 	tx_desc->command |= MVNETA_TXD_F_DESC;
 	tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
 				 txq->txq_put_index * TSO_HEADER_SIZE;
+	buf->type = MVNETA_TYPE_SKB;
+	buf->skb = NULL;
+
 	mvneta_txq_inc_put(txq);
 }
 
@@ -2260,6 +2275,7 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
 		    struct sk_buff *skb, char *data, int size,
 		    bool last_tcp, bool is_last)
 {
+	struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
 	struct mvneta_tx_desc *tx_desc;
 
 	tx_desc = mvneta_txq_next_desc_get(txq);
@@ -2273,7 +2289,8 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
 	}
 
 	tx_desc->command = 0;
-	txq->tx_skb[txq->txq_put_index] = NULL;
+	buf->type = MVNETA_TYPE_SKB;
+	buf->skb = NULL;
 
 	if (last_tcp) {
 		/* last descriptor in the TCP packet */
@@ -2281,7 +2298,7 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
 
 		/* last descriptor in SKB */
 		if (is_last)
-			txq->tx_skb[txq->txq_put_index] = skb;
+			buf->skb = skb;
 	}
 	mvneta_txq_inc_put(txq);
 	return 0;
@@ -2366,6 +2383,7 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
 	int i, nr_frags = skb_shinfo(skb)->nr_frags;
 
 	for (i = 0; i < nr_frags; i++) {
+		struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 		void *addr = skb_frag_address(frag);
 
@@ -2385,12 +2403,13 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
 		if (i == nr_frags - 1) {
 			/* Last descriptor */
 			tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
-			txq->tx_skb[txq->txq_put_index] = skb;
+			buf->skb = skb;
 		} else {
 			/* Descriptor in the middle: Not First, Not Last */
 			tx_desc->command = 0;
-			txq->tx_skb[txq->txq_put_index] = NULL;
+			buf->skb = NULL;
 		}
+		buf->type = MVNETA_TYPE_SKB;
 		mvneta_txq_inc_put(txq);
 	}
 
@@ -2418,6 +2437,7 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
 	struct mvneta_port *pp = netdev_priv(dev);
 	u16 txq_id = skb_get_queue_mapping(skb);
 	struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
+	struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
 	struct mvneta_tx_desc *tx_desc;
 	int len = skb->len;
 	int frags = 0;
@@ -2450,16 +2470,17 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
 		goto out;
 	}
 
+	buf->type = MVNETA_TYPE_SKB;
 	if (frags == 1) {
 		/* First and Last descriptor */
 		tx_cmd |= MVNETA_TXD_FLZ_DESC;
 		tx_desc->command = tx_cmd;
-		txq->tx_skb[txq->txq_put_index] = skb;
+		buf->skb = skb;
 		mvneta_txq_inc_put(txq);
 	} else {
 		/* First but not Last */
 		tx_cmd |= MVNETA_TXD_F_DESC;
-		txq->tx_skb[txq->txq_put_index] = NULL;
+		buf->skb = NULL;
 		mvneta_txq_inc_put(txq);
 		tx_desc->command = tx_cmd;
 		/* Continue with other skb fragments */
@@ -3005,9 +3026,8 @@ static int mvneta_txq_sw_init(struct mvneta_port *pp,
 
 	txq->last_desc = txq->size - 1;
 
-	txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb),
-				    GFP_KERNEL);
-	if (!txq->tx_skb) {
+	txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL);
+	if (!txq->buf) {
 		dma_free_coherent(pp->dev->dev.parent,
 				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
 				  txq->descs, txq->descs_phys);
@@ -3019,7 +3039,7 @@ static int mvneta_txq_sw_init(struct mvneta_port *pp,
 					   txq->size * TSO_HEADER_SIZE,
 					   &txq->tso_hdrs_phys, GFP_KERNEL);
 	if (!txq->tso_hdrs) {
-		kfree(txq->tx_skb);
+		kfree(txq->buf);
 		dma_free_coherent(pp->dev->dev.parent,
 				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
 				  txq->descs, txq->descs_phys);
@@ -3074,7 +3094,7 @@ static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
 {
 	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
 
-	kfree(txq->tx_skb);
+	kfree(txq->buf);
 
 	if (txq->tso_hdrs)
 		dma_free_coherent(pp->dev->dev.parent,
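
For readers skimming the diff, here is a small standalone sketch (user-space C, not part of the patch and not kernel code) of the idea the change introduces: each TX ring slot records what kind of buffer it carries, so the completion path can free an skb or return an XDP frame as appropriate. Only the enum and struct mirror the patch; the sk_buff/xdp_frame stand-ins, the free()/printf() calls, and tx_buf_complete() are simplified placeholders for illustration.

/* Standalone sketch: tagged-union TX buffer bookkeeping (illustrative only). */
#include <stdio.h>
#include <stdlib.h>

struct sk_buff   { unsigned int len; };      /* placeholder, not the kernel struct */
struct xdp_frame { unsigned int len; };      /* placeholder, not the kernel struct */

enum mvneta_tx_buf_type {
	MVNETA_TYPE_SKB,
	MVNETA_TYPE_XDP_TX,
	MVNETA_TYPE_XDP_NDO,
};

struct mvneta_tx_buf {
	enum mvneta_tx_buf_type type;
	union {
		struct xdp_frame *xdpf;
		struct sk_buff *skb;
	};
};

/* Completion side: dispatch on buf->type instead of assuming every slot is an skb. */
static void tx_buf_complete(struct mvneta_tx_buf *buf)
{
	switch (buf->type) {
	case MVNETA_TYPE_SKB:
		if (buf->skb) {
			printf("freeing skb, %u bytes\n", buf->skb->len);
			free(buf->skb);
		}
		break;
	case MVNETA_TYPE_XDP_TX:
	case MVNETA_TYPE_XDP_NDO:
		if (buf->xdpf) {
			printf("returning xdp frame, %u bytes\n", buf->xdpf->len);
			free(buf->xdpf);
		}
		break;
	}
}

int main(void)
{
	struct sk_buff *skb = malloc(sizeof(*skb));
	struct xdp_frame *xdpf = malloc(sizeof(*xdpf));
	struct mvneta_tx_buf ring[2];

	skb->len = 1500;
	xdpf->len = 64;

	/* Transmit side: record what each descriptor carries. */
	ring[0].type = MVNETA_TYPE_SKB;    ring[0].skb  = skb;
	ring[1].type = MVNETA_TYPE_XDP_TX; ring[1].xdpf = xdpf;

	/* Completion side: free per type, roughly as mvneta_txq_bufs_free() does for skbs. */
	tx_buf_complete(&ring[0]);
	tx_buf_complete(&ring[1]);
	return 0;
}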