[ Upstream commit 58f2ce6f61615dfd8dd3cc01c9e5bb54ed35637e ]

When sending non-linear skbs with jumbo frames, we set up the non-paged
data and mark it as the last segment, although the paged fragments are
also prepared. This stalls the TX queue and triggers a watchdog warning
(a simple reproducer is to run an iperf client-mode TCP test with a
large MTU - networking fails instantly).

Fix by checking whether the skb is non-linear.

Signed-off-by: Aaro Koskinen <aaro.koskinen@xxxxxxxxx>
Acked-by: Jose Abreu <joabreu@xxxxxxxxxxxx>
Signed-off-by: David S. Miller <davem@xxxxxxxxxxxxx>
Signed-off-by: Sasha Levin (Microsoft) <sashal@xxxxxxxxxx>
---
 drivers/net/ethernet/stmicro/stmmac/ring_mode.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index afed0f0f4027..4d9bcb4d0378 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -79,7 +79,8 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
 
 		desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum,
-				       STMMAC_RING_MODE, 1, true, skb->len);
+				       STMMAC_RING_MODE, 1, !skb_is_nonlinear(skb),
+				       skb->len);
 	} else {
 		des2 = dma_map_single(priv->device, skb->data,
 				      nopaged_len, DMA_TO_DEVICE);
@@ -91,7 +92,8 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
 		tx_q->tx_skbuff_dma[entry].is_jumbo = true;
 		desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
 		stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum,
-				       STMMAC_RING_MODE, 0, true, skb->len);
+				       STMMAC_RING_MODE, 0, !skb_is_nonlinear(skb),
+				       skb->len);
 	}
 
 	tx_q->cur_tx = entry;
--
2.19.1
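
A note on the predicate used by the fix: in the kernel, skb_is_nonlinear(skb)
is true when the skb carries payload in paged fragments beyond the linear
head (i.e. skb->data_len != 0). Below is a minimal standalone userspace
sketch of the last-segment decision, not driver code - fake_skb,
fake_skb_is_nonlinear() and head_is_last_segment() are hypothetical names -
showing why the head descriptor may only close the frame when no fragments
will follow.

/* Standalone model, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct fake_skb {
	unsigned int len;      /* total payload: head + fragments */
	unsigned int data_len; /* bytes held in paged fragments   */
};

/* Mirrors the kernel's skb_is_nonlinear(): true if fragments exist. */
static bool fake_skb_is_nonlinear(const struct fake_skb *skb)
{
	return skb->data_len != 0;
}

/*
 * Before the fix the head descriptor was unconditionally flagged as
 * the last segment (the hard-coded "true" argument); after the fix it
 * is flagged only for a linear skb, so frames with paged fragments
 * stay open until the fragment descriptors are queued as well.
 */
static bool head_is_last_segment(const struct fake_skb *skb)
{
	return !fake_skb_is_nonlinear(skb);
}

int main(void)
{
	struct fake_skb linear = { .len = 9000, .data_len = 0 };
	struct fake_skb paged  = { .len = 9000, .data_len = 6000 };

	printf("linear skb: head is last segment = %d\n",
	       head_is_last_segment(&linear)); /* 1: frame closes here  */
	printf("paged  skb: head is last segment = %d\n",
	       head_is_last_segment(&paged));  /* 0: fragments follow   */
	return 0;
}

With the old unconditional "true", the paged case above would close the
frame at the head descriptor while fragment descriptors were still being
prepared, which is the TX-queue stall the commit message describes.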