Add a software fallback for tx checksum calculation on those tx queues
that don't support tx checksum offloading, because some DWMAC IPs
support tx checksum offloading only for the first few tx queues,
starting from tx queue 0.

The number of tx queues with checksum offload capability can be set
through the new "snps,tx-queues-with-coe" devicetree property; when it
is absent, all tx queues are assumed to support checksum offloading.

Signed-off-by: Rohan G Thomas <rohan.g.thomas@xxxxxxxxx>
---
 drivers/net/ethernet/stmicro/stmmac/stmmac.h  |  5 +++++
 .../net/ethernet/stmicro/stmmac/stmmac_main.c | 19 +++++++++++++++++++
 .../ethernet/stmicro/stmmac/stmmac_platform.c |  4 ++++
 include/linux/stmmac.h                        |  1 +
 4 files changed, 29 insertions(+)

diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 3401e888a9f6..64d7dbe474bd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -210,12 +210,17 @@ struct stmmac_dma_conf {
         unsigned int dma_tx_size;
 };
 
+#define STMMAC_PRIV_FLG_TXQ_COE_LIMIT BIT(0)
+
 struct stmmac_priv {
         /* Frequently used values are kept adjacent for cache effect */
         u32 tx_coal_frames[MTL_MAX_TX_QUEUES];
         u32 tx_coal_timer[MTL_MAX_TX_QUEUES];
         u32 rx_coal_frames[MTL_MAX_TX_QUEUES];
+        u32 flags;
+
+        u32 tx_q_with_coe;
 
         int hwts_tx_en;
         bool tx_path_in_lpi_mode;
         bool tso;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 733b5e900817..f9ab6635218c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -4409,6 +4409,17 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
         WARN_ON(tx_q->tx_skbuff[first_entry]);
 
         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
+        /* Some DWMAC IPs support tx coe only for a few initial tx queues,
+         * starting from tx queue 0. So checksum offloading for those queues
+         * that don't support tx coe needs to fall back to software checksum
+         * calculation.
+         */
+        if (csum_insertion && (priv->flags & STMMAC_PRIV_FLG_TXQ_COE_LIMIT) &&
+            queue >= priv->tx_q_with_coe) {
+                if (unlikely(skb_checksum_help(skb)))
+                        goto dma_map_err;
+                csum_insertion = !csum_insertion;
+        }
 
         if (likely(priv->extend_desc))
                 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
@@ -7401,6 +7412,14 @@ int stmmac_dvr_probe(struct device *device,
                 dev_info(priv->device, "SPH feature enabled\n");
         }
 
+        if (priv->plat->tx_coe &&
+            priv->plat->tx_queues_with_coe < priv->plat->tx_queues_to_use) {
+                priv->flags |= STMMAC_PRIV_FLG_TXQ_COE_LIMIT;
+                priv->tx_q_with_coe = priv->plat->tx_queues_with_coe;
+                dev_info(priv->device, "TX COE limited to %u tx queues\n",
+                         priv->tx_q_with_coe);
+        }
+
         /* Ideally our host DMA address width is the same as for the
          * device. However, it may differ and then we have to use our
          * host DMA width for allocation and the device DMA width for
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index be8e79c7aa34..0138b7c9c7ab 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -225,6 +225,10 @@ static int stmmac_mtl_setup(struct platform_device *pdev,
                                  &plat->tx_queues_to_use))
                 plat->tx_queues_to_use = 1;
 
+        if (of_property_read_u32(tx_node, "snps,tx-queues-with-coe",
+                                 &plat->tx_queues_with_coe))
+                plat->tx_queues_with_coe = plat->tx_queues_to_use;
+
         if (of_property_read_bool(tx_node, "snps,tx-sched-wrr"))
                 plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
         else if (of_property_read_bool(tx_node, "snps,tx-sched-wfq"))
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 784277d666eb..cb508164eaea 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -252,6 +252,7 @@ struct plat_stmmacenet_data {
         u32 host_dma_width;
         u32 rx_queues_to_use;
         u32 tx_queues_to_use;
+        u32 tx_queues_with_coe;
         u8 rx_sched_algorithm;
         u8 tx_sched_algorithm;
         struct stmmac_rxq_cfg rx_queues_cfg[MTL_MAX_RX_QUEUES];
-- 
2.19.0
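
For reference, a device-tree sketch of how the new property could be
used, assuming the MTL TX configuration node layout from the existing
snps,dwmac bindings; the node label, the queue count and the <2> value
below are illustrative only, and only "snps,tx-queues-with-coe" is
introduced by this patch:

        mtl_tx_setup: tx-queues-config {
                snps,tx-queues-to-use = <4>;
                /* Illustrative split: only queues 0 and 1 have tx coe,
                 * queues 2 and 3 use the software checksum fallback.
                 */
                snps,tx-queues-with-coe = <2>;

                queue0 {
                        snps,dcb-algorithm;
                };

                queue1 {
                        snps,dcb-algorithm;
                };

                queue2 {
                        snps,dcb-algorithm;
                };

                queue3 {
                        snps,dcb-algorithm;
                };
        };

With such a configuration, CHECKSUM_PARTIAL skbs transmitted on queues
2 and 3 would go through skb_checksum_help() in stmmac_xmit() instead
of hardware checksum insertion.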