Set a queue as preemptible or express via taprio. This eventually sets
the queue-specific preemptible field in the TXQCTL register.

Implement configure_tx(), a callback triggered by mmsv, to set
tx_enabled and update the preemptible queue settings. tx_enabled is a
new field that serves as a condition in igc_tsn_enable_offload() before
configuring a queue as preemptible. This provides some control over FPE
on the TX side, despite the lack of a dedicated register.

Verified that the correct preemptible hardware queues are set using the
following commands:

a) 1:1 TC-to-Queue Mapping
   $ sudo tc qdisc replace dev enp1s0 parent root handle 100 \
     taprio num_tc 4 map 3 2 1 0 3 3 3 3 3 3 3 3 3 3 3 3 \
     queues 1@0 1@1 1@2 1@3 base-time 0 sched-entry S F 100000 \
     fp E E P P

b) Non-1:1 TC-to-Queue Mapping
   $ sudo tc qdisc replace dev enp1s0 parent root handle 100 \
     taprio num_tc 3 map 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 2 \
     queues 2@0 1@2 1@3 fp E E P
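
Note that tx_enabled is not driven by taprio itself; it is toggled
through the ethtool MAC Merge (mmsv) path. As a rough usage sketch
(assuming an ethtool build with --set-mm support and a link partner
that completes verification), something like the following should
exercise the new configure_tx() callback:

   $ sudo ethtool --set-mm enp1s0 pmac-enabled on tx-enabled on \
     verify-enabled on

When mmsv reports TX as active, igc_fpe_configure_tx() sets
fpe.tx_enabled and re-applies the TSN offload, so the PREEMPTIBLE bit
in TXQCTL only takes effect once the queue is marked preemptible via
taprio and preemption is enabled on the TX side.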

Co-developed-by: Vinicius Costa Gomes <vinicius.gomes@xxxxxxxxx>
Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@xxxxxxxxx>
Signed-off-by: Faizal Rahim <faizal.abdul.rahim@xxxxxxxxxxxxxxx>
---
 drivers/net/ethernet/intel/igc/igc.h         |  3 +-
 drivers/net/ethernet/intel/igc/igc_defines.h |  1 +
 drivers/net/ethernet/intel/igc/igc_main.c    | 36 ++++++++++++++++++++
 drivers/net/ethernet/intel/igc/igc_tsn.c     | 17 +++++++++++++++++
 4 files changed, 56 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 2f3662143589..59e6fca808e4 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -43,6 +43,7 @@ void igc_ethtool_set_ops(struct net_device *);
 struct fpe_t {
         struct ethtool_mmsv mmsv;
         u32 tx_min_frag_size;
+        bool tx_enabled;
 };
 
 enum igc_mac_filter_type {
@@ -163,7 +164,7 @@ struct igc_ring {
         bool launchtime_enable; /* true if LaunchTime is enabled */
         ktime_t last_tx_cycle;  /* end of the cycle with a launchtime transmission */
         ktime_t last_ff_cycle;  /* Last cycle with an active first flag */
-
+        bool preemptible;       /* True if not express */
         u32 start_time;
         u32 end_time;
         u32 max_sdu;
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 038ee89f1e08..208899e67308 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -556,6 +556,7 @@
 #define IGC_TXQCTL_QUEUE_MODE_LAUNCHT   0x00000001
 #define IGC_TXQCTL_STRICT_CYCLE         0x00000002
 #define IGC_TXQCTL_STRICT_END           0x00000004
+#define IGC_TXQCTL_PREEMPTIBLE          0x00000008
 #define IGC_TXQCTL_QAV_SEL_MASK         0x000000C0
 #define IGC_TXQCTL_QAV_SEL_CBS0         0x00000080
 #define IGC_TXQCTL_QAV_SEL_CBS1         0x000000C0
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 7fe6875d7bf7..f15ac7565fbd 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -6258,6 +6258,39 @@ static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now)
         return timespec64_compare(now, &b) > 0;
 }
 
+static u32 igc_map_tc_to_queue(const struct igc_adapter *adapter,
+                               unsigned long preemptible_tcs)
+{
+        struct net_device *dev = adapter->netdev;
+        u32 i, queue = 0;
+
+        for (i = 0; i < dev->num_tc; i++) {
+                u32 offset, count;
+
+                if (!(preemptible_tcs & BIT(i)))
+                        continue;
+
+                offset = dev->tc_to_txq[i].offset;
+                count = dev->tc_to_txq[i].count;
+                queue |= GENMASK(offset + count - 1, offset);
+        }
+
+        return queue;
+}
+
+static void igc_save_preempt_queue(struct igc_adapter *adapter,
+                                   const struct tc_mqprio_qopt_offload *mqprio)
+{
+        u32 preemptible_queue = igc_map_tc_to_queue(adapter,
+                                                    mqprio->preemptible_tcs);
+
+        for (int i = 0; i < adapter->num_tx_queues; i++) {
+                struct igc_ring *tx_ring = adapter->tx_ring[i];
+
+                tx_ring->preemptible = preemptible_queue & BIT(i);
+        }
+}
+
 static bool validate_schedule(struct igc_adapter *adapter,
                               const struct tc_taprio_qopt_offload *qopt)
 {
@@ -6344,6 +6377,7 @@ static int igc_qbv_clear_schedule(struct igc_adapter *adapter)
                 ring->start_time = 0;
                 ring->end_time = NSEC_PER_SEC;
                 ring->max_sdu = 0;
+                ring->preemptible = false;
         }
 
         spin_lock_irqsave(&adapter->qbv_tx_lock, flags);
@@ -6500,6 +6534,8 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
                 ring->max_sdu = 0;
         }
 
+        igc_save_preempt_queue(adapter, &qopt->mqprio);
+
         return 0;
 }
 
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c
index d9de2cfb0c17..95193a7e8768 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.c
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.c
@@ -122,6 +122,18 @@ static int igc_fpe_xmit_smd_frame(struct igc_adapter *adapter,
         return err;
 }
 
+static void igc_fpe_configure_tx(struct ethtool_mmsv *mmsv, bool tx_enable)
+{
+        struct fpe_t *fpe = container_of(mmsv, struct fpe_t, mmsv);
+        struct igc_adapter *adapter;
+
+        adapter = container_of(fpe, struct igc_adapter, fpe);
+        adapter->fpe.tx_enabled = tx_enable;
+
+        /* Update config since tx_enabled affects preemptible queue configuration */
+        igc_tsn_offload_apply(adapter);
+}
+
 static void igc_fpe_send_mpacket(struct ethtool_mmsv *mmsv,
                                  enum ethtool_mpacket type)
 {
@@ -143,12 +155,14 @@ static void igc_fpe_send_mpacket(struct ethtool_mmsv *mmsv,
 }
 
 static const struct ethtool_mmsv_ops igc_mmsv_ops = {
+        .configure_tx = igc_fpe_configure_tx,
         .send_mpacket = igc_fpe_send_mpacket,
 };
 
 void igc_fpe_init(struct igc_adapter *adapter)
 {
         adapter->fpe.tx_min_frag_size = TX_MIN_FRAG_SIZE;
+        adapter->fpe.tx_enabled = false;
         ethtool_mmsv_init(&adapter->fpe.mmsv, adapter->netdev,
                           &igc_mmsv_ops);
 }
@@ -456,6 +470,9 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
                 if (ring->launchtime_enable)
                         txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT;
 
+                if (adapter->fpe.tx_enabled && ring->preemptible)
+                        txqctl |= IGC_TXQCTL_PREEMPTIBLE;
+
                 /* Skip configuring CBS for Q2 and Q3 */
                 if (i > 1)
                         goto skip_cbs;
-- 
2.34.1