Rajkumar Manoharan <rmanohar@xxxxxxxxxxxxxx> writes:

> ath10k maintains common txqs list for all stations. This txq
> management can be removed by migrating to mac80211 txq APIs
> and let mac80211 handle txqs reordering based on reported airtime.
> By doing this, txq fairness maintained in ath10k i.e processing
> N frames per txq is removed. By adapting to mac80211 APIs,
> ath10k will support mac80211 based airtime fairness algorithm.
>
> Signed-off-by: Toke Høiland-Jørgensen <toke@xxxxxxx>
> Signed-off-by: Rajkumar Manoharan <rmanohar@xxxxxxxxxxxxxx>
> ---
>  drivers/net/wireless/ath/ath10k/core.c   |  2 -
>  drivers/net/wireless/ath/ath10k/core.h   |  3 --
>  drivers/net/wireless/ath/ath10k/htc.h    |  1 -
>  drivers/net/wireless/ath/ath10k/htt_rx.c |  8 +++
>  drivers/net/wireless/ath/ath10k/mac.c    | 92 +++++++++++++-------------------
>  5 files changed, 45 insertions(+), 61 deletions(-)
>
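(For context, the driver-side pattern the new mac80211 scheduling API
expects boils down to roughly the sketch below. drv_schedule_ac() and
drv_push_one() are just placeholders for the ath10k equivalents further
down in the patch, so treat it as an illustration, not literal code.)

static void drv_schedule_ac(struct ieee80211_hw *hw, u8 ac)
{
	struct ieee80211_txq *txq;

	/* One scheduling "episode": keep the start/end bracket around the
	 * whole thing so mac80211 can keep its airtime accounting straight.
	 */
	ieee80211_txq_schedule_start(hw, ac);

	while ((txq = ieee80211_next_txq(hw, ac))) {
		/* Push frames until the txq is empty or the firmware has no
		 * more room; drv_push_one() stands in for
		 * ath10k_mac_tx_push_txq().
		 */
		while (drv_push_one(hw, txq) >= 0)
			;

		/* Always hand the txq back; mac80211 checks for itself and
		 * simply drops it from the rotation if it is empty.
		 */
		ieee80211_return_txq(hw, txq);
	}

	ieee80211_txq_schedule_end(hw, ac);
}
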
> diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
> index cf3c47b8cb2d..0684f87abc9b 100644
> --- a/drivers/net/wireless/ath/ath10k/core.c
> +++ b/drivers/net/wireless/ath/ath10k/core.c
> @@ -3068,9 +3068,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
>
>  	mutex_init(&ar->conf_mutex);
>  	spin_lock_init(&ar->data_lock);
> -	spin_lock_init(&ar->txqs_lock);
>
> -	INIT_LIST_HEAD(&ar->txqs);
>  	INIT_LIST_HEAD(&ar->peers);
>  	init_waitqueue_head(&ar->peer_mapping_wq);
>  	init_waitqueue_head(&ar->htt.empty_tx_wq);
> diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
> index f6e5c29f74e7..d3e20aaf8023 100644
> --- a/drivers/net/wireless/ath/ath10k/core.h
> +++ b/drivers/net/wireless/ath/ath10k/core.h
> @@ -1054,10 +1054,7 @@ struct ath10k {
>
>  	/* protects shared structure data */
>  	spinlock_t data_lock;
> -	/* protects: ar->txqs, artxq->list */
> -	spinlock_t txqs_lock;
>
> -	struct list_head txqs;
>  	struct list_head arvifs;
>  	struct list_head peers;
>  	struct ath10k_peer *peer_map[ATH10K_MAX_NUM_PEER_IDS];
> diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
> index 51fda6c23f69..cb30add7dd33 100644
> --- a/drivers/net/wireless/ath/ath10k/htc.h
> +++ b/drivers/net/wireless/ath/ath10k/htc.h
> @@ -51,7 +51,6 @@
>   */
>
>  #define HTC_HOST_MAX_MSG_PER_RX_BUNDLE 8
> -#define HTC_HOST_MAX_MSG_PER_TX_BUNDLE 16
>
>  enum ath10k_htc_tx_flags {
>  	ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01,
> diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
> index f2405258a6d3..f2aaa2f7a022 100644
> --- a/drivers/net/wireless/ath/ath10k/htt_rx.c
> +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
> @@ -2379,6 +2379,7 @@ static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
>  	u8 tid;
>  	int ret;
>  	int i;
> +	bool may_tx;
>
>  	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");
>
> @@ -2451,8 +2452,13 @@ static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
>  		num_msdus = 0;
>  		num_bytes = 0;
>
> +		ieee80211_txq_schedule_start(hw, txq->ac);
> +		may_tx = ieee80211_txq_may_transmit(hw, txq);
>  		while (num_msdus < max_num_msdus &&
>  		       num_bytes < max_num_bytes) {
> +			if (!may_tx)
> +				break;
> +
>  			ret = ath10k_mac_tx_push_txq(hw, txq);
>  			if (ret < 0)
>  				break;
> @@ -2460,6 +2466,8 @@ static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
>  			num_msdus++;
>  			num_bytes += ret;
>  		}
> +		ieee80211_return_txq(hw, txq);
> +		ieee80211_txq_schedule_end(hw, txq->ac);
>
>  		record->num_msdus = cpu_to_le16(num_msdus);
>  		record->num_bytes = cpu_to_le32(num_bytes);
> diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
> index 97548f96a2f7..3b441179a36c 100644
> --- a/drivers/net/wireless/ath/ath10k/mac.c
> +++ b/drivers/net/wireless/ath/ath10k/mac.c
> @@ -3874,7 +3874,6 @@ static void ath10k_mac_txq_init(struct ieee80211_txq *txq)
>
>  static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq)
>  {
> -	struct ath10k_txq *artxq;
>  	struct ath10k_skb_cb *cb;
>  	struct sk_buff *msdu;
>  	int msdu_id;
> @@ -3882,12 +3881,6 @@ static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq)
>  	if (!txq)
>  		return;
>
> -	artxq = (void *)txq->drv_priv;
> -	spin_lock_bh(&ar->txqs_lock);
> -	if (!list_empty(&artxq->list))
> -		list_del_init(&artxq->list);
> -	spin_unlock_bh(&ar->txqs_lock);
> -
>  	spin_lock_bh(&ar->htt.tx_lock);
>  	idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) {
>  		cb = ATH10K_SKB_CB(msdu);
> @@ -3927,7 +3920,6 @@ static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw,
>  	struct ath10k_txq *artxq = (void *)txq->drv_priv;
>
>  	/* No need to get locks */
> -
>  	if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH)
>  		return true;
>
> @@ -4014,48 +4006,44 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
>  	return skb_len;
>  }
>
> -void ath10k_mac_tx_push_pending(struct ath10k *ar)
> +static int ath10k_mac_schedule_txq(struct ieee80211_hw *hw, u32 ac)
>  {
> -	struct ieee80211_hw *hw = ar->hw;
>  	struct ieee80211_txq *txq;
> -	struct ath10k_txq *artxq;
> -	struct ath10k_txq *last;
> -	int ret;
> -	int max;
> -
> -	if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2))
> -		return;
> -
> -	spin_lock_bh(&ar->txqs_lock);
> -	rcu_read_lock();
> -
> -	last = list_last_entry(&ar->txqs, struct ath10k_txq, list);
> -	while (!list_empty(&ar->txqs)) {
> -		artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
> -		txq = container_of((void *)artxq, struct ieee80211_txq,
> -				   drv_priv);
> +	int ret = 0;
>
> -		/* Prevent aggressive sta/tid taking over tx queue */
> -		max = HTC_HOST_MAX_MSG_PER_TX_BUNDLE;
> -		ret = 0;
> -		while (ath10k_mac_tx_can_push(hw, txq) && max--) {
> +	ieee80211_txq_schedule_start(hw, ac);
> +	while ((txq = ieee80211_next_txq(hw, ac))) {
> +		while (ath10k_mac_tx_can_push(hw, txq)) {
>  			ret = ath10k_mac_tx_push_txq(hw, txq);
>  			if (ret < 0)
>  				break;
>  		}
> +		ieee80211_return_txq(hw, txq);
> +		ath10k_htt_tx_txq_update(hw, txq);
> +		if (ret == -EBUSY)
> +			break;
> +	}
> +	ieee80211_txq_schedule_end(hw, ac);
>
> -		list_del_init(&artxq->list);
> -		if (ret != -ENOENT)
> -			list_add_tail(&artxq->list, &ar->txqs);
> +	return ret;
> +}
>
> -		ath10k_htt_tx_txq_update(hw, txq);
> +void ath10k_mac_tx_push_pending(struct ath10k *ar)
> +{
> +	struct ieee80211_hw *hw = ar->hw;
> +	u32 ac;
> +
> +	if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2))
> +		return;
>
> -		if (artxq == last || (ret < 0 && ret != -ENOENT))
> +	rcu_read_lock();
> +
> +	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
> +		if (ath10k_mac_schedule_txq(hw, ac) == -EBUSY)
>  			break;
>  	}
>
>  	rcu_read_unlock();
> -	spin_unlock_bh(&ar->txqs_lock);
>  }
>  EXPORT_SYMBOL(ath10k_mac_tx_push_pending);
>
> @@ -4293,31 +4281,25 @@ static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
>  static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
>  					struct ieee80211_txq *txq)
>  {
> -	struct ath10k *ar = hw->priv;
> -	struct ath10k_txq *artxq = (void *)txq->drv_priv;
> -	struct ieee80211_txq *f_txq;
> -	struct ath10k_txq *f_artxq;
> +	u8 ac = txq->ac;
>  	int ret = 0;
> -	int max = HTC_HOST_MAX_MSG_PER_TX_BUNDLE;
> -
> -	spin_lock_bh(&ar->txqs_lock);
> -	if (list_empty(&artxq->list))
> -		list_add_tail(&artxq->list, &ar->txqs);
>
> -	f_artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
> -	f_txq = container_of((void *)f_artxq, struct ieee80211_txq, drv_priv);
> -	list_del_init(&f_artxq->list);
> +	ieee80211_txq_schedule_start(hw, ac);
> +	txq = ieee80211_next_txq(hw, ac);
> +	ieee80211_txq_schedule_end(hw, ac);

You should really defer this schedule_end() until the scheduling episode
actually, y'know, ends... :)
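I.e. keep the whole thing inside one start/end bracket, roughly along
these lines (untested sketch, reusing the variables from your version):

	ieee80211_txq_schedule_start(hw, ac);

	txq = ieee80211_next_txq(hw, ac);
	if (!txq)
		goto out;

	while (ath10k_mac_tx_can_push(hw, txq)) {
		ret = ath10k_mac_tx_push_txq(hw, txq);
		if (ret < 0)
			break;
	}

	/* return the txq unconditionally; see below */
	ieee80211_return_txq(hw, txq);

out:
	ieee80211_txq_schedule_end(hw, ac);
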
> +	if (!txq)
> +		return;
>
> -	while (ath10k_mac_tx_can_push(hw, f_txq) && max--) {
> -		ret = ath10k_mac_tx_push_txq(hw, f_txq);
> +	while (ath10k_mac_tx_can_push(hw, txq)) {
> +		ret = ath10k_mac_tx_push_txq(hw, txq);
>  		if (ret < 0)
>  			break;
>  	}
> -	if (ret != -ENOENT)
> -		list_add_tail(&f_artxq->list, &ar->txqs);
> -	spin_unlock_bh(&ar->txqs_lock);
> -
> -	ath10k_htt_tx_txq_update(hw, f_txq);
> +	if (ret == -EBUSY) {
> +		ieee80211_txq_schedule_start(hw, ac);
> +		ieee80211_return_txq(hw, txq);
> +		ieee80211_txq_schedule_end(hw, ac);
> +	}

And ieee80211_return_txq() should be called regardless of the return code
(it'll do its own checking and do nothing if the queue is empty).

-Toke