[RFC v1 208/256] cl8k: add tx/single_cfm.c

From: Viktor Barna <viktor.barna@xxxxxxxxxx>

(Part of the split. Please take a look at the cover letter for more
details.)

Signed-off-by: Viktor Barna <viktor.barna@xxxxxxxxxx>
---
 .../net/wireless/celeno/cl8k/tx/single_cfm.c  | 214 ++++++++++++++++++
 1 file changed, 214 insertions(+)
 create mode 100644 drivers/net/wireless/celeno/cl8k/tx/single_cfm.c

diff --git a/drivers/net/wireless/celeno/cl8k/tx/single_cfm.c b/drivers/net/wireless/celeno/cl8k/tx/single_cfm.c
new file mode 100644
index 000000000000..d90148d3f9bf
--- /dev/null
+++ b/drivers/net/wireless/celeno/cl8k/tx/single_cfm.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: MIT
+/* Copyright(c) 2019-2021, Celeno Communications Ltd. */
+
+#include "tx/single_cfm.h"
+#include "tx/tx_queue.h"
+#include "tx/tx_inject.h"
+#include "chip.h"
+#include "tx/sw_txhdr.h"
+#include "enhanced_tim.h"
+
+/*
+ * cl_hw->single_cfm_queues:
+ * These queues hold pointers to skbs that were sent as singles
+ * and are awaiting confirmation.
+ */
+
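+/*
+ * Maximum number of iterations in the poll-empty helpers below; each
+ * iteration sleeps for 20 ms, so the effective timeout is about 1 second.
+ */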
+#define SINGLE_POLL_TIMEOUT 50
+
+void cl_single_cfm_init(struct cl_hw *cl_hw)
+{
+       int i = 0;
+
+       for (i = 0; i < MAX_SINGLE_QUEUES; i++)
+               INIT_LIST_HEAD(&cl_hw->single_cfm_queues[i].head);
+}
+
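+/* Queue a sw_txhdr of a transmitted single until firmware confirms it. */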
+void cl_single_cfm_add(struct cl_hw *cl_hw, struct cl_sw_txhdr *sw_txhdr, u32 queue_idx)
+{
+       list_add_tail(&sw_txhdr->cfm_list, &cl_hw->single_cfm_queues[queue_idx].head);
+}
+
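+/*
+ * Look up a pending single by the DMA address reported in the
+ * confirmation, unlink it from its queue and return it, or NULL
+ * if no match is found.
+ */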
+struct cl_sw_txhdr *cl_single_cfm_find(struct cl_hw *cl_hw, u32 queue_idx,
+                                      dma_addr_t dma_addr)
+{
+       struct cl_single_cfm_queue *cfm_queue = NULL;
+       struct cl_sw_txhdr *sw_txhdr = NULL;
+       struct cl_sw_txhdr *tmp = NULL;
+
+       if (queue_idx >= MAX_SINGLE_QUEUES)
+               return NULL;
+
+       cfm_queue = &cl_hw->single_cfm_queues[queue_idx];
+
+       list_for_each_entry_safe(sw_txhdr, tmp, &cfm_queue->head, cfm_list) {
+               if (le32_to_cpu(sw_txhdr->txdesc.umacdesc.packet_addr[0]) == dma_addr) {
+                       list_del(&sw_txhdr->cfm_list);
+                       return sw_txhdr;
+               }
+       }
+
+       return NULL;
+}
+
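+/*
+ * Drop all pending singles in one confirmation queue: unmap the DMA
+ * buffer, free the skb and the sw_txhdr, then restore fw_free_space
+ * to its maximum and clear the enhanced TIM bit of the queue.
+ */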
+static void cl_single_cfm_flush_queue(struct cl_hw *cl_hw, u32 queue_idx)
+{
+       struct cl_single_cfm_queue *cfm_queue = &cl_hw->single_cfm_queues[queue_idx];
+       struct cl_tx_queue *tx_queue = NULL;
+       struct cl_sw_txhdr *sw_txhdr = NULL;
+       struct sk_buff *skb = NULL;
+       struct ieee80211_tx_info *tx_info = NULL;
+       dma_addr_t dma_addr;
+
+       if (list_empty(&cfm_queue->head))
+               return;
+
+       do {
+               sw_txhdr = list_first_entry(&cfm_queue->head, struct cl_sw_txhdr, cfm_list);
+               dma_addr = le32_to_cpu(sw_txhdr->txdesc.umacdesc.packet_addr[0]);
+               skb = sw_txhdr->skb;
+               tx_info = IEEE80211_SKB_CB(skb);
+
+               dma_unmap_single(cl_hw->chip->dev, dma_addr, sw_txhdr->map_len, DMA_TO_DEVICE);
+
+               if (cl_tx_ctrl_is_inject(tx_info))
+                       cl_tx_inject_cfm(cl_hw);
+
+               cl_tx_single_free_skb(cl_hw, skb);
+               list_del(&sw_txhdr->cfm_list);
+               cl_sw_txhdr_free(cl_hw, sw_txhdr);
+       } while (!list_empty(&cfm_queue->head));
+
+       /*
+        * Set fw_free_space back to maximum after flushing the queue
+        * and clear the enhanced TIM.
+        */
+       tx_queue = &cl_hw->tx_queues.single[queue_idx];
+       tx_queue->fw_free_space = tx_queue->fw_max_size;
+       cl_enhanced_tim_clear_tx_single(cl_hw, queue_idx, tx_queue->hw_index,
+                                       false, tx_queue->cl_sta, tx_queue->tid);
+}
+
+void cl_single_cfm_flush_all(struct cl_hw *cl_hw)
+{
+       u32 i = 0;
+
+       for (i = 0; i < MAX_SINGLE_QUEUES; i++)
+               cl_single_cfm_flush_queue(cl_hw, i);
+}
+
+void cl_single_cfm_flush_sta(struct cl_hw *cl_hw, u8 sta_idx)
+{
+       /* Flush all single confirmation queues of this sta and reset the write index. */
+       u8 ac;
+       u16 queue_idx;
+
+       spin_lock_bh(&cl_hw->tx_lock_single);
+
+       for (ac = 0; ac < AC_MAX; ac++) {
+               queue_idx = QUEUE_IDX(sta_idx, ac);
+               cl_single_cfm_flush_queue(cl_hw, queue_idx);
+
+#ifdef CONFIG_CL_PCIE
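+               /* Reset the host write index of the IPC txdesc ring. */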
+               cl_hw->ipc_env->ring_indices_elem->indices->txdesc_write_idx.single[queue_idx] = 0;
+#endif
+       }
+
+       spin_unlock_bh(&cl_hw->tx_lock_single);
+}
+
+static void cl_single_cfm_poll_timeout(struct cl_hw *cl_hw, u32 queue_idx)
+{
+       /*
+        * If polling fails, clear the enhanced TIM so that the firmware
+        * will not try to transmit these packets.
+        */
+       struct cl_tx_queue *tx_queue = &cl_hw->tx_queues.single[queue_idx];
+
+       cl_dbg_err(cl_hw, "Polling timeout (queue_idx = %u)\n", queue_idx);
+
+       spin_lock_bh(&cl_hw->tx_lock_single);
+       cl_enhanced_tim_clear_tx_single(cl_hw, queue_idx, tx_queue->hw_index,
+                                       false, tx_queue->cl_sta, tx_queue->tid);
+       spin_unlock_bh(&cl_hw->tx_lock_single);
+}
+
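+/*
+ * Wait for a single confirmation queue to drain, sleeping 20 ms between
+ * checks; give up on firmware error or after SINGLE_POLL_TIMEOUT iterations.
+ */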
+void cl_single_cfm_poll_empty(struct cl_hw *cl_hw, u32 queue_idx)
+{
+       struct cl_single_cfm_queue *cfm_queue = &cl_hw->single_cfm_queues[queue_idx];
+       bool empty = false;
+       int i = 0;
+
+       if (test_bit(CL_DEV_FW_ERROR, &cl_hw->drv_flags))
+               return;
+
+       while (true) {
+               spin_lock_bh(&cl_hw->tx_lock_single);
+               empty = list_empty(&cfm_queue->head);
+               spin_unlock_bh(&cl_hw->tx_lock_single);
+
+               if (empty)
+                       return;
+
+               if (++i == SINGLE_POLL_TIMEOUT) {
+                       cl_single_cfm_poll_timeout(cl_hw, queue_idx);
+                       return;
+               }
+
+               msleep(20);
+       }
+}
+
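+/* Check whether the high-priority queue holds no frames of this station. */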
+static bool list_hp_empty_sta(struct cl_hw *cl_hw, u8 sta_idx)
+{
+       struct cl_single_cfm_queue *hp_cfm_queue = &cl_hw->single_cfm_queues[HIGH_PRIORITY_QUEUE];
+       struct cl_sw_txhdr *sw_txhdr = NULL;
+
+       list_for_each_entry(sw_txhdr, &hp_cfm_queue->head, cfm_list)
+               if (sw_txhdr->sta_idx == sta_idx)
+                       return false;
+
+       return true;
+}
+
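+/*
+ * Like cl_single_cfm_poll_empty(), but wait only for the frames of this
+ * station in the high-priority queue.
+ */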
+static void cl_single_cfm_poll_empty_hp(struct cl_hw *cl_hw, u8 sta_idx)
+{
+       bool empty = false;
+       int i = 0;
+
+       if (test_bit(CL_DEV_FW_ERROR, &cl_hw->drv_flags))
+               return;
+
+       while (true) {
+               spin_lock_bh(&cl_hw->tx_lock_single);
+               empty = list_hp_empty_sta(cl_hw, sta_idx);
+               spin_unlock_bh(&cl_hw->tx_lock_single);
+
+               if (empty)
+                       return;
+
+               if (++i == SINGLE_POLL_TIMEOUT) {
+                       cl_single_cfm_poll_timeout(cl_hw, HIGH_PRIORITY_QUEUE);
+                       return;
+               }
+
+               msleep(20);
+       }
+}
+
+void cl_single_cfm_poll_empty_sta(struct cl_hw *cl_hw, u8 sta_idx)
+{
+       /*
+        * Poll all single queues belonging to this station, and poll all
+        * packets belonging to this station in the high-priority queue.
+        */
+       u8 ac;
+       u16 queue_idx;
+
+       for (ac = 0; ac < AC_MAX; ac++) {
+               queue_idx = QUEUE_IDX(sta_idx, ac);
+               cl_single_cfm_poll_empty(cl_hw, queue_idx);
+       }
+
+       cl_single_cfm_poll_empty_hp(cl_hw, sta_idx);
+}
--
2.30.0
