
[RFC v1 026/256] cl8k: add bus/pci/tx_pci.c

From: Viktor Barna <viktor.barna@xxxxxxxxxx>

(Part of the split. Please take a look at the cover letter for more details.)

Signed-off-by: Viktor Barna <viktor.barna@xxxxxxxxxx>
---
 .../net/wireless/celeno/cl8k/bus/pci/tx_pci.c | 434 ++++++++++++++++++
 1 file changed, 434 insertions(+)
 create mode 100644 drivers/net/wireless/celeno/cl8k/bus/pci/tx_pci.c

diff --git a/drivers/net/wireless/celeno/cl8k/bus/pci/tx_pci.c b/drivers/net/wireless/celeno/cl8k/bus/pci/tx_pci.c
new file mode 100644
index 000000000000..4aeaa6a74777
--- /dev/null
+++ b/drivers/net/wireless/celeno/cl8k/bus/pci/tx_pci.c
@@ -0,0 +1,434 @@
+// SPDX-License-Identifier: MIT
+/* Copyright(c) 2019-2021, Celeno Communications Ltd. */
+
+#include <net/mac80211.h>
+#include "bus/pci/tx_pci.h"
+#include "bus/pci/ipc.h"
+#include "ipc_shared.h"
+#include "chip.h"
+#include "tx/tx.h"
+#include "sta.h"
+#include "enhanced_tim.h"
+#include "tx/bcmc_cfm.h"
+#include "tx/single_cfm.h"
+#include "tx/agg_cfm.h"
+#include "tx/tx_queue.h"
+#include "tx/agg_tx_report.h"
+#include "tx/sw_txhdr.h"
+#include "tx/tx_inject.h"
+#include "bus/pci/irq.h"
+#ifdef TRACE_SUPPORT
+#include "trace.h"
+#endif
+
+static void cl_tx_ipc_txdesc_populate(struct cl_hw *cl_hw, struct txdesc *txdesc,
+                                     u8 queue_type, u32 ipc_queue_idx)
+{
+       /*
+        * 1) Request allocation of a txdesc for the given queue type and index from the IPC layer.
+        * 2) Populate the ipc-txdesc with the received txdesc.
+        * 3) Increase the write index (must be the last action since FW fetches the WR index first).
+        */
+       u32 *write_idx_ptr = NULL;
+       struct txdesc *ipc_txdesc = NULL;
+       struct cl_ipc_ring_indices *indices = cl_hw->ipc_env->ring_indices_elem->indices;
+       struct cl_ipc_txdesc_write_idx *txdesc_write_idx =
+               (struct cl_ipc_txdesc_write_idx *)&indices->txdesc_write_idx;
+       u32 write_idx = 0;
+       u32 masked_write_idx = 0;
+
+       switch (queue_type) {
+       case QUEUE_TYPE_AGG:
+               ipc_txdesc = cl_hw->ipc_env->tx_queues.ipc_txdesc_agg[ipc_queue_idx];
+               write_idx = le32_to_cpu(txdesc_write_idx->agg[ipc_queue_idx]);
+               write_idx_ptr = &txdesc_write_idx->agg[ipc_queue_idx];
+               masked_write_idx = write_idx & (cl_hw->max_agg_tx_q_size - 1);
+               break;
+       case QUEUE_TYPE_SINGLE:
+               ipc_txdesc = cl_hw->ipc_env->tx_queues.ipc_txdesc_single[ipc_queue_idx];
+               write_idx = le32_to_cpu(txdesc_write_idx->single[ipc_queue_idx]);
+               write_idx_ptr = &txdesc_write_idx->single[ipc_queue_idx];
+               masked_write_idx = write_idx & (IPC_TXDESC_CNT_SINGLE - 1);
+               break;
+       case QUEUE_TYPE_BCMC:
+               ipc_txdesc = cl_hw->ipc_env->tx_queues.ipc_txdesc_bcmc;
+               write_idx = le32_to_cpu(txdesc_write_idx->bcmc);
+               write_idx_ptr = &txdesc_write_idx->bcmc;
+               masked_write_idx = write_idx & (IPC_TXDESC_CNT_BCMC - 1);
+               break;
+       default:
+               cl_dbg_verbose(cl_hw, "undefined queue type %u\n", queue_type);
+               WARN_ON(true);
+               return;
+       }
+
+       ipc_txdesc += masked_write_idx;
+
+       memcpy(ipc_txdesc, txdesc, sizeof(struct txdesc));
+
+       /*
+        * Update the write pointer only after the new txdesc copy is done,
+        * since FW fetches the WR pointer first. Otherwise FW might read an
+        * old txdesc because the WR index indicates that it is valid.
+        */
+       *write_idx_ptr = cpu_to_le32(write_idx + 1);
+}
+
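+/*
+ * Handle TX confirmations for aggregation queues: for each active BA session,
+ * free confirmed skb's up to the new ssn reported by firmware, return the
+ * freed space to the TX queue and clear the enhanced TIM bit once firmware
+ * has consumed everything it was given. Returns the number of confirmations
+ * handled.
+ */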
+static int cl_tx_pci_agg_cfm_handler(struct cl_hw *cl_hw)
+{
+       struct cl_agg_cfm_queue *cfm_queue = NULL;
+       struct cl_tx_queue *tx_queue = NULL;
+       struct cl_ipc_ring_indices *indices = cl_hw->ipc_env->ring_indices_elem->indices;
+       int total_cfm_handled = 0;
+       int free_space_add = 0;
+       u16 new_ssn = 0;
+       u16 prev_ssn = 0;
+       u8 used_cntr = 0;
+       u8 ba_queue_idx = 0;
+
+       for (ba_queue_idx = 0; ba_queue_idx < IPC_MAX_BA_SESSIONS; ba_queue_idx++) {
+               spin_lock(&cl_hw->tx_lock_cfm_agg);
+
+               cfm_queue = &cl_hw->agg_cfm_queues[ba_queue_idx];
+               if (list_empty(&cfm_queue->head)) {
+                       spin_unlock(&cl_hw->tx_lock_cfm_agg);
+                       continue;
+               }
+
+               tx_queue = cfm_queue->tx_queue;
+               free_space_add = 0;
+               prev_ssn = cfm_queue->ssn;
+               new_ssn = le16_to_cpu(indices->new_ssn_idx[ba_queue_idx]);
+
+               /*
+                * Continue to free skb's until:
+                * 1. The list is empty.
+                * 2. The agg ssn is equal to the new ssn received from firmware.
+                */
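+               /* The ssn is a 12-bit sequence number, hence the wrap at 0xFFF below */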
+               while (!list_empty(&cfm_queue->head) && (cfm_queue->ssn != new_ssn)) {
+                       cl_agg_cfm_free_head_skb(cl_hw, cfm_queue, ba_queue_idx);
+                       free_space_add++;
+                       cfm_queue->ssn = ((cfm_queue->ssn + 1) & 0xFFF);
+               }
+
+               /* Sanity check - verify that all skb's up to the new ssn were freed */
+               if (unlikely(cfm_queue->ssn != new_ssn))
+                       cl_dbg_err(cl_hw,
+                                  "ssn diff - queue idx=%u, new ssn=%u, prev ssn=%u, cfm ssn=%u\n",
+                                  ba_queue_idx, new_ssn, prev_ssn, cfm_queue->ssn);
+
+               spin_unlock(&cl_hw->tx_lock_cfm_agg);
+
+               if (free_space_add > 0) {
+                       spin_lock(&cl_hw->tx_lock_agg);
+
+                       if (tx_queue) {
+                               tx_queue->fw_free_space += free_space_add;
+                               tx_queue->total_fw_cfm += free_space_add;
+
+                               /*
+                                * If FW used all packets that the driver
+                                * pushed to it, clear the enhanced TIM bit.
+                                */
+                               if (cl_txq_is_fw_empty(tx_queue))
+                                       cl_enhanced_tim_clear_tx_agg(cl_hw,
+                                                                    ba_queue_idx,
+                                                                    tx_queue->hw_index,
+                                                                    tx_queue->cl_sta,
+                                                                    tx_queue->tid);
+                       }
+
+                       spin_unlock(&cl_hw->tx_lock_agg);
+
+                       total_cfm_handled += free_space_add;
+               }
+
+               /* Optimization - avoid running the for loop IPC_MAX_BA_SESSIONS times */
+               used_cntr++;
+               if (used_cntr == cl_hw->used_agg_queues)
+                       break;
+       }
+
+       return total_cfm_handled;
+}
+
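+/*
+ * Tasklet handler for aggregation TX confirmations; re-enables the
+ * txdesc_ind interrupt once the pending confirmations have been handled.
+ */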
+void cl_tx_pci_agg_cfm_tasklet(unsigned long data)
+{
+       struct cl_hw *cl_hw = (struct cl_hw *)data;
+       int cfm_handled;
+
+#ifdef TRACE_SUPPORT
+       trace_cl_trace_tx_agg_cfm_tasklet_start(cl_hw->idx);
+#endif
+
+       cfm_handled = cl_tx_pci_agg_cfm_handler(cl_hw);
+
+       if (!test_bit(CL_DEV_STOP_HW, &cl_hw->drv_flags))
+               cl_irq_enable(cl_hw, cl_hw->ipc_e2a_irq.txdesc_ind);
+
+#ifdef TRACE_SUPPORT
+       trace_cl_trace_tx_agg_cfm_tasklet_end(cl_hw->idx, cfm_handled);
+#endif
+}
+
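+/*
+ * Handle a single (non-aggregated) or broadcast/multicast TX confirmation:
+ * look up the sw_txhdr by DMA address, update queue counters and the enhanced
+ * TIM, unmap the frame, and either report TX status to mac80211 or free the
+ * skb.
+ */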
+static void cl_tx_pci_single_cfm_handler(struct cl_hw *cl_hw, u32 cfm_status,
+                                        u32 dma_addr, u32 single_queue_idx)
+{
+       struct sk_buff *skb = NULL;
+       struct ieee80211_tx_info *tx_info = NULL;
+       struct cl_hw_tx_status *status = (struct cl_hw_tx_status *)&cfm_status;
+       struct cl_sw_txhdr *sw_txhdr = NULL;
+       struct cl_tx_queue *tx_queue = NULL;
+       struct cl_sta *cl_sta = NULL;
+       unsigned long flags = 0;
+       u8 hw_queue;
+       bool is_bcn;
+
+       if (status->is_bcmc) {
+               spin_lock_irqsave(&cl_hw->tx_lock_bcmc, flags);
+               sw_txhdr = cl_bcmc_cfm_find(cl_hw, dma_addr, status->keep_skb);
+               tx_queue = &cl_hw->tx_queues.bcmc;
+       } else {
+               spin_lock_bh(&cl_hw->tx_lock_single);
+               sw_txhdr = cl_single_cfm_find(cl_hw, single_queue_idx, dma_addr);
+               tx_queue = &cl_hw->tx_queues.single[single_queue_idx];
+       }
+
+       if (!sw_txhdr) {
+               cl_dbg_err(cl_hw, "Failed to find single cfm [single_queue_idx %u] status 0x%x\n",
+                          single_queue_idx, cfm_status);
+               goto out;
+       }
+
+       skb = sw_txhdr->skb;
+       tx_info = IEEE80211_SKB_CB(skb);
+       hw_queue = sw_txhdr->hw_queue;
+       is_bcn = sw_txhdr->is_bcn;
+
+       /*
+        * Used for beacon frames only!
+        * If the skb was already confirmed, there is no need to increment the
+        * fw_free_space counter.
+        */
+       if (likely(!status->freespace_inc_skip)) {
+               tx_queue->total_fw_cfm++;
+               tx_queue->fw_free_space++;
+
+               /* Clear the TIM element if the associated IPC queue is empty */
+               if (!is_bcn && cl_txq_is_fw_empty(tx_queue)) {
+                       bool no_ps_buffer =
+                               (tx_info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER) ? true : false;
+
+                       cl_sta_lock(cl_hw);
+                       cl_sta = cl_sta_get(cl_hw, sw_txhdr->sta_idx);
+                       cl_enhanced_tim_clear_tx_single(cl_hw, single_queue_idx, hw_queue,
+                                                       no_ps_buffer, cl_sta, sw_txhdr->tid);
+                       cl_sta_unlock(cl_hw);
+               }
+       } else if (!is_bcn) {
+               cl_dbg_verbose(cl_hw, "should not be here - is_bcn=%d hw_queue=%d\n",
+                              is_bcn, hw_queue);
+       }
+
+       /*
+        * Used for beacon frames only!
+        * If this flag is set, it means FW still needs this beacon skb,
+        * therefore we do not free it.
+        */
+       if (unlikely(status->keep_skb)) {
+               if (!is_bcn)
+                       cl_dbg_verbose(cl_hw, "should not be here - is_bcn=%d hw_queue=%d\n",
+                                      is_bcn, hw_queue);
+               goto out;
+       }
+
+       dma_unmap_single(cl_hw->chip->dev, dma_addr, sw_txhdr->map_len, DMA_TO_DEVICE);
+
+       /*
+        * If the queue is not empty, call cl_txq_sched() to
+        * transfer packets from the queue to firmware.
+        */
+       if (!list_empty(&tx_queue->hdrs))
+               cl_txq_sched(cl_hw, tx_queue);
+
+       /* cl_tx_inject_cfm() must be called inside the lock */
+       if (cl_tx_ctrl_is_inject(tx_info)) {
+               cl_sta_lock(cl_hw);
+               cl_sta = cl_sta_get(cl_hw, sw_txhdr->sta_idx);
+               if (cl_sta)
+                       cl_agg_tx_report_simulate_for_single(cl_hw, cl_sta, status);
+               cl_sta_unlock(cl_hw);
+
+               cl_tx_inject_cfm(cl_hw);
+               dev_kfree_skb_any(skb);
+               cl_sw_txhdr_free(cl_hw, sw_txhdr);
+               goto out;
+       }
+
+       if (status->is_bcmc)
+               spin_unlock_irqrestore(&cl_hw->tx_lock_bcmc, flags);
+       else
+               spin_unlock_bh(&cl_hw->tx_lock_single);
+
+       if (is_bcn) {
+               struct ieee80211_vif *vif = sw_txhdr->cl_vif->vif;
+
+               if (vif) {
+                       if (vif->csa_active &&
+                           ieee80211_beacon_cntdwn_is_complete(vif))
+                               ieee80211_csa_finish(vif);
+               }
+
+               consume_skb(skb);
+               cl_sw_txhdr_free(cl_hw, sw_txhdr);
+               return;
+       }
+
+       if (status->frm_successful && !(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
+               tx_info->flags |= IEEE80211_TX_STAT_ACK;
+
+       cl_sta_lock(cl_hw);
+       cl_sta = cl_sta_get(cl_hw, sw_txhdr->sta_idx);
+
+       if (cl_sta) {
+               if (tx_queue->type != QUEUE_TYPE_BCMC &&
+                   ieee80211_is_data(sw_txhdr->fc) &&
+                   !cl_tx_ctrl_is_eapol(tx_info))
+                       cl_agg_tx_report_simulate_for_single(cl_hw, cl_sta, status);
+
+               cl_tx_check_start_ba_session(cl_hw, cl_sta->stainfo, skb);
+       }
+
+       cl_sta_unlock(cl_hw);
+
+       if (tx_info->ack_frame_id)
+               ieee80211_tx_status(cl_hw->hw, skb);
+       else
+               consume_skb(skb);
+
+       cl_sw_txhdr_free(cl_hw, sw_txhdr);
+       return;
+
+out:
+       if (status->is_bcmc)
+               spin_unlock_irqrestore(&cl_hw->tx_lock_bcmc, flags);
+       else
+               spin_unlock_bh(&cl_hw->tx_lock_single);
+}
+
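+/*
+ * Tasklet handler for single TX confirmations: drain the confirmation ring
+ * shared with firmware, advance the read pointer after each entry and
+ * re-enable the txcfm interrupt when done.
+ */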
+void cl_tx_pci_single_cfm_tasklet(unsigned long data)
+{
+       struct cl_hw *cl_hw = (struct cl_hw *)data;
+       struct cl_ipc_host_env *ipc_env = cl_hw->ipc_env;
+       struct cl_ipc_cfm_msg *msg = NULL;
+
+#ifdef TRACE_SUPPORT
+       trace_cl_trace_tx_pci_single_cfm_tasklet_start(cl_hw->idx, ipc_env->cfm_used_idx);
+#endif
+
+       msg = (struct cl_ipc_cfm_msg *)(ipc_env->cfm_virt_base_addr) +
+               (ipc_env->cfm_used_idx % IPC_CFM_CNT);
+
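+       /* Process confirmation entries until one with a zero DMA address is reached */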
+       while (msg && msg->dma_addr) {
+               u32 cfm_used_idx = ipc_env->cfm_used_idx++;
+
+               cl_tx_pci_single_cfm_handler(cl_hw,
+                                            le32_to_cpu(msg->status),
+                                            le32_to_cpu(msg->dma_addr),
+                                            le32_to_cpu(msg->single_queue_idx));
+               msg->dma_addr = 0;
+               ipc_env->shared->cfm_read_pointer = cpu_to_le32(cfm_used_idx);
+               msg = (struct cl_ipc_cfm_msg *)(ipc_env->cfm_virt_base_addr) +
+                       (ipc_env->cfm_used_idx % IPC_CFM_CNT);
+       }
+
+       /* Enable the Tx CFM interrupt bit */
+       if (!test_bit(CL_DEV_STOP_HW, &cl_hw->drv_flags))
+               cl_irq_enable(cl_hw, cl_hw->ipc_e2a_irq.txcfm);
+
+#ifdef TRACE_SUPPORT
+       trace_cl_trace_tx_pci_single_cfm_tasklet_end(cl_hw->idx, ipc_env->cfm_used_idx);
+#endif
+}
+
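+/*
+ * Push a single frame to firmware: DMA-map the buffer, populate the IPC
+ * txdesc, add the sw_txhdr to the matching confirmation list, set the
+ * enhanced TIM bit where applicable and trigger the A2E interrupt.
+ */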
+void cl_tx_pci_pkt_fw_send(struct cl_hw *cl_hw, struct cl_sw_txhdr *sw_txhdr,
+                          struct cl_tx_queue *tx_queue)
+{
+       struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(sw_txhdr->skb);
+       struct txdesc *txdesc = &sw_txhdr->txdesc;
+       struct tx_host_info *host_info = &txdesc->host_info;
+       struct cl_sta *cl_sta = sw_txhdr->cl_sta;
+       struct cl_vif *cl_vif = sw_txhdr->cl_vif;
+       u8 hw_queue = sw_txhdr->hw_queue;
+       u16 a2e_trigger_bit_pos;
+       u8 tid = sw_txhdr->tid;
+       u8 queue_type = tx_queue->type;
+       bool no_ps_buffer = !!(tx_info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER);
+       u16 ipc_queue_idx = tx_queue->index;
+       bool is_mgmt = ieee80211_is_mgmt(sw_txhdr->fc);
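+       /* The DMA mapping starts 2 bytes before skb->data when host_padding bit 0 is set */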
+       u8 *cpu_addr = (u8 *)sw_txhdr->skb->data -
+                      ((host_info->host_padding & 1) * 2);
+       dma_addr_t dma_addr = dma_map_single(cl_hw->chip->dev, cpu_addr,
+                                            sw_txhdr->map_len, DMA_TO_DEVICE);
+
+       if (WARN_ON(dma_mapping_error(cl_hw->chip->dev, dma_addr))) {
+               tx_queue->dump_dma_map_fail++;
+
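+               /* Roll back the sequence number taken for this frame and free the skb */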
+               if (queue_type == QUEUE_TYPE_SINGLE) {
+                       if (!is_mgmt)
+                               cl_vif->sequence_number = DEC_SN(cl_vif->sequence_number);
+
+                       cl_tx_single_free_skb(cl_hw, sw_txhdr->skb);
+               } else {
+                       if (queue_type == QUEUE_TYPE_AGG) {
+                               struct cl_baw *baw = &cl_sta->baws[tid];
+
+                               baw->tid_seq = DEC_SN(baw->tid_seq);
+                       }
+
+                       dev_kfree_skb_any(sw_txhdr->skb);
+               }
+
+               cl_sw_txhdr_free(cl_hw, sw_txhdr);
+               return;
+       }
+
+       txdesc->umacdesc.packet_addr[0] = cpu_to_le32(dma_addr);
+
+       cl_tx_ipc_txdesc_populate(cl_hw, txdesc, queue_type, ipc_queue_idx);
+
+       /* Make sure memory is written before pushing to HW */
+       wmb();
+
+       /*
+        * 1) Notify firmware of new buffered traffic by updating the enhanced TIM.
+        * 2) Push the sw_txhdr to the confirmation list.
+        */
+       if (queue_type == QUEUE_TYPE_AGG) {
+               a2e_trigger_bit_pos = IPC_IRQ_A2E_TXDESC_AGG_MAP(hw_queue);
+               cl_agg_cfm_add(cl_hw, sw_txhdr, ipc_queue_idx);
+               cl_enhanced_tim_set_tx_agg(cl_hw, ipc_queue_idx, hw_queue,
+                                          no_ps_buffer, cl_sta, tid);
+       } else if (queue_type == QUEUE_TYPE_SINGLE) {
+               a2e_trigger_bit_pos = IPC_IRQ_A2E_TXDESC_SINGLE_MAP(hw_queue);
+               cl_single_cfm_add(cl_hw, sw_txhdr, ipc_queue_idx);
+               cl_enhanced_tim_set_tx_single(cl_hw, ipc_queue_idx, hw_queue,
+                                             no_ps_buffer, cl_sta, tid);
+       } else {
+               a2e_trigger_bit_pos = IPC_IRQ_A2E_TXDESC_SINGLE_MAP(hw_queue);
+               cl_bcmc_cfm_add(cl_hw, sw_txhdr);
+       }
+
+       /* Update tx_queue counters */
+       tx_queue->fw_free_space--;
+       tx_queue->total_fw_push_desc++;
+       tx_queue->total_fw_push_skb += host_info->packet_cnt;
+
+#ifdef TRACE_SUPPORT
+       trace_cl_trace_tx_push(cl_hw->idx, sw_txhdr->skb, host_info->packet_cnt,
+                              txdesc->e2w_txhdr_param.seq_ctrl, tid);
+#endif
+
+       /* Trigger interrupt to firmware so that it will know that a new descriptor is ready */
+       cl_hw->ipc_host2xmac_trigger_set(cl_hw->chip, BIT(a2e_trigger_bit_pos));
+}
+
--
2.30.0





