[RFC v1 172/256] cl8k: add rx/rx.c

From: Viktor Barna <viktor.barna@xxxxxxxxxx>

(Part of the split. Please take a look at the cover letter for more
details.)

Signed-off-by: Viktor Barna <viktor.barna@xxxxxxxxxx>
---
 drivers/net/wireless/celeno/cl8k/rx/rx.c | 1108 ++++++++++++++++++++++
 1 file changed, 1108 insertions(+)
 create mode 100644 drivers/net/wireless/celeno/cl8k/rx/rx.c

diff --git a/drivers/net/wireless/celeno/cl8k/rx/rx.c b/drivers/net/wireless/celeno/cl8k/rx/rx.c
new file mode 100644
index 000000000000..d55038ae2e85
--- /dev/null
+++ b/drivers/net/wireless/celeno/cl8k/rx/rx.c
@@ -0,0 +1,1108 @@
+// SPDX-License-Identifier: MIT
+/* Copyright(c) 2019-2021, Celeno Communications Ltd. */
+
+#include "tx/tx.h"
+#include "rx/rx.h"
+#include "rx/rx_amsdu.h"
+#include "stats.h"
+#include "rate_ctrl.h"
+#include "rssi.h"
+#include "band.h"
+#include "utils/utils.h"
+#include "vns.h"
+#include "dfs/dfs.h"
+#include "wrs/wrs_api.h"
+#include "twt.h"
+#include "recovery.h"
+#include "fw/fw_dbg.h"
+#include "def.h"
+#include "rx/rx_reorder.h"
+#include "ops.h"
+#include "chip.h"
+#include "channel.h"
+#ifdef CONFIG_CL_PCIE
+#include "bus/pci/rx_pci.h"
+#endif
+#ifdef TRACE_SUPPORT
+#include "trace.h"
+#endif
+
+/* Must correspond to FW definition of MM_SEC_DEFAULT_KEY_COUNT */
+#define MM_SEC_DEFAULT_KEY_COUNT 64
+
+#define VHT_MCS_MASK 0x0F
+#define VHT_MCS_OFT  0
+
+/* Number of entries in HW legacy rate conversion table */
+#define LEGACY_RATE_MAX 16
+
+static const s8 legacy_rates_lut[LEGACY_RATE_MAX] = {
+       0,      /* 0: 1 Mbps   */
+       1,      /* 1: 2 Mbps   */
+       2,      /* 2: 5.5 Mbps */
+       3,      /* 3: 11 Mbps  */
+       -1,     /* 4: Invalid  */
+       -1,     /* 5: Invalid  */
+       -1,     /* 6: Invalid  */
+       -1,     /* 7: Invalid  */
+       10,     /* 8: 48 Mbps  */
+       8,      /* 9: 24 Mbps  */
+       6,      /* 10: 12 Mbps */
+       4,      /* 11: 6 Mbps  */
+       11,     /* 12: 54 Mbps */
+       9,      /* 13: 36 Mbps */
+       7,      /* 14: 18 Mbps */
+       5       /* 15: 9 Mbps  */
+};
+
+/*
+ * rx_skb_cnt is an atomic counter that tracks the total number of RX skbs
+ * across the entire host.
+ * The counter is incremented when an skb is allocated and decremented when
+ * the skb is freed (= the destructor function is called).
+ * Therefore the counter is global (and not part of cl_hw or cl_chip).
+ *
+ * rx_skb_max is configured to:
+ * max(chip0->conf->ci_rx_skb_max, chip1->conf->ci_rx_skb_max)
+ */
+static atomic_t rx_skb_cnt = ATOMIC_INIT(0);
+static u32 rx_skb_max;
+
+static void cl_rx_skb_destructor(struct sk_buff *skb)
+{
+       atomic_dec(&rx_skb_cnt);
+}
+
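+/*
+ * Per-CPU tasklets used when RX frames are handed to mac80211 on a remote
+ * CPU (ci_rx_remote_cpu_mac); one tasklet instance per TCV on each CPU.
+ */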
+static DEFINE_PER_CPU(struct tasklet_struct, rx_remote_tasklet_mac[TCV_TOTAL]);
+
+static void cl_rx_remote_cpu_mac(struct cl_hw *cl_hw)
+{
+       int cpu = cl_hw->conf->ci_rx_remote_cpu_mac;
+       struct tasklet_struct *t = &per_cpu(rx_remote_tasklet_mac[cl_hw->idx], cpu);
+
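+       /* Avoid a redundant IPI if the remote tasklet is already scheduled */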
+       if (!test_bit(TASKLET_STATE_SCHED, &t->state))
+               smp_call_function_single(cpu, cl_rx_remote_tasklet_sched, t, 0);
+}
+
+static int cl_rx_check_err(struct cl_hw *cl_hw, struct sk_buff *skb, struct hw_rxhdr *rxhdr)
+{
+       u32 status;
+
+       if (rxhdr->frm_successful_rx)
+               return 0;
+
+       /* The status field is at an offset of 14 u32s into the RX header */
+       status = *((u32 *)rxhdr + 14);
+
+       if (rxhdr->fcs_err) {
+               cl_hw->radio_stats[CL_RADIO_FCS_ERROR]++;
+               cl_dbg_err(cl_hw, "fcs_err (status 0x%x)\n", status);
+       }
+
+       if (rxhdr->rx_fifo_oflow) {
+               cl_hw->radio_stats[CL_RADIO_RX_FIFO_OVERFLOW]++;
+               cl_dbg_err(cl_hw, "rx_fifo_oflow (status 0x%x)\n", status);
+       }
+
+       if (rxhdr->undef_err) {
+               cl_hw->radio_stats[CL_RADIO_UNDEFINED_ERROR]++;
+               cl_dbg_err(cl_hw, "undef_err (status 0x%x)\n", status);
+       }
+
+       if (rxhdr->phy_err) {
+               cl_hw->radio_stats[CL_RADIO_PHY_ERROR]++;
+               cl_dbg_err(cl_hw, "phy_err (status 0x%x)\n", status);
+       }
+
+       if (rxhdr->addr_mismatch) {
+               cl_hw->radio_stats[CL_RADIO_ADDRESS_MISMATCH]++;
+               cl_dbg_err(cl_hw, "addr_mismatch (status 0x%x)\n", status);
+       }
+
+       cl_hw->rx_info.pkt_drop_not_success++;
+       kfree_skb(skb);
+
+       return -1;
+}
+
+static u8 chnl_bw_to_rate_info_bw[CHNL_BW_MAX] = {
+       [CHNL_BW_20] = RATE_INFO_BW_20,
+       [CHNL_BW_40] = RATE_INFO_BW_40,
+       [CHNL_BW_80] = RATE_INFO_BW_80,
+       [CHNL_BW_160] = RATE_INFO_BW_160,
+};
+
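+/*
+ * Per-bandwidth RSSI adjustment in dB - presumably compensating for the
+ * ~3 dB noise-floor increase per bandwidth doubling.
+ */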
+static u8 chnl_bw_factor[CHNL_BW_MAX] = {
+       [CHNL_BW_20] = 0,
+       [CHNL_BW_40] = 3,
+       [CHNL_BW_80] = 6,
+       [CHNL_BW_160] = 9,
+};
+
+static int cl_rx_fill_status(struct cl_hw *cl_hw, struct cl_sta *cl_sta, struct sk_buff *skb,
+                            struct hw_rxhdr *rxhdr, u8 *encrypt_len)
+{
+       s8 rssi[MAX_ANTENNAS] = RX_HDR_RSSI(rxhdr);
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+       u8 tid = ieee80211_get_tid(hdr);
+       u8 factor;
+
+       memset(status, 0, sizeof(struct ieee80211_rx_status));
+
+       status->mactime = ((u64)le32_to_cpu(rxhdr->tsf_hi) << 32) | le32_to_cpu(rxhdr->tsf_lo);
+       status->flag |= RX_FLAG_MACTIME_END;
+
+       if (cl_sta && cl_sta->tid_agg_rx[tid])
+               status->flag |= RX_FLAG_DUP_VALIDATED;
+
+       status->antenna = rxhdr->antenna_set;
+       status->band = cl_band_from_fw_idx(rxhdr->phy_band);
+
+       if (rxhdr->format_mod >= FORMATMOD_HE_SU) {
+               status->encoding = RX_ENC_HE;
+               status->rate_idx = (rxhdr->mcs & VHT_MCS_MASK) >> VHT_MCS_OFT;
+               status->nss = rxhdr->n_sts + 1;
+
+               /* he_gi expects values according to enum nl80211_he_gi */
+               status->he_gi = convert_gi_format_wrs_to_fw(WRS_MODE_HE, rxhdr->gi_type);
+       } else if (rxhdr->format_mod == FORMATMOD_VHT) {
+               status->encoding = RX_ENC_VHT;
+               status->rate_idx = (rxhdr->mcs & VHT_MCS_MASK) >> VHT_MCS_OFT;
+               status->nss = rxhdr->n_sts + 1;
+
+               if (rxhdr->gi_type)
+                       status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+       } else if (rxhdr->format_mod == FORMATMOD_HT_GF) {
+               status->encoding = RX_ENC_HT;
+               status->enc_flags |= RX_ENC_FLAG_HT_GF;
+               status->rate_idx = rxhdr->mcs;
+
+               if (rxhdr->gi_type)
+                       status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+       } else if (rxhdr->format_mod == FORMATMOD_HT_MF) {
+               status->encoding = RX_ENC_HT;
+               status->rate_idx = rxhdr->mcs;
+
+               if (rxhdr->gi_type)
+                       status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+       } else {
+               if (legacy_rates_lut[rxhdr->leg_rate] != -1)
+                       status->rate_idx = legacy_rates_lut[rxhdr->leg_rate];
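+               /* Non-2.4 GHz legacy rate indices start at the OFDM rates */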
+               if (status->band != NL80211_BAND_2GHZ)
+                       status->rate_idx -= RATE_CTRL_OFFSET_OFDM;
+               if (!rxhdr->pre_type)
+                       status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
+       }
+
+       if (rxhdr->aggregation) {
+               status->flag |= RX_FLAG_AMPDU_DETAILS;
+               status->ampdu_reference = rxhdr->ampdu_cnt;
+       }
+
+       /* Set bw field */
+       status->bw = chnl_bw_to_rate_info_bw[rxhdr->ch_bw];
+
+       factor = chnl_bw_factor[rxhdr->ch_bw];
+       cl_rssi_bw_adjust(cl_hw, rxhdr, factor);
+
+       /*
+        * TODO: Check whether, for frames received on 40 MHz or wider
+        * bandwidth, center1_freq should be used instead of prim20_freq.
+        */
+       status->freq = le16_to_cpu(Q2_TO_FREQ(rxhdr->phy_prim20_freq));
+
+       status->signal = cl_rssi_calc_equivalent(cl_hw, rssi);
+
+       switch (rxhdr->decr_status) {
+       case CL_RX_HDR_DECR_UNENC:
+               if (ieee80211_has_protected(hdr->frame_control)) {
+                       cl_dbg_warn(cl_hw, "Protected frame unencrypted\n");
+                       cl_hw->rx_info.pkt_drop_unencrypted++;
+                       return -1;
+               }
+               break;
+       case CL_RX_HDR_DECR_ICVFAIL:
+       case CL_RX_HDR_DECR_AMSDUDISCARD:
+       case CL_RX_HDR_DECR_NULLKEY:
+       case CL_RX_HDR_DECR_CCMPFAIL:
+               cl_dbg_warn(cl_hw, "Decryption failed (%u)\n", rxhdr->decr_status);
+               cl_hw->rx_info.pkt_drop_decrypt_fail++;
+               *encrypt_len = 0;
+               return -1;
+       case CL_RX_HDR_DECR_WEPSUCCESS:
+       case CL_RX_HDR_DECR_TKIPSUCCESS:
+               *encrypt_len = IEEE80211_WEP_ICV_LEN;
+               status->flag |= (RX_FLAG_DECRYPTED | RX_FLAG_ICV_STRIPPED);
+               break;
+       case CL_RX_HDR_DECR_CCMPSUCCESS:
+               *encrypt_len = IEEE80211_CCMP_HDR_LEN;
+               status->flag |= (RX_FLAG_DECRYPTED | RX_FLAG_MIC_STRIPPED);
+               status->flag |= RX_FLAG_PN_VALIDATED;
+               break;
+       }
+
+       return 0;
+}
+
+static void cl_rx_action_twt_setup(struct cl_hw *cl_hw, struct cl_ieee80211_mgmt *mgmt,
+                                  int len, struct cl_sta *cl_sta)
+{
+       u32 min_size = 0;
+       u8 negotiation_type =
+               mgmt->u.action.u.twt_individual_setup.twt_elem.control.fields.negotiation_type;
+
+       /* Individual TWT */
+       if ((negotiation_type & 0x2) == 0) {
+               /* Verify min size */
+               min_size = IEEE80211_MIN_ACTION_SIZE + 4 +
+                       sizeof(mgmt->u.action.u.twt_individual_setup.twt_elem);
+
+               if (len < min_size) {
+                       cl_dbg_err(cl_hw, "TWT: Individual setup action frame length error\n");
+                       return;
+               }
+
+               /* Regular individual TWT */
+               if (negotiation_type == 0)
+                       cl_twt_handle_individual_setup_request(cl_hw, cl_sta, mgmt);
+       } else { /* Broadcast TWT */
+               /* Verify min size */
+               min_size = IEEE80211_MIN_ACTION_SIZE + 4 +
+                       sizeof(mgmt->u.action.u.twt_broadcast_setup.twt_elem);
+
+               if (len < min_size) {
+                       cl_dbg_err(cl_hw, "TWT: Broadcast setup action frame length error\n");
+                       return;
+               }
+       }
+}
+
+static void cl_rx_action_twt_teardown(struct cl_hw *cl_hw, struct cl_ieee80211_mgmt *mgmt,
+                                     int len, struct cl_sta *cl_sta)
+{
+       u8 negotiation_type;
+
+       if (len < IEEE80211_MIN_ACTION_SIZE + 2) {
+               cl_dbg_err(cl_hw, "Invalid length of TWT teardown action frame\n");
+               return;
+       }
+
+       negotiation_type = mgmt->u.action.u.twt_individual_teardown.negotiation_type;
+
+       if (negotiation_type <= 1)
+               cl_twt_handle_individual_teardown_request(cl_hw, cl_sta, mgmt);
+}
+
+static void cl_rx_action_frame_handler(struct cl_hw *cl_hw, struct cl_ieee80211_mgmt *mgmt,
+                                      int len, struct cl_sta *cl_sta)
+{
+       /* Verify action code is present */
+       if (len < IEEE80211_MIN_ACTION_SIZE + 1)
+               return;
+
+       switch (mgmt->u.action.category) {
+       case WLAN_CATEGORY_UNPROTECTED_S1G:
+               if (!cl_twt_is_enabled(cl_hw))
+                       break;
+
+               if (cl_sta->cl_vif->vif->type != NL80211_IFTYPE_AP)
+                       break;
+
+               switch (mgmt->u.action.u.twt_individual_setup.action_code) {
+               case WLAN_UNPROT_S1G_ACTION_TWT_SETUP:
+                       cl_rx_action_twt_setup(cl_hw, mgmt, len, cl_sta);
+                       break;
+               case WLAN_UNPROT_S1G_ACTION_TWT_TEARDOWN:
+                       cl_rx_action_twt_teardown(cl_hw, mgmt, len, cl_sta);
+                       break;
+               default:
+                       break;
+               }
+               break;
+       case WLAN_CATEGORY_WNM:
+               /* TODO: bss_color_check_action handling may belong here */
+               break;
+       default:
+               break;
+       }
+}
+
+static void cl_rx_mgmt_check(struct cl_hw *cl_hw, struct sk_buff *skb,
+                            struct cl_sta *cl_sta, struct hw_rxhdr *rxhdr)
+{
+       struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+       __le16 fc = mgmt->frame_control;
+
+       if (!ieee80211_is_mgmt(fc))
+               return;
+
+       if (cl_sta) {
+               if (ieee80211_is_action(fc))
+                       cl_rx_action_frame_handler(cl_hw, (struct cl_ieee80211_mgmt *)mgmt,
+                                                  skb->len, cl_sta);
+       } else {
+               s8 rssi[MAX_ANTENNAS] = RX_HDR_RSSI(rxhdr);
+
+               cl_vns_mgmt_handler(cl_hw, mgmt->sa, rssi);
+
+               if (ieee80211_is_assoc_req(fc) || ieee80211_is_assoc_resp(fc))
+                       cl_rssi_assoc_handle(cl_hw, mgmt->sa, rxhdr);
+       }
+}
+
+static void cl_rx_data_check(struct cl_hw *cl_hw, struct sk_buff *skb,
+                            struct cl_sta *cl_sta, u32 packet_len, struct hw_rxhdr *rxhdr)
+{
+       if (cl_sta) {
+               cl_traffic_rx_handler(cl_hw, cl_sta, packet_len);
+
+               if (!rxhdr->aggregation || rxhdr->mpdu_cnt == 0)
+                       cl_motion_sense_rssi_data(cl_hw, cl_sta, rxhdr);
+       }
+}
+
+static bool cl_rx_skb_done(struct cl_hw *cl_hw, struct sk_buff *skb,
+                          struct cl_sta *cl_sta, struct hw_rxhdr *rxhdr)
+{
+       struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       __le16 fc = hdr->frame_control;
+       struct cl_vif *cl_vif = NULL;
+
+       /* Update trigger-based statistics */
+       cl_fw_dbg_trigger_based_update(cl_hw, rxhdr, hdr);
+
+       if (cl_sta) {
+               cl_vif = cl_sta->cl_vif;
+               skb->dev = cl_vif->dev;
+
+               cl_stats_update_rx_rate(cl_hw, cl_sta, rxhdr);
+
+               if (!rxhdr->aggregation || rxhdr->mpdu_cnt == 0)
+                       cl_rssi_rx_handler(cl_hw, cl_sta, rxhdr, status->signal);
+       } else {
+               cl_vif = cl_vif_get_by_mac(cl_hw, hdr->addr3);
+               skb->dev = cl_vif ? cl_vif->dev : NULL;
+
+               if (cl_hw->chip->conf->ce_production_mode)
+                       cl_stats_update_rx_rate_production(cl_hw, rxhdr);
+       }
+
+       /* DATA */
+       if (ieee80211_is_data(fc)) {
+               cl_rx_data_check(cl_hw, skb, cl_sta, skb->len, rxhdr);
+               goto out;
+       }
+
+       /* MGMT/CTL */
+       if (cl_sta)
+               cl_motion_sense_rssi_mgmt_ctl(cl_hw, cl_sta, rxhdr);
+
+       /* MGMT */
+       cl_rx_mgmt_check(cl_hw, skb, cl_sta, rxhdr);
+
+out:
+       if (rx_skb_max &&
+           atomic_read(&rx_skb_cnt) >= rx_skb_max) {
+               cl_hw->rx_info.pkt_drop_host_limit++;
+               kfree_skb(skb);
+               return false;
+       }
+
+       return true;
+}
+
+static void cl_rx_pass_to_mac(struct cl_hw *cl_hw,
+                             struct ieee80211_sta *sta,
+                             struct sk_buff_head *frames)
+{
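+       /* ci_rx_remote_cpu_mac of -1 means deliver to mac80211 on this CPU */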
+       if (cl_hw->conf->ci_rx_remote_cpu_mac == -1) {
+               struct sk_buff *skb = NULL;
+
+               while ((skb = __skb_dequeue(frames)))
+                       ieee80211_rx_napi(cl_hw->hw, sta, skb, NULL);
+       } else {
+               struct sk_buff_head *rx_remote_queue_mac = &cl_hw->rx_remote_queue_mac;
+
+               spin_lock(&rx_remote_queue_mac->lock);
+               skb_queue_splice_tail_init(frames, rx_remote_queue_mac);
+               spin_unlock(&rx_remote_queue_mac->lock);
+
+               cl_rx_remote_cpu_mac(cl_hw);
+       }
+}
+
+static void cl_rx_amsdu_done_reorder(struct cl_hw *cl_hw, struct cl_sta *cl_sta,
+                                    struct sk_buff_head *frames)
+{
+       struct sk_buff *skb = NULL;
+       struct sk_buff_head reorder_buf;
+
+       /* Init the reorder buffer */
+       __skb_queue_head_init(&reorder_buf);
+
+       while ((skb = __skb_dequeue(frames)))
+               cl_rx_reorder_ampdu(cl_hw, cl_sta, skb, &reorder_buf);
+
+       if (!skb_queue_empty(&reorder_buf))
+               cl_rx_pass_to_mac(cl_hw, &cl_sta->stainfo->sta, &reorder_buf);
+}
+
+static void cl_rx_amsdu_done(struct cl_hw *cl_hw, struct cl_amsdu_rx_state *amsdu_rx_state)
+{
+       struct sk_buff_head *frames = &amsdu_rx_state->frames;
+       struct sk_buff *skb = __skb_peek(frames);
+       struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+       struct cl_sta *cl_sta;
+       struct cl_vif *cl_vif;
+       struct hw_rxhdr *rxhdr = amsdu_rx_state->rxhdr;
+       u32 packet_len = amsdu_rx_state->packet_len;
+       struct ieee80211_sta *sta;
+
+       /* START - cl_sta protected block */
+       cl_sta_lock(cl_hw);
+       cl_sta = cl_sta_get(cl_hw, amsdu_rx_state->sta_idx);
+
+       if (!cl_sta) {
+               cl_sta_unlock(cl_hw);
+               cl_hw->rx_info.pkt_drop_sta_null += amsdu_rx_state->msdu_cnt;
+               __skb_queue_purge(frames);
+               return;
+       }
+
+       sta = &cl_sta->stainfo->sta;
+       cl_vif = cl_sta->cl_vif;
+       skb->dev = cl_vif->dev;
+
+       cl_rx_data_check(cl_hw, skb, cl_sta, packet_len, rxhdr);
+       cl_stats_update_rx_rate(cl_hw, cl_sta, rxhdr);
+
+       if (!rxhdr->aggregation || rxhdr->mpdu_cnt == 0)
+               cl_rssi_rx_handler(cl_hw, cl_sta, rxhdr, status->signal);
+
+       cl_sta_unlock(cl_hw);
+       /* END - cl_sta protected block */
+
+       if (rx_skb_max &&
+           (atomic_read(&rx_skb_cnt) + amsdu_rx_state->msdu_cnt) >= rx_skb_max) {
+               cl_hw->rx_info.pkt_drop_host_limit += amsdu_rx_state->msdu_cnt;
+               __skb_queue_purge(frames);
+               return;
+       }
+
+       if (cl_sta->tid_agg_rx[amsdu_rx_state->tid])
+               cl_rx_amsdu_done_reorder(cl_hw, cl_sta, frames);
+       else
+               cl_rx_pass_to_mac(cl_hw, sta, frames);
+}
+
+static void cl_rx_invalid_tailroom(struct cl_hw *cl_hw, struct sk_buff *skb, u32 len)
+{
+       cl_dbg_err(cl_hw, "Invalid RX header length - tailroom=%d, len=%u\n",
+                  skb_tailroom(skb), len);
+       cl_hw->rx_info.pkt_drop_rxhdr_len_error++;
+       kfree_skb(skb);
+}
+
+static void cl_rx_invalid_pattern(struct cl_hw *cl_hw, struct sk_buff *skb, u32 pattern)
+{
+       cl_dbg_err(cl_hw, "WRONG PATTERN - 0x%x\n", pattern);
+       cl_hw->rx_info.pkt_drop_wrong_pattern++;
+       kfree_skb(skb);
+}
+
+static int cl_rx_get_sta_idx(struct cl_hw *cl_hw, struct hw_rxhdr *rxhdr)
+{
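+       /* Station keys are located after the default keys in the key SRAM */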
+       int sta_idx = rxhdr->key_sram_index - MM_SEC_DEFAULT_KEY_COUNT;
+
+       if (sta_idx >= 0 && sta_idx < CL_MAX_NUM_STA)
+               return sta_idx;
+
+       cl_dbg_err(cl_hw, "invalid sta_idx %d, key_sram_index=%d\n",
+                  sta_idx, rxhdr->key_sram_index);
+
+       return -1;
+}
+
+static void cl_rx_handle_first_amsdu(struct cl_hw *cl_hw, struct sk_buff *skb,
+                                    struct cl_amsdu_rx_state *amsdu_rx_state,
+                                    struct hw_rxhdr *rxhdr, u8 sta_idx, u8 tid, u8 encrypt_len)
+{
+       /*
+        * First MSDU of the received frame:
+        * ------------------------------------------
+        * || WLAN_HDR || MSDU HDR || MSDU PAYLOAD ||
+        * ------------------------------------------
+        */
+       cl_rx_amsdu_stats(cl_hw, rxhdr->msdu_cnt);
+
+       if (rxhdr->corrupted_amsdu) {
+               cl_rx_amsdu_first_corrupted(cl_hw, skb, rxhdr);
+       } else {
+               cl_rx_amsdu_first(cl_hw, skb, rxhdr, sta_idx, tid, encrypt_len);
+
+               /* If more MSDUs remain, defer the indication to the
+                * upper layer until the A-MSDU is complete
+                */
+               if (amsdu_rx_state->msdu_remaining_cnt == 0)
+                       cl_rx_amsdu_done(cl_hw, amsdu_rx_state);
+       }
+}
+
+static void cl_rx_handle_sub_amsdu(struct cl_hw *cl_hw, struct sk_buff *skb,
+                                  struct cl_amsdu_rx_state *amsdu_rx_state)
+{
+       /* Update the remaining MSDU counter */
+       amsdu_rx_state->msdu_remaining_cnt--;
+
+       /* Free MSDU with error */
+       if (amsdu_rx_state->amsdu_error) {
+               cl_rx_amsdu_sub_error(cl_hw, skb);
+               return;
+       }
+
+       /* Add the sub-MSDU to the existing ones */
+       if (!cl_rx_amsdu_sub(cl_hw, skb))
+               return;
+
+       /* This is the last MSDU, A-MSDU is complete, push to upper layer */
+       if (amsdu_rx_state->msdu_remaining_cnt == 0)
+               cl_rx_amsdu_done(cl_hw, amsdu_rx_state);
+}
+
+static void cl_rx_handle_ps(struct cl_hw *cl_hw, struct cl_sta *cl_sta, struct sk_buff *skb)
+{
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
+       struct ieee80211_sta *sta = &cl_sta->stainfo->sta;
+       bool is_ps;
+       __le16 fc = hdr->frame_control;
+
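+       /*
+        * Only data and management frames without more-fragments carry a
+        * usable PM bit; PS-Poll frames are excluded here.
+        */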
+       if (ieee80211_is_pspoll(fc) ||
+           ieee80211_has_morefrags(fc) ||
+           !(ieee80211_is_mgmt(fc) ||
+             ieee80211_is_data(fc)))
+               return;
+
+       is_ps = ieee80211_has_pm(hdr->frame_control);
+
+       cl_sta_ps_notify(cl_hw, cl_sta, is_ps);
+       ieee80211_sta_ps_transition(sta, is_ps);
+}
+
+static void cl_rx_handle_skb(struct cl_hw *cl_hw, struct sk_buff *skb)
+{
+       u8 encrypt_len = 0;
+       u8 tid = 0;
+       u32 mpdu_offset = 0;
+       u32 len = 0;
+       int sta_idx = -1;
+       bool is_amsdu = false;
+       bool skb_done = false;
+       struct cl_sta *cl_sta = NULL;
+       struct ieee80211_sta *sta = NULL;
+       struct hw_rxhdr *rxhdr = NULL;
+       struct cl_tid_ampdu_rx *tid_agg_rx = NULL;
+       struct cl_amsdu_rx_state *amsdu_rx_state = &cl_hw->amsdu_rx_state;
+       s8 remote_cpu_mac = cl_hw->conf->ci_rx_remote_cpu_mac;
+
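+       /* Continuation of an A-MSDU whose first sub-frame was already handled */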
+       if (amsdu_rx_state->msdu_remaining_cnt > 0) {
+               cl_rx_handle_sub_amsdu(cl_hw, skb, amsdu_rx_state);
+               return;
+       }
+
+       rxhdr = (struct hw_rxhdr *)skb->data;
+       mpdu_offset = sizeof(struct hw_rxhdr);
+
+       if (rxhdr->rx_padding_done)
+               mpdu_offset += CL_PADDING_IN_BYTES;
+
+       /* Pull the HW RX header */
+       skb_reserve(skb, mpdu_offset);
+
+       /*
+        * Sanity check - the embedded layer is responsible for validating the
+        * pattern. If the pattern is invalid, it is likely that the embedded
+        * layer did something wrong.
+        */
+       if (rxhdr->pattern != IPC_RX_DMA_OVER_PATTERN) {
+               cl_rx_invalid_pattern(cl_hw, skb, rxhdr->pattern);
+               return;
+       }
+
+       if (cl_rx_check_err(cl_hw, skb, rxhdr))
+               return;
+
+       /* Convert gi from firmware format to driver format */
+       rxhdr->gi_type = convert_gi_format_fw_to_wrs(rxhdr->format_mod, rxhdr->gi_type);
+
+       /*
+        * For TCV1, fill in the rxhdr RSSI "holes" so that values start from
+        * rssi1. The implementation below accounts for elastic MIMO and the
+        * maximum number of antennas for TCV1.
+        */
+       if (cl_hw_is_tcv1(cl_hw)) {
+               if (cl_chip_is_6ant(cl_hw->chip)) {
+                       rxhdr->rssi1 = rxhdr->rssi2;
+                       rxhdr->rssi2 = rxhdr->rssi3;
+                       rxhdr->rssi3 = rxhdr->rssi4;
+                       rxhdr->rssi4 = rxhdr->rssi5;
+                       rxhdr->rssi5 = rxhdr->rssi6;
+               } else if (cl_chip_is_4ant(cl_hw->chip)) {
+                       rxhdr->rssi1 = rxhdr->rssi3;
+                       rxhdr->rssi2 = rxhdr->rssi4;
+                       rxhdr->rssi3 = rxhdr->rssi5;
+                       rxhdr->rssi4 = rxhdr->rssi6;
+               }
+       }
+
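+       /* A valid key SRAM index identifies the transmitting station */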
+       if (rxhdr->key_sram_v)
+               sta_idx = cl_rx_get_sta_idx(cl_hw, rxhdr);
+
+       cl_sta_lock(cl_hw);
+
+       if (sta_idx != -1) {
+               cl_sta = cl_sta_get(cl_hw, sta_idx);
+
+               if (cl_sta) {
+                       sta = &cl_sta->stainfo->sta;
+
+                       if (cl_hw->conf->ci_fast_rx_en) {
+                               tid = ieee80211_get_tid((struct ieee80211_hdr *)skb->data);
+                               tid_agg_rx = cl_sta->tid_agg_rx[tid];
+                               cl_rx_handle_ps(cl_hw, cl_sta, skb);
+                       }
+
+                       /* Store the pointer to sta in the skb->sk field */
+                       if (remote_cpu_mac != -1)
+                               skb->sk = (struct sock *)sta;
+               }
+       }
+
+       if (unlikely(cl_rx_fill_status(cl_hw, cl_sta, skb, rxhdr, &encrypt_len))) {
+               cl_sta_unlock(cl_hw);
+               kfree_skb(skb);
+               return;
+       }
+
+       /*
+        * RXM sets rxhdr->msdu_cnt to 1 even for non-A-MSDU frames, so the
+        * correct check is the amsdu_present bit.
+        */
+       is_amsdu = rxhdr->amsdu_present;
+
+       /* Is A-MSDU frame? */
+       if (is_amsdu) {
+               cl_rx_handle_first_amsdu(cl_hw, skb, amsdu_rx_state, rxhdr, sta_idx,
+                                        tid, encrypt_len);
+               cl_sta_unlock(cl_hw);
+               return;
+       }
+
+       len = le32_to_cpu(rxhdr->len);
+
+       if (skb_tailroom(skb) >= len) {
+               /* Push the WLAN HDR + MPDU payload to the skb data */
+               skb_put(skb, len);
+               cl_hw->rx_info.non_amsdu++;
+       } else {
+               cl_sta_unlock(cl_hw);
+               cl_rx_invalid_tailroom(cl_hw, skb, len);
+               return;
+       }
+
+       skb_done = cl_rx_skb_done(cl_hw, skb, cl_sta, rxhdr);
+
+       cl_sta_unlock(cl_hw);
+
+       if (!skb_done)
+               return;
+
+       if (tid_agg_rx) {
+               struct sk_buff_head reorder_buf;
+
+               /* Init the reorder buffer */
+               __skb_queue_head_init(&reorder_buf);
+               cl_rx_reorder_ampdu(cl_hw, cl_sta, skb, &reorder_buf);
+
+               if (!skb_queue_empty(&reorder_buf))
+                       cl_rx_pass_to_mac(cl_hw, sta, &reorder_buf);
+       } else {
+               if (remote_cpu_mac == -1) {
+                       ieee80211_rx_napi(cl_hw->hw, sta, skb, NULL);
+               } else {
+                       skb_queue_tail(&cl_hw->rx_remote_queue_mac, skb);
+                       cl_rx_remote_cpu_mac(cl_hw);
+               }
+       }
+}
+
+static void cl_rx_tasklet(unsigned long data)
+{
+       struct cl_hw *cl_hw = (struct cl_hw *)data;
+       struct sk_buff *skb = NULL;
+       u16 pkt_cnt = 0;
+
+       if (cl_recovery_in_progress(cl_hw))
+               return;
+
+#ifdef TRACE_SUPPORT
+       trace_cl_trace_rx_tasklet_start(cl_hw->idx);
+#endif
+
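+       /* Bound the work per run - reschedule once the packet budget is exceeded */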
+       while ((skb = skb_dequeue(&cl_hw->rx_skb_queue))) {
+               cl_rx_handle_skb(cl_hw, skb);
+
+               if (++pkt_cnt > cl_hw->conf->ce_rx_pkts_budget) {
+                       if (cl_hw->chip->conf->ci_rx_resched_tasklet)
+                               tasklet_schedule(&cl_hw->rx_resched_tasklet);
+                       else
+                               tasklet_schedule(&cl_hw->rx_tasklet);
+
+                       cl_hw->rx_info.exceed_pkt_budget++;
+                       return;
+               }
+       }
+
+#ifdef TRACE_SUPPORT
+       trace_cl_trace_rx_tasklet_end(cl_hw->idx, pkt_cnt);
+#endif
+}
+
+static void cl_rx_resched_tasklet(unsigned long data)
+{
+       struct cl_hw *cl_hw = (struct cl_hw *)data;
+
+       tasklet_schedule(&cl_hw->rx_tasklet);
+}
+
+static void cl_rx_remote_tasklet_mac(unsigned long data)
+{
+       struct cl_hw *cl_hw = (struct cl_hw *)data;
+       struct sk_buff *skb = NULL;
+       struct ieee80211_sta *sta;
+
+       if (cl_recovery_in_progress(cl_hw))
+               return;
+
+       cl_rx_remote_cpu_info(cl_hw);
+
+       while ((skb = skb_dequeue(&cl_hw->rx_remote_queue_mac))) {
+               /*
+                * Get the sta pointer from skb->sk (stored there in
+                * cl_rx_handle_skb) and reset skb->sk.
+                */
+               sta = (struct ieee80211_sta *)skb->sk;
+               skb->sk = NULL;
+
+               ieee80211_rx_napi(cl_hw->hw, sta, skb, NULL);
+       }
+}
+
+void cl_rx_init(struct cl_hw *cl_hw)
+{
+       s8 cpu_mac = cl_hw->conf->ci_rx_remote_cpu_mac;
+
+       /* Set rx_skb_max to be the maximum of ci_rx_skb_max configured for each chip */
+       rx_skb_max = max(cl_hw->chip->conf->ci_rx_skb_max, rx_skb_max);
+
+       skb_queue_head_init(&cl_hw->rx_remote_queue_mac);
+       skb_queue_head_init(&cl_hw->rx_skb_queue);
+       __skb_queue_head_init(&cl_hw->amsdu_rx_state.frames);
+
+       tasklet_init(&cl_hw->rx_tasklet, cl_rx_tasklet, (unsigned long)cl_hw);
+       tasklet_init(&cl_hw->rx_resched_tasklet, cl_rx_resched_tasklet, (unsigned long)cl_hw);
+
+       if (cpu_mac >= 0)
+               tasklet_init(&per_cpu(rx_remote_tasklet_mac[cl_hw->idx], cpu_mac),
+                            cl_rx_remote_tasklet_mac,
+                            (unsigned long)cl_hw);
+#ifdef CONFIG_CL_PCIE
+       cl_rx_pci_init(cl_hw);
+#endif
+}
+
+void cl_rx_off(struct cl_hw *cl_hw)
+{
+       s8 cpu_mac = cl_hw->conf->ci_rx_remote_cpu_mac;
+
+       if (cpu_mac >= 0)
+               tasklet_kill(&per_cpu(rx_remote_tasklet_mac[cl_hw->idx], cpu_mac));
+
+       tasklet_kill(&cl_hw->rx_tasklet);
+       tasklet_kill(&cl_hw->rx_resched_tasklet);
+
+       skb_queue_purge(&cl_hw->rx_remote_queue_mac);
+       skb_queue_purge(&cl_hw->rx_skb_queue);
+
+       cl_rx_amsdu_reset(cl_hw);
+#ifdef CONFIG_CL_PCIE
+       cl_rx_pci_deinit(cl_hw);
+#endif
+}
+
+void cl_rx_remote_tasklet_sched(void *t)
+{
+       tasklet_schedule((struct tasklet_struct *)t);
+}
+
+void cl_rx_remote_cpu_info(struct cl_hw *cl_hw)
+{
+       u32 processor_id = smp_processor_id();
+
+       if (processor_id < CPU_MAX_NUM)
+               cl_hw->rx_info.remote_cpu[processor_id]++;
+}
+
+void cl_rx_push_queue(struct cl_hw *cl_hw, struct sk_buff *skb)
+{
+       skb_queue_tail(&cl_hw->rx_skb_queue, skb);
+       tasklet_schedule(&cl_hw->rx_tasklet);
+}
+
+void cl_rx_skb_alloc_handler(struct sk_buff *skb)
+{
+       skb->destructor = cl_rx_skb_destructor;
+       atomic_inc(&rx_skb_cnt);
+}
+
+void cl_rx_post_recovery(struct cl_hw *cl_hw)
+{
+       if (!skb_queue_empty(&cl_hw->rx_skb_queue))
+               tasklet_schedule(&cl_hw->rx_tasklet);
+
+       if (!skb_queue_empty(&cl_hw->rx_remote_queue_mac))
+               tasklet_schedule(&per_cpu(rx_remote_tasklet_mac[cl_hw->idx],
+                                         cl_hw->conf->ci_rx_remote_cpu_mac));
+}
+
+void cl_rx_info_reset(struct cl_hw *cl_hw)
+{
+       pr_debug("Reset uplink stats\n");
+       memset(&cl_hw->rx_info, 0, sizeof(struct cl_rx_path_info));
+}
+
+int cl_rx_info_print(struct cl_hw *cl_hw)
+{
+       struct cl_rx_path_info *rx_info = &cl_hw->rx_info;
+       struct ieee80211_local *local = hw_to_local(cl_hw->hw);
+       int i;
+       bool uplink_amsdu = false;
+       char *buf = NULL;
+       ssize_t buf_size;
+       int err = 0;
+       int len = 0;
+
+       for (i = 0; i < RX_MAX_MSDU_IN_AMSDU; i++)
+               if (rx_info->amsdu_cnt[i] > 0) {
+                       uplink_amsdu = true;
+                       break;
+               }
+
+       cl_snprintf(&buf, &len, &buf_size,
+                   "Uplink counters\n"
+                   "-----------------------------------\n");
+       cl_snprintf(&buf, &len, &buf_size,
+                   "rx_desc[RXM]                 = %u\n",
+                   rx_info->rx_desc[CL_RX_BUF_RXM]);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "rx_desc[FW]                  = %u\n",
+                   rx_info->rx_desc[CL_RX_BUF_FW]);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "elem_alloc_fail              = %u\n",
+                   rx_info->elem_alloc_fail);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "skb_null                     = %u\n",
+                   rx_info->skb_null);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "pkt_drop_amsdu_corrupted     = %u\n",
+                   rx_info->pkt_drop_amsdu_corrupted);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "pkt_drop_sub_amsdu_corrupted = %u\n",
+                   rx_info->pkt_drop_sub_amsdu_corrupted);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "pkt_drop_amsdu_len_error     = %u\n",
+                   rx_info->pkt_drop_amsdu_len_error);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "pkt_drop_sub_amsdu_len_error = %u\n",
+                   rx_info->pkt_drop_sub_amsdu_len_error);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "pkt_drop_wrong_pattern       = %u\n",
+                   rx_info->pkt_drop_wrong_pattern);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "pkt_drop_not_success         = %u\n",
+                   rx_info->pkt_drop_not_success);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "pkt_drop_unencrypted         = %u\n",
+                   rx_info->pkt_drop_unencrypted);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "pkt_drop_decrypt_fail        = %u\n",
+                   rx_info->pkt_drop_decrypt_fail);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "pkt_drop_rxhdr_len_error     = %u\n",
+                   rx_info->pkt_drop_rxhdr_len_error);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "pkt_drop_sta_null            = %u\n",
+                   rx_info->pkt_drop_sta_null);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "pkt_drop_host_limit          = %u\n",
+                   rx_info->pkt_drop_host_limit);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "netif_rx                     = %u\n",
+                   rx_info->netif_rx);
+
+       cl_snprintf(&buf, &len, &buf_size,
+                   "\nQueue length\n"
+                   "-----------------------------------\n");
+       cl_snprintf(&buf, &len, &buf_size,
+                   "rx_skb_queue                 = %u\n",
+                   skb_queue_len(&cl_hw->rx_skb_queue));
+       cl_snprintf(&buf, &len, &buf_size,
+                   "rx_remote_queue_mac          = %u\n",
+                   skb_queue_len(&cl_hw->rx_remote_queue_mac));
+       cl_snprintf(&buf, &len, &buf_size,
+                   "local_skb_queue              = %u\n",
+                   skb_queue_len(&local->skb_queue));
+
+       cl_snprintf(&buf, &len, &buf_size,
+                   "\nSKB count\n"
+                   "-----------------------------------\n");
+       cl_snprintf(&buf, &len, &buf_size,
+                   "rx_skb_max                   = %u\n",
+                   rx_skb_max);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "rx_skb_cnt                   = %u\n",
+                   atomic_read(&rx_skb_cnt));
+
+       cl_snprintf(&buf, &len, &buf_size,
+                   "\nBuffer processing\n"
+                   "-----------------------------------\n");
+       cl_snprintf(&buf, &len, &buf_size,
+                   "IRQ                          = %u\n",
+                   rx_info->buffer_process_irq);
+       cl_snprintf(&buf, &len, &buf_size,
+                   "Tasklet                      = %u\n",
+                   rx_info->buffer_process_tasklet);
+
+       cl_snprintf(&buf, &len, &buf_size,
+                   "\nUplink Non AMSDU\n"
+                   "-----------------------------------\n"
+                   "NON AMSDU = %u\n", rx_info->non_amsdu);
+
+       if (uplink_amsdu) {
+               cl_snprintf(&buf, &len, &buf_size,
+                           "\nUplink AMSDU\n"
+                           "-----------------------------------\n");
+
+               for (i = 0; i < RX_MAX_MSDU_IN_AMSDU; i++)
+                       if (rx_info->amsdu_cnt[i] > 0)
+                               cl_snprintf(&buf, &len, &buf_size,
+                                           "AMSDU[%d] = %u\n", i + 1, rx_info->amsdu_cnt[i]);
+       }
+
+       if (cl_hw->conf->ci_rx_remote_cpu_drv != -1 ||
+           cl_hw->conf->ci_rx_remote_cpu_mac != -1) {
+               cl_snprintf(&buf, &len, &buf_size,
+                           "\nRemote CPU\n"
+                           "-----------------------------------\n");
+
+               for (i = 0; i < CPU_MAX_NUM; i++) {
+                       if (rx_info->remote_cpu[i] == 0)
+                               continue;
+
+                       cl_snprintf(&buf, &len, &buf_size, "cpu #%u: %u\n",
+                                   i, rx_info->remote_cpu[i]);
+               }
+       }
+
+       cl_snprintf(&buf, &len, &buf_size,
+                   "\nUplink schedule\n"
+                   "-----------------------------------\n"
+                   "exceed_pkt_budget = %u\n",
+                   rx_info->exceed_pkt_budget);
+
+       cl_snprintf(&buf, &len, &buf_size,
+                   "\nUplink buckets RXM\n"
+                   "-----------------------------------\n");
+
+       for (i = 0; i < IPC_RXBUF_NUM_BUCKETS_RXM; i++) {
+               if (rx_info->pkt_handle_bucket_rxm[i] == 0)
+                       continue;
+
+               cl_snprintf(&buf, &len, &buf_size,
+                           "Bucket [%lu -> %lu]: %u\n",
+                           i * IPC_RXBUF_BUCKET_SIZE,
+                           (i + 1) * IPC_RXBUF_BUCKET_SIZE - 1,
+                           rx_info->pkt_handle_bucket_rxm[i]);
+       }
+
+       cl_snprintf(&buf, &len, &buf_size,
+                   "\nUplink buckets FW\n"
+                   "-----------------------------------\n");
+
+       for (i = 0; i < IPC_RXBUF_NUM_BUCKETS_FW; i++) {
+               if (rx_info->pkt_handle_bucket_fw[i] == 0)
+                       continue;
+
+               cl_snprintf(&buf, &len, &buf_size,
+                           "Bucket [%lu -> %lu]: %u\n",
+                           i * IPC_RXBUF_BUCKET_SIZE,
+                           (i + 1) * IPC_RXBUF_BUCKET_SIZE - 1,
+                           rx_info->pkt_handle_bucket_fw[i]);
+       }
+
+       err = cl_vendor_reply(cl_hw, buf, len);
+       kfree(buf);
+
+       return err;
+}
+
+void cl_rx_netif(struct cl_hw *cl_hw, struct cl_vif *cl_vif, struct sk_buff *skb)
+{
+#ifdef TRACE_SUPPORT
+       trace_cl_trace_netif_rx_start(cl_hw->idx);
+#endif
+
+       cl_hw->rx_info.netif_rx++;
+
+       netif_receive_skb(skb);
+
+#ifdef TRACE_SUPPORT
+       trace_cl_trace_netif_rx_end(cl_hw->idx);
+#endif
+}
+
+void cl_rx_finish(struct cl_hw *cl_hw, struct sk_buff *skb)
+{
+       struct cl_vif *cl_vif = NETDEV_TO_CL_VIF(skb->dev);
+
+       cl_rx_netif(cl_hw, cl_vif, skb);
+}
+
+u8 cl_rx_get_skb_ac(struct ieee80211_hdr *hdr)
+{
+       if (ieee80211_is_data_qos(hdr->frame_control)) {
+               u8 *qos_ctl = ieee80211_get_qos_ctl(hdr);
+               u8 tid = *qos_ctl & IEEE80211_QOS_CTL_TAG1D_MASK;
+               return tid_to_ac[tid];
+       }
+
+       return AC_BE;
+}
+
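+/*
+ * Decide whether to process RX buffers directly in the IRQ handler: do so
+ * when fewer than half of the RXM descriptors are free, otherwise defer
+ * the work to the RX tasklet.
+ */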
+bool cl_rx_process_in_irq(struct cl_hw *cl_hw)
+{
+       struct cl_ipc_ring_indices *indices = cl_hw->ipc_env->ring_indices_elem->indices;
+       u32 read_idx = le32_to_cpu(indices->rxdesc_read_idx[CL_RX_BUF_RXM]);
+       u32 write_idx = le32_to_cpu(indices->rxdesc_write_idx[CL_RX_BUF_RXM]);
+       u32 free_buffers = read_idx - write_idx;
+
+       if (free_buffers < (IPC_RXBUF_CNT_RXM / 2)) {
+               cl_hw->rx_info.buffer_process_irq++;
+               return true;
+       }
+
+       cl_hw->rx_info.buffer_process_tasklet++;
+       return false;
+}
+
--
2.30.0
