[RFC v1 092/256] cl8k: add fw/msg_cfm.c

From: Viktor Barna <viktor.barna@xxxxxxxxxx>

(Part of the split. Please take a look at the cover letter for more
details.)

Signed-off-by: Viktor Barna <viktor.barna@xxxxxxxxxx>
---
 drivers/net/wireless/celeno/cl8k/fw/msg_cfm.c | 316 ++++++++++++++++++
 1 file changed, 316 insertions(+)
 create mode 100644 drivers/net/wireless/celeno/cl8k/fw/msg_cfm.c

diff --git a/drivers/net/wireless/celeno/cl8k/fw/msg_cfm.c b/drivers/net/wireless/celeno/cl8k/fw/msg_cfm.c
new file mode 100644
index 000000000000..a63751d0804e
--- /dev/null
+++ b/drivers/net/wireless/celeno/cl8k/fw/msg_cfm.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: MIT
+/* Copyright(c) 2019-2021, Celeno Communications Ltd. */
+
+#include "fw/msg_cfm.h"
+#include "fw/msg_rx.h"
+#include "recovery.h"
+#include "reg/reg_ipc.h"
+#include "chip.h"
+#include "hw_assert.h"
+#include "config.h"
+#include "coredump.h"
+
+static void cl_check_exception(struct cl_hw *cl_hw)
+{
+       /* Check if Tensilica exception occurred */
+       int i;
+       struct cl_ipc_exception_struct *data =
+               (struct cl_ipc_exception_struct *)cl_hw->ipc_env->shared;
+
+       if (data->pattern != IPC_EXCEPTION_PATTERN)
+               return;
+
+       cl_dbg_err(cl_hw, "######################### firmware tensilica exception:\n");
+       cl_dbg_err(cl_hw, "................................. type: ");
+
+       switch (data->type) {
+       case 0:
+               cl_dbg_err(cl_hw, "EXCEPTION_ILLEGALINSTRUCTION\n");
+               break;
+       case 2:
+               cl_dbg_err(cl_hw, "EXCEPTION_INSTRUCTIONFETCHERROR\n");
+               break;
+       case 3:
+               cl_dbg_err(cl_hw, "EXCEPTION_LOADSTOREERROR\n");
+               break;
+       case 6:
+               cl_dbg_err(cl_hw, "EXCEPTION_INTEGERDIVIDEBYZERO\n");
+               break;
+       case 7:
+               cl_dbg_err(cl_hw, "EXCEPTION_SPECULATION\n");
+               break;
+       case 8:
+               cl_dbg_err(cl_hw, "EXCEPTION_PRIVILEGED\n");
+               break;
+       case 9:
+               cl_dbg_err(cl_hw, "EXCEPTION_UNALIGNED\n");
+               break;
+       case 16:
+               cl_dbg_err(cl_hw, "EXCEPTION_INSTTLBMISS\n");
+               break;
+       case 17:
+               cl_dbg_err(cl_hw, "EXCEPTION_INSTTLBMULTIHIT\n");
+               break;
+       case 18:
+               cl_dbg_err(cl_hw, "EXCEPTION_INSTFETCHPRIVILEGE\n");
+               break;
+       case 20:
+               cl_dbg_err(cl_hw, "EXCEPTION_INSTFETCHPROHIBITED\n");
+               break;
+       case 24:
+               cl_dbg_err(cl_hw, "EXCEPTION_LOADSTORETLBMISS\n");
+               break;
+       case 25:
+               cl_dbg_err(cl_hw, "EXCEPTION_LOADSTORETLBMULTIHIT\n");
+               break;
+       case 26:
+               cl_dbg_err(cl_hw, "EXCEPTION_LOADSTOREPRIVILEGE\n");
+               break;
+       case 28:
+               cl_dbg_err(cl_hw, "EXCEPTION_LOADPROHIBITED\n");
+               break;
+       default:
+               cl_dbg_err(cl_hw, "unknown\n");
+               break;
+       }
+
+       cl_dbg_err(cl_hw, "................................. EPC: %08X\n", data->epc);
+       cl_dbg_err(cl_hw, "................................. EXCSAVE: %08X\n", data->excsave);
+       cl_dbg_err(cl_hw, "..........................BACKTRACE-PC.........................\n");
+
+       for (i = 0; i < IPC_BACKTRACT_DEPTH; i++)
+               cl_dbg_err(cl_hw, "PC#%d: 0x%08X\n", i, data->backtrace.pc[i]);
+}
+
+static u16 cl_msg_cfm_clear_bit(u16 cfm)
+{
+       if (cfm < MM_REQ_CFM_MAX)
+               return ((cfm - 1) >> 1);
+
+       return ((cfm - 1 - FIRST_MSG(TASK_DBG) + MM_REQ_CFM_MAX) >> 1);
+}
+
+u16 cl_msg_cfm_set_bit(u16 req)
+{
+       if (req < MM_REQ_CFM_MAX)
+               return (req >> 1);
+
+       return ((req - FIRST_MSG(TASK_DBG) + MM_REQ_CFM_MAX) >> 1);
+}
+
+int cl_msg_cfm_wait(struct cl_hw *cl_hw, u16 bit, u16 req_id)
+{
+       /*
+        * Wait on the main wait queue until the confirmation arrives
+        * or the timeout expires, and then check the result.
+        */
+       struct cl_chip *chip = cl_hw->chip;
+       int timeout = 0, error = 0;
+       int max_timeout = 0;
+
+       if (!cl_hw->msg_calib_timeout)
+               max_timeout = CL_MSG_CFM_TIMEOUT_JIFFIES;
+       else
+               max_timeout = CL_MSG_CFM_TIMEOUT_CALIB_JIFFIES;
+
+       /* Wait for confirmation message */
+       timeout = wait_event_timeout(cl_hw->wait_queue,
+                                    !CFM_TEST_BIT(bit, &cl_hw->cfm_flags),
+                                    max_timeout);
+
+       if (timeout == 0) {
+               /*
+                * Timeout occurred!
+                * Make sure that confirmation wasn't received after the timeout.
+                */
+               if (CFM_TEST_BIT(bit, &cl_hw->cfm_flags)) {
+                       cl_dbg_verbose(cl_hw, "[WARN] Timeout occurred - %s\n",
+                                      MSG_ID_STR(req_id));
+                       error = -ETIMEDOUT;
+               }
+       }
+
+       if (error) {
+               struct cl_irq_stats *irq_stats = &chip->irq_stats;
+               unsigned long now = jiffies, flags;
+               u32 status, raw_status;
+
+               /*
+                * The interrupt was not handled in time, so let's try to handle it safely.
+                * The spin lock protects us against the following races:
+                * 1) a non-atomic read of the IPC status register,
+                * 2) running the msg handler twice from different contexts,
+                * 3) a context switch on the same core.
+                */
+               spin_lock_irqsave(&chip->isr_lock, flags);
+
+               status = ipc_xmac_2_host_status_get(chip);
+               raw_status = ipc_xmac_2_host_raw_status_get(chip);
+
+               cl_dbg_verbose(cl_hw,
+                              "[INFO] status=0x%x, raw_status=0x%x, last_isr_statuses=0x%x, "
+                              "last_rx=%ums, last_tx=%ums, last_isr=%ums\n",
+                              status,
+                              raw_status,
+                              irq_stats->last_isr_statuses,
+                              jiffies_to_msecs(now - irq_stats->last_rx),
+                              jiffies_to_msecs(now - irq_stats->last_tx),
+                              jiffies_to_msecs(now - irq_stats->last_isr));
+
+               if (status & cl_hw->ipc_e2a_irq.msg) {
+                       /*
+                        * WORKAROUND #1: In some cases the kernel loses sync with the
+                        * interrupt handler, and the reason is still unknown.
+                        * It seems that disabling the master interrupt for a couple of cycles
+                        * and then re-enabling it restores sync with the cl interrupt handler.
+                        */
+                       ipc_host_global_int_en_set(chip, 0);
+
+                       /* Acknowledge the MSG interrupt */
+                       ipc_xmac_2_host_ack_set(cl_hw->chip, cl_hw->ipc_e2a_irq.msg);
+
+                       /*
+                        * Unlock before calling cl_msg_rx_tasklet() because
+                        * spin_lock_irqsave() disables interrupts, but in
+                        * cl_msg_rx_tasklet() there might be several places that
+                        * use spin_unlock_bh(), which enables soft-irqs.
+                        */
+                       spin_unlock_irqrestore(&chip->isr_lock, flags);
+
+                       /*
+                        * Call the tasklet handler (it also gives the CPU that
+                        * is mapped to the cl_interrupt a few cycles to recover)
+                        */
+                       cl_msg_rx_tasklet((unsigned long)cl_hw);
+
+                       /* Re-enable master interrupts */
+                       ipc_host_global_int_en_set(chip, 1);
+               } else {
+                       /*
+                        * WORKAROUND #2: Try to call the handler unconditionally.
+                        * Maybe we cleared the "cl_hw->ipc_e2a_irq.msg" without handling it.
+                        */
+
+                       /*
+                        * Unlock before calling cl_msg_rx_tasklet() because
+                        * spin_lock_irqsave() disables interrupts, but in
+                        * cl_msg_rx_tasklet() there might be several places
+                        * that use spin_unlock_bh(), which enables soft-irqs.
+                        */
+                       spin_unlock_irqrestore(&chip->isr_lock, flags);
+
+                       /* Call the tasklet handler */
+                       cl_msg_rx_tasklet((unsigned long)cl_hw);
+               }
+
+               /* Did the workarounds work? */
+               if (CFM_TEST_BIT(bit, &cl_hw->cfm_flags)) {
+                       cl_dbg_verbose(cl_hw, "[ERR] Failed to recover from timeout\n");
+               } else {
+                       cl_dbg_verbose(cl_hw, "[INFO] Managed to recover from timeout\n");
+                       error = 0;
+                       goto exit;
+               }
+
+               /* Failed handling the message */
+               CFM_CLEAR_BIT(bit, &cl_hw->cfm_flags);
+
+               cl_check_exception(cl_hw);
+
+               cl_hw_assert_check(cl_hw);
+
+               if (!strcmp(chip->conf->ce_ela_mode, "XTDEBUG") ||
+                   !strcmp(chip->conf->ce_ela_mode, "XTDEBUG_STD")) {
+                       /*
+                        * TODO: Special debug hack: collect debug info & skip restart
+                        * "wait4cfm" string is expected by debug functionality
+                        */
+                       goto exit;
+               }
+
+               if (!test_bit(CL_DEV_HW_RESTART, &cl_hw->drv_flags) &&
+                   !test_bit(CL_DEV_SW_RESTART, &cl_hw->drv_flags) &&
+                   test_bit(CL_DEV_STARTED, &cl_hw->drv_flags) &&
+                   !cl_hw->is_stop_context) {
+                       /* Unlock msg mutex before restarting */
+                       mutex_unlock(&cl_hw->msg_tx_mutex);
+
+                       if (cl_coredump_is_scheduled(cl_hw))
+                               set_bit(CL_DEV_FW_ERROR, &cl_hw->drv_flags);
+                       else
+                               cl_recovery_start(cl_hw, RECOVERY_WAIT4CFM);
+
+                       return error;
+               }
+       }
+
+exit:
+       /* Unlock msg mutex */
+       mutex_unlock(&cl_hw->msg_tx_mutex);
+
+       return error;
+}
+
+static void cl_msg_cfm_assign_params(struct cl_hw *cl_hw, struct cl_ipc_e2a_msg *msg)
+{
+       u32 *param;
+       u16 msg_id = le16_to_cpu(msg->id);
+       u16 msg_len = le16_to_cpu(msg->param_len);
+
+       /* A message sent in the background is not allowed to assign confirmation parameters */
+       if (cl_hw->msg_background) {
+               cl_dbg_verbose(cl_hw,
+                              "Background message can't assign confirmation parameters (%s)\n",
+                              MSG_ID_STR(msg_id));
+               return;
+       }
+
+       if (msg->param_len) {
+               param = kzalloc(msg_len, GFP_ATOMIC);
+               if (param) {
+                       memcpy(param, msg->param, msg_len);
+                       if (cl_hw->msg_cfm_params[msg_id])
+                               cl_dbg_err(cl_hw, "msg_cfm_params is not NULL for %s\n",
+                                          MSG_ID_STR(msg_id));
+                       cl_hw->msg_cfm_params[msg_id] = param;
+               } else {
+                       cl_dbg_err(cl_hw, "param allocation failed\n");
+               }
+       } else {
+               u16 dummy_dest_id = le16_to_cpu(msg->dummy_dest_id);
+               u16 dummy_src_id = le16_to_cpu(msg->dummy_src_id);
+
+               cl_dbg_err(cl_hw, "msg->param_len is 0 [%u,%u,%u]\n",
+                          msg_id, dummy_dest_id, dummy_src_id);
+       }
+}
+
+void cl_msg_cfm_assign_and_clear(struct cl_hw *cl_hw, struct cl_ipc_e2a_msg *msg)
+{
+       u16 bit = cl_msg_cfm_clear_bit(msg->id);
+
+       if (CFM_TEST_BIT(bit, &cl_hw->cfm_flags)) {
+               cl_msg_cfm_assign_params(cl_hw, msg);
+               CFM_CLEAR_BIT(bit, &cl_hw->cfm_flags);
+       } else {
+               cl_dbg_verbose(cl_hw, "Msg ID not set in cfm_flags (%s)\n", MSG_ID_STR(msg->id));
+       }
+}
+
+void cl_msg_cfm_clear(struct cl_hw *cl_hw, struct cl_ipc_e2a_msg *msg)
+{
+       u16 bit = cl_msg_cfm_clear_bit(msg->id);
+
+       if (!CFM_TEST_AND_CLEAR_BIT(bit, &cl_hw->cfm_flags))
+               cl_dbg_verbose(cl_hw, "Msg ID not set in cfm_flags (%s)\n", MSG_ID_STR(msg->id));
+}
+
+void cl_msg_cfm_simulate_timeout(struct cl_hw *cl_hw)
+{
+       u16 bit = cl_msg_cfm_set_bit(DBG_SET_MOD_FILTER_REQ);
+
+       mutex_lock(&cl_hw->msg_tx_mutex);
+       CFM_SET_BIT(bit, &cl_hw->cfm_flags);
+       cl_msg_cfm_wait(cl_hw, bit, DBG_STR_SHIFT(DBG_SET_MOD_FILTER_REQ));
+}
+
--
2.30.0
