From: Henning Colliander <henning.colliander@xxxxxxxxxxx> This patch adds support for Kvaser PCIEcan devices. This includes support for up to 4 CAN channels on a single card, depending on device. Signed-off-by: Henning Colliander <henning.colliander@xxxxxxxxxxx> Signed-off-by: Jimmy Assarsson <extja@xxxxxxxxxx> Signed-off-by: Christer Beskow <chbe@xxxxxxxxxx> --- drivers/net/can/Kconfig | 13 + drivers/net/can/Makefile | 1 + drivers/net/can/kvaser_pciefd.c | 2089 +++++++++++++++++++++++++++++++ 3 files changed, 2103 insertions(+) create mode 100644 drivers/net/can/kvaser_pciefd.c diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index 7cdd0cead693..93dc0ab29947 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -119,6 +119,19 @@ config CAN_JANZ_ICAN3 This driver can also be built as a module. If so, the module will be called janz-ican3.ko. +config CAN_KVASER_PCIEFD + depends on PCI + tristate "Kvaser PCIe FD cards" + help + This is a driver for the Kvaser PCI Express CAN FD family. + + Supported devices: + Kvaser PCIEcan 4xHS + Kvaser PCIEcan 2xHS v2 + Kvaser PCIEcan HS v2 + Kvaser Mini PCI Express HS v2 + Kvaser Mini PCI Express 2xHS v2 + config CAN_SUN4I tristate "Allwinner A10 CAN controller" depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile index 44922bf29b6a..22164300122d 100644 --- a/drivers/net/can/Makefile +++ b/drivers/net/can/Makefile @@ -25,6 +25,7 @@ obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o obj-$(CONFIG_CAN_GRCAN) += grcan.o obj-$(CONFIG_CAN_IFI_CANFD) += ifi_canfd/ obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o +obj-$(CONFIG_CAN_KVASER_PCIEFD) += kvaser_pciefd.o obj-$(CONFIG_CAN_MSCAN) += mscan/ obj-$(CONFIG_CAN_M_CAN) += m_can/ obj-$(CONFIG_CAN_PEAK_PCIEFD) += peak_canfd/ diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c new file mode 100644 index 000000000000..98381f9890c7 --- /dev/null +++ b/drivers/net/can/kvaser_pciefd.c @@ -0,0 +1,2089 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018 KVASER AB, Sweden. All rights reserved. 
+ * Parts of this driver are based on the following: + * - Kvaser linux pciefd driver (version 5.25) + * - PEAK linux canfd driver + * - Altera Avalon EPCS flash controller driver + */ + +#include <linux/kernel.h> +#include <linux/version.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/can/dev.h> +#include <linux/timer.h> +#include <linux/netdevice.h> +#include <linux/crc32.h> + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Kvaser AB <support@xxxxxxxxxx>"); +MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices"); + +#define KVASER_PCIEFD_DRV_NAME "kvaser_pciefd" + +#define KVASER_PCIEFD_WAIT_TIMEOUT msecs_to_jiffies(1000) +#define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200)) +#define KVASER_PCIEFD_MAX_ERR_REP 256 +#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17 +#define KVASER_PCIEFD_DMA_SIZE (4 * 1024) +#define KVASER_PCIEFD_64BIT_DMA_BIT BIT(0) + +#define KVASER_PCIEFD_VENDOR 0x1a07 +#define KVASER_PCIEFD_4HS_ID 0x000d +#define KVASER_PCIEFD_2HS_ID 0x000e +#define KVASER_PCIEFD_HS_ID 0x000f +#define KVASER_PCIEFD_MINIPCIE_HS_ID 0x0010 +#define KVASER_PCIEFD_MINIPCIE_2HS_ID 0x0011 + +#define KVASER_PCIEFD_DMA_MAP_BASE 0x00001000 + +#define KVASER_PCIEFD_KCAN0_BASE 0x00010000 +#define KVASER_PCIEFD_KCAN_BASE_OFFSET 0x1000 + +/* PCIe IRQ register */ +#define KVASER_PCIEFD_IRQ_REG 0x0040 +#define KVASER_PCIEFD_IEN_REG 0x0050 + +#define KVASER_PCIEFD_IRQ_ALL_MSK 0x1f +#define KVASER_PCIEFD_IRQ_SRB BIT(4) + +#define KVASER_PCIEFD_GET_IRQ(pcie) \ + ioread32((pcie)->reg_base + KVASER_PCIEFD_IRQ_REG) + +#define KVASER_PCIEFD_SET_IRQ(pcie, data) \ + iowrite32(data, (pcie)->reg_base + KVASER_PCIEFD_IRQ_REG) + +#define KVASER_PCIEFD_SET_IEN(pcie, data) \ + iowrite32(data, (pcie)->reg_base + KVASER_PCIEFD_IEN_REG) + +/* Kvaser KCAN packet */ +#define KVASER_PCIEFD_PACKET_IDE BIT(30) +#define KVASER_PCIEFD_PACKET_RTR BIT(29) +#define KVASER_PCIEFD_PACKET_DLC(dlc) (((dlc) & 0xf) << 8) +#define KVASER_PCIEFD_PACKET_SEQ(seq) ((seq) & 0xff) +#define KVASER_PCIEFD_PACKET_ESI BIT(13) +#define KVASER_PCIEFD_PACKET_BRS BIT(14) +#define KVASER_PCIEFD_PACKET_FDF BIT(15) +#define KVASER_PCIEFD_PACKET_SMS BIT(16) +#define KVASER_PCIEFD_PACKET_AREQ BIT(31) + +#define KVASER_PCIEFD_PACK_GET_TYPE(p) (((p)->header[1] >> 28) & 0xf) +#define KVASER_PCIEFD_PACK_GET_DLC(p) (((p)->header[1] >> 8) & 0xf) +#define KVASER_PCIEFD_PACK_GET_FDF(p) \ + ((p)->header[1] & KVASER_PCIEFD_PACKET_FDF) +#define KVASER_PCIEFD_PACK_GET_BRS(p) \ + ((p)->header[1] & KVASER_PCIEFD_PACKET_BRS) +#define KVASER_PCIEFD_PACK_GET_ESI(p) \ + ((p)->header[1] & KVASER_PCIEFD_PACKET_ESI) +#define KVASER_PCIEFD_PACK_GET_ID(p) ((p)->header[0] & CAN_EFF_MASK) +#define KVASER_PCIEFD_PACK_GET_CHID(p) (((p)->header[1] >> 25) & 0x07) + +#define KVASER_PCIEFD_DPACK_GET_RTR(p) \ + ((p)->header[0] & KVASER_PCIEFD_PACKET_RTR) +#define KVASER_PCIEFD_DPACK_GET_IDE(p) \ + ((p)->header[0] & KVASER_PCIEFD_PACKET_IDE) + +#define KVASER_PCIEFD_APACK_GET_FLU(p) ((p)->header[0] & BIT(8)) +#define KVASER_PCIEFD_APACK_GET_CT(p) ((p)->header[0] & BIT(9)) +#define KVASER_PCIEFD_APACK_GET_ABL(p) ((p)->header[0] & BIT(10)) +#define KVASER_PCIEFD_APACK_GET_NACK(p) ((p)->header[0] & BIT(11)) +#define KVASER_PCIEFD_APACK_GET_SEQ(p) ((p)->header[0] & 0xff) + +#define KVASER_PCIEFD_SPACK_GET_IDET(p) ((p)->header[0] & BIT(20)) +#define KVASER_PCIEFD_SPACK_GET_IRM(p) ((p)->header[0] & BIT(21)) +#define KVASER_PCIEFD_SPACK_GET_RMCD(p) ((p)->header[0] & BIT(22)) +#define KVASER_PCIEFD_SPACK_GET_AUTO(p) 
((p)->header[1] & BIT(21)) +#define KVASER_PCIEFD_SPACK_GET_CMDSEQ(p) ((p)->header[1] & 0xff) + +#define KVASER_PCIEFD_EPACK_GET_RXE(p) (((p)->header[0] >> 8) & 0xff) +#define KVASER_PCIEFD_EPACK_GET_TXE(p) ((p)->header[0] & 0xff) +#define KVASER_PCIEFD_EPACK_GET_BOFF(p) ((p)->header[0] & BIT(16)) +#define KVASER_PCIEFD_EPACK_GET_EPLR(p) ((p)->header[1] & BIT(24)) +#define KVASER_PCIEFD_EPACK_GET_EWLR(p) ((p)->header[1] & BIT(23)) + +/* Packet types */ +#define KVASER_PCIEFD_PACK_TYPE_DATA 0 +#define KVASER_PCIEFD_PACK_TYPE_ACK 1 +#define KVASER_PCIEFD_PACK_TYPE_TXRQ 2 +#define KVASER_PCIEFD_PACK_TYPE_ERROR 3 +#define KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK 4 +#define KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK 5 +#define KVASER_PCIEFD_PACK_TYPE_ACK_DATA 6 +#define KVASER_PCIEFD_PACK_TYPE_STATUS 8 +#define KVASER_PCIEFD_PACK_TYPE_BUS_LOAD 9 + +#define KVASER_PCIEFD_KCAN_CTRL_EFLUSH (4 << 29) +#define KVASER_PCIEFD_KCAN_CTRL_EFRAME (5 << 29) + +/* CAN controller registers */ +#define KVASER_PCIEFD_KCAN_FIFO_REG 0x100 +#define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180 +#define KVASER_PCIEFD_KCAN_CTRL_REG 0x2C0 + +#define KVASER_PCIEFD_KCAN_SET_FIFO(can, data) \ + iowrite32(data, (can)->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG) + +#define KVASER_PCIEFD_KCAN_SET_FIFO_REP(can, data, len) \ + iowrite32_rep((can)->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG, data, len) + +#define KVASER_PCIEFD_KCAN_SET_FIFO_LAST(can, data) \ + __raw_writel(data, (can)->reg_base + KVASER_PCIEFD_KCAN_FIFO_LAST_REG) + +#define KVASER_PCIEFD_KCAN_SET_CTRL(can, data) \ + iowrite32(data, (can)->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG) + +/* CAN controller CMD register */ +#define KVASER_PCIEFD_KCAN_CMD_REG 0x400 + +/* Request status packet */ +#define KVASER_PCIEFD_KCAN_CMD_SRQ BIT(0) +/* Abort, flush and reset */ +#define KVASER_PCIEFD_KCAN_CMD_AT BIT(1) +#define KVASER_PCIEFD_KCAN_CMD_SEQNO(s) (((s) & 0xff) << 16) + +#define KVASER_PCIEFD_KCAN_SET_CMD(can, data) \ + iowrite32(data, (can)->reg_base + KVASER_PCIEFD_KCAN_CMD_REG) + +/* CAN controller IRQ/IEN register */ +#define KVASER_PCIEFD_KCAN_IEN_REG 0x408 +#define KVASER_PCIEFD_KCAN_IRQ_REG 0x410 + +#define KVASER_PCIEFD_KCAN_GET_IRQ(pcie) \ + ioread32((pcie)->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG) + +#define KVASER_PCIEFD_KCAN_SET_IRQ(can, data) \ + iowrite32(data, (can)->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG) + +#define KVASER_PCIEFD_KCAN_SET_IEN(can, data) \ + iowrite32(data, (can)->reg_base + KVASER_PCIEFD_KCAN_IEN_REG) + +/* Tx FIFO unaligned read */ +#define KVASER_PCIEFD_KCAN_IRQ_TAR BIT(0) +/* Tx FIFO unaligned end */ +#define KVASER_PCIEFD_KCAN_IRQ_TAE BIT(1) +/* Bus parameter protection error */ +#define KVASER_PCIEFD_KCAN_IRQ_BPP BIT(2) +/* FDF bit when controller is in classic mode */ +#define KVASER_PCIEFD_KCAN_IRQ_FDIC BIT(3) +/* Rx FIFO overflow */ +#define KVASER_PCIEFD_KCAN_IRQ_ROF BIT(5) +/* Abort done */ +#define KVASER_PCIEFD_KCAN_IRQ_ABD BIT(13) +/* Tx buffer flush done */ +#define KVASER_PCIEFD_KCAN_IRQ_TFD BIT(14) +/* Tx FIFO overflow */ +#define KVASER_PCIEFD_KCAN_IRQ_TOF BIT(15) +/* Tx FIFO empty */ +#define KVASER_PCIEFD_KCAN_IRQ_TE BIT(16) +/* Transmitter unaligned */ +#define KVASER_PCIEFD_KCAN_IRQ_TAL BIT(17) + +/* CAN controller TX NPACKETS register */ +#define KVASER_PCIEFD_KCAN_TX_NPACKETS_REG 0x414 + +#define KVASER_PCIEFD_KCAN_GET_TX_NPACKETS(can) \ + ioread32((can)->reg_base + KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) + +#define KVASER_PCIEFD_KCAN_TX_NPACKETS_GET_COUNT(can) \ + (KVASER_PCIEFD_KCAN_GET_TX_NPACKETS(can) & 0xff) + +#define 
KVASER_PCIEFD_KCAN_TX_NPACKETS_GET_MAX_COUNT(can) \ + ((KVASER_PCIEFD_KCAN_GET_TX_NPACKETS(can) >> 16) & 0xff) + +/* CAN controller STAT register */ +#define KVASER_PCIEFD_KCAN_STAT_REG 0x418 + +#define KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK (KVASER_PCIEFD_KCAN_STAT_AR \ + | KVASER_PCIEFD_KCAN_STAT_BOFF | KVASER_PCIEFD_KCAN_STAT_RMR \ + | KVASER_PCIEFD_KCAN_STAT_IRM) + +/* Abort request */ +#define KVASER_PCIEFD_KCAN_STAT_AR BIT(7) +/* Idle state. Controller in reset mode and no abort or flush pending */ +#define KVASER_PCIEFD_KCAN_STAT_IDLE BIT(10) +/* Bus off */ +#define KVASER_PCIEFD_KCAN_STAT_BOFF BIT(11) +/* Reset mode request */ +#define KVASER_PCIEFD_KCAN_STAT_RMR BIT(14) +/* Controller in reset mode */ +#define KVASER_PCIEFD_KCAN_STAT_IRM BIT(15) +/* Controller got one-shot capability */ +#define KVASER_PCIEFD_KCAN_STAT_CAP BIT(16) +/* Controller got CAN FD capability */ +#define KVASER_PCIEFD_KCAN_STAT_FD BIT(19) + +#define KVASER_PCIEFD_KCAN_GET_STAT(can) \ + ioread32((can)->reg_base + KVASER_PCIEFD_KCAN_STAT_REG) + +#define KVASER_PCIEFD_KCAN_STAT_GET_CMD_SEQ_NO(data) (((data) >> 24) & 0xff) + +/* CAN controller MODE register */ +#define KVASER_PCIEFD_KCAN_MODE_REG 0x41C + +/* Reset mode */ +#define KVASER_PCIEFD_KCAN_MODE_RM BIT(8) +/* Listen only mode */ +#define KVASER_PCIEFD_KCAN_MODE_LOM BIT(9) +/* Error packet enable */ +#define KVASER_PCIEFD_KCAN_MODE_EPEN BIT(12) +/* CAN FD non-ISO */ +#define KVASER_PCIEFD_KCAN_MODE_NIFDEN BIT(15) +/* Acknowledgment packet type */ +#define KVASER_PCIEFD_KCAN_MODE_APT BIT(20) +/* Active error flag enable. Clear to force error passive */ +#define KVASER_PCIEFD_KCAN_MODE_EEN BIT(23) +/* Classic CAN mode */ +#define KVASER_PCIEFD_KCAN_MODE_CCM BIT(31) + +#define KVASER_PCIEFD_KCAN_GET_MODE(can) \ + ioread32((can)->reg_base + KVASER_PCIEFD_KCAN_MODE_REG) + +#define KVASER_PCIEFD_KCAN_SET_MODE(can, data) \ + iowrite32(data, (can)->reg_base + KVASER_PCIEFD_KCAN_MODE_REG) + +/* CAN controller BTRN/BTRD (bittiming) registers */ +#define KVASER_PCIEFD_KCAN_BTRN_REG 0x420 +#define KVASER_PCIEFD_KCAN_BTRD_REG 0x428 + +#define KVASER_PCIEFD_KCAN_SET_BITTIMING(tseg1, tseg2, sjw, brp) \ + ((((tseg2) - 1) & 0x1f) << 26 | \ + (((tseg1) - 1) & 0x1ff) << 17 | \ + (((sjw) - 1) & 0xf) << 13 | \ + (((brp) - 1) & 0x1fff)) + +#define KVASER_PCIEFD_KCAN_SET_BTRN(can, data) \ + iowrite32(data, (can)->reg_base + KVASER_PCIEFD_KCAN_BTRN_REG) + +#define KVASER_PCIEFD_KCAN_SET_BTRD(can, data) \ + iowrite32(data, (can)->reg_base + KVASER_PCIEFD_KCAN_BTRD_REG) + +/* CAN controller PWM register */ +#define KVASER_PCIEFD_KCAN_PWM_REG 0x430 + +#define KVASER_PCIEFD_KCAN_GET_PWM(can) \ + ioread32((can)->reg_base + KVASER_PCIEFD_KCAN_PWM_REG) + +#define KVASER_PCIEFD_KCAN_SET_PWM(can, data) \ + iowrite32(data, (can)->reg_base + KVASER_PCIEFD_KCAN_PWM_REG) + +#define KVASER_PCIEFD_KCAN_PWM_GET_TOP(data) (((data) >> 16) & 0xff) + +#define KVASER_PCIEFD_KCAN_PWM_SET_TOP(data) (((data) & 0xff) << 16) + +#define KVASER_PCIEFD_KCAN_PWM_SET_TRIGGER(data) ((data) & 0xff) + +/* Loopback control register */ +#define KVASER_PCIEFD_LOOP_REG 0x1F000 + +#define KVASER_PCIEFD_SET_LB_OFF(pcie) \ + iowrite32(0, (pcie)->reg_base + KVASER_PCIEFD_LOOP_REG) + +/* Shared receive buffer */ +#define KVASER_PCIEFD_SRB_BASE 0x1F200 + +/* Shared receive buffer command register */ +#define KVASER_PCIEFD_SRB_CMD_REG (KVASER_PCIEFD_SRB_BASE + 0x0200) + +/* Reset DMA buffer 0 */ +#define KVASER_PCIEFD_SRB_CMD_RDB0(pcie)\ + iowrite32(BIT(4), (pcie)->reg_base + KVASER_PCIEFD_SRB_CMD_REG) + +/* Reset DMA 
buffer 1 */ +#define KVASER_PCIEFD_SRB_CMD_RDB1(pcie)\ + iowrite32(BIT(5), (pcie)->reg_base + KVASER_PCIEFD_SRB_CMD_REG) + +/* Reset FIFO buffer */ +#define KVASER_PCIEFD_SRB_CMD_FOR(pcie)\ + iowrite32(1, (pcie)->reg_base + KVASER_PCIEFD_SRB_CMD_REG) + +/* Shared receive buffer IRQ enable, IRQ status, and IRQ registers */ +#define KVASER_PCIEFD_SRB_IEN_REG (KVASER_PCIEFD_SRB_BASE + 0x0204) +#define KVASER_PCIEFD_SRB_ISTAT_REG (KVASER_PCIEFD_SRB_BASE + 0x0208) +#define KVASER_PCIEFD_SRB_IRQ_REG (KVASER_PCIEFD_SRB_BASE + 0x020C) + +/* DMA packet done, buffer 0 and 1 */ +#define KVASER_PCIEFD_SRB_IRQ_DPD0 BIT(8) +#define KVASER_PCIEFD_SRB_IRQ_DPD1 BIT(9) + +/* DMA overflow, buffer 0 and 1 */ +#define KVASER_PCIEFD_SRB_IRQ_DOF0 BIT(10) +#define KVASER_PCIEFD_SRB_IRQ_DOF1 BIT(11) + +/* DMA underflow, buffer 0 and 1 */ +#define KVASER_PCIEFD_SRB_IRQ_DUF0 BIT(12) +#define KVASER_PCIEFD_SRB_IRQ_DUF1 BIT(13) + +#define KVASER_PCIEFD_SRB_GET_ISTAT(pcie) \ + ioread32((pcie)->reg_base + KVASER_PCIEFD_SRB_ISTAT_REG) + +#define KVASER_PCIEFD_SRB_SET_IRQ(pcie, data) \ + iowrite32(data, (pcie)->reg_base + KVASER_PCIEFD_SRB_IRQ_REG) + +#define KVASER_PCIEFD_SRB_GET_IRQ(pcie) \ + ioread32((pcie)->reg_base + KVASER_PCIEFD_SRB_IRQ_REG) + +#define KVASER_PCIEFD_SRB_SET_IEN(pcie, data) \ + iowrite32(data, (pcie)->reg_base + KVASER_PCIEFD_SRB_IEN_REG) + +/* Shared receive buffer status register */ +#define KVASER_PCIEFD_SRB_STAT_REG (KVASER_PCIEFD_SRB_BASE + 0x0210) + +#define KVASER_PCIEFD_SRB_GET_STAT(pcie) \ + ioread32((pcie)->reg_base + KVASER_PCIEFD_SRB_STAT_REG) + +/* DMA idle */ +#define KVASER_PCIEFD_SRB_STAT_DI BIT(15) +/* DMA support */ +#define KVASER_PCIEFD_SRB_STAT_DMA BIT(24) + +/* Shared receive buffer control register */ +#define KVASER_PCIEFD_SRB_CTRL_REG (KVASER_PCIEFD_SRB_BASE + 0x0218) + +#define KVASER_PCIEFD_SRB_ENABLE_DMA(pcie) \ + iowrite32(1, (pcie)->reg_base + KVASER_PCIEFD_SRB_CTRL_REG) + +#define KVASER_PCIEFD_SRB_DISABLE_DMA(pcie) \ + iowrite32(0, (pcie)->reg_base + KVASER_PCIEFD_SRB_CTRL_REG) + +/* System identification and information register */ +#define KVASER_PCIEFD_SYSID_BASE 0x1F020 +#define KVASER_PCIEFD_SYSID_VERSION_REG (KVASER_PCIEFD_SYSID_BASE + 0x08) +#define KVASER_PCIEFD_SYSID_CANFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0x0C) +#define KVASER_PCIEFD_SYSID_BUILD_REG (KVASER_PCIEFD_SYSID_BASE + 0x14) + +#define KVASER_PCIEFD_SYSID_GET_VERSION(pcie) \ + ioread32((pcie)->reg_base + KVASER_PCIEFD_SYSID_VERSION_REG) + +#define KVASER_PCIEFD_SYSID_GET_CANFREQ(pcie) \ + ioread32((pcie)->reg_base + KVASER_PCIEFD_SYSID_CANFREQ_REG) + +#define KVASER_PCIEFD_SYSID_GET_BUILD(pcie) \ + ioread32((pcie)->reg_base + KVASER_PCIEFD_SYSID_BUILD_REG) + +#define KVASER_PCIEFD_SYSID_GET_NR_CHAN(sysid) (((sysid) >> 24) & 0xff) +#define KVASER_PCIEFD_SYSID_GET_MAJOR(sysid) (((sysid) >> 16) & 0xff) +#define KVASER_PCIEFD_SYSID_GET_MINOR(sysid) ((sysid) & 0xff) +#define KVASER_PCIEFD_SYSID_GET_BUILD_REV(build) (((build) >> 1) & 0x7fff) + +/* EPCS flash controller definitions */ +/* EPCS flash base */ +#define KVASER_PCIEFD_SPI_BASE 0x1FC00 + +#define KVASER_PCIEFD_CFG_IMG_SZ (64 * 1024) +#define KVASER_PCIEFD_CFG_IMG_OFFSET (31 * 65536L) + +#define KVASER_PCIEFD_CFG_MAX_PARAMS 256 +#define KVASER_PCIEFD_CFG_MAGIC 0xCAFEF00D +#define KVASER_PCIEFD_CFG_PARAM_MAX_SZ 24 +#define KVASER_PCIEFD_CFG_SYS_VER 1 +#define KVASER_PCIEFD_CFG_PARAM_NR_CHAN 130 + +/* Commands for controlling the onboard flash */ +#define KVASER_PCIEFD_FLASH_RES_CMD ((u8)0xAB) +#define KVASER_PCIEFD_FLASH_READ_CMD ((u8)0x03)
+#define KVASER_PCIEFD_FLASH_STATUS_CMD ((u8)0x05) + +#define KVASER_PCIEFD_SPI_RX_REG (KVASER_PCIEFD_SPI_BASE + 0) +#define KVASER_PCIEFD_SPI_TX_REG (KVASER_PCIEFD_SPI_BASE + 4) +#define KVASER_PCIEFD_SPI_STATUS_REG (KVASER_PCIEFD_SPI_BASE + 8) +#define KVASER_PCIEFD_SPI_CTRL_REG (KVASER_PCIEFD_SPI_BASE + 12) +#define KVASER_PCIEFD_SPI_SSEL_REG (KVASER_PCIEFD_SPI_BASE + 20) + +#define KVASER_PCIEFD_SPI_TMT BIT(5) +#define KVASER_PCIEFD_SPI_TRDY BIT(6) +#define KVASER_PCIEFD_SPI_RRDY BIT(7) + +#define KVASER_PCIEFD_SPI_SET_SSEL(pcie) \ + iowrite32(1, (pcie)->reg_base + KVASER_PCIEFD_SPI_SSEL_REG) + +#define KVASER_PCIEFD_SPI_SET_SS0(pcie) \ + iowrite32(BIT(10), (pcie)->reg_base + KVASER_PCIEFD_SPI_CTRL_REG) + +#define KVASER_PCIEFD_SPI_UNSET_SS0(pcie) \ + iowrite32(0, (pcie)->reg_base + KVASER_PCIEFD_SPI_CTRL_REG) + +#define KVASER_PCIEFD_SPI_WRITE_TX(pcie, data) \ + iowrite32(data, (pcie)->reg_base + KVASER_PCIEFD_SPI_TX_REG) + +#define KVASER_PCIEFD_SPI_READ_RX(pcie) \ + ioread32((pcie)->reg_base + KVASER_PCIEFD_SPI_RX_REG) + +#define KVASER_PCIEFD_SPI_GET_STATUS(pcie) \ + ioread32((pcie)->reg_base + KVASER_PCIEFD_SPI_STATUS_REG) + +struct kvaser_pciefd; + +struct kvaser_pciefd_can { + struct can_priv can; + struct kvaser_pciefd *kv_pcie; + void __iomem *reg_base; + struct can_berr_counter bec; + int cmd_seq; + int err_rep_cnt; + int echo_idx; + spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */ + spinlock_t echo_lock; /* Locks the message echo buffer */ + struct timer_list bec_poll_timer; + struct completion start_comp, flush_comp; +}; + +#define KVASER_PCIEFD_MAX_CAN_CHANNELS 4 +#define KVASER_PCIEFD_DMA_COUNT 2 +struct kvaser_pciefd { + struct pci_dev *pci; + void __iomem *reg_base; + struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS]; + void *dma_data[KVASER_PCIEFD_DMA_COUNT]; + u8 nr_channels; + u32 freq; + u32 freq_to_ticks_div; +}; + +struct kvaser_pciefd_packet { + u32 header[2]; + u8 data[64]; + u64 timestamp; +}; + +static const struct can_bittiming_const kvaser_pciefd_bittiming_const = { + .name = KVASER_PCIEFD_DRV_NAME, + .tseg1_min = 1, + .tseg1_max = 255, + .tseg2_min = 1, + .tseg2_max = 32, + .sjw_max = 16, + .brp_min = 1, + .brp_max = 4096, + .brp_inc = 1, +}; + +struct kvaser_pciefd_cfg_param { + __le32 magic; + __le32 nr; + __le32 len; + u8 data[KVASER_PCIEFD_CFG_PARAM_MAX_SZ]; +}; + +struct kvaser_pciefd_cfg_img { + __le32 version; + __le32 magic; + __le32 crc; + struct kvaser_pciefd_cfg_param params[KVASER_PCIEFD_CFG_MAX_PARAMS]; +}; + +static struct pci_device_id kvaser_pciefd_id_table[] = { + { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4HS_ID), }, + { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2HS_ID), }, + { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_HS_ID), }, + { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_HS_ID), }, + { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2HS_ID), }, + { 0,}, +}; +MODULE_DEVICE_TABLE(pci, kvaser_pciefd_id_table); + +/* Onboard flash memory functions */ +static int kvaser_pciefd_spi_wait_loop(struct kvaser_pciefd *pcie, int msk) +{ + int loopmax = 10; + u32 res; + + do { + res = KVASER_PCIEFD_SPI_GET_STATUS(pcie); + } while (loopmax-- > 0 && (res & msk) == 0); + + if (loopmax < 0) + return -1; + + return 0; +} + +static int kvaser_pciefd_spi_cmd(struct kvaser_pciefd *pcie, const u8 *tx, + u32 tx_len, u8 *rx, u32 rx_len) +{ + int c; + + KVASER_PCIEFD_SPI_SET_SSEL(pcie); + KVASER_PCIEFD_SPI_SET_SS0(pcie); + KVASER_PCIEFD_SPI_READ_RX(pcie); + + c = tx_len; + while (c--) { + if 
(kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY)) + return -EIO; + + KVASER_PCIEFD_SPI_WRITE_TX(pcie, *tx++); + + if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY)) + return -EIO; + + KVASER_PCIEFD_SPI_READ_RX(pcie); + } + + c = rx_len; + while (c-- > 0) { + if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY)) + return -EIO; + + KVASER_PCIEFD_SPI_WRITE_TX(pcie, 0); + + if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY)) + return -EIO; + + *rx++ = KVASER_PCIEFD_SPI_READ_RX(pcie); + } + + if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TMT)) + return -EIO; + + KVASER_PCIEFD_SPI_UNSET_SS0(pcie); + + if (c != -1) { + dev_err(&pcie->pci->dev, "Flash SPI transfer failed\n"); + return -EIO; + } + + return 0; +} + +static int kvaser_pciefd_cfg_read_and_verify(struct kvaser_pciefd *pcie, + struct kvaser_pciefd_cfg_img *img) +{ + int offset = KVASER_PCIEFD_CFG_IMG_OFFSET; + int res, crc; + u8 *crc_buff; + + u8 cmd[] = { + KVASER_PCIEFD_FLASH_READ_CMD, + (u8)((offset >> 16) & 0xFF), + (u8)((offset >> 8) & 0xFF), + (u8)(offset & 0xFF) + }; + + res = kvaser_pciefd_spi_cmd(pcie, cmd, 4, (u8 *)img, + KVASER_PCIEFD_CFG_IMG_SZ); + if (res) + return res; + + crc_buff = (u8 *)img->params; + + if (le32_to_cpu(img->version) != KVASER_PCIEFD_CFG_SYS_VER) { + dev_err(&pcie->pci->dev, + "Config flash corrupted, version number is wrong\n"); + return -ENODEV; + } + + if (le32_to_cpu(img->magic) != KVASER_PCIEFD_CFG_MAGIC) { + dev_err(&pcie->pci->dev, + "Config flash corrupted, magic number is wrong\n"); + return -ENODEV; + } + + crc = ~crc32_be(0xffffffff, crc_buff, sizeof(img->params)); + if (le32_to_cpu(img->crc) != crc) { + dev_err(&pcie->pci->dev, + "Stored CRC does not match flash image contents\n"); + return -EIO; + } + + return 0; +} + +static void kvaser_pciefd_cfg_read_params(struct kvaser_pciefd *pcie, + struct kvaser_pciefd_cfg_img *img) +{ + struct kvaser_pciefd_cfg_param *param; + + param = &img->params[KVASER_PCIEFD_CFG_PARAM_NR_CHAN]; + memcpy(&pcie->nr_channels, param->data, le32_to_cpu(param->len)); +} + +static int kvaser_pciefd_read_cfg(struct kvaser_pciefd *pcie) +{ + int res; + struct kvaser_pciefd_cfg_img *img; + + /* Read electronic signature */ + u8 cmd[] = {KVASER_PCIEFD_FLASH_RES_CMD, 0, 0, 0}; + + res = kvaser_pciefd_spi_cmd(pcie, cmd, 4, cmd, 1); + if (res) + return -EIO; + + img = kmalloc(KVASER_PCIEFD_CFG_IMG_SZ, GFP_KERNEL); + if (!img) + return -ENOMEM; + + if (cmd[0] != 0x14) { + dev_err(&pcie->pci->dev, + "Flash id is 0x%x instead of expected 0x14\n", cmd[0]); + + res = -ENODEV; + goto image_free; + } + + cmd[0] = KVASER_PCIEFD_FLASH_STATUS_CMD; + res = kvaser_pciefd_spi_cmd(pcie, cmd, 1, cmd, 1); + if (res) { + goto image_free; + } else if (cmd[0] & 1) { + res = -EIO; + /* No write is ever done, the WIP bit should never be set */ + dev_err(&pcie->pci->dev, "Unexpected WIP bit set in flash\n"); + goto image_free; + } + + res = kvaser_pciefd_cfg_read_and_verify(pcie, img); + if (res) { + res = -EIO; + goto image_free; + } + + kvaser_pciefd_cfg_read_params(pcie, img); + +image_free: + kfree(img); + return res; +} + +static void kvaser_pciefd_request_status(struct kvaser_pciefd_can *can) +{ + u32 cmd; + + cmd = KVASER_PCIEFD_KCAN_CMD_SRQ; + cmd |= KVASER_PCIEFD_KCAN_CMD_SEQNO(++can->cmd_seq); + KVASER_PCIEFD_KCAN_SET_CMD(can, cmd); +} + +static void kvaser_pciefd_enable_err_gen(struct kvaser_pciefd_can *can) +{ + u32 mode; + unsigned long irq; + + spin_lock_irqsave(&can->lock, irq); + mode = KVASER_PCIEFD_KCAN_GET_MODE(can);
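+ /* Error packet generation (EPEN) throttling, as wired up elsewhere in
+  * this patch: kvaser_pciefd_handle_error_packet() calls
+  * kvaser_pciefd_disable_err_gen() once err_rep_cnt reaches
+  * KVASER_PCIEFD_MAX_ERR_REP, and this function re-enables EPEN from
+  * the bec_poll_timer callback, so error frames are limited to at most
+  * 256 reports per 200 ms poll period.
+  */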
+ if (!(mode & KVASER_PCIEFD_KCAN_MODE_EPEN)) { + mode |= KVASER_PCIEFD_KCAN_MODE_EPEN; + KVASER_PCIEFD_KCAN_SET_MODE(can, mode); + } + spin_unlock_irqrestore(&can->lock, irq); +} + +static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can) +{ + u32 mode; + unsigned long irq; + + spin_lock_irqsave(&can->lock, irq); + mode = KVASER_PCIEFD_KCAN_GET_MODE(can); + mode &= ~KVASER_PCIEFD_KCAN_MODE_EPEN; + KVASER_PCIEFD_KCAN_SET_MODE(can, mode); + spin_unlock_irqrestore(&can->lock, irq); +} + +static int kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can) +{ + u32 msk; + + msk = KVASER_PCIEFD_KCAN_IRQ_TE | KVASER_PCIEFD_KCAN_IRQ_ROF | + KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD | + KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL | + KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP | + KVASER_PCIEFD_KCAN_IRQ_TAR | KVASER_PCIEFD_KCAN_IRQ_TFD; + + KVASER_PCIEFD_KCAN_SET_IEN(can, msk); + + return 0; +} + +static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can) +{ + u32 mode; + unsigned long irq; + + spin_lock_irqsave(&can->lock, irq); + + mode = KVASER_PCIEFD_KCAN_GET_MODE(can); + if (can->can.ctrlmode & CAN_CTRLMODE_FD) { + mode &= ~KVASER_PCIEFD_KCAN_MODE_CCM; + if (can->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) + mode |= KVASER_PCIEFD_KCAN_MODE_NIFDEN; + else + mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN; + } else { + mode |= KVASER_PCIEFD_KCAN_MODE_CCM; + mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN; + } + + if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) + mode |= KVASER_PCIEFD_KCAN_MODE_LOM; + + mode |= KVASER_PCIEFD_KCAN_MODE_EEN; + mode |= KVASER_PCIEFD_KCAN_MODE_EPEN; + /* Use ACK packet type */ + mode &= ~KVASER_PCIEFD_KCAN_MODE_APT; + mode &= ~KVASER_PCIEFD_KCAN_MODE_RM; + KVASER_PCIEFD_KCAN_SET_MODE(can, mode); + + spin_unlock_irqrestore(&can->lock, irq); +} + +static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can) +{ + u32 ien, status; + unsigned long irq; + + spin_lock_irqsave(&can->lock, irq); + KVASER_PCIEFD_KCAN_SET_IRQ(can, -1); + ien = KVASER_PCIEFD_KCAN_IRQ_ABD; + ien |= KVASER_PCIEFD_KCAN_IRQ_TFD; + KVASER_PCIEFD_KCAN_SET_IEN(can, ien); + + status = KVASER_PCIEFD_KCAN_GET_STAT(can); + if (status & KVASER_PCIEFD_KCAN_STAT_IDLE) { + u32 cmd; + + /* If controller is already idle, run abort, flush and reset */ + cmd = KVASER_PCIEFD_KCAN_CMD_AT; + cmd |= KVASER_PCIEFD_KCAN_CMD_SEQNO(++can->cmd_seq); + KVASER_PCIEFD_KCAN_SET_CMD(can, cmd); + } else if (!(status & KVASER_PCIEFD_KCAN_STAT_RMR)) { + u32 mode; + + /* Put controller in reset mode */ + mode = KVASER_PCIEFD_KCAN_GET_MODE(can); + mode |= KVASER_PCIEFD_KCAN_MODE_RM; + KVASER_PCIEFD_KCAN_SET_MODE(can, mode); + } + + spin_unlock_irqrestore(&can->lock, irq); +} + +static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can) +{ + u32 ien, mode; + unsigned long irq; + + del_timer(&can->bec_poll_timer); + + if (!completion_done(&can->flush_comp)) + kvaser_pciefd_start_controller_flush(can); + + if (!wait_for_completion_timeout(&can->flush_comp, + KVASER_PCIEFD_WAIT_TIMEOUT)) { + netdev_err(can->can.dev, "Timeout during bus on flush\n"); + return -ETIMEDOUT; + } + + spin_lock_irqsave(&can->lock, irq); + KVASER_PCIEFD_KCAN_SET_IEN(can, 0); + KVASER_PCIEFD_KCAN_SET_IRQ(can, -1); + + ien = KVASER_PCIEFD_KCAN_IRQ_ABD; + ien |= KVASER_PCIEFD_KCAN_IRQ_TFD; + KVASER_PCIEFD_KCAN_SET_IEN(can, ien); + + mode = KVASER_PCIEFD_KCAN_GET_MODE(can); + mode &= ~KVASER_PCIEFD_KCAN_MODE_RM; + KVASER_PCIEFD_KCAN_SET_MODE(can, mode); + spin_unlock_irqrestore(&can->lock, 
irq); + + if (!wait_for_completion_timeout(&can->start_comp, + KVASER_PCIEFD_WAIT_TIMEOUT)) { + netdev_err(can->can.dev, "Timeout during bus on reset\n"); + return -ETIMEDOUT; + } + /* Reset interrupt handling */ + KVASER_PCIEFD_KCAN_SET_IEN(can, 0); + KVASER_PCIEFD_KCAN_SET_IRQ(can, -1); + + kvaser_pciefd_set_tx_irq(can); + kvaser_pciefd_setup_controller(can); + + can->can.state = CAN_STATE_ERROR_ACTIVE; + netif_wake_queue(can->can.dev); + can->bec.txerr = 0; + can->bec.rxerr = 0; + can->err_rep_cnt = 0; + + return 0; +} + +static void kvaser_pciefd_pwm_stop(struct kvaser_pciefd_can *can) +{ + int top, trigger; + u32 pwm_ctrl; + unsigned long irq; + + spin_lock_irqsave(&can->lock, irq); + pwm_ctrl = KVASER_PCIEFD_KCAN_GET_PWM(can); + top = KVASER_PCIEFD_KCAN_PWM_GET_TOP(pwm_ctrl); + + trigger = (100 * top + 50) / 100; + if (trigger < 0) + trigger = 0; + + pwm_ctrl = KVASER_PCIEFD_KCAN_PWM_SET_TRIGGER(trigger); + pwm_ctrl |= KVASER_PCIEFD_KCAN_PWM_SET_TOP(top); + KVASER_PCIEFD_KCAN_SET_PWM(can, pwm_ctrl); + spin_unlock_irqrestore(&can->lock, irq); +} + +static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can) +{ + int duty = 95; + int sys_clk = can->can.clock.freq; + int freq = 500000; + int top, trigger; + u32 pwm_ctrl; + unsigned long irq; + + kvaser_pciefd_pwm_stop(can); + spin_lock_irqsave(&can->lock, irq); + + /* Set frequency */ + top = sys_clk / (2 * freq) - 1; + + pwm_ctrl = KVASER_PCIEFD_KCAN_PWM_SET_TRIGGER(top); + pwm_ctrl |= KVASER_PCIEFD_KCAN_PWM_SET_TOP(top); + KVASER_PCIEFD_KCAN_SET_PWM(can, pwm_ctrl); + + /* Set duty cycle to 95% */ + trigger = (100 * top - duty * (top + 1) + 50) / 100; + pwm_ctrl = KVASER_PCIEFD_KCAN_PWM_SET_TRIGGER(trigger); + pwm_ctrl |= KVASER_PCIEFD_KCAN_PWM_SET_TOP(top); + KVASER_PCIEFD_KCAN_SET_PWM(can, pwm_ctrl); + spin_unlock_irqrestore(&can->lock, irq); +} + +static int kvaser_pciefd_open(struct net_device *netdev) +{ + int err; + struct kvaser_pciefd_can *can = netdev_priv(netdev); + + err = open_candev(netdev); + if (err) + return err; + + err = kvaser_pciefd_bus_on(can); + if (err) + return err; + + return 0; +} + +static int kvaser_pciefd_stop(struct net_device *netdev) +{ + struct kvaser_pciefd_can *can = netdev_priv(netdev); + int ret = 0; + + /* Don't interrupt ongoing flush */ + if (!completion_done(&can->flush_comp)) + kvaser_pciefd_start_controller_flush(can); + + if (!wait_for_completion_timeout(&can->flush_comp, + KVASER_PCIEFD_WAIT_TIMEOUT)) { + netdev_err(can->can.dev, "Timeout during stop\n"); + ret = -ETIMEDOUT; + } else { + KVASER_PCIEFD_KCAN_SET_IEN(can, 0); + del_timer(&can->bec_poll_timer); + } + close_candev(netdev); + + return ret; +} + +static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_packet *p, + struct kvaser_pciefd_can *can, + struct sk_buff *skb) +{ + struct canfd_frame *cf = (struct canfd_frame *)skb->data; + int packet_size; + int seq = can->echo_idx; + + memset(p, 0, sizeof(*p)); + + if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) + p->header[1] |= KVASER_PCIEFD_PACKET_SMS; + + if (cf->can_id & CAN_RTR_FLAG) + p->header[0] |= KVASER_PCIEFD_PACKET_RTR; + + if (cf->can_id & CAN_EFF_FLAG) + p->header[0] |= KVASER_PCIEFD_PACKET_IDE; + + p->header[0] |= cf->can_id & CAN_EFF_MASK; + p->header[1] |= KVASER_PCIEFD_PACKET_DLC(can_len2dlc(cf->len)); + p->header[1] |= KVASER_PCIEFD_PACKET_AREQ; + + if (can_is_canfd_skb(skb)) { + p->header[1] |= KVASER_PCIEFD_PACKET_FDF; + if (cf->flags & CANFD_BRS) + p->header[1] |= KVASER_PCIEFD_PACKET_BRS; + if (cf->flags & CANFD_ESI) + p->header[1] |=
KVASER_PCIEFD_PACKET_ESI; + } + + p->header[1] |= KVASER_PCIEFD_PACKET_SEQ(seq); + + packet_size = cf->len; + memcpy(p->data, cf->data, packet_size); + + return DIV_ROUND_UP(packet_size, 4); +} + +static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct kvaser_pciefd_can *can = netdev_priv(netdev); + struct canfd_frame *cf = (struct canfd_frame *)skb->data; + unsigned long irq_flags; + + if (can_dropped_invalid_skb(netdev, skb)) + return NETDEV_TX_OK; + + if (cf->can_id & CAN_ERR_FLAG) { + u32 eframe; + + eframe = KVASER_PCIEFD_KCAN_CTRL_EFRAME; + + spin_lock_irqsave(&can->echo_lock, irq_flags); + eframe |= can->echo_idx; + + /* Prepare and save echo skb in internal slot */ + can_put_echo_skb(skb, netdev, can->echo_idx); + + /* Move echo index to the next slot */ + can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max; + + KVASER_PCIEFD_KCAN_SET_CTRL(can, eframe); + + if (can->can.echo_skb[can->echo_idx]) + netif_stop_queue(netdev); + + spin_unlock_irqrestore(&can->echo_lock, irq_flags); + } else { + struct kvaser_pciefd_packet packet; + struct kvaser_pciefd_packet *p = &packet; + int nwords; + int count; + + nwords = kvaser_pciefd_prepare_tx_packet(p, can, skb); + + spin_lock_irqsave(&can->echo_lock, irq_flags); + + count = KVASER_PCIEFD_KCAN_TX_NPACKETS_GET_COUNT(can); + /* No room for this message, stop the queue until at least one + * successful transmit + */ + if (count >= KVASER_PCIEFD_CAN_TX_MAX_COUNT || + can->can.echo_skb[can->echo_idx]) { + netif_stop_queue(netdev); + spin_unlock_irqrestore(&can->echo_lock, irq_flags); + return NETDEV_TX_BUSY; + } + + /* Prepare and save echo skb in internal slot */ + can_put_echo_skb(skb, netdev, can->echo_idx); + + /* Move echo index to the next slot */ + can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max; + + /* Write header to fifo */ + KVASER_PCIEFD_KCAN_SET_FIFO(can, p->header[0]); + KVASER_PCIEFD_KCAN_SET_FIFO(can, p->header[1]); + + if (nwords) { + u32 data_last = ((u32 *)p->data)[nwords - 1]; + + /* Write data to fifo, except last word */ + KVASER_PCIEFD_KCAN_SET_FIFO_REP(can, p->data, + nwords - 1); + /* Write last word to end of fifo */ + KVASER_PCIEFD_KCAN_SET_FIFO_LAST(can, data_last); + } else { + /* Complete write to fifo */ + KVASER_PCIEFD_KCAN_SET_FIFO_LAST(can, 0); + } + + count = KVASER_PCIEFD_KCAN_TX_NPACKETS_GET_COUNT(can); + /* No room for a new message, stop the queue until at least one + * successful transmit + */ + if (count >= KVASER_PCIEFD_CAN_TX_MAX_COUNT || + can->can.echo_skb[can->echo_idx]) + netif_stop_queue(netdev); + + spin_unlock_irqrestore(&can->echo_lock, irq_flags); + } + + return NETDEV_TX_OK; +} + +static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, int data) +{ + u32 mode, test, btrn; + unsigned long irq_flags; + int loopmax = 10; + struct can_bittiming *bt; + + if (data) + bt = &can->can.data_bittiming; + else + bt = &can->can.bittiming; + + btrn = KVASER_PCIEFD_KCAN_SET_BITTIMING(bt->prop_seg + bt->phase_seg1, + bt->phase_seg2, bt->sjw, + bt->brp); + + spin_lock_irqsave(&can->lock, irq_flags); + mode = KVASER_PCIEFD_KCAN_GET_MODE(can); + + /* Put the circuit in reset mode */ + KVASER_PCIEFD_KCAN_SET_MODE(can, mode | KVASER_PCIEFD_KCAN_MODE_RM); + + /* Can only set bittiming if in reset mode, but this should be fast */ + do { + test = KVASER_PCIEFD_KCAN_GET_MODE(can); + } while (!(test & KVASER_PCIEFD_KCAN_MODE_RM) && --loopmax); + + if (loopmax <= 0) { + spin_unlock_irqrestore(&can->lock, irq_flags); + return -EBUSY; + } + 
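+ /* Worked example of the KVASER_PCIEFD_KCAN_SET_BITTIMING() layout, with
+  * illustrative numbers only (the real CAN clock is read from the SYSID
+  * block at probe time): assuming an 80 MHz clock, brp = 2, tseg1 = 31,
+  * tseg2 = 8 and sjw = 8 give 80 MHz / (2 * (1 + 31 + 8)) = 1 Mbit/s and
+  * pack to (8 - 1) << 26 | (31 - 1) << 17 | (8 - 1) << 13 | (2 - 1) =
+  * 0x1c3ce001.
+  */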
+ if (data) + KVASER_PCIEFD_KCAN_SET_BTRD(can, btrn); + else + KVASER_PCIEFD_KCAN_SET_BTRN(can, btrn); + + /* Restore previous reset mode status */ + KVASER_PCIEFD_KCAN_SET_MODE(can, mode); + + spin_unlock_irqrestore(&can->lock, irq_flags); + return 0; +} + +static int kvaser_pciefd_set_nominal_bittiming(struct net_device *ndev) +{ + return kvaser_pciefd_set_bittiming(netdev_priv(ndev), 0); +} + +static int kvaser_pciefd_set_data_bittiming(struct net_device *ndev) +{ + return kvaser_pciefd_set_bittiming(netdev_priv(ndev), 1); +} + +static int kvaser_pciefd_set_mode(struct net_device *ndev, enum can_mode mode) +{ + struct kvaser_pciefd_can *can = netdev_priv(ndev); + int ret = 0; + + switch (mode) { + case CAN_MODE_START: + if (!can->can.restart_ms) + ret = kvaser_pciefd_bus_on(can); + break; + default: + return -EOPNOTSUPP; + } + + return ret; +} + +static int kvaser_pciefd_get_berr_counter(const struct net_device *ndev, + struct can_berr_counter *bec) +{ + struct kvaser_pciefd_can *can = netdev_priv(ndev); + + bec->rxerr = can->bec.rxerr; + bec->txerr = can->bec.txerr; + return 0; +} + +static void kvaser_pciefd_bec_poll_timer(struct timer_list *data) +{ + struct kvaser_pciefd_can *can = from_timer(can, data, bec_poll_timer); + + kvaser_pciefd_enable_err_gen(can); + kvaser_pciefd_request_status(can); + can->err_rep_cnt = 0; +} + +static const struct net_device_ops kvaser_pciefd_netdev_ops = { + .ndo_open = kvaser_pciefd_open, + .ndo_stop = kvaser_pciefd_stop, + .ndo_start_xmit = kvaser_pciefd_start_xmit, + .ndo_change_mtu = can_change_mtu, +}; + +static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie) +{ + int i; + + for (i = 0; i < pcie->nr_channels; i++) { + struct net_device *netdev; + struct kvaser_pciefd_can *can; + u32 data; + + netdev = alloc_candev(sizeof(struct kvaser_pciefd_can), + KVASER_PCIEFD_CAN_TX_MAX_COUNT); + if (!netdev) + return -ENOMEM; + + can = netdev_priv(netdev); + netdev->netdev_ops = &kvaser_pciefd_netdev_ops; + can->reg_base = pcie->reg_base + KVASER_PCIEFD_KCAN0_BASE + + i * KVASER_PCIEFD_KCAN_BASE_OFFSET; + + can->kv_pcie = pcie; + can->cmd_seq = 0; + can->err_rep_cnt = 0; + can->bec.txerr = 0; + can->bec.rxerr = 0; + + init_completion(&can->start_comp); + init_completion(&can->flush_comp); + timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer, + 0); + + if (KVASER_PCIEFD_KCAN_TX_NPACKETS_GET_MAX_COUNT(can) < + KVASER_PCIEFD_CAN_TX_MAX_COUNT) { + dev_err(&pcie->pci->dev, + "Max Tx count is smaller than expected\n"); + + free_candev(netdev); + return -ENODEV; + } + + can->can.clock.freq = pcie->freq; + can->can.echo_skb_max = KVASER_PCIEFD_CAN_TX_MAX_COUNT; + can->echo_idx = 0; + spin_lock_init(&can->echo_lock); + spin_lock_init(&can->lock); + can->can.bittiming_const = &kvaser_pciefd_bittiming_const; + can->can.data_bittiming_const = &kvaser_pciefd_bittiming_const; + + can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming; + can->can.do_set_data_bittiming = + kvaser_pciefd_set_data_bittiming; + + can->can.do_set_mode = kvaser_pciefd_set_mode; + can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter; + + can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY | + CAN_CTRLMODE_FD | + CAN_CTRLMODE_FD_NON_ISO; + + data = KVASER_PCIEFD_KCAN_GET_STAT(can); + if (!(data & KVASER_PCIEFD_KCAN_STAT_FD)) { + dev_err(&pcie->pci->dev, + "CAN FD not supported as expected on channel %d\n", i); + + free_candev(netdev); + return -ENODEV; + } + + if (data & KVASER_PCIEFD_KCAN_STAT_CAP) + can->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT; + +
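+ /* IFF_ECHO below enables the local echo machinery: each Tx packet is
+  * stamped with SEQ = echo_idx in kvaser_pciefd_prepare_tx_packet(),
+  * and kvaser_pciefd_handle_ack_packet() feeds the SEQ returned in the
+  * ACK to can_get_echo_skb(), which is why echo_skb_max matches the Tx
+  * FIFO depth checked above (KVASER_PCIEFD_CAN_TX_MAX_COUNT).
+  */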
netdev->flags |= IFF_ECHO; + + SET_NETDEV_DEV(netdev, &pcie->pci->dev); + + KVASER_PCIEFD_KCAN_SET_IRQ(can, -1); + KVASER_PCIEFD_KCAN_SET_IEN(can, KVASER_PCIEFD_KCAN_IRQ_ABD + | KVASER_PCIEFD_KCAN_IRQ_TFD); + + pcie->can[i] = can; + kvaser_pciefd_pwm_start(can); + } + + return 0; +} + +static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie) +{ + int i; + + for (i = 0; i < pcie->nr_channels; i++) { + int err = register_candev(pcie->can[i]->can.dev); + + if (err) { + int j; + + /* Unregister all successfully registered devices. */ + for (j = 0; j < i; j++) + unregister_candev(pcie->can[j]->can.dev); + return err; + } + } + + return 0; +} + +static void kvaser_pciefd_write_dma_map(struct kvaser_pciefd *pcie, + dma_addr_t addr, int offset) +{ + u32 word1, word2; + +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + word1 = addr | KVASER_PCIEFD_64BIT_DMA_BIT; + word2 = addr >> 32; +#else + word1 = addr; + word2 = 0; +#endif + iowrite32(word1, pcie->reg_base + offset); + iowrite32(word2, pcie->reg_base + offset + 4); +} + +static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie) +{ + int i; + u32 status; + dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT]; + + KVASER_PCIEFD_SRB_DISABLE_DMA(pcie); + for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) { + unsigned int offset = KVASER_PCIEFD_DMA_MAP_BASE + 8 * i; + + pcie->dma_data[i] = + dmam_alloc_coherent(&pcie->pci->dev, + KVASER_PCIEFD_DMA_SIZE, + &dma_addr[i], + GFP_KERNEL); + + if (!pcie->dma_data[i] || !dma_addr[i]) { + dev_err(&pcie->pci->dev, "Rx dma_alloc(%u) failure\n", + KVASER_PCIEFD_DMA_SIZE); + return -ENOMEM; + } + + kvaser_pciefd_write_dma_map(pcie, dma_addr[i], offset); + } + + /* Reset Rx FIFO, and both DMA buffers */ + KVASER_PCIEFD_SRB_CMD_FOR(pcie); + KVASER_PCIEFD_SRB_CMD_RDB0(pcie); + KVASER_PCIEFD_SRB_CMD_RDB1(pcie); + + status = KVASER_PCIEFD_SRB_GET_STAT(pcie); + if (!(status & KVASER_PCIEFD_SRB_STAT_DI)) { + dev_err(&pcie->pci->dev, "DMA not idle before enabling\n"); + return -EIO; + } + + /* Enable the DMA */ + KVASER_PCIEFD_SRB_ENABLE_DMA(pcie); + + return 0; +} + +static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie) +{ + u32 sysid, status, build; + int ret; + + ret = kvaser_pciefd_read_cfg(pcie); + if (ret) + return ret; + + sysid = KVASER_PCIEFD_SYSID_GET_VERSION(pcie); + + if (pcie->nr_channels != KVASER_PCIEFD_SYSID_GET_NR_CHAN(sysid)) { + dev_err(&pcie->pci->dev, + "Number of channels does not match: %d vs %d\n", + pcie->nr_channels, + KVASER_PCIEFD_SYSID_GET_NR_CHAN(sysid)); + return -ENODEV; + } + + if (pcie->nr_channels > KVASER_PCIEFD_MAX_CAN_CHANNELS) + pcie->nr_channels = KVASER_PCIEFD_MAX_CAN_CHANNELS; + + build = KVASER_PCIEFD_SYSID_GET_BUILD(pcie); + dev_dbg(&pcie->pci->dev, "Version %u.%u.%u\n", + KVASER_PCIEFD_SYSID_GET_MAJOR(sysid), + KVASER_PCIEFD_SYSID_GET_MINOR(sysid), + KVASER_PCIEFD_SYSID_GET_BUILD_REV(build)); + + status = KVASER_PCIEFD_SRB_GET_STAT(pcie); + if (!(status & KVASER_PCIEFD_SRB_STAT_DMA)) { + dev_err(&pcie->pci->dev, + "Hardware without DMA is not supported\n"); + return -ENODEV; + } + + pcie->freq = KVASER_PCIEFD_SYSID_GET_CANFREQ(pcie); + pcie->freq_to_ticks_div = pcie->freq / 1000000; + if (pcie->freq_to_ticks_div == 0) + pcie->freq_to_ticks_div = 1; + + /* Turn off all loopback functionality */ + KVASER_PCIEFD_SET_LB_OFF(pcie); + return ret; +} + +static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie, + struct kvaser_pciefd_packet *p) +{ + struct sk_buff *skb; + struct canfd_frame *cf; + int ch_id = KVASER_PCIEFD_PACK_GET_CHID(p); + struct can_priv *priv; + 
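+ /* Note on the hwtstamp conversion below: p->timestamp is in CAN clock
+  * ticks and freq_to_ticks_div = freq / 1 MHz (see
+  * kvaser_pciefd_setup_board()), so ticks * 1000 / freq_to_ticks_div
+  * equals ticks * 10^9 / freq, i.e. nanoseconds for ns_to_ktime().
+  */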
struct net_device_stats *stats; + struct skb_shared_hwtstamps *shhwtstamps; + + if (ch_id >= pcie->nr_channels) + return -EIO; + + priv = &pcie->can[ch_id]->can; + stats = &priv->dev->stats; + + if (KVASER_PCIEFD_PACK_GET_FDF(p)) { + skb = alloc_canfd_skb(priv->dev, &cf); + if (!skb) { + stats->rx_dropped++; + return -ENOMEM; + } + + if (KVASER_PCIEFD_PACK_GET_BRS(p)) + cf->flags |= CANFD_BRS; + + if (KVASER_PCIEFD_PACK_GET_ESI(p)) + cf->flags |= CANFD_ESI; + } else { + skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf); + if (!skb) { + stats->rx_dropped++; + return -ENOMEM; + } + } + + cf->can_id = KVASER_PCIEFD_PACK_GET_ID(p); + if (KVASER_PCIEFD_DPACK_GET_IDE(p)) + cf->can_id |= CAN_EFF_FLAG; + + cf->len = can_dlc2len(KVASER_PCIEFD_PACK_GET_DLC(p)); + + if (KVASER_PCIEFD_DPACK_GET_RTR(p)) + cf->can_id |= CAN_RTR_FLAG; + else + memcpy(cf->data, p->data, cf->len); + + shhwtstamps = skb_hwtstamps(skb); + + shhwtstamps->hwtstamp = + ns_to_ktime(div_u64(p->timestamp * 1000, + pcie->freq_to_ticks_div)); + + stats->rx_bytes += cf->len; + stats->rx_packets++; + + return netif_rx(skb); +} + +static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can, + struct can_frame *cf, + enum can_state new_state, + enum can_state tx_state, + enum can_state rx_state) +{ + can_change_state(can->can.dev, cf, tx_state, rx_state); + + if (new_state == CAN_STATE_BUS_OFF) { + struct net_device *ndev = can->can.dev; + unsigned long irq_flags; + + spin_lock_irqsave(&can->lock, irq_flags); + netif_stop_queue(can->can.dev); + spin_unlock_irqrestore(&can->lock, irq_flags); + + /* Prevent the CAN controller from automatically recovering from bus off */ + if (!can->can.restart_ms) + kvaser_pciefd_start_controller_flush(can); + + can_bus_off(ndev); + } +} + +static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_packet *p, + struct can_berr_counter *bec, + enum can_state *new_state, + enum can_state *tx_state, + enum can_state *rx_state) +{ + if (KVASER_PCIEFD_EPACK_GET_BOFF(p) || KVASER_PCIEFD_SPACK_GET_IRM(p)) + *new_state = CAN_STATE_BUS_OFF; + else if (bec->txerr >= 255 || bec->rxerr >= 255) + *new_state = CAN_STATE_BUS_OFF; + else if (KVASER_PCIEFD_EPACK_GET_EPLR(p)) + *new_state = CAN_STATE_ERROR_PASSIVE; + else if (bec->txerr >= 128 || bec->rxerr >= 128) + *new_state = CAN_STATE_ERROR_PASSIVE; + else if (KVASER_PCIEFD_EPACK_GET_EWLR(p)) + *new_state = CAN_STATE_ERROR_WARNING; + else if (bec->txerr >= 96 || bec->rxerr >= 96) + *new_state = CAN_STATE_ERROR_WARNING; + else + *new_state = CAN_STATE_ERROR_ACTIVE; + + *tx_state = bec->txerr >= bec->rxerr ? *new_state : 0; + *rx_state = bec->txerr <= bec->rxerr ?
*new_state : 0; +} + +static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can, + struct kvaser_pciefd_packet *p) +{ + struct can_berr_counter bec; + enum can_state old_state, new_state, tx_state, rx_state; + struct net_device *ndev = can->can.dev; + struct sk_buff *skb; + struct can_frame *cf; + struct skb_shared_hwtstamps *shhwtstamps; + struct net_device_stats *stats = &ndev->stats; + + old_state = can->can.state; + + bec.txerr = KVASER_PCIEFD_EPACK_GET_TXE(p); + bec.rxerr = KVASER_PCIEFD_EPACK_GET_RXE(p); + + kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, + &rx_state); + + skb = alloc_can_err_skb(ndev, &cf); + if (!skb) { + stats->rx_dropped++; + return -ENOMEM; + } + + if (new_state != old_state) { + kvaser_pciefd_change_state(can, cf, new_state, tx_state, + rx_state); + + if (old_state == CAN_STATE_BUS_OFF && + new_state == CAN_STATE_ERROR_ACTIVE && + can->can.restart_ms) { + can->can.can_stats.restarts++; + cf->can_id |= CAN_ERR_RESTARTED; + } + } + + can->err_rep_cnt++; + shhwtstamps = skb_hwtstamps(skb); + shhwtstamps->hwtstamp = + ns_to_ktime(div_u64(p->timestamp * 1000, + can->kv_pcie->freq_to_ticks_div)); + + can->can.can_stats.bus_error++; + stats->rx_errors++; + + cf->can_id |= CAN_ERR_BUSERROR; + + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + + can->bec.txerr = bec.txerr; + can->bec.rxerr = bec.rxerr; + + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + + netif_rx(skb); + return 0; +} + +static int kvaser_pciefd_handle_error_packet(struct kvaser_pciefd *pcie, + struct kvaser_pciefd_packet *p) +{ + int ch_id = KVASER_PCIEFD_PACK_GET_CHID(p); + struct kvaser_pciefd_can *can; + + if (ch_id >= pcie->nr_channels) + return -EIO; + + can = pcie->can[ch_id]; + + kvaser_pciefd_rx_error_frame(can, p); + if (can->err_rep_cnt >= KVASER_PCIEFD_MAX_ERR_REP) + /* Do not report more errors until the bec_poll_timer expires */ + kvaser_pciefd_disable_err_gen(can); + /* Start polling the error counters */ + mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ); + return 0; +} + +static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can, + struct kvaser_pciefd_packet *p) +{ + struct can_berr_counter bec; + enum can_state old_state, new_state, tx_state, rx_state; + + old_state = can->can.state; + + bec.txerr = KVASER_PCIEFD_EPACK_GET_TXE(p); + bec.rxerr = KVASER_PCIEFD_EPACK_GET_RXE(p); + + kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, + &rx_state); + + if (new_state != old_state) { + struct net_device *ndev = can->can.dev; + struct sk_buff *skb; + struct can_frame *cf; + struct skb_shared_hwtstamps *shhwtstamps; + + skb = alloc_can_err_skb(ndev, &cf); + if (!skb) { + struct net_device_stats *stats = &ndev->stats; + + stats->rx_dropped++; + return -ENOMEM; + } + + kvaser_pciefd_change_state(can, cf, new_state, tx_state, + rx_state); + + if (old_state == CAN_STATE_BUS_OFF && + new_state == CAN_STATE_ERROR_ACTIVE && + can->can.restart_ms) { + can->can.can_stats.restarts++; + cf->can_id |= CAN_ERR_RESTARTED; + } + + shhwtstamps = skb_hwtstamps(skb); + shhwtstamps->hwtstamp = + ns_to_ktime(div_u64(p->timestamp * 1000, + can->kv_pcie->freq_to_ticks_div)); + + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + + netif_rx(skb); + } + can->bec.txerr = bec.txerr; + can->bec.rxerr = bec.rxerr; + /* Check if we need to poll the error counters */ + if (bec.txerr || bec.rxerr) + mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ); + + return 0; +} + +static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd
*pcie, + struct kvaser_pciefd_packet *p) +{ + int ch_id = KVASER_PCIEFD_PACK_GET_CHID(p); + struct kvaser_pciefd_can *can; + int cmdseq; + u32 stat; + + if (ch_id >= pcie->nr_channels) + return -EIO; + + can = pcie->can[ch_id]; + + stat = KVASER_PCIEFD_KCAN_GET_STAT(can); + cmdseq = KVASER_PCIEFD_KCAN_STAT_GET_CMD_SEQ_NO(stat); + + /* Reset done, start abort and flush */ + if (KVASER_PCIEFD_SPACK_GET_IRM(p) && + KVASER_PCIEFD_SPACK_GET_RMCD(p) && + KVASER_PCIEFD_SPACK_GET_AUTO(p) && + cmdseq == KVASER_PCIEFD_SPACK_GET_CMDSEQ(p) && + stat & KVASER_PCIEFD_KCAN_STAT_IDLE) { + u32 cmd, ien; + + KVASER_PCIEFD_KCAN_SET_IRQ(can, KVASER_PCIEFD_KCAN_IRQ_ABD); + cmd = KVASER_PCIEFD_KCAN_CMD_AT; + cmd |= KVASER_PCIEFD_KCAN_CMD_SEQNO(++can->cmd_seq); + KVASER_PCIEFD_KCAN_SET_CMD(can, cmd); + + ien = KVASER_PCIEFD_KCAN_IRQ_TFD; + KVASER_PCIEFD_KCAN_SET_IEN(can, ien); + } else if (KVASER_PCIEFD_SPACK_GET_IDET(p) && + KVASER_PCIEFD_SPACK_GET_IRM(p) && + cmdseq == KVASER_PCIEFD_SPACK_GET_CMDSEQ(p) && + stat & KVASER_PCIEFD_KCAN_STAT_IDLE) { + /* Reset detected, send end of flush if no packets are in the FIFO */ + u32 ctrl = KVASER_PCIEFD_KCAN_CTRL_EFLUSH; + int count = KVASER_PCIEFD_KCAN_TX_NPACKETS_GET_COUNT(can); + + if (!count) + KVASER_PCIEFD_KCAN_SET_CTRL(can, ctrl); + } else if (!KVASER_PCIEFD_SPACK_GET_AUTO(p) && + cmdseq == KVASER_PCIEFD_SPACK_GET_CMDSEQ(p)) { + /* Response to status request received */ + kvaser_pciefd_handle_status_resp(can, p); + if (can->can.state != CAN_STATE_BUS_OFF && + can->can.state != CAN_STATE_ERROR_ACTIVE) { + mod_timer(&can->bec_poll_timer, + KVASER_PCIEFD_BEC_POLL_FREQ); + } + } else if (KVASER_PCIEFD_SPACK_GET_RMCD(p)) { + /* Reset to bus on detected */ + if (!(KVASER_PCIEFD_KCAN_GET_STAT(can) + & KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK)) { + if (!completion_done(&can->start_comp)) + complete(&can->start_comp); + } + } + + return 0; +} + +static int kvaser_pciefd_handle_eack_packet(struct kvaser_pciefd *pcie, + struct kvaser_pciefd_packet *p) +{ + int ch_id = KVASER_PCIEFD_PACK_GET_CHID(p); + struct kvaser_pciefd_can *can; + + if (ch_id >= pcie->nr_channels) + return -EIO; + + can = pcie->can[ch_id]; + + /* If this is the last flushed packet, send end of flush */ + if (KVASER_PCIEFD_APACK_GET_FLU(p)) { + u32 ctrl = KVASER_PCIEFD_KCAN_CTRL_EFLUSH; + int count = KVASER_PCIEFD_KCAN_TX_NPACKETS_GET_COUNT(can); + + if (count == 0) + KVASER_PCIEFD_KCAN_SET_CTRL(can, ctrl); + } else { + int echo_idx = KVASER_PCIEFD_APACK_GET_SEQ(p); + int dlc = can_get_echo_skb(can->can.dev, echo_idx); + struct net_device_stats *stats = &can->can.dev->stats; + + stats->tx_bytes += dlc; + stats->tx_packets++; + + if (netif_queue_stopped(can->can.dev)) + netif_wake_queue(can->can.dev); + } + + return 0; +} + +static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can, + struct kvaser_pciefd_packet *p) +{ + struct sk_buff *skb; + struct net_device_stats *stats = &can->can.dev->stats; + struct can_frame *cf; + + skb = alloc_can_err_skb(can->can.dev, &cf); + if (!skb) { + stats->rx_dropped++; + netdev_warn(can->can.dev, "No memory left for err_skb\n"); + return; + } + + cf->can_id |= CAN_ERR_BUSERROR; + + if (KVASER_PCIEFD_APACK_GET_ABL(p)) { + cf->can_id |= CAN_ERR_LOSTARB; + can->can.can_stats.arbitration_lost++; + } else { + cf->can_id |= CAN_ERR_ACK; + } + + stats->tx_errors++; + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_rx(skb); +} + +static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie, + struct kvaser_pciefd_packet *p) +{ + int ch_id =
KVASER_PCIEFD_PACK_GET_CHID(p); + struct kvaser_pciefd_can *can; + bool one_shot_fail = false; + + if (ch_id >= pcie->nr_channels) + return -EIO; + + can = pcie->can[ch_id]; + /* Ignore control packet ACK */ + if (KVASER_PCIEFD_APACK_GET_CT(p)) + return 0; + + if (KVASER_PCIEFD_APACK_GET_NACK(p)) { + kvaser_pciefd_handle_nack_packet(can, p); + one_shot_fail = true; + } + + if (KVASER_PCIEFD_APACK_GET_FLU(p)) { + netdev_dbg(can->can.dev, "Packet was flushed\n"); + } else { + int echo_idx = KVASER_PCIEFD_APACK_GET_SEQ(p); + int dlc = can_get_echo_skb(can->can.dev, echo_idx); + int count = KVASER_PCIEFD_KCAN_TX_NPACKETS_GET_COUNT(can); + + if (count < KVASER_PCIEFD_CAN_TX_MAX_COUNT && + netif_queue_stopped(can->can.dev)) + netif_wake_queue(can->can.dev); + + if (!one_shot_fail) { + struct net_device_stats *stats = &can->can.dev->stats; + + stats->tx_bytes += dlc; + stats->tx_packets++; + } + } + + return 0; +} + +static int kvaser_pciefd_handle_eflush_packet(struct kvaser_pciefd *pcie, + struct kvaser_pciefd_packet *p) +{ + int ch_id = KVASER_PCIEFD_PACK_GET_CHID(p); + struct kvaser_pciefd_can *can; + + if (ch_id >= pcie->nr_channels) + return -EIO; + + can = pcie->can[ch_id]; + + if (!completion_done(&can->flush_comp)) + complete(&can->flush_comp); + + return 0; +} + +static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos, + int dma_buf) +{ + __le32 *buffer = pcie->dma_data[dma_buf]; + __le64 timestamp; + struct kvaser_pciefd_packet packet; + struct kvaser_pciefd_packet *p = &packet; + int type; + int pos = *start_pos; + int size; + int ret = 0; + + size = le32_to_cpu(buffer[pos++]); + if (!size) { + *start_pos = 0; + return 0; + } + + p->header[0] = le32_to_cpu(buffer[pos++]); + p->header[1] = le32_to_cpu(buffer[pos++]); + + /* Read 64-bit timestamp */ + memcpy(&timestamp, &buffer[pos], sizeof(__le64)); + pos += 2; + p->timestamp = le64_to_cpu(timestamp); + + type = KVASER_PCIEFD_PACK_GET_TYPE(p); + switch (type) { + case KVASER_PCIEFD_PACK_TYPE_DATA: + if (!KVASER_PCIEFD_DPACK_GET_RTR(p)) { + u8 data_len; + + data_len = can_dlc2len(KVASER_PCIEFD_PACK_GET_DLC(p)); + memcpy(p->data, &buffer[pos], data_len); + pos += DIV_ROUND_UP(data_len, 4); + } + ret = kvaser_pciefd_handle_data_packet(pcie, p); + break; + + case KVASER_PCIEFD_PACK_TYPE_ACK: + ret = kvaser_pciefd_handle_ack_packet(pcie, p); + break; + + case KVASER_PCIEFD_PACK_TYPE_STATUS: + ret = kvaser_pciefd_handle_status_packet(pcie, p); + break; + + case KVASER_PCIEFD_PACK_TYPE_ERROR: + ret = kvaser_pciefd_handle_error_packet(pcie, p); + break; + + case KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK: + ret = kvaser_pciefd_handle_eack_packet(pcie, p); + break; + + case KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK: + ret = kvaser_pciefd_handle_eflush_packet(pcie, p); + break; + + case KVASER_PCIEFD_PACK_TYPE_ACK_DATA: + case KVASER_PCIEFD_PACK_TYPE_BUS_LOAD: + case KVASER_PCIEFD_PACK_TYPE_TXRQ: + dev_info(&pcie->pci->dev, + "Received unexpected packet type 0x%08X\n", type); + break; + + default: + dev_err(&pcie->pci->dev, "Unknown packet type 0x%08X\n", type); + ret = -EIO; + break; + } + + if (ret) + return ret; + + /* Position does not point to the end of the packet, + * corrupted packet size?
+ */ + if ((*start_pos + size) != pos) + return -EIO; + + /* Point to the next packet header, if any */ + *start_pos = pos; + + return ret; +} + +static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf) +{ + int pos = 0; + int res = 0; + + do { + res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf); + } while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE); + + return res; +} + +static int kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie) +{ + u32 irq; + + irq = KVASER_PCIEFD_SRB_GET_IRQ(pcie); + if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) { + kvaser_pciefd_read_buffer(pcie, 0); + KVASER_PCIEFD_SRB_CMD_RDB0(pcie); + } + + if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) { + kvaser_pciefd_read_buffer(pcie, 1); + KVASER_PCIEFD_SRB_CMD_RDB1(pcie); + } + + if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 || + irq & KVASER_PCIEFD_SRB_IRQ_DOF1 || + irq & KVASER_PCIEFD_SRB_IRQ_DUF0 || + irq & KVASER_PCIEFD_SRB_IRQ_DUF1) + dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq); + + KVASER_PCIEFD_SRB_SET_IRQ(pcie, irq); + return 0; +} + +static int kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can) +{ + u32 irq = KVASER_PCIEFD_KCAN_GET_IRQ(can); + + if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF) + netdev_err(can->can.dev, "Tx FIFO overflow\n"); + + if (irq & KVASER_PCIEFD_KCAN_IRQ_TFD) { + int count = KVASER_PCIEFD_KCAN_TX_NPACKETS_GET_COUNT(can); + + if (count == 0) + KVASER_PCIEFD_KCAN_SET_CTRL + (can, KVASER_PCIEFD_KCAN_CTRL_EFLUSH); + } + + if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP) + netdev_err(can->can.dev, + "Failed to change bittiming while not in reset mode\n"); + + if (irq & KVASER_PCIEFD_KCAN_IRQ_FDIC) + netdev_err(can->can.dev, "CAN FD frame in CAN mode\n"); + + if (irq & KVASER_PCIEFD_KCAN_IRQ_ROF) + netdev_err(can->can.dev, "Rx FIFO overflow\n"); + + KVASER_PCIEFD_KCAN_SET_IRQ(can, irq); + return 0; +} + +static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev) +{ + struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev; + u32 board_irq; + int i; + + board_irq = KVASER_PCIEFD_GET_IRQ(pcie); + + if (!(board_irq & KVASER_PCIEFD_IRQ_ALL_MSK)) + return IRQ_NONE; + + if (board_irq & KVASER_PCIEFD_IRQ_SRB) + kvaser_pciefd_receive_irq(pcie); + + for (i = 0; i < pcie->nr_channels; i++) { + if (!pcie->can[i]) { + dev_err(&pcie->pci->dev, + "IRQ mask points to unallocated controller\n"); + break; + } + + /* Check that mask matches channel (i) IRQ mask */ + if (board_irq & (1 << i)) + kvaser_pciefd_transmit_irq(pcie->can[i]); + } + + KVASER_PCIEFD_SET_IRQ(pcie, board_irq); + return IRQ_HANDLED; +} + +static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie) +{ + int i; + struct kvaser_pciefd_can *can; + + for (i = 0; i < pcie->nr_channels; i++) { + can = pcie->can[i]; + if (can) { + KVASER_PCIEFD_KCAN_SET_IEN(can, 0); + kvaser_pciefd_pwm_stop(can); + free_candev(can->can.dev); + } + } +} + +static int kvaser_pciefd_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + int err; + struct kvaser_pciefd *pcie; + + pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + pci_set_drvdata(pdev, pcie); + pcie->pci = pdev; + + err = pci_enable_device(pdev); + if (err) + return err; + + err = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME); + if (err) + goto err_disable_pci; + + pcie->reg_base = pci_iomap(pdev, 0, 0); + if (!pcie->reg_base) { + err = -ENOMEM; + goto err_release_regions; + } + + err = kvaser_pciefd_setup_board(pcie); + if (err) + goto err_pci_iounmap; + + err = kvaser_pciefd_setup_dma(pcie); + if (err) + goto
err_pci_iounmap; + + pci_set_master(pdev); + + err = kvaser_pciefd_setup_can_ctrls(pcie); + if (err) + goto err_teardown_can_ctrls; + + KVASER_PCIEFD_SRB_SET_IRQ(pcie, KVASER_PCIEFD_SRB_IRQ_DPD0 + | KVASER_PCIEFD_SRB_IRQ_DPD1); + + KVASER_PCIEFD_SRB_SET_IEN(pcie, KVASER_PCIEFD_SRB_IRQ_DPD0 + | KVASER_PCIEFD_SRB_IRQ_DPD1 + | KVASER_PCIEFD_SRB_IRQ_DOF0 + | KVASER_PCIEFD_SRB_IRQ_DOF1 + | KVASER_PCIEFD_SRB_IRQ_DUF0 + | KVASER_PCIEFD_SRB_IRQ_DUF1); + + /* Reset IRQ handling, expected to be off before */ + KVASER_PCIEFD_SET_IRQ(pcie, KVASER_PCIEFD_IRQ_ALL_MSK); + KVASER_PCIEFD_SET_IEN(pcie, KVASER_PCIEFD_IRQ_ALL_MSK); + + /* Ready the DMA buffers */ + KVASER_PCIEFD_SRB_CMD_RDB0(pcie); + KVASER_PCIEFD_SRB_CMD_RDB1(pcie); + + err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler, + IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie); + if (err) + goto err_teardown_can_ctrls; + + err = kvaser_pciefd_reg_candev(pcie); + if (err) + goto err_free_irq; + + return 0; + +err_free_irq: + free_irq(pcie->pci->irq, pcie); + +err_teardown_can_ctrls: + kvaser_pciefd_teardown_can_ctrls(pcie); + KVASER_PCIEFD_SRB_DISABLE_DMA(pcie); + pci_clear_master(pdev); + +err_pci_iounmap: + pci_iounmap(pdev, pcie->reg_base); + +err_release_regions: + pci_release_regions(pdev); + +err_disable_pci: + pci_disable_device(pdev); + + return err; +} + +static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie) +{ + struct kvaser_pciefd_can *can; + int i; + + for (i = 0; i < pcie->nr_channels; i++) { + can = pcie->can[i]; + if (can) { + KVASER_PCIEFD_KCAN_SET_IEN(can, 0); + unregister_candev(can->can.dev); + del_timer(&can->bec_poll_timer); + kvaser_pciefd_pwm_stop(can); + free_candev(can->can.dev); + } + } +} + +static void kvaser_pciefd_remove(struct pci_dev *pdev) +{ + struct kvaser_pciefd *pcie = pci_get_drvdata(pdev); + + kvaser_pciefd_remove_all_ctrls(pcie); + + /* Turn off IRQ generation */ + KVASER_PCIEFD_SRB_DISABLE_DMA(pcie); + KVASER_PCIEFD_SET_IRQ(pcie, KVASER_PCIEFD_IRQ_ALL_MSK); + KVASER_PCIEFD_SET_IEN(pcie, 0); + + free_irq(pcie->pci->irq, pcie); + + pci_clear_master(pdev); + pci_iounmap(pdev, pcie->reg_base); + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static struct pci_driver kvaser_pciefd = { + .name = KVASER_PCIEFD_DRV_NAME, + .id_table = kvaser_pciefd_id_table, + .probe = kvaser_pciefd_probe, + .remove = kvaser_pciefd_remove, +}; + +module_pci_driver(kvaser_pciefd); -- 2.19.1