Diffstat (limited to 'drivers/net/can')
170 files changed, 43809 insertions, 11796 deletions
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index e0f0ad7a550a..e15e320db476 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -1,8 +1,30 @@ -menu "CAN Device Drivers" +# SPDX-License-Identifier: GPL-2.0-only + +menuconfig CAN_DEV + tristate "CAN Device Drivers" + default y + depends on CAN + help + Controller Area Network (CAN) is serial communications protocol up to + 1Mbit/s for its original release (now known as Classical CAN) and up + to 8Mbit/s for the more recent CAN with Flexible Data-Rate + (CAN-FD). The CAN bus was originally mainly for automotive, but is now + widely used in marine (NMEA2000), industrial, and medical + applications. More information on the CAN network protocol family + PF_CAN is contained in <Documentation/networking/can.rst>. + + This section contains all the CAN(-FD) device drivers including the + virtual ones. If you own such devices or plan to use the virtual CAN + interfaces to develop applications, say Y here. + + To compile as a module, choose M here: the module will be called + can-dev. + +if CAN_DEV config CAN_VCAN tristate "Virtual Local CAN Interface (vcan)" - ---help--- + help Similar to the network loopback devices, vcan offers a virtual local CAN interface. @@ -11,7 +33,7 @@ config CAN_VCAN config CAN_VXCAN tristate "Virtual CAN Tunnel (vxcan)" - ---help--- + help Similar to the virtual ethernet driver veth, vxcan implements a local CAN traffic tunnel between two virtual CAN network devices. When creating a vxcan, two vxcan devices are created as pair. @@ -27,40 +49,27 @@ config CAN_VXCAN This driver can also be built as a module. If so, the module will be called vxcan. -config CAN_SLCAN - tristate "Serial / USB serial CAN Adaptors (slcan)" - depends on TTY - ---help--- - CAN driver for several 'low cost' CAN interfaces that are attached - via serial lines or via USB-to-serial adapters using the LAWICEL - ASCII protocol. The driver implements the tty linediscipline N_SLCAN. - - As only the sending and receiving of CAN frames is implemented, this - driver should work with the (serial/USB) CAN hardware from: - www.canusb.com / www.can232.com / www.mictronics.de / www.canhack.de +config CAN_NETLINK + bool "CAN device drivers with Netlink support" + default y + help + Enables the common framework for CAN device drivers. This is the + standard library and provides features for the Netlink interface such + as bittiming validation, support of CAN error states, device restart + and others. - Userspace tools to attach the SLCAN line discipline (slcan_attach, - slcand) can be found in the can-utils at the SocketCAN SVN, see - http://developer.berlios.de/projects/socketcan for details. + The additional features selected by this option will be added to the + can-dev module. - The slcan driver supports up to 10 CAN netdevices by default which - can be changed by the 'maxdev=xx' module option. This driver can - also be built as a module. If so, the module will be called slcan. + This is required by all platform and hardware CAN drivers. If you + plan to use such devices or if unsure, say Y. -config CAN_DEV - tristate "Platform CAN drivers with Netlink support" - default y - ---help--- - Enables the common framework for platform CAN drivers with Netlink - support. This is the standard library for CAN drivers. - If unsure, say Y. 
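The CAN_DEV help text above points applications at the PF_CAN protocol family and suggests the virtual CAN interfaces for development. For orientation, a minimal userspace CAN_RAW sender against an already-configured interface is sketched below; the interface name "vcan0" and the use of the newer 'len' member of struct can_frame are assumptions, not part of this patch.

/* Minimal CAN_RAW sender: open a PF_CAN socket, bind it to one CAN
 * interface and write a single Classical CAN frame.  Error handling is
 * reduced to bare exits; "vcan0" must already exist and be up.
 */
#include <linux/can.h>
#include <linux/can/raw.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_can addr = { .can_family = AF_CAN };
	struct can_frame frame = {
		.can_id = 0x123,
		.len = 2,			/* formerly can_dlc */
		.data = { 0xde, 0xad },
	};
	struct ifreq ifr;
	int fd;

	fd = socket(PF_CAN, SOCK_RAW, CAN_RAW);
	if (fd < 0)
		return 1;

	/* resolve the interface index of the CAN netdevice */
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "vcan0", sizeof(ifr.ifr_name) - 1);
	if (ioctl(fd, SIOCGIFINDEX, &ifr) < 0)
		return 1;

	addr.can_ifindex = ifr.ifr_ifindex;
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	/* a CAN_RAW write() takes exactly one struct can_frame */
	if (write(fd, &frame, sizeof(frame)) != (ssize_t)sizeof(frame))
		return 1;

	close(fd);
	return 0;
}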
- -if CAN_DEV +if CAN_NETLINK config CAN_CALC_BITTIMING bool "CAN bit-timing calculation" default y - ---help--- + help If enabled, CAN bit-timing parameters will be calculated for the bit-rate specified via Netlink argument "bitrate" when the device get started. This works fine for the most common CAN controllers @@ -68,42 +77,82 @@ config CAN_CALC_BITTIMING source clock frequencies. Disabling saves some space, but then the bit-timing parameters must be specified directly using the Netlink arguments "tq", "prop_seg", "phase_seg1", "phase_seg2" and "sjw". + + The additional features selected by this option will be added to the + can-dev module. + If unsure, say Y. -config CAN_LEDS - bool "Enable LED triggers for Netlink based drivers" - depends on LEDS_CLASS - # The netdev trigger (LEDS_TRIGGER_NETDEV) should be able to do - # everything that this driver is doing. This is marked as broken - # because it uses stuff that is intended to be changed or removed. - # Please consider switching to the netdev trigger and confirm it - # fulfills your needs instead of fixing this driver. - depends on BROKEN - select LEDS_TRIGGERS - ---help--- - This option adds two LED triggers for packet receive and transmit - events on each supported CAN device. - - Say Y here if you are working on a system with led-class supported - LEDs and you want to use them as canbus activity indicators. +config CAN_RX_OFFLOAD + bool config CAN_AT91 tristate "Atmel AT91 onchip CAN controller" depends on (ARCH_AT91 || COMPILE_TEST) && HAS_IOMEM - ---help--- + select CAN_RX_OFFLOAD + help This is a driver for the SoC CAN controller in Atmel's AT91SAM9263 and AT91SAM9X5 processors. +config CAN_BXCAN + tristate "STM32 Basic Extended CAN (bxCAN) devices" + depends on ARCH_STM32 || COMPILE_TEST + depends on HAS_IOMEM + select CAN_RX_OFFLOAD + help + Say yes here to build support for the STMicroelectronics STM32 basic + extended CAN Controller (bxCAN). + + This driver can also be built as a module. If so, the module + will be called bxcan. + +config CAN_CAN327 + tristate "Serial / USB serial ELM327 based OBD-II Interfaces (can327)" + depends on TTY + select CAN_RX_OFFLOAD + help + CAN driver for several 'low cost' OBD-II interfaces based on the + ELM327 OBD-II interpreter chip. + + This is a best effort driver - the ELM327 interface was never + designed to be used as a standalone CAN interface. However, it can + still be used for simple request-response protocols (such as OBD II), + and to monitor broadcast messages on a bus (such as in a vehicle). + + Please refer to the documentation for information on how to use it: + Documentation/networking/device_drivers/can/can327.rst + + If this driver is built as a module, it will be called can327. + +config CAN_DUMMY + tristate "Dummy CAN" + help + A dummy CAN module supporting Classical CAN, CAN FD and CAN XL. It + exposes bittiming values which can be configured through the netlink + interface. + + The module will simply echo any frame sent to it. If debug messages + are activated, it prints all the CAN bittiming information in the + kernel log. Aside from that it does nothing. + + This is convenient for testing the CAN netlink interface. Most of the + users will never need this. If unsure, say NO. + + To compile this driver as a module, choose M here: the module will be + called dummy-can. 
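The CAN_CALC_BITTIMING entry above explains that the framework derives tq, prop_seg, phase_seg1, phase_seg2 and sjw from the requested bitrate. The driver's contribution to that calculation is a struct can_bittiming_const describing the controller's limits; a minimal sketch follows (the "foo" controller and its limit values are illustrative only, the real at91 and bxcan tables appear later in this diff).

#include <linux/can/dev.h>

/* Hardware limits handed to the can-dev bit-timing calculation.  The
 * framework picks brp, prop_seg, phase_seg1, phase_seg2 and sjw within
 * these bounds for the bitrate requested over netlink.
 */
static const struct can_bittiming_const foo_bittiming_const = {
	.name = "foo-can",	/* placeholder controller name */
	.tseg1_min = 1,		/* prop_seg + phase_seg1, in time quanta */
	.tseg1_max = 16,
	.tseg2_min = 1,		/* phase_seg2, in time quanta */
	.tseg2_max = 8,
	.sjw_max = 4,		/* (re)synchronization jump width */
	.brp_min = 1,		/* bit-rate prescaler */
	.brp_max = 1024,
	.brp_inc = 1,
};

/* wired up in probe: priv->can.bittiming_const = &foo_bittiming_const; */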
+ config CAN_FLEXCAN tristate "Support for Freescale FLEXCAN based chips" - depends on OF && HAS_IOMEM - ---help--- + depends on OF || COLDFIRE || COMPILE_TEST + depends on HAS_IOMEM + select CAN_RX_OFFLOAD + help Say Y here if you want to support for Freescale FlexCAN. config CAN_GRCAN tristate "Aeroflex Gaisler GRCAN and GRHCAN CAN devices" - depends on OF && HAS_DMA - ---help--- + depends on OF && HAS_DMA && HAS_IOMEM + help Say Y here if you want to use Aeroflex Gaisler GRCAN or GRHCAN. Note that the driver supports little endian, even though little endian syntheses of the cores would need some modifications on @@ -112,27 +161,68 @@ config CAN_GRCAN config CAN_JANZ_ICAN3 tristate "Janz VMOD-ICAN3 Intelligent CAN controller" depends on MFD_JANZ_CMODIO - ---help--- + help Driver for Janz VMOD-ICAN3 Intelligent CAN controller module, which connects to a MODULbus carrier board. This driver can also be built as a module. If so, the module will be called janz-ican3.ko. +config CAN_KVASER_PCIEFD + depends on PCI + tristate "Kvaser PCIe FD cards" + select NET_DEVLINK + help + This is a driver for the Kvaser PCI Express CAN FD family. + + Supported devices: + Kvaser PCIEcan 4xHS + Kvaser PCIEcan 2xHS v2 + Kvaser PCIEcan HS v2 + Kvaser PCIEcan 1xCAN v3 + Kvaser PCIEcan 2xCAN v3 + Kvaser PCIEcan 4xCAN v2 + Kvaser Mini PCI Express HS v2 + Kvaser Mini PCI Express 2xHS v2 + Kvaser Mini PCI Express 1xCAN v3 + Kvaser Mini PCI Express 2xCAN v3 + Kvaser M.2 PCIe 4xCAN + Kvaser PCIe 8xCAN + +config CAN_SLCAN + tristate "Serial / USB serial CAN Adaptors (slcan)" + depends on TTY + help + CAN driver for several 'low cost' CAN interfaces that are attached + via serial lines or via USB-to-serial adapters using the LAWICEL + ASCII protocol. The driver implements the tty linediscipline N_SLCAN. + + As only the sending and receiving of CAN frames is implemented, this + driver should work with the (serial/USB) CAN hardware from: + www.canusb.com / www.can232.com / www.mictronics.de / www.canhack.de + + Userspace tools to attach the SLCAN line discipline (slcan_attach, + slcand) can be found in the can-utils at the linux-can project, see + https://github.com/linux-can/can-utils for details. + + This driver can also be built as a module. If so, the module + will be called slcan. + config CAN_SUN4I tristate "Allwinner A10 CAN controller" - depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST - ---help--- + depends on MACH_SUN4I || MACH_SUN7I || (RISCV && ARCH_SUNXI) || COMPILE_TEST + help Say Y here if you want to use CAN controller found on Allwinner - A10/A20 SoCs. + A10/A20/D1 SoCs. To compile this driver as a module, choose M here: the module will be called sun4i_can. config CAN_TI_HECC - depends on ARM + depends on ARM || COMPILE_TEST tristate "TI High End CAN Controller" - ---help--- + select CAN_RX_OFFLOAD + help Driver for TI HECC (High End CAN Controller) module found on many TI devices. The device specifications are available from www.ti.com @@ -140,38 +230,33 @@ config CAN_XILINXCAN tristate "Xilinx CAN" depends on ARCH_ZYNQ || ARM64 || MICROBLAZE || COMPILE_TEST depends on COMMON_CLK && HAS_IOMEM - ---help--- + help Xilinx CAN driver. This driver supports both soft AXI CAN IP and Zynq CANPS IP. -config PCH_CAN - tristate "Intel EG20T PCH CAN controller" - depends on PCI && (X86_32 || COMPILE_TEST) - ---help--- - This driver is for PCH CAN of Topcliff (Intel EG20T PCH) which - is an IOH for x86 embedded processor (Intel Atom E6xx series). - This driver can access CAN bus. 
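Several entries above now select CAN_RX_OFFLOAD. That framework drains hardware RX mailboxes through a driver-supplied callback instead of a hand-rolled NAPI loop; the hookup looks roughly as sketched below, following the shape of the at91 conversion further down in this diff (the "foo" names and the mailbox range are placeholders).

#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>
#include <linux/netdevice.h>

struct foo_priv {
	struct can_priv can;		/* must be the first member */
	struct can_rx_offload offload;
};

/* Called by the rx-offload core for every mailbox in [mb_first, mb_last]:
 * return a filled skb, NULL if the mailbox is empty, or an ERR_PTR()
 * if the frame had to be dropped.
 */
static struct sk_buff *foo_mailbox_read(struct can_rx_offload *offload,
					unsigned int mb, u32 *timestamp,
					bool drop)
{
	/* read the hardware mailbox 'mb' here ... */
	return NULL;
}

static int foo_setup_offload(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	priv->offload.mailbox_read = foo_mailbox_read;
	priv->offload.mb_first = 0;	/* first RX mailbox, hardware specific */
	priv->offload.mb_last = 5;	/* last RX mailbox, hardware specific */

	return can_rx_offload_add_timestamp(dev, &priv->offload);
}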
- source "drivers/net/can/c_can/Kconfig" source "drivers/net/can/cc770/Kconfig" +source "drivers/net/can/ctucanfd/Kconfig" +source "drivers/net/can/esd/Kconfig" source "drivers/net/can/ifi_canfd/Kconfig" source "drivers/net/can/m_can/Kconfig" source "drivers/net/can/mscan/Kconfig" source "drivers/net/can/peak_canfd/Kconfig" source "drivers/net/can/rcar/Kconfig" +source "drivers/net/can/rockchip/Kconfig" source "drivers/net/can/sja1000/Kconfig" source "drivers/net/can/softing/Kconfig" source "drivers/net/can/spi/Kconfig" source "drivers/net/can/usb/Kconfig" -endif +endif #CAN_NETLINK config CAN_DEBUG_DEVICES bool "CAN devices debugging messages" - ---help--- + help Say Y here if you want the CAN device drivers to produce a bunch of debug messages to the system log. Select this if you are having a problem with CAN support and want to see more of what is going on. -endmenu +endif #CAN_DEV diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile index 44922bf29b6a..d7bc10a6b8ea 100644 --- a/drivers/net/can/Makefile +++ b/drivers/net/can/Makefile @@ -5,26 +5,28 @@ obj-$(CONFIG_CAN_VCAN) += vcan.o obj-$(CONFIG_CAN_VXCAN) += vxcan.o -obj-$(CONFIG_CAN_SLCAN) += slcan.o - -obj-$(CONFIG_CAN_DEV) += can-dev.o -can-dev-y += dev.o -can-dev-y += rx-offload.o - -can-dev-$(CONFIG_CAN_LEDS) += led.o +obj-$(CONFIG_CAN_SLCAN) += slcan/ +obj-y += dev/ +obj-y += esd/ obj-y += rcar/ +obj-y += rockchip/ obj-y += spi/ obj-y += usb/ obj-y += softing/ obj-$(CONFIG_CAN_AT91) += at91_can.o +obj-$(CONFIG_CAN_BXCAN) += bxcan.o +obj-$(CONFIG_CAN_CAN327) += can327.o obj-$(CONFIG_CAN_CC770) += cc770/ obj-$(CONFIG_CAN_C_CAN) += c_can/ -obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o +obj-$(CONFIG_CAN_CTUCANFD) += ctucanfd/ +obj-$(CONFIG_CAN_DUMMY) += dummy_can.o +obj-$(CONFIG_CAN_FLEXCAN) += flexcan/ obj-$(CONFIG_CAN_GRCAN) += grcan.o obj-$(CONFIG_CAN_IFI_CANFD) += ifi_canfd/ obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o +obj-$(CONFIG_CAN_KVASER_PCIEFD) += kvaser_pciefd/ obj-$(CONFIG_CAN_MSCAN) += mscan/ obj-$(CONFIG_CAN_M_CAN) += m_can/ obj-$(CONFIG_CAN_PEAK_PCIEFD) += peak_canfd/ @@ -32,6 +34,5 @@ obj-$(CONFIG_CAN_SJA1000) += sja1000/ obj-$(CONFIG_CAN_SUN4I) += sun4i_can.o obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o obj-$(CONFIG_CAN_XILINXCAN) += xilinx_can.o -obj-$(CONFIG_PCH_CAN) += pch_can.o subdir-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) += -DDEBUG diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index d98c69045b17..c2a3a4eef5b2 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -1,23 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * at91_can.c - CAN network driver for AT91 SoC CAN controller * * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de> - * (C) 2008, 2009, 2010, 2011 by Marc Kleine-Budde <kernel@pengutronix.de> - * - * This software may be distributed under the terms of the GNU General - * Public License ("GPL") version 2 as distributed in the 'COPYING' - * file from the main directory of the linux kernel source. 
- * + * (C) 2008, 2009, 2010, 2011, 2023 by Marc Kleine-Budde <kernel@pengutronix.de> */ +#include <linux/bitfield.h> #include <linux/clk.h> #include <linux/errno.h> +#include <linux/ethtool.h> #include <linux/if_arp.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/of.h> +#include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/rtnetlink.h> #include <linux/skbuff.h> @@ -27,91 +26,115 @@ #include <linux/can/dev.h> #include <linux/can/error.h> -#include <linux/can/led.h> +#include <linux/can/rx-offload.h> -#define AT91_MB_MASK(i) ((1 << (i)) - 1) +#define AT91_MB_MASK(i) ((1 << (i)) - 1) /* Common registers */ enum at91_reg { - AT91_MR = 0x000, - AT91_IER = 0x004, - AT91_IDR = 0x008, - AT91_IMR = 0x00C, - AT91_SR = 0x010, - AT91_BR = 0x014, - AT91_TIM = 0x018, - AT91_TIMESTP = 0x01C, - AT91_ECR = 0x020, - AT91_TCR = 0x024, - AT91_ACR = 0x028, + AT91_MR = 0x000, + AT91_IER = 0x004, + AT91_IDR = 0x008, + AT91_IMR = 0x00C, + AT91_SR = 0x010, + AT91_BR = 0x014, + AT91_TIM = 0x018, + AT91_TIMESTP = 0x01C, + AT91_ECR = 0x020, + AT91_TCR = 0x024, + AT91_ACR = 0x028, }; /* Mailbox registers (0 <= i <= 15) */ -#define AT91_MMR(i) (enum at91_reg)(0x200 + ((i) * 0x20)) -#define AT91_MAM(i) (enum at91_reg)(0x204 + ((i) * 0x20)) -#define AT91_MID(i) (enum at91_reg)(0x208 + ((i) * 0x20)) -#define AT91_MFID(i) (enum at91_reg)(0x20C + ((i) * 0x20)) -#define AT91_MSR(i) (enum at91_reg)(0x210 + ((i) * 0x20)) -#define AT91_MDL(i) (enum at91_reg)(0x214 + ((i) * 0x20)) -#define AT91_MDH(i) (enum at91_reg)(0x218 + ((i) * 0x20)) -#define AT91_MCR(i) (enum at91_reg)(0x21C + ((i) * 0x20)) +#define AT91_MMR(i) ((enum at91_reg)(0x200 + ((i) * 0x20))) +#define AT91_MAM(i) ((enum at91_reg)(0x204 + ((i) * 0x20))) +#define AT91_MID(i) ((enum at91_reg)(0x208 + ((i) * 0x20))) +#define AT91_MFID(i) ((enum at91_reg)(0x20C + ((i) * 0x20))) +#define AT91_MSR(i) ((enum at91_reg)(0x210 + ((i) * 0x20))) +#define AT91_MDL(i) ((enum at91_reg)(0x214 + ((i) * 0x20))) +#define AT91_MDH(i) ((enum at91_reg)(0x218 + ((i) * 0x20))) +#define AT91_MCR(i) ((enum at91_reg)(0x21C + ((i) * 0x20))) /* Register bits */ -#define AT91_MR_CANEN BIT(0) -#define AT91_MR_LPM BIT(1) -#define AT91_MR_ABM BIT(2) -#define AT91_MR_OVL BIT(3) -#define AT91_MR_TEOF BIT(4) -#define AT91_MR_TTM BIT(5) -#define AT91_MR_TIMFRZ BIT(6) -#define AT91_MR_DRPT BIT(7) - -#define AT91_SR_RBSY BIT(29) - -#define AT91_MMR_PRIO_SHIFT (16) - -#define AT91_MID_MIDE BIT(29) - -#define AT91_MSR_MRTR BIT(20) -#define AT91_MSR_MABT BIT(22) -#define AT91_MSR_MRDY BIT(23) -#define AT91_MSR_MMI BIT(24) - -#define AT91_MCR_MRTR BIT(20) -#define AT91_MCR_MTCR BIT(23) +#define AT91_MR_CANEN BIT(0) +#define AT91_MR_LPM BIT(1) +#define AT91_MR_ABM BIT(2) +#define AT91_MR_OVL BIT(3) +#define AT91_MR_TEOF BIT(4) +#define AT91_MR_TTM BIT(5) +#define AT91_MR_TIMFRZ BIT(6) +#define AT91_MR_DRPT BIT(7) + +#define AT91_SR_RBSY BIT(29) +#define AT91_SR_TBSY BIT(30) +#define AT91_SR_OVLSY BIT(31) + +#define AT91_BR_PHASE2_MASK GENMASK(2, 0) +#define AT91_BR_PHASE1_MASK GENMASK(6, 4) +#define AT91_BR_PROPAG_MASK GENMASK(10, 8) +#define AT91_BR_SJW_MASK GENMASK(13, 12) +#define AT91_BR_BRP_MASK GENMASK(22, 16) +#define AT91_BR_SMP BIT(24) + +#define AT91_TIM_TIMER_MASK GENMASK(15, 0) + +#define AT91_ECR_REC_MASK GENMASK(8, 0) +#define AT91_ECR_TEC_MASK GENMASK(23, 16) + +#define AT91_TCR_TIMRST BIT(31) + +#define AT91_MMR_MTIMEMARK_MASK GENMASK(15, 0) +#define AT91_MMR_PRIOR_MASK GENMASK(19, 16) 
+#define AT91_MMR_MOT_MASK GENMASK(26, 24) + +#define AT91_MID_MIDVB_MASK GENMASK(17, 0) +#define AT91_MID_MIDVA_MASK GENMASK(28, 18) +#define AT91_MID_MIDE BIT(29) + +#define AT91_MSR_MTIMESTAMP_MASK GENMASK(15, 0) +#define AT91_MSR_MDLC_MASK GENMASK(19, 16) +#define AT91_MSR_MRTR BIT(20) +#define AT91_MSR_MABT BIT(22) +#define AT91_MSR_MRDY BIT(23) +#define AT91_MSR_MMI BIT(24) + +#define AT91_MCR_MDLC_MASK GENMASK(19, 16) +#define AT91_MCR_MRTR BIT(20) +#define AT91_MCR_MACR BIT(22) +#define AT91_MCR_MTCR BIT(23) /* Mailbox Modes */ enum at91_mb_mode { - AT91_MB_MODE_DISABLED = 0, - AT91_MB_MODE_RX = 1, - AT91_MB_MODE_RX_OVRWR = 2, - AT91_MB_MODE_TX = 3, - AT91_MB_MODE_CONSUMER = 4, - AT91_MB_MODE_PRODUCER = 5, + AT91_MB_MODE_DISABLED = 0, + AT91_MB_MODE_RX = 1, + AT91_MB_MODE_RX_OVRWR = 2, + AT91_MB_MODE_TX = 3, + AT91_MB_MODE_CONSUMER = 4, + AT91_MB_MODE_PRODUCER = 5, }; /* Interrupt mask bits */ -#define AT91_IRQ_ERRA (1 << 16) -#define AT91_IRQ_WARN (1 << 17) -#define AT91_IRQ_ERRP (1 << 18) -#define AT91_IRQ_BOFF (1 << 19) -#define AT91_IRQ_SLEEP (1 << 20) -#define AT91_IRQ_WAKEUP (1 << 21) -#define AT91_IRQ_TOVF (1 << 22) -#define AT91_IRQ_TSTP (1 << 23) -#define AT91_IRQ_CERR (1 << 24) -#define AT91_IRQ_SERR (1 << 25) -#define AT91_IRQ_AERR (1 << 26) -#define AT91_IRQ_FERR (1 << 27) -#define AT91_IRQ_BERR (1 << 28) - -#define AT91_IRQ_ERR_ALL (0x1fff0000) -#define AT91_IRQ_ERR_FRAME (AT91_IRQ_CERR | AT91_IRQ_SERR | \ - AT91_IRQ_AERR | AT91_IRQ_FERR | AT91_IRQ_BERR) -#define AT91_IRQ_ERR_LINE (AT91_IRQ_ERRA | AT91_IRQ_WARN | \ - AT91_IRQ_ERRP | AT91_IRQ_BOFF) - -#define AT91_IRQ_ALL (0x1fffffff) +#define AT91_IRQ_ERRA BIT(16) +#define AT91_IRQ_WARN BIT(17) +#define AT91_IRQ_ERRP BIT(18) +#define AT91_IRQ_BOFF BIT(19) +#define AT91_IRQ_SLEEP BIT(20) +#define AT91_IRQ_WAKEUP BIT(21) +#define AT91_IRQ_TOVF BIT(22) +#define AT91_IRQ_TSTP BIT(23) +#define AT91_IRQ_CERR BIT(24) +#define AT91_IRQ_SERR BIT(25) +#define AT91_IRQ_AERR BIT(26) +#define AT91_IRQ_FERR BIT(27) +#define AT91_IRQ_BERR BIT(28) + +#define AT91_IRQ_ERR_ALL (0x1fff0000) +#define AT91_IRQ_ERR_FRAME (AT91_IRQ_CERR | AT91_IRQ_SERR | \ + AT91_IRQ_AERR | AT91_IRQ_FERR | AT91_IRQ_BERR) +#define AT91_IRQ_ERR_LINE (AT91_IRQ_ERRA | AT91_IRQ_WARN | \ + AT91_IRQ_ERRP | AT91_IRQ_BOFF) + +#define AT91_IRQ_ALL (0x1fffffff) enum at91_devtype { AT91_DEVTYPE_SAM9263, @@ -120,7 +143,6 @@ enum at91_devtype { struct at91_devtype_data { unsigned int rx_first; - unsigned int rx_split; unsigned int rx_last; unsigned int tx_shift; enum at91_devtype type; @@ -128,14 +150,13 @@ struct at91_devtype_data { struct at91_priv { struct can_priv can; /* must be the first member! 
*/ - struct napi_struct napi; + struct can_rx_offload offload; + struct phy *transceiver; void __iomem *reg_base; - u32 reg_sr; - unsigned int tx_next; - unsigned int tx_echo; - unsigned int rx_next; + unsigned int tx_head; + unsigned int tx_tail; struct at91_devtype_data devtype_data; struct clk *clk; @@ -144,9 +165,13 @@ struct at91_priv { canid_t mb0_id; }; +static inline struct at91_priv *rx_offload_to_priv(struct can_rx_offload *offload) +{ + return container_of(offload, struct at91_priv, offload); +} + static const struct at91_devtype_data at91_at91sam9263_data = { .rx_first = 1, - .rx_split = 8, .rx_last = 11, .tx_shift = 2, .type = AT91_DEVTYPE_SAM9263, @@ -154,7 +179,6 @@ static const struct at91_devtype_data at91_at91sam9263_data = { static const struct at91_devtype_data at91_at91sam9x5_data = { .rx_first = 0, - .rx_split = 4, .rx_last = 5, .tx_shift = 1, .type = AT91_DEVTYPE_SAM9X5, @@ -167,13 +191,13 @@ static const struct can_bittiming_const at91_bittiming_const = { .tseg2_min = 2, .tseg2_max = 8, .sjw_max = 4, - .brp_min = 2, + .brp_min = 2, .brp_max = 128, .brp_inc = 1, }; #define AT91_IS(_model) \ -static inline int at91_is_sam##_model(const struct at91_priv *priv) \ +static inline int __maybe_unused at91_is_sam##_model(const struct at91_priv *priv) \ { \ return priv->devtype_data.type == AT91_DEVTYPE_SAM##_model; \ } @@ -191,27 +215,6 @@ static inline unsigned int get_mb_rx_last(const struct at91_priv *priv) return priv->devtype_data.rx_last; } -static inline unsigned int get_mb_rx_split(const struct at91_priv *priv) -{ - return priv->devtype_data.rx_split; -} - -static inline unsigned int get_mb_rx_num(const struct at91_priv *priv) -{ - return get_mb_rx_last(priv) - get_mb_rx_first(priv) + 1; -} - -static inline unsigned int get_mb_rx_low_last(const struct at91_priv *priv) -{ - return get_mb_rx_split(priv) - 1; -} - -static inline unsigned int get_mb_rx_low_mask(const struct at91_priv *priv) -{ - return AT91_MB_MASK(get_mb_rx_split(priv)) & - ~AT91_MB_MASK(get_mb_rx_first(priv)); -} - static inline unsigned int get_mb_tx_shift(const struct at91_priv *priv) { return priv->devtype_data.tx_shift; @@ -232,24 +235,24 @@ static inline unsigned int get_mb_tx_last(const struct at91_priv *priv) return get_mb_tx_first(priv) + get_mb_tx_num(priv) - 1; } -static inline unsigned int get_next_prio_shift(const struct at91_priv *priv) +static inline unsigned int get_head_prio_shift(const struct at91_priv *priv) { return get_mb_tx_shift(priv); } -static inline unsigned int get_next_prio_mask(const struct at91_priv *priv) +static inline unsigned int get_head_prio_mask(const struct at91_priv *priv) { return 0xf << get_mb_tx_shift(priv); } -static inline unsigned int get_next_mb_mask(const struct at91_priv *priv) +static inline unsigned int get_head_mb_mask(const struct at91_priv *priv) { return AT91_MB_MASK(get_mb_tx_shift(priv)); } -static inline unsigned int get_next_mask(const struct at91_priv *priv) +static inline unsigned int get_head_mask(const struct at91_priv *priv) { - return get_next_mb_mask(priv) | get_next_prio_mask(priv); + return get_head_mb_mask(priv) | get_head_prio_mask(priv); } static inline unsigned int get_irq_mb_rx(const struct at91_priv *priv) @@ -264,19 +267,19 @@ static inline unsigned int get_irq_mb_tx(const struct at91_priv *priv) ~AT91_MB_MASK(get_mb_tx_first(priv)); } -static inline unsigned int get_tx_next_mb(const struct at91_priv *priv) +static inline unsigned int get_tx_head_mb(const struct at91_priv *priv) { - return (priv->tx_next & get_next_mb_mask(priv)) 
+ get_mb_tx_first(priv); + return (priv->tx_head & get_head_mb_mask(priv)) + get_mb_tx_first(priv); } -static inline unsigned int get_tx_next_prio(const struct at91_priv *priv) +static inline unsigned int get_tx_head_prio(const struct at91_priv *priv) { - return (priv->tx_next >> get_next_prio_shift(priv)) & 0xf; + return (priv->tx_head >> get_head_prio_shift(priv)) & 0xf; } -static inline unsigned int get_tx_echo_mb(const struct at91_priv *priv) +static inline unsigned int get_tx_tail_mb(const struct at91_priv *priv) { - return (priv->tx_echo & get_next_mb_mask(priv)) + get_mb_tx_first(priv); + return (priv->tx_tail & get_head_mb_mask(priv)) + get_mb_tx_first(priv); } static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg) @@ -285,19 +288,23 @@ static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg) } static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg, - u32 value) + u32 value) { writel_relaxed(value, priv->reg_base + reg); } static inline void set_mb_mode_prio(const struct at91_priv *priv, - unsigned int mb, enum at91_mb_mode mode, int prio) + unsigned int mb, enum at91_mb_mode mode, + u8 prio) { - at91_write(priv, AT91_MMR(mb), (mode << 24) | (prio << 16)); + const u32 reg_mmr = FIELD_PREP(AT91_MMR_MOT_MASK, mode) | + FIELD_PREP(AT91_MMR_PRIOR_MASK, prio); + + at91_write(priv, AT91_MMR(mb), reg_mmr); } static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb, - enum at91_mb_mode mode) + enum at91_mb_mode mode) { set_mb_mode_prio(priv, mb, mode, 0); } @@ -307,9 +314,10 @@ static inline u32 at91_can_id_to_reg_mid(canid_t can_id) u32 reg_mid; if (can_id & CAN_EFF_FLAG) - reg_mid = (can_id & CAN_EFF_MASK) | AT91_MID_MIDE; + reg_mid = FIELD_PREP(AT91_MID_MIDVA_MASK | AT91_MID_MIDVB_MASK, can_id) | + AT91_MID_MIDE; else - reg_mid = (can_id & CAN_SFF_MASK) << 18; + reg_mid = FIELD_PREP(AT91_MID_MIDVA_MASK, can_id); return reg_mid; } @@ -320,10 +328,9 @@ static void at91_setup_mailboxes(struct net_device *dev) unsigned int i; u32 reg_mid; - /* - * Due to a chip bug (errata 50.2.6.3 & 50.3.5.3) the first - * mailbox is disabled. The next 11 mailboxes are used as a - * reception FIFO. The last mailbox is configured with + /* Due to a chip bug (errata 50.2.6.3 & 50.3.5.3) the first + * mailbox is disabled. The next mailboxes are used as a + * reception FIFO. The last of the RX mailboxes is configured with * overwrite option. The overwrite flag indicates a FIFO * overflow. */ @@ -344,27 +351,30 @@ static void at91_setup_mailboxes(struct net_device *dev) at91_write(priv, AT91_MID(i), AT91_MID_MIDE); } - /* The last 4 mailboxes are used for transmitting. */ + /* The last mailboxes are used for transmitting. */ for (i = get_mb_tx_first(priv); i <= get_mb_tx_last(priv); i++) set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0); - /* Reset tx and rx helper pointers */ - priv->tx_next = priv->tx_echo = 0; - priv->rx_next = get_mb_rx_first(priv); + /* Reset tx helper pointers */ + priv->tx_head = priv->tx_tail = 0; } static int at91_set_bittiming(struct net_device *dev) { const struct at91_priv *priv = netdev_priv(dev); const struct can_bittiming *bt = &priv->can.bittiming; - u32 reg_br; + u32 reg_br = 0; - reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 
1 << 24 : 0) | - ((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) | - ((bt->prop_seg - 1) << 8) | ((bt->phase_seg1 - 1) << 4) | - ((bt->phase_seg2 - 1) << 0); + if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) + reg_br |= AT91_BR_SMP; - netdev_info(dev, "writing AT91_BR: 0x%08x\n", reg_br); + reg_br |= FIELD_PREP(AT91_BR_BRP_MASK, bt->brp - 1) | + FIELD_PREP(AT91_BR_SJW_MASK, bt->sjw - 1) | + FIELD_PREP(AT91_BR_PROPAG_MASK, bt->prop_seg - 1) | + FIELD_PREP(AT91_BR_PHASE1_MASK, bt->phase_seg1 - 1) | + FIELD_PREP(AT91_BR_PHASE2_MASK, bt->phase_seg2 - 1); + + netdev_dbg(dev, "writing AT91_BR: 0x%08x\n", reg_br); at91_write(priv, AT91_BR, reg_br); @@ -372,13 +382,13 @@ static int at91_set_bittiming(struct net_device *dev) } static int at91_get_berr_counter(const struct net_device *dev, - struct can_berr_counter *bec) + struct can_berr_counter *bec) { const struct at91_priv *priv = netdev_priv(dev); u32 reg_ecr = at91_read(priv, AT91_ECR); - bec->rxerr = reg_ecr & 0xff; - bec->txerr = reg_ecr >> 16; + bec->rxerr = FIELD_GET(AT91_ECR_REC_MASK, reg_ecr); + bec->txerr = FIELD_GET(AT91_ECR_TEC_MASK, reg_ecr); return 0; } @@ -407,9 +417,13 @@ static void at91_chip_start(struct net_device *dev) priv->can.state = CAN_STATE_ERROR_ACTIVE; + /* Dummy read to clear latched line error interrupts on + * sam9x5 and newer SoCs. + */ + at91_read(priv, AT91_SR); + /* Enable interrupts */ - reg_ier = get_irq_mb_rx(priv) | AT91_IRQ_ERRP | AT91_IRQ_ERR_FRAME; - at91_write(priv, AT91_IDR, AT91_IRQ_ALL); + reg_ier = get_irq_mb_rx(priv) | AT91_IRQ_ERR_LINE | AT91_IRQ_ERR_FRAME; at91_write(priv, AT91_IER, reg_ier); } @@ -418,6 +432,11 @@ static void at91_chip_stop(struct net_device *dev, enum can_state state) struct at91_priv *priv = netdev_priv(dev); u32 reg_mr; + /* Abort any pending TX requests. However this doesn't seem to + * work in case of bus-off on sama5d3. + */ + at91_write(priv, AT91_ACR, get_irq_mb_tx(priv)); + /* disable interrupts */ at91_write(priv, AT91_IDR, AT91_IRQ_ALL); @@ -427,8 +446,7 @@ static void at91_chip_stop(struct net_device *dev, enum can_state state) priv->can.state = state; } -/* - * theory of operation: +/* theory of operation: * * According to the datasheet priority 0 is the highest priority, 15 * is the lowest. If two mailboxes have the same priority level the @@ -442,27 +460,26 @@ static void at91_chip_stop(struct net_device *dev, enum can_state state) * stop sending, waiting for all messages to be delivered, then start * again with mailbox AT91_MB_TX_FIRST prio 0. * - * We use the priv->tx_next as counter for the next transmission + * We use the priv->tx_head as counter for the next transmission * mailbox, but without the offset AT91_MB_TX_FIRST. 
The lower bits * encode the mailbox number, the upper 4 bits the mailbox priority: * - * priv->tx_next = (prio << get_next_prio_shift(priv)) | + * priv->tx_head = (prio << get_next_prio_shift(priv)) | * (mb - get_mb_tx_first(priv)); * */ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct at91_priv *priv = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; struct can_frame *cf = (struct can_frame *)skb->data; unsigned int mb, prio; u32 reg_mid, reg_mcr; - if (can_dropped_invalid_skb(dev, skb)) + if (can_dev_dropped_skb(dev, skb)) return NETDEV_TX_OK; - mb = get_tx_next_mb(priv); - prio = get_tx_next_prio(priv); + mb = get_tx_head_mb(priv); + prio = get_tx_head_prio(priv); if (unlikely(!(at91_read(priv, AT91_MSR(mb)) & AT91_MSR_MRDY))) { netif_stop_queue(dev); @@ -471,8 +488,12 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_BUSY; } reg_mid = at91_can_id_to_reg_mid(cf->can_id); - reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) | - (cf->can_dlc << 16) | AT91_MCR_MTCR; + + reg_mcr = FIELD_PREP(AT91_MCR_MDLC_MASK, cf->len) | + AT91_MCR_MTCR; + + if (cf->can_id & CAN_RTR_FLAG) + reg_mcr |= AT91_MCR_MRTR; /* disable MB while writing ID (see datasheet) */ set_mb_mode(priv, mb, AT91_MB_MODE_DISABLED); @@ -485,23 +506,20 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev) /* This triggers transmission */ at91_write(priv, AT91_MCR(mb), reg_mcr); - stats->tx_bytes += cf->can_dlc; - /* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */ - can_put_echo_skb(skb, dev, mb - get_mb_tx_first(priv)); + can_put_echo_skb(skb, dev, mb - get_mb_tx_first(priv), 0); - /* - * we have to stop the queue and deliver all messages in case + /* we have to stop the queue and deliver all messages in case * of a prio+mb counter wrap around. This is the case if - * tx_next buffer prio and mailbox equals 0. + * tx_head buffer prio and mailbox equals 0. 
* * also stop the queue if next buffer is still in use * (== not ready) */ - priv->tx_next++; - if (!(at91_read(priv, AT91_MSR(get_tx_next_mb(priv))) & + priv->tx_head++; + if (!(at91_read(priv, AT91_MSR(get_tx_head_mb(priv))) & AT91_MSR_MRDY) || - (priv->tx_next & get_next_mask(priv)) == 0) + (priv->tx_head & get_head_mask(priv)) == 0) netif_stop_queue(dev); /* Enable interrupt for this mailbox */ @@ -510,30 +528,20 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } -/** - * at91_activate_rx_low - activate lower rx mailboxes - * @priv: a91 context - * - * Reenables the lower mailboxes for reception of new CAN messages - */ -static inline void at91_activate_rx_low(const struct at91_priv *priv) +static inline u32 at91_get_timestamp(const struct at91_priv *priv) { - u32 mask = get_mb_rx_low_mask(priv); - at91_write(priv, AT91_TCR, mask); + return at91_read(priv, AT91_TIM); } -/** - * at91_activate_rx_mb - reactive single rx mailbox - * @priv: a91 context - * @mb: mailbox to reactivate - * - * Reenables given mailbox for reception of new CAN messages - */ -static inline void at91_activate_rx_mb(const struct at91_priv *priv, - unsigned int mb) +static inline struct sk_buff * +at91_alloc_can_err_skb(struct net_device *dev, + struct can_frame **cf, u32 *timestamp) { - u32 mask = 1 << mb; - at91_write(priv, AT91_TCR, mask); + const struct at91_priv *priv = netdev_priv(dev); + + *timestamp = at91_get_timestamp(priv); + + return alloc_can_err_skb(dev, cf); } /** @@ -544,51 +552,75 @@ static void at91_rx_overflow_err(struct net_device *dev) { struct net_device_stats *stats = &dev->stats; struct sk_buff *skb; + struct at91_priv *priv = netdev_priv(dev); struct can_frame *cf; + u32 timestamp; + int err; netdev_dbg(dev, "RX buffer overflow\n"); stats->rx_over_errors++; stats->rx_errors++; - skb = alloc_can_err_skb(dev, &cf); + skb = at91_alloc_can_err_skb(dev, &cf, ×tamp); if (unlikely(!skb)) return; cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; - netif_receive_skb(skb); + err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); + if (err) + stats->rx_fifo_errors++; } /** - * at91_read_mb - read CAN msg from mailbox (lowlevel impl) - * @dev: net device + * at91_mailbox_read - read CAN msg from mailbox + * @offload: rx-offload * @mb: mailbox number to read from - * @cf: can frame where to store message + * @timestamp: pointer to 32 bit timestamp + * @drop: true indicated mailbox to mark as read and drop frame * - * Reads a CAN message from the given mailbox and stores data into - * given can frame. "mb" and "cf" must be valid. + * Reads a CAN message from the given mailbox if not empty. 
*/ -static void at91_read_mb(struct net_device *dev, unsigned int mb, - struct can_frame *cf) +static struct sk_buff *at91_mailbox_read(struct can_rx_offload *offload, + unsigned int mb, u32 *timestamp, + bool drop) { - const struct at91_priv *priv = netdev_priv(dev); + const struct at91_priv *priv = rx_offload_to_priv(offload); + struct can_frame *cf; + struct sk_buff *skb; u32 reg_msr, reg_mid; + reg_msr = at91_read(priv, AT91_MSR(mb)); + if (!(reg_msr & AT91_MSR_MRDY)) + return NULL; + + if (unlikely(drop)) { + skb = ERR_PTR(-ENOBUFS); + goto mark_as_read; + } + + skb = alloc_can_skb(offload->dev, &cf); + if (unlikely(!skb)) { + skb = ERR_PTR(-ENOMEM); + goto mark_as_read; + } + reg_mid = at91_read(priv, AT91_MID(mb)); if (reg_mid & AT91_MID_MIDE) - cf->can_id = ((reg_mid >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG; + cf->can_id = FIELD_GET(AT91_MID_MIDVA_MASK | AT91_MID_MIDVB_MASK, reg_mid) | + CAN_EFF_FLAG; else - cf->can_id = (reg_mid >> 18) & CAN_SFF_MASK; + cf->can_id = FIELD_GET(AT91_MID_MIDVA_MASK, reg_mid); - reg_msr = at91_read(priv, AT91_MSR(mb)); - cf->can_dlc = get_can_dlc((reg_msr >> 16) & 0xf); + /* extend timestamp to full 32 bit */ + *timestamp = FIELD_GET(AT91_MSR_MTIMESTAMP_MASK, reg_msr) << 16; + + cf->len = can_cc_dlc2len(FIELD_GET(AT91_MSR_MDLC_MASK, reg_msr)); - if (reg_msr & AT91_MSR_MRTR) + if (reg_msr & AT91_MSR_MRTR) { cf->can_id |= CAN_RTR_FLAG; - else { + } else { *(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb)); *(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb)); } @@ -597,237 +629,21 @@ static void at91_read_mb(struct net_device *dev, unsigned int mb, at91_write(priv, AT91_MID(mb), AT91_MID_MIDE); if (unlikely(mb == get_mb_rx_last(priv) && reg_msr & AT91_MSR_MMI)) - at91_rx_overflow_err(dev); -} - -/** - * at91_read_msg - read CAN message from mailbox - * @dev: net device - * @mb: mail box to read from - * - * Reads a CAN message from given mailbox, and put into linux network - * RX queue, does all housekeeping chores (stats, ...) - */ -static void at91_read_msg(struct net_device *dev, unsigned int mb) -{ - struct net_device_stats *stats = &dev->stats; - struct can_frame *cf; - struct sk_buff *skb; - - skb = alloc_can_skb(dev, &cf); - if (unlikely(!skb)) { - stats->rx_dropped++; - return; - } - - at91_read_mb(dev, mb, cf); + at91_rx_overflow_err(offload->dev); - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; - netif_receive_skb(skb); + mark_as_read: + at91_write(priv, AT91_MCR(mb), AT91_MCR_MTCR); - can_led_event(dev, CAN_LED_EVENT_RX); + return skb; } -/** - * at91_poll_rx - read multiple CAN messages from mailboxes - * @dev: net device - * @quota: max number of pkgs we're allowed to receive - * - * Theory of Operation: - * - * About 3/4 of the mailboxes (get_mb_rx_first()...get_mb_rx_last()) - * on the chip are reserved for RX. We split them into 2 groups. The - * lower group ranges from get_mb_rx_first() to get_mb_rx_low_last(). +/* theory of operation: * - * Like it or not, but the chip always saves a received CAN message - * into the first free mailbox it finds (starting with the - * lowest). This makes it very difficult to read the messages in the - * right order from the chip. This is how we work around that problem: - * - * The first message goes into mb nr. 1 and issues an interrupt. All - * rx ints are disabled in the interrupt handler and a napi poll is - * scheduled. We read the mailbox, but do _not_ reenable the mb (to - * receive another message). 
- * - * lower mbxs upper - * ____^______ __^__ - * / \ / \ - * +-+-+-+-+-+-+-+-++-+-+-+-+ - * | |x|x|x|x|x|x|x|| | | | | - * +-+-+-+-+-+-+-+-++-+-+-+-+ - * 0 0 0 0 0 0 0 0 0 0 1 1 \ mail - * 0 1 2 3 4 5 6 7 8 9 0 1 / box - * ^ - * | - * \ - * unused, due to chip bug - * - * The variable priv->rx_next points to the next mailbox to read a - * message from. As long we're in the lower mailboxes we just read the - * mailbox but not reenable it. - * - * With completion of the last of the lower mailboxes, we reenable the - * whole first group, but continue to look for filled mailboxes in the - * upper mailboxes. Imagine the second group like overflow mailboxes, - * which takes CAN messages if the lower goup is full. While in the - * upper group we reenable the mailbox right after reading it. Giving - * the chip more room to store messages. - * - * After finishing we look again in the lower group if we've still - * quota. - * - */ -static int at91_poll_rx(struct net_device *dev, int quota) -{ - struct at91_priv *priv = netdev_priv(dev); - u32 reg_sr = at91_read(priv, AT91_SR); - const unsigned long *addr = (unsigned long *)®_sr; - unsigned int mb; - int received = 0; - - if (priv->rx_next > get_mb_rx_low_last(priv) && - reg_sr & get_mb_rx_low_mask(priv)) - netdev_info(dev, - "order of incoming frames cannot be guaranteed\n"); - - again: - for (mb = find_next_bit(addr, get_mb_tx_first(priv), priv->rx_next); - mb < get_mb_tx_first(priv) && quota > 0; - reg_sr = at91_read(priv, AT91_SR), - mb = find_next_bit(addr, get_mb_tx_first(priv), ++priv->rx_next)) { - at91_read_msg(dev, mb); - - /* reactivate mailboxes */ - if (mb == get_mb_rx_low_last(priv)) - /* all lower mailboxed, if just finished it */ - at91_activate_rx_low(priv); - else if (mb > get_mb_rx_low_last(priv)) - /* only the mailbox we read */ - at91_activate_rx_mb(priv, mb); - - received++; - quota--; - } - - /* upper group completed, look again in lower */ - if (priv->rx_next > get_mb_rx_low_last(priv) && - mb > get_mb_rx_last(priv)) { - priv->rx_next = get_mb_rx_first(priv); - if (quota > 0) - goto again; - } - - return received; -} - -static void at91_poll_err_frame(struct net_device *dev, - struct can_frame *cf, u32 reg_sr) -{ - struct at91_priv *priv = netdev_priv(dev); - - /* CRC error */ - if (reg_sr & AT91_IRQ_CERR) { - netdev_dbg(dev, "CERR irq\n"); - dev->stats.rx_errors++; - priv->can.can_stats.bus_error++; - cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; - } - - /* Stuffing Error */ - if (reg_sr & AT91_IRQ_SERR) { - netdev_dbg(dev, "SERR irq\n"); - dev->stats.rx_errors++; - priv->can.can_stats.bus_error++; - cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; - cf->data[2] |= CAN_ERR_PROT_STUFF; - } - - /* Acknowledgement Error */ - if (reg_sr & AT91_IRQ_AERR) { - netdev_dbg(dev, "AERR irq\n"); - dev->stats.tx_errors++; - cf->can_id |= CAN_ERR_ACK; - } - - /* Form error */ - if (reg_sr & AT91_IRQ_FERR) { - netdev_dbg(dev, "FERR irq\n"); - dev->stats.rx_errors++; - priv->can.can_stats.bus_error++; - cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; - cf->data[2] |= CAN_ERR_PROT_FORM; - } - - /* Bit Error */ - if (reg_sr & AT91_IRQ_BERR) { - netdev_dbg(dev, "BERR irq\n"); - dev->stats.tx_errors++; - priv->can.can_stats.bus_error++; - cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; - cf->data[2] |= CAN_ERR_PROT_BIT; - } -} - -static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr) -{ - struct sk_buff *skb; - struct can_frame *cf; - - if (quota == 0) - return 0; - - skb = alloc_can_err_skb(dev, &cf); - if (unlikely(!skb)) - 
return 0; - - at91_poll_err_frame(dev, cf, reg_sr); - - dev->stats.rx_packets++; - dev->stats.rx_bytes += cf->can_dlc; - netif_receive_skb(skb); - - return 1; -} - -static int at91_poll(struct napi_struct *napi, int quota) -{ - struct net_device *dev = napi->dev; - const struct at91_priv *priv = netdev_priv(dev); - u32 reg_sr = at91_read(priv, AT91_SR); - int work_done = 0; - - if (reg_sr & get_irq_mb_rx(priv)) - work_done += at91_poll_rx(dev, quota - work_done); - - /* - * The error bits are clear on read, - * so use saved value from irq handler. - */ - reg_sr |= priv->reg_sr; - if (reg_sr & AT91_IRQ_ERR_FRAME) - work_done += at91_poll_err(dev, quota - work_done, reg_sr); - - if (work_done < quota) { - /* enable IRQs for frame errors and all mailboxes >= rx_next */ - u32 reg_ier = AT91_IRQ_ERR_FRAME; - reg_ier |= get_irq_mb_rx(priv) & ~AT91_MB_MASK(priv->rx_next); - - napi_complete_done(napi, work_done); - at91_write(priv, AT91_IER, reg_ier); - } - - return work_done; -} - -/* - * theory of operation: - * - * priv->tx_echo holds the number of the oldest can_frame put for + * priv->tx_tail holds the number of the oldest can_frame put for * transmission into the hardware, but not yet ACKed by the CAN tx * complete IRQ. * - * We iterate from priv->tx_echo to priv->tx_next and check if the + * We iterate from priv->tx_tail to priv->tx_head and check if the * packet has been transmitted, echo it back to the CAN framework. If * we discover a not yet transmitted package, stop looking for more. * @@ -838,10 +654,8 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr) u32 reg_msr; unsigned int mb; - /* masking of reg_sr not needed, already done by at91_irq */ - - for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) { - mb = get_tx_echo_mb(priv); + for (/* nix */; (priv->tx_head - priv->tx_tail) > 0; priv->tx_tail++) { + mb = get_tx_tail_mb(priv); /* no event in mailbox? */ if (!(reg_sr & (1 << mb))) @@ -850,250 +664,208 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr) /* Disable irq for this TX mailbox */ at91_write(priv, AT91_IDR, 1 << mb); - /* - * only echo if mailbox signals us a transfer + /* only echo if mailbox signals us a transfer * complete (MSR_MRDY). Otherwise it's a tansfer * abort. "can_bus_off()" takes care about the skbs * parked in the echo queue. */ reg_msr = at91_read(priv, AT91_MSR(mb)); - if (likely(reg_msr & AT91_MSR_MRDY && - ~reg_msr & AT91_MSR_MABT)) { - /* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */ - can_get_echo_skb(dev, mb - get_mb_tx_first(priv)); - dev->stats.tx_packets++; - can_led_event(dev, CAN_LED_EVENT_TX); - } + if (unlikely(!(reg_msr & AT91_MSR_MRDY && + ~reg_msr & AT91_MSR_MABT))) + continue; + + /* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */ + dev->stats.tx_bytes += + can_get_echo_skb(dev, mb - get_mb_tx_first(priv), NULL); + dev->stats.tx_packets++; } - /* - * restart queue if we don't have a wrap around but restart if + /* restart queue if we don't have a wrap around but restart if * we get a TX int for the last can frame directly before a * wrap around. 
*/ - if ((priv->tx_next & get_next_mask(priv)) != 0 || - (priv->tx_echo & get_next_mask(priv)) == 0) + if ((priv->tx_head & get_head_mask(priv)) != 0 || + (priv->tx_tail & get_head_mask(priv)) == 0) netif_wake_queue(dev); } -static void at91_irq_err_state(struct net_device *dev, - struct can_frame *cf, enum can_state new_state) +static void at91_irq_err_line(struct net_device *dev, const u32 reg_sr) { + struct net_device_stats *stats = &dev->stats; + enum can_state new_state, rx_state, tx_state; struct at91_priv *priv = netdev_priv(dev); - u32 reg_idr = 0, reg_ier = 0; struct can_berr_counter bec; + struct sk_buff *skb; + struct can_frame *cf; + u32 timestamp; + int err; at91_get_berr_counter(dev, &bec); + can_state_get_by_berr_counter(dev, &bec, &tx_state, &rx_state); - switch (priv->can.state) { - case CAN_STATE_ERROR_ACTIVE: - /* - * from: ERROR_ACTIVE - * to : ERROR_WARNING, ERROR_PASSIVE, BUS_OFF - * => : there was a warning int - */ - if (new_state >= CAN_STATE_ERROR_WARNING && - new_state <= CAN_STATE_BUS_OFF) { - netdev_dbg(dev, "Error Warning IRQ\n"); - priv->can.can_stats.error_warning++; - - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] = (bec.txerr > bec.rxerr) ? - CAN_ERR_CRTL_TX_WARNING : - CAN_ERR_CRTL_RX_WARNING; - } - case CAN_STATE_ERROR_WARNING: /* fallthrough */ - /* - * from: ERROR_ACTIVE, ERROR_WARNING - * to : ERROR_PASSIVE, BUS_OFF - * => : error passive int - */ - if (new_state >= CAN_STATE_ERROR_PASSIVE && - new_state <= CAN_STATE_BUS_OFF) { - netdev_dbg(dev, "Error Passive IRQ\n"); - priv->can.can_stats.error_passive++; - - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] = (bec.txerr > bec.rxerr) ? - CAN_ERR_CRTL_TX_PASSIVE : - CAN_ERR_CRTL_RX_PASSIVE; - } - break; - case CAN_STATE_BUS_OFF: - /* - * from: BUS_OFF - * to : ERROR_ACTIVE, ERROR_WARNING, ERROR_PASSIVE - */ - if (new_state <= CAN_STATE_ERROR_PASSIVE) { - cf->can_id |= CAN_ERR_RESTARTED; - - netdev_dbg(dev, "restarted\n"); - priv->can.can_stats.restarts++; + /* The chip automatically recovers from bus-off after 128 + * occurrences of 11 consecutive recessive bits. + * + * After an auto-recovered bus-off, the error counters no + * longer reflect this fact. On the sam9263 the state bits in + * the SR register show the current state (based on the + * current error counters), while on sam9x5 and newer SoCs + * these bits are latched. + * + * Take any latched bus-off information from the SR register + * into account when calculating the CAN new state, to start + * the standard CAN bus off handling. + */ + if (reg_sr & AT91_IRQ_BOFF) + rx_state = CAN_STATE_BUS_OFF; - netif_carrier_on(dev); - netif_wake_queue(dev); - } - break; - default: - break; - } + new_state = max(tx_state, rx_state); + /* state hasn't changed */ + if (likely(new_state == priv->can.state)) + return; - /* process state changes depending on the new state */ - switch (new_state) { - case CAN_STATE_ERROR_ACTIVE: - /* - * actually we want to enable AT91_IRQ_WARN here, but - * it screws up the system under certain - * circumstances. 
so just enable AT91_IRQ_ERRP, thus - * the "fallthrough" - */ - netdev_dbg(dev, "Error Active\n"); - cf->can_id |= CAN_ERR_PROT; - cf->data[2] = CAN_ERR_PROT_ACTIVE; - case CAN_STATE_ERROR_WARNING: /* fallthrough */ - reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_BOFF; - reg_ier = AT91_IRQ_ERRP; - break; - case CAN_STATE_ERROR_PASSIVE: - reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_ERRP; - reg_ier = AT91_IRQ_BOFF; - break; - case CAN_STATE_BUS_OFF: - reg_idr = AT91_IRQ_ERRA | AT91_IRQ_ERRP | - AT91_IRQ_WARN | AT91_IRQ_BOFF; - reg_ier = 0; + /* The skb allocation might fail, but can_change_state() + * handles cf == NULL. + */ + skb = at91_alloc_can_err_skb(dev, &cf, ×tamp); + can_change_state(dev, cf, tx_state, rx_state); - cf->can_id |= CAN_ERR_BUSOFF; + if (new_state == CAN_STATE_BUS_OFF) { + at91_chip_stop(dev, CAN_STATE_BUS_OFF); + can_bus_off(dev); + } - netdev_dbg(dev, "bus-off\n"); - netif_carrier_off(dev); - priv->can.can_stats.bus_off++; + if (unlikely(!skb)) + return; - /* turn off chip, if restart is disabled */ - if (!priv->can.restart_ms) { - at91_chip_stop(dev, CAN_STATE_BUS_OFF); - return; - } - break; - default: - break; + if (new_state != CAN_STATE_BUS_OFF) { + cf->can_id |= CAN_ERR_CNT; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; } - at91_write(priv, AT91_IDR, reg_idr); - at91_write(priv, AT91_IER, reg_ier); + err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); + if (err) + stats->rx_fifo_errors++; } -static int at91_get_state_by_bec(const struct net_device *dev, - enum can_state *state) +static void at91_irq_err_frame(struct net_device *dev, const u32 reg_sr) { - struct can_berr_counter bec; + struct net_device_stats *stats = &dev->stats; + struct at91_priv *priv = netdev_priv(dev); + struct can_frame *cf; + struct sk_buff *skb; + u32 timestamp; int err; - err = at91_get_berr_counter(dev, &bec); - if (err) - return err; + priv->can.can_stats.bus_error++; - if (bec.txerr < 96 && bec.rxerr < 96) - *state = CAN_STATE_ERROR_ACTIVE; - else if (bec.txerr < 128 && bec.rxerr < 128) - *state = CAN_STATE_ERROR_WARNING; - else if (bec.txerr < 256 && bec.rxerr < 256) - *state = CAN_STATE_ERROR_PASSIVE; - else - *state = CAN_STATE_BUS_OFF; + skb = at91_alloc_can_err_skb(dev, &cf, ×tamp); + if (cf) + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; - return 0; -} + if (reg_sr & AT91_IRQ_CERR) { + netdev_dbg(dev, "CRC error\n"); + stats->rx_errors++; + if (cf) + cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; + } -static void at91_irq_err(struct net_device *dev) -{ - struct at91_priv *priv = netdev_priv(dev); - struct sk_buff *skb; - struct can_frame *cf; - enum can_state new_state; - u32 reg_sr; - int err; + if (reg_sr & AT91_IRQ_SERR) { + netdev_dbg(dev, "Stuff error\n"); + + stats->rx_errors++; + if (cf) + cf->data[2] |= CAN_ERR_PROT_STUFF; + } + + if (reg_sr & AT91_IRQ_AERR) { + netdev_dbg(dev, "NACK error\n"); - if (at91_is_sam9263(priv)) { - reg_sr = at91_read(priv, AT91_SR); - - /* we need to look at the unmasked reg_sr */ - if (unlikely(reg_sr & AT91_IRQ_BOFF)) - new_state = CAN_STATE_BUS_OFF; - else if (unlikely(reg_sr & AT91_IRQ_ERRP)) - new_state = CAN_STATE_ERROR_PASSIVE; - else if (unlikely(reg_sr & AT91_IRQ_WARN)) - new_state = CAN_STATE_ERROR_WARNING; - else if (likely(reg_sr & AT91_IRQ_ERRA)) - new_state = CAN_STATE_ERROR_ACTIVE; - else { - netdev_err(dev, "BUG! 
hardware in undefined state\n"); - return; + stats->tx_errors++; + if (cf) { + cf->can_id |= CAN_ERR_ACK; + cf->data[2] |= CAN_ERR_PROT_TX; } - } else { - err = at91_get_state_by_bec(dev, &new_state); - if (err) - return; } - /* state hasn't changed */ - if (likely(new_state == priv->can.state)) - return; + if (reg_sr & AT91_IRQ_FERR) { + netdev_dbg(dev, "Format error\n"); - skb = alloc_can_err_skb(dev, &cf); - if (unlikely(!skb)) + stats->rx_errors++; + if (cf) + cf->data[2] |= CAN_ERR_PROT_FORM; + } + + if (reg_sr & AT91_IRQ_BERR) { + netdev_dbg(dev, "Bit error\n"); + + stats->tx_errors++; + if (cf) + cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT; + } + + if (!cf) return; - at91_irq_err_state(dev, cf, new_state); + err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); + if (err) + stats->rx_fifo_errors++; +} + +static u32 at91_get_reg_sr_rx(const struct at91_priv *priv, u32 *reg_sr_p) +{ + const u32 reg_sr = at91_read(priv, AT91_SR); - dev->stats.rx_packets++; - dev->stats.rx_bytes += cf->can_dlc; - netif_rx(skb); + *reg_sr_p |= reg_sr; - priv->can.state = new_state; + return reg_sr & get_irq_mb_rx(priv); } -/* - * interrupt handler - */ static irqreturn_t at91_irq(int irq, void *dev_id) { struct net_device *dev = dev_id; struct at91_priv *priv = netdev_priv(dev); irqreturn_t handled = IRQ_NONE; - u32 reg_sr, reg_imr; - - reg_sr = at91_read(priv, AT91_SR); - reg_imr = at91_read(priv, AT91_IMR); - - /* Ignore masked interrupts */ - reg_sr &= reg_imr; - if (!reg_sr) - goto exit; + u32 reg_sr = 0, reg_sr_rx; + int ret; - handled = IRQ_HANDLED; + /* Receive interrupt + * Some bits of AT91_SR are cleared on read, keep them in reg_sr. + */ + while ((reg_sr_rx = at91_get_reg_sr_rx(priv, ®_sr))) { + ret = can_rx_offload_irq_offload_timestamp(&priv->offload, + reg_sr_rx); + handled = IRQ_HANDLED; - /* Receive or error interrupt? -> napi */ - if (reg_sr & (get_irq_mb_rx(priv) | AT91_IRQ_ERR_FRAME)) { - /* - * The error bits are clear on read, - * save for later use. 
- */ - priv->reg_sr = reg_sr; - at91_write(priv, AT91_IDR, - get_irq_mb_rx(priv) | AT91_IRQ_ERR_FRAME); - napi_schedule(&priv->napi); + if (!ret) + break; } /* Transmission complete interrupt */ - if (reg_sr & get_irq_mb_tx(priv)) + if (reg_sr & get_irq_mb_tx(priv)) { at91_irq_tx(dev, reg_sr); + handled = IRQ_HANDLED; + } - at91_irq_err(dev); + /* Line Error interrupt */ + if (reg_sr & AT91_IRQ_ERR_LINE || + priv->can.state > CAN_STATE_ERROR_ACTIVE) { + at91_irq_err_line(dev, reg_sr); + handled = IRQ_HANDLED; + } + + /* Frame Error Interrupt */ + if (reg_sr & AT91_IRQ_ERR_FRAME) { + at91_irq_err_frame(dev, reg_sr); + handled = IRQ_HANDLED; + } + + if (handled) + can_rx_offload_irq_finish(&priv->offload); - exit: return handled; } @@ -1102,57 +874,58 @@ static int at91_open(struct net_device *dev) struct at91_priv *priv = netdev_priv(dev); int err; - err = clk_prepare_enable(priv->clk); + err = phy_power_on(priv->transceiver); if (err) return err; /* check or determine and set bittime */ err = open_candev(dev); if (err) - goto out; + goto out_phy_power_off; - /* register interrupt handler */ - if (request_irq(dev->irq, at91_irq, IRQF_SHARED, - dev->name, dev)) { - err = -EAGAIN; - goto out_close; - } + err = clk_prepare_enable(priv->clk); + if (err) + goto out_close_candev; - can_led_event(dev, CAN_LED_EVENT_OPEN); + /* register interrupt handler */ + err = request_irq(dev->irq, at91_irq, IRQF_SHARED, + dev->name, dev); + if (err) + goto out_clock_disable_unprepare; /* start chip and queuing */ at91_chip_start(dev); - napi_enable(&priv->napi); + can_rx_offload_enable(&priv->offload); netif_start_queue(dev); return 0; - out_close: - close_candev(dev); - out: + out_clock_disable_unprepare: clk_disable_unprepare(priv->clk); + out_close_candev: + close_candev(dev); + out_phy_power_off: + phy_power_off(priv->transceiver); return err; } -/* - * stop CAN bus activity +/* stop CAN bus activity */ static int at91_close(struct net_device *dev) { struct at91_priv *priv = netdev_priv(dev); netif_stop_queue(dev); - napi_disable(&priv->napi); + can_rx_offload_disable(&priv->offload); at91_chip_stop(dev, CAN_STATE_STOPPED); free_irq(dev->irq, dev); clk_disable_unprepare(priv->clk); + phy_power_off(priv->transceiver); close_candev(dev); - can_led_event(dev, CAN_LED_EVENT_STOP); - return 0; } @@ -1175,22 +948,26 @@ static const struct net_device_ops at91_netdev_ops = { .ndo_open = at91_open, .ndo_stop = at91_close, .ndo_start_xmit = at91_start_xmit, - .ndo_change_mtu = can_change_mtu, }; -static ssize_t at91_sysfs_show_mb0_id(struct device *dev, - struct device_attribute *attr, char *buf) +static const struct ethtool_ops at91_ethtool_ops = { + .get_ts_info = ethtool_op_get_ts_info, +}; + +static ssize_t mb0_id_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct at91_priv *priv = netdev_priv(to_net_dev(dev)); if (priv->mb0_id & CAN_EFF_FLAG) - return snprintf(buf, PAGE_SIZE, "0x%08x\n", priv->mb0_id); + return sysfs_emit(buf, "0x%08x\n", priv->mb0_id); else - return snprintf(buf, PAGE_SIZE, "0x%03x\n", priv->mb0_id); + return sysfs_emit(buf, "0x%03x\n", priv->mb0_id); } -static ssize_t at91_sysfs_set_mb0_id(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) +static ssize_t mb0_id_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { struct net_device *ndev = to_net_dev(dev); struct at91_priv *priv = netdev_priv(ndev); @@ -1224,7 +1001,7 @@ static ssize_t at91_sysfs_set_mb0_id(struct device *dev, return ret; } 
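The sysfs callbacks above were renamed to mb0_id_show()/mb0_id_store() because the DEVICE_ATTR_RW() shorthand used just below derives the callback names from the attribute name. A hypothetical attribute shows the convention:

#include <linux/device.h>
#include <linux/sysfs.h>

/* DEVICE_ATTR_RW(foo) declares dev_attr_foo with mode 0644 and wires it
 * to foo_show()/foo_store() - hence the mb0_id_show()/mb0_id_store()
 * names above.  "foo" here is purely illustrative.
 */
static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sysfs_emit(buf, "%d\n", 42);
}

static ssize_t foo_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	return count;
}

static DEVICE_ATTR_RW(foo);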
-static DEVICE_ATTR(mb0_id, 0644, at91_sysfs_show_mb0_id, at91_sysfs_set_mb0_id); +static DEVICE_ATTR_RW(mb0_id); static struct attribute *at91_sysfs_attrs[] = { &dev_attr_mb0_id.attr, @@ -1269,6 +1046,7 @@ static const struct at91_devtype_data *at91_can_get_driver_data(struct platform_ static int at91_can_probe(struct platform_device *pdev) { const struct at91_devtype_data *devtype_data; + struct phy *transceiver; struct net_device *dev; struct at91_priv *priv; struct resource *res; @@ -1304,7 +1082,7 @@ static int at91_can_probe(struct platform_device *pdev) goto exit_put; } - addr = ioremap_nocache(res->start, resource_size(res)); + addr = ioremap(res->start, resource_size(res)); if (!addr) { err = -ENOMEM; goto exit_release; @@ -1317,7 +1095,15 @@ static int at91_can_probe(struct platform_device *pdev) goto exit_iounmap; } + transceiver = devm_phy_optional_get(&pdev->dev, NULL); + if (IS_ERR(transceiver)) { + err = PTR_ERR(transceiver); + dev_err_probe(&pdev->dev, err, "failed to get phy\n"); + goto exit_iounmap; + } + dev->netdev_ops = &at91_netdev_ops; + dev->ethtool_ops = &at91_ethtool_ops; dev->irq = irq; dev->flags |= IFF_ECHO; @@ -1333,8 +1119,14 @@ static int at91_can_probe(struct platform_device *pdev) priv->clk = clk; priv->pdata = dev_get_platdata(&pdev->dev); priv->mb0_id = 0x7ff; + priv->offload.mailbox_read = at91_mailbox_read; + priv->offload.mb_first = devtype_data->rx_first; + priv->offload.mb_last = devtype_data->rx_last; + + can_rx_offload_add_timestamp(dev, &priv->offload); - netif_napi_add(dev, &priv->napi, at91_poll, get_mb_rx_num(priv)); + if (transceiver) + priv->can.bitrate_max = transceiver->attrs.max_link_rate; if (at91_is_sam9263(priv)) dev->sysfs_groups[0] = &at91_sysfs_attr_group; @@ -1348,8 +1140,6 @@ static int at91_can_probe(struct platform_device *pdev) goto exit_free; } - devm_can_led_init(dev); - dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n", priv->reg_base, dev->irq); @@ -1367,7 +1157,7 @@ static int at91_can_probe(struct platform_device *pdev) return err; } -static int at91_can_remove(struct platform_device *pdev) +static void at91_can_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct at91_priv *priv = netdev_priv(dev); @@ -1383,8 +1173,6 @@ static int at91_can_remove(struct platform_device *pdev) clk_put(priv->clk); free_candev(dev); - - return 0; } static const struct platform_device_id at91_can_id_table[] = { diff --git a/drivers/net/can/bxcan.c b/drivers/net/can/bxcan.c new file mode 100644 index 000000000000..baf494d20bef --- /dev/null +++ b/drivers/net/can/bxcan.c @@ -0,0 +1,1101 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// bxcan.c - STM32 Basic Extended CAN controller driver +// +// Copyright (c) 2022 Dario Binacchi <dario.binacchi@amarulasolutions.com> +// +// NOTE: The ST documentation uses the terms master/slave instead of +// primary/secondary. 
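Both the at91 rework above and the new bxcan driver below describe register fields with GENMASK() and access them via FIELD_PREP()/FIELD_GET() rather than open-coded shifts. A small, self-contained illustration of the idiom, reusing the AT91_BR prescaler and SJW field positions from earlier in this diff:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Field positions borrowed from the AT91_BR layout above: bits 22..16
 * hold the bit-rate prescaler, bits 13..12 the sync jump width.
 * FIELD_PREP() shifts a value into its field, FIELD_GET() extracts it,
 * with no open-coded shift/mask arithmetic.
 */
#define FOO_BR_BRP_MASK		GENMASK(22, 16)
#define FOO_BR_SJW_MASK		GENMASK(13, 12)

static u32 foo_pack_br(u32 brp, u32 sjw)
{
	return FIELD_PREP(FOO_BR_BRP_MASK, brp) |
	       FIELD_PREP(FOO_BR_SJW_MASK, sjw);
}

static u32 foo_unpack_brp(u32 reg_br)
{
	return FIELD_GET(FOO_BR_BRP_MASK, reg_br);
}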
+ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/bitfield.h> +#include <linux/can.h> +#include <linux/can/dev.h> +#include <linux/can/error.h> +#include <linux/can/rx-offload.h> +#include <linux/clk.h> +#include <linux/ethtool.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#define BXCAN_NAPI_WEIGHT 3 +#define BXCAN_TIMEOUT_US 10000 + +#define BXCAN_RX_MB_NUM 2 +#define BXCAN_TX_MB_NUM 3 + +/* Primary control register (MCR) bits */ +#define BXCAN_MCR_RESET BIT(15) +#define BXCAN_MCR_TTCM BIT(7) +#define BXCAN_MCR_ABOM BIT(6) +#define BXCAN_MCR_AWUM BIT(5) +#define BXCAN_MCR_NART BIT(4) +#define BXCAN_MCR_RFLM BIT(3) +#define BXCAN_MCR_TXFP BIT(2) +#define BXCAN_MCR_SLEEP BIT(1) +#define BXCAN_MCR_INRQ BIT(0) + +/* Primary status register (MSR) bits */ +#define BXCAN_MSR_ERRI BIT(2) +#define BXCAN_MSR_SLAK BIT(1) +#define BXCAN_MSR_INAK BIT(0) + +/* Transmit status register (TSR) bits */ +#define BXCAN_TSR_RQCP2 BIT(16) +#define BXCAN_TSR_RQCP1 BIT(8) +#define BXCAN_TSR_RQCP0 BIT(0) + +/* Receive FIFO 0 register (RF0R) bits */ +#define BXCAN_RF0R_RFOM0 BIT(5) +#define BXCAN_RF0R_FMP0_MASK GENMASK(1, 0) + +/* Interrupt enable register (IER) bits */ +#define BXCAN_IER_SLKIE BIT(17) +#define BXCAN_IER_WKUIE BIT(16) +#define BXCAN_IER_ERRIE BIT(15) +#define BXCAN_IER_LECIE BIT(11) +#define BXCAN_IER_BOFIE BIT(10) +#define BXCAN_IER_EPVIE BIT(9) +#define BXCAN_IER_EWGIE BIT(8) +#define BXCAN_IER_FOVIE1 BIT(6) +#define BXCAN_IER_FFIE1 BIT(5) +#define BXCAN_IER_FMPIE1 BIT(4) +#define BXCAN_IER_FOVIE0 BIT(3) +#define BXCAN_IER_FFIE0 BIT(2) +#define BXCAN_IER_FMPIE0 BIT(1) +#define BXCAN_IER_TMEIE BIT(0) + +/* Error status register (ESR) bits */ +#define BXCAN_ESR_REC_MASK GENMASK(31, 24) +#define BXCAN_ESR_TEC_MASK GENMASK(23, 16) +#define BXCAN_ESR_LEC_MASK GENMASK(6, 4) +#define BXCAN_ESR_BOFF BIT(2) +#define BXCAN_ESR_EPVF BIT(1) +#define BXCAN_ESR_EWGF BIT(0) + +/* Bit timing register (BTR) bits */ +#define BXCAN_BTR_SILM BIT(31) +#define BXCAN_BTR_LBKM BIT(30) +#define BXCAN_BTR_SJW_MASK GENMASK(25, 24) +#define BXCAN_BTR_TS2_MASK GENMASK(22, 20) +#define BXCAN_BTR_TS1_MASK GENMASK(19, 16) +#define BXCAN_BTR_BRP_MASK GENMASK(9, 0) + +/* TX mailbox identifier register (TIxR, x = 0..2) bits */ +#define BXCAN_TIxR_STID_MASK GENMASK(31, 21) +#define BXCAN_TIxR_EXID_MASK GENMASK(31, 3) +#define BXCAN_TIxR_IDE BIT(2) +#define BXCAN_TIxR_RTR BIT(1) +#define BXCAN_TIxR_TXRQ BIT(0) + +/* TX mailbox data length and time stamp register (TDTxR, x = 0..2 bits */ +#define BXCAN_TDTxR_DLC_MASK GENMASK(3, 0) + +/* RX FIFO mailbox identifier register (RIxR, x = 0..1 */ +#define BXCAN_RIxR_STID_MASK GENMASK(31, 21) +#define BXCAN_RIxR_EXID_MASK GENMASK(31, 3) +#define BXCAN_RIxR_IDE BIT(2) +#define BXCAN_RIxR_RTR BIT(1) + +/* RX FIFO mailbox data length and timestamp register (RDTxR, x = 0..1) bits */ +#define BXCAN_RDTxR_TIME_MASK GENMASK(31, 16) +#define BXCAN_RDTxR_DLC_MASK GENMASK(3, 0) + +#define BXCAN_FMR_REG 0x00 +#define BXCAN_FM1R_REG 0x04 +#define BXCAN_FS1R_REG 0x0c +#define BXCAN_FFA1R_REG 0x14 +#define BXCAN_FA1R_REG 0x1c +#define BXCAN_FiR1_REG(b) (0x40 + (b) * 8) +#define BXCAN_FiR2_REG(b) (0x44 + (b) * 8) + +#define BXCAN_FILTER_ID(cfg) ((cfg) == BXCAN_CFG_DUAL_SECONDARY ? 
14 : 0) + +/* Filter primary register (FMR) bits */ +#define BXCAN_FMR_CANSB_MASK GENMASK(13, 8) +#define BXCAN_FMR_FINIT BIT(0) + +enum bxcan_lec_code { + BXCAN_LEC_NO_ERROR = 0, + BXCAN_LEC_STUFF_ERROR, + BXCAN_LEC_FORM_ERROR, + BXCAN_LEC_ACK_ERROR, + BXCAN_LEC_BIT1_ERROR, + BXCAN_LEC_BIT0_ERROR, + BXCAN_LEC_CRC_ERROR, + BXCAN_LEC_UNUSED +}; + +enum bxcan_cfg { + BXCAN_CFG_SINGLE = 0, + BXCAN_CFG_DUAL_PRIMARY, + BXCAN_CFG_DUAL_SECONDARY +}; + +/* Structure of the message buffer */ +struct bxcan_mb { + u32 id; /* can identifier */ + u32 dlc; /* data length control and timestamp */ + u32 data[2]; /* data */ +}; + +/* Structure of the hardware registers */ +struct bxcan_regs { + u32 mcr; /* 0x00 - primary control */ + u32 msr; /* 0x04 - primary status */ + u32 tsr; /* 0x08 - transmit status */ + u32 rf0r; /* 0x0c - FIFO 0 */ + u32 rf1r; /* 0x10 - FIFO 1 */ + u32 ier; /* 0x14 - interrupt enable */ + u32 esr; /* 0x18 - error status */ + u32 btr; /* 0x1c - bit timing*/ + u32 reserved0[88]; /* 0x20 */ + struct bxcan_mb tx_mb[BXCAN_TX_MB_NUM]; /* 0x180 - tx mailbox */ + struct bxcan_mb rx_mb[BXCAN_RX_MB_NUM]; /* 0x1b0 - rx mailbox */ +}; + +struct bxcan_priv { + struct can_priv can; + struct can_rx_offload offload; + struct device *dev; + struct net_device *ndev; + + struct bxcan_regs __iomem *regs; + struct regmap *gcan; + int tx_irq; + int sce_irq; + enum bxcan_cfg cfg; + struct clk *clk; + spinlock_t rmw_lock; /* lock for read-modify-write operations */ + unsigned int tx_head; + unsigned int tx_tail; + u32 timestamp; +}; + +static const struct can_bittiming_const bxcan_bittiming_const = { + .name = KBUILD_MODNAME, + .tseg1_min = 1, + .tseg1_max = 16, + .tseg2_min = 1, + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 1024, + .brp_inc = 1, +}; + +static inline void bxcan_rmw(struct bxcan_priv *priv, void __iomem *addr, + u32 clear, u32 set) +{ + unsigned long flags; + u32 old, val; + + spin_lock_irqsave(&priv->rmw_lock, flags); + old = readl(addr); + val = (old & ~clear) | set; + if (val != old) + writel(val, addr); + + spin_unlock_irqrestore(&priv->rmw_lock, flags); +} + +static void bxcan_disable_filters(struct bxcan_priv *priv, enum bxcan_cfg cfg) +{ + unsigned int fid = BXCAN_FILTER_ID(cfg); + u32 fmask = BIT(fid); + + regmap_update_bits(priv->gcan, BXCAN_FA1R_REG, fmask, 0); +} + +static void bxcan_enable_filters(struct bxcan_priv *priv, enum bxcan_cfg cfg) +{ + unsigned int fid = BXCAN_FILTER_ID(cfg); + u32 fmask = BIT(fid); + + /* Filter settings: + * + * Accept all messages. + * Assign filter 0 to CAN1 and filter 14 to CAN2 in identifier + * mask mode with 32 bits width. + */ + + /* Enter filter initialization mode and assign filters to CAN + * controllers. 
+ */ + regmap_update_bits(priv->gcan, BXCAN_FMR_REG, + BXCAN_FMR_CANSB_MASK | BXCAN_FMR_FINIT, + FIELD_PREP(BXCAN_FMR_CANSB_MASK, 14) | + BXCAN_FMR_FINIT); + + /* Deactivate filter */ + regmap_update_bits(priv->gcan, BXCAN_FA1R_REG, fmask, 0); + + /* Two 32-bit registers in identifier mask mode */ + regmap_update_bits(priv->gcan, BXCAN_FM1R_REG, fmask, 0); + + /* Single 32-bit scale configuration */ + regmap_update_bits(priv->gcan, BXCAN_FS1R_REG, fmask, fmask); + + /* Assign filter to FIFO 0 */ + regmap_update_bits(priv->gcan, BXCAN_FFA1R_REG, fmask, 0); + + /* Accept all messages */ + regmap_write(priv->gcan, BXCAN_FiR1_REG(fid), 0); + regmap_write(priv->gcan, BXCAN_FiR2_REG(fid), 0); + + /* Activate filter */ + regmap_update_bits(priv->gcan, BXCAN_FA1R_REG, fmask, fmask); + + /* Exit filter initialization mode */ + regmap_update_bits(priv->gcan, BXCAN_FMR_REG, BXCAN_FMR_FINIT, 0); +} + +static inline u8 bxcan_get_tx_head(const struct bxcan_priv *priv) +{ + return priv->tx_head % BXCAN_TX_MB_NUM; +} + +static inline u8 bxcan_get_tx_tail(const struct bxcan_priv *priv) +{ + return priv->tx_tail % BXCAN_TX_MB_NUM; +} + +static inline u8 bxcan_get_tx_free(const struct bxcan_priv *priv) +{ + return BXCAN_TX_MB_NUM - (priv->tx_head - priv->tx_tail); +} + +static bool bxcan_tx_busy(const struct bxcan_priv *priv) +{ + if (bxcan_get_tx_free(priv) > 0) + return false; + + netif_stop_queue(priv->ndev); + + /* Memory barrier before checking tx_free (head and tail) */ + smp_mb(); + + if (bxcan_get_tx_free(priv) == 0) { + netdev_dbg(priv->ndev, + "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n", + priv->tx_head, priv->tx_tail, + priv->tx_head - priv->tx_tail); + + return true; + } + + netif_start_queue(priv->ndev); + + return false; +} + +static int bxcan_chip_softreset(struct bxcan_priv *priv) +{ + struct bxcan_regs __iomem *regs = priv->regs; + u32 value; + + bxcan_rmw(priv, ®s->mcr, 0, BXCAN_MCR_RESET); + return readx_poll_timeout(readl, ®s->msr, value, + value & BXCAN_MSR_SLAK, BXCAN_TIMEOUT_US, + USEC_PER_SEC); +} + +static int bxcan_enter_init_mode(struct bxcan_priv *priv) +{ + struct bxcan_regs __iomem *regs = priv->regs; + u32 value; + + bxcan_rmw(priv, ®s->mcr, 0, BXCAN_MCR_INRQ); + return readx_poll_timeout(readl, ®s->msr, value, + value & BXCAN_MSR_INAK, BXCAN_TIMEOUT_US, + USEC_PER_SEC); +} + +static int bxcan_leave_init_mode(struct bxcan_priv *priv) +{ + struct bxcan_regs __iomem *regs = priv->regs; + u32 value; + + bxcan_rmw(priv, ®s->mcr, BXCAN_MCR_INRQ, 0); + return readx_poll_timeout(readl, ®s->msr, value, + !(value & BXCAN_MSR_INAK), BXCAN_TIMEOUT_US, + USEC_PER_SEC); +} + +static int bxcan_enter_sleep_mode(struct bxcan_priv *priv) +{ + struct bxcan_regs __iomem *regs = priv->regs; + u32 value; + + bxcan_rmw(priv, ®s->mcr, 0, BXCAN_MCR_SLEEP); + return readx_poll_timeout(readl, ®s->msr, value, + value & BXCAN_MSR_SLAK, BXCAN_TIMEOUT_US, + USEC_PER_SEC); +} + +static int bxcan_leave_sleep_mode(struct bxcan_priv *priv) +{ + struct bxcan_regs __iomem *regs = priv->regs; + u32 value; + + bxcan_rmw(priv, ®s->mcr, BXCAN_MCR_SLEEP, 0); + return readx_poll_timeout(readl, ®s->msr, value, + !(value & BXCAN_MSR_SLAK), BXCAN_TIMEOUT_US, + USEC_PER_SEC); +} + +static inline +struct bxcan_priv *rx_offload_to_priv(struct can_rx_offload *offload) +{ + return container_of(offload, struct bxcan_priv, offload); +} + +static struct sk_buff *bxcan_mailbox_read(struct can_rx_offload *offload, + unsigned int mbxno, u32 *timestamp, + bool drop) +{ + struct bxcan_priv *priv = 
rx_offload_to_priv(offload); + struct bxcan_regs __iomem *regs = priv->regs; + struct bxcan_mb __iomem *mb_regs = ®s->rx_mb[0]; + struct sk_buff *skb = NULL; + struct can_frame *cf; + u32 rf0r, id, dlc; + + rf0r = readl(®s->rf0r); + if (unlikely(drop)) { + skb = ERR_PTR(-ENOBUFS); + goto mark_as_read; + } + + if (!(rf0r & BXCAN_RF0R_FMP0_MASK)) + goto mark_as_read; + + skb = alloc_can_skb(offload->dev, &cf); + if (unlikely(!skb)) { + skb = ERR_PTR(-ENOMEM); + goto mark_as_read; + } + + id = readl(&mb_regs->id); + if (id & BXCAN_RIxR_IDE) + cf->can_id = FIELD_GET(BXCAN_RIxR_EXID_MASK, id) | CAN_EFF_FLAG; + else + cf->can_id = FIELD_GET(BXCAN_RIxR_STID_MASK, id) & CAN_SFF_MASK; + + dlc = readl(&mb_regs->dlc); + priv->timestamp = FIELD_GET(BXCAN_RDTxR_TIME_MASK, dlc); + cf->len = can_cc_dlc2len(FIELD_GET(BXCAN_RDTxR_DLC_MASK, dlc)); + + if (id & BXCAN_RIxR_RTR) { + cf->can_id |= CAN_RTR_FLAG; + } else { + int i, j; + + for (i = 0, j = 0; i < cf->len; i += 4, j++) + *(u32 *)(cf->data + i) = readl(&mb_regs->data[j]); + } + + mark_as_read: + rf0r |= BXCAN_RF0R_RFOM0; + writel(rf0r, ®s->rf0r); + return skb; +} + +static irqreturn_t bxcan_rx_isr(int irq, void *dev_id) +{ + struct net_device *ndev = dev_id; + struct bxcan_priv *priv = netdev_priv(ndev); + struct bxcan_regs __iomem *regs = priv->regs; + u32 rf0r; + + rf0r = readl(®s->rf0r); + if (!(rf0r & BXCAN_RF0R_FMP0_MASK)) + return IRQ_NONE; + + can_rx_offload_irq_offload_fifo(&priv->offload); + can_rx_offload_irq_finish(&priv->offload); + + return IRQ_HANDLED; +} + +static irqreturn_t bxcan_tx_isr(int irq, void *dev_id) +{ + struct net_device *ndev = dev_id; + struct bxcan_priv *priv = netdev_priv(ndev); + struct bxcan_regs __iomem *regs = priv->regs; + struct net_device_stats *stats = &ndev->stats; + u32 tsr, rqcp_bit; + int idx; + + tsr = readl(®s->tsr); + if (!(tsr & (BXCAN_TSR_RQCP0 | BXCAN_TSR_RQCP1 | BXCAN_TSR_RQCP2))) + return IRQ_NONE; + + while (priv->tx_head - priv->tx_tail > 0) { + idx = bxcan_get_tx_tail(priv); + rqcp_bit = BXCAN_TSR_RQCP0 << (idx << 3); + if (!(tsr & rqcp_bit)) + break; + + stats->tx_packets++; + stats->tx_bytes += can_get_echo_skb(ndev, idx, NULL); + priv->tx_tail++; + } + + writel(tsr, ®s->tsr); + + if (bxcan_get_tx_free(priv)) { + /* Make sure that anybody stopping the queue after + * this sees the new tx_ring->tail. + */ + smp_mb(); + netif_wake_queue(ndev); + } + + return IRQ_HANDLED; +} + +static void bxcan_handle_state_change(struct net_device *ndev, u32 esr) +{ + struct bxcan_priv *priv = netdev_priv(ndev); + enum can_state new_state = priv->can.state; + struct can_berr_counter bec; + enum can_state rx_state, tx_state; + struct sk_buff *skb; + struct can_frame *cf; + + /* Early exit if no error flag is set */ + if (!(esr & (BXCAN_ESR_EWGF | BXCAN_ESR_EPVF | BXCAN_ESR_BOFF))) + return; + + bec.txerr = FIELD_GET(BXCAN_ESR_TEC_MASK, esr); + bec.rxerr = FIELD_GET(BXCAN_ESR_REC_MASK, esr); + + if (esr & BXCAN_ESR_BOFF) + new_state = CAN_STATE_BUS_OFF; + else if (esr & BXCAN_ESR_EPVF) + new_state = CAN_STATE_ERROR_PASSIVE; + else if (esr & BXCAN_ESR_EWGF) + new_state = CAN_STATE_ERROR_WARNING; + + /* state hasn't changed */ + if (unlikely(new_state == priv->can.state)) + return; + + skb = alloc_can_err_skb(ndev, &cf); + + tx_state = bec.txerr >= bec.rxerr ? new_state : 0; + rx_state = bec.txerr <= bec.rxerr ? 
new_state : 0; + can_change_state(ndev, cf, tx_state, rx_state); + + if (new_state == CAN_STATE_BUS_OFF) { + can_bus_off(ndev); + } else if (skb) { + cf->can_id |= CAN_ERR_CNT; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + } + + if (skb) { + int err; + + err = can_rx_offload_queue_timestamp(&priv->offload, skb, + priv->timestamp); + if (err) + ndev->stats.rx_fifo_errors++; + } +} + +static void bxcan_handle_bus_err(struct net_device *ndev, u32 esr) +{ + struct bxcan_priv *priv = netdev_priv(ndev); + enum bxcan_lec_code lec_code; + struct can_frame *cf; + struct sk_buff *skb; + + lec_code = FIELD_GET(BXCAN_ESR_LEC_MASK, esr); + + /* Early exit if no lec update or no error. + * No lec update means that no CAN bus event has been detected + * since CPU wrote BXCAN_LEC_UNUSED value to status reg. + */ + if (lec_code == BXCAN_LEC_UNUSED || lec_code == BXCAN_LEC_NO_ERROR) + return; + + /* Common for all type of bus errors */ + priv->can.can_stats.bus_error++; + + /* Propagate the error condition to the CAN stack */ + skb = alloc_can_err_skb(ndev, &cf); + if (skb) + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + + switch (lec_code) { + case BXCAN_LEC_STUFF_ERROR: + netdev_dbg(ndev, "Stuff error\n"); + ndev->stats.rx_errors++; + if (skb) + cf->data[2] |= CAN_ERR_PROT_STUFF; + break; + + case BXCAN_LEC_FORM_ERROR: + netdev_dbg(ndev, "Form error\n"); + ndev->stats.rx_errors++; + if (skb) + cf->data[2] |= CAN_ERR_PROT_FORM; + break; + + case BXCAN_LEC_ACK_ERROR: + netdev_dbg(ndev, "Ack error\n"); + ndev->stats.tx_errors++; + if (skb) { + cf->can_id |= CAN_ERR_ACK; + cf->data[3] = CAN_ERR_PROT_LOC_ACK; + } + break; + + case BXCAN_LEC_BIT1_ERROR: + netdev_dbg(ndev, "Bit error (recessive)\n"); + ndev->stats.tx_errors++; + if (skb) + cf->data[2] |= CAN_ERR_PROT_BIT1; + break; + + case BXCAN_LEC_BIT0_ERROR: + netdev_dbg(ndev, "Bit error (dominant)\n"); + ndev->stats.tx_errors++; + if (skb) + cf->data[2] |= CAN_ERR_PROT_BIT0; + break; + + case BXCAN_LEC_CRC_ERROR: + netdev_dbg(ndev, "CRC error\n"); + ndev->stats.rx_errors++; + if (skb) { + cf->data[2] |= CAN_ERR_PROT_BIT; + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; + } + break; + + default: + break; + } + + if (skb) { + int err; + + err = can_rx_offload_queue_timestamp(&priv->offload, skb, + priv->timestamp); + if (err) + ndev->stats.rx_fifo_errors++; + } +} + +static irqreturn_t bxcan_state_change_isr(int irq, void *dev_id) +{ + struct net_device *ndev = dev_id; + struct bxcan_priv *priv = netdev_priv(ndev); + struct bxcan_regs __iomem *regs = priv->regs; + u32 msr, esr; + + msr = readl(®s->msr); + if (!(msr & BXCAN_MSR_ERRI)) + return IRQ_NONE; + + esr = readl(®s->esr); + bxcan_handle_state_change(ndev, esr); + + if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) + bxcan_handle_bus_err(ndev, esr); + + msr |= BXCAN_MSR_ERRI; + writel(msr, ®s->msr); + can_rx_offload_irq_finish(&priv->offload); + + return IRQ_HANDLED; +} + +static int bxcan_chip_start(struct net_device *ndev) +{ + struct bxcan_priv *priv = netdev_priv(ndev); + struct bxcan_regs __iomem *regs = priv->regs; + struct can_bittiming *bt = &priv->can.bittiming; + u32 clr, set; + int err; + + err = bxcan_chip_softreset(priv); + if (err) { + netdev_err(ndev, "failed to reset chip, error %pe\n", + ERR_PTR(err)); + return err; + } + + err = bxcan_leave_sleep_mode(priv); + if (err) { + netdev_err(ndev, "failed to leave sleep mode, error %pe\n", + ERR_PTR(err)); + goto failed_leave_sleep; + } + + err = bxcan_enter_init_mode(priv); + if (err) { + netdev_err(ndev, "failed to enter init mode, 
error %pe\n", + ERR_PTR(err)); + goto failed_enter_init; + } + + /* MCR + * + * select request order priority + * enable time triggered mode + * bus-off state left on sw request + * sleep mode left on sw request + * retransmit automatically on error + * do not lock RX FIFO on overrun + */ + bxcan_rmw(priv, ®s->mcr, + BXCAN_MCR_ABOM | BXCAN_MCR_AWUM | BXCAN_MCR_NART | + BXCAN_MCR_RFLM, BXCAN_MCR_TTCM | BXCAN_MCR_TXFP); + + /* Bit timing register settings */ + set = FIELD_PREP(BXCAN_BTR_BRP_MASK, bt->brp - 1) | + FIELD_PREP(BXCAN_BTR_TS1_MASK, bt->phase_seg1 + + bt->prop_seg - 1) | + FIELD_PREP(BXCAN_BTR_TS2_MASK, bt->phase_seg2 - 1) | + FIELD_PREP(BXCAN_BTR_SJW_MASK, bt->sjw - 1); + + /* loopback + silent mode put the controller in test mode, + * useful for hot self-test + */ + if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) + set |= BXCAN_BTR_LBKM; + + if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) + set |= BXCAN_BTR_SILM; + + bxcan_rmw(priv, ®s->btr, BXCAN_BTR_SILM | BXCAN_BTR_LBKM | + BXCAN_BTR_BRP_MASK | BXCAN_BTR_TS1_MASK | BXCAN_BTR_TS2_MASK | + BXCAN_BTR_SJW_MASK, set); + + bxcan_enable_filters(priv, priv->cfg); + + /* Clear all internal status */ + priv->tx_head = 0; + priv->tx_tail = 0; + + err = bxcan_leave_init_mode(priv); + if (err) { + netdev_err(ndev, "failed to leave init mode, error %pe\n", + ERR_PTR(err)); + goto failed_leave_init; + } + + /* Set a `lec` value so that we can check for updates later */ + bxcan_rmw(priv, ®s->esr, BXCAN_ESR_LEC_MASK, + FIELD_PREP(BXCAN_ESR_LEC_MASK, BXCAN_LEC_UNUSED)); + + /* IER + * + * Enable interrupt for: + * bus-off + * passive error + * warning error + * last error code + * RX FIFO pending message + * TX mailbox empty + */ + clr = BXCAN_IER_WKUIE | BXCAN_IER_SLKIE | BXCAN_IER_FOVIE1 | + BXCAN_IER_FFIE1 | BXCAN_IER_FMPIE1 | BXCAN_IER_FOVIE0 | + BXCAN_IER_FFIE0; + set = BXCAN_IER_ERRIE | BXCAN_IER_BOFIE | BXCAN_IER_EPVIE | + BXCAN_IER_EWGIE | BXCAN_IER_FMPIE0 | BXCAN_IER_TMEIE; + + if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) + set |= BXCAN_IER_LECIE; + else + clr |= BXCAN_IER_LECIE; + + bxcan_rmw(priv, ®s->ier, clr, set); + + priv->can.state = CAN_STATE_ERROR_ACTIVE; + return 0; + +failed_leave_init: +failed_enter_init: +failed_leave_sleep: + bxcan_chip_softreset(priv); + return err; +} + +static int bxcan_open(struct net_device *ndev) +{ + struct bxcan_priv *priv = netdev_priv(ndev); + int err; + + err = clk_prepare_enable(priv->clk); + if (err) { + netdev_err(ndev, "failed to enable clock, error %pe\n", + ERR_PTR(err)); + return err; + } + + err = open_candev(ndev); + if (err) { + netdev_err(ndev, "open_candev() failed, error %pe\n", + ERR_PTR(err)); + goto out_disable_clock; + } + + can_rx_offload_enable(&priv->offload); + err = request_irq(ndev->irq, bxcan_rx_isr, IRQF_SHARED, ndev->name, + ndev); + if (err) { + netdev_err(ndev, "failed to register rx irq(%d), error %pe\n", + ndev->irq, ERR_PTR(err)); + goto out_close_candev; + } + + err = request_irq(priv->tx_irq, bxcan_tx_isr, IRQF_SHARED, ndev->name, + ndev); + if (err) { + netdev_err(ndev, "failed to register tx irq(%d), error %pe\n", + priv->tx_irq, ERR_PTR(err)); + goto out_free_rx_irq; + } + + err = request_irq(priv->sce_irq, bxcan_state_change_isr, IRQF_SHARED, + ndev->name, ndev); + if (err) { + netdev_err(ndev, "failed to register sce irq(%d), error %pe\n", + priv->sce_irq, ERR_PTR(err)); + goto out_free_tx_irq; + } + + err = bxcan_chip_start(ndev); + if (err) + goto out_free_sce_irq; + + netif_start_queue(ndev); + return 0; + +out_free_sce_irq: + 
free_irq(priv->sce_irq, ndev); +out_free_tx_irq: + free_irq(priv->tx_irq, ndev); +out_free_rx_irq: + free_irq(ndev->irq, ndev); +out_close_candev: + can_rx_offload_disable(&priv->offload); + close_candev(ndev); +out_disable_clock: + clk_disable_unprepare(priv->clk); + return err; +} + +static void bxcan_chip_stop(struct net_device *ndev) +{ + struct bxcan_priv *priv = netdev_priv(ndev); + struct bxcan_regs __iomem *regs = priv->regs; + + /* disable all interrupts */ + bxcan_rmw(priv, ®s->ier, BXCAN_IER_SLKIE | BXCAN_IER_WKUIE | + BXCAN_IER_ERRIE | BXCAN_IER_LECIE | BXCAN_IER_BOFIE | + BXCAN_IER_EPVIE | BXCAN_IER_EWGIE | BXCAN_IER_FOVIE1 | + BXCAN_IER_FFIE1 | BXCAN_IER_FMPIE1 | BXCAN_IER_FOVIE0 | + BXCAN_IER_FFIE0 | BXCAN_IER_FMPIE0 | BXCAN_IER_TMEIE, 0); + bxcan_disable_filters(priv, priv->cfg); + bxcan_enter_sleep_mode(priv); + priv->can.state = CAN_STATE_STOPPED; +} + +static int bxcan_stop(struct net_device *ndev) +{ + struct bxcan_priv *priv = netdev_priv(ndev); + + netif_stop_queue(ndev); + bxcan_chip_stop(ndev); + free_irq(ndev->irq, ndev); + free_irq(priv->tx_irq, ndev); + free_irq(priv->sce_irq, ndev); + can_rx_offload_disable(&priv->offload); + close_candev(ndev); + clk_disable_unprepare(priv->clk); + return 0; +} + +static netdev_tx_t bxcan_start_xmit(struct sk_buff *skb, + struct net_device *ndev) +{ + struct bxcan_priv *priv = netdev_priv(ndev); + struct can_frame *cf = (struct can_frame *)skb->data; + struct bxcan_regs __iomem *regs = priv->regs; + struct bxcan_mb __iomem *mb_regs; + unsigned int idx; + u32 id; + int i, j; + + if (can_dev_dropped_skb(ndev, skb)) + return NETDEV_TX_OK; + + if (bxcan_tx_busy(priv)) + return NETDEV_TX_BUSY; + + idx = bxcan_get_tx_head(priv); + priv->tx_head++; + if (bxcan_get_tx_free(priv) == 0) + netif_stop_queue(ndev); + + mb_regs = ®s->tx_mb[idx]; + if (cf->can_id & CAN_EFF_FLAG) + id = FIELD_PREP(BXCAN_TIxR_EXID_MASK, cf->can_id) | + BXCAN_TIxR_IDE; + else + id = FIELD_PREP(BXCAN_TIxR_STID_MASK, cf->can_id); + + if (cf->can_id & CAN_RTR_FLAG) { /* Remote transmission request */ + id |= BXCAN_TIxR_RTR; + } else { + for (i = 0, j = 0; i < cf->len; i += 4, j++) + writel(*(u32 *)(cf->data + i), &mb_regs->data[j]); + } + + writel(FIELD_PREP(BXCAN_TDTxR_DLC_MASK, cf->len), &mb_regs->dlc); + + can_put_echo_skb(skb, ndev, idx, 0); + + /* Start transmission */ + writel(id | BXCAN_TIxR_TXRQ, &mb_regs->id); + + return NETDEV_TX_OK; +} + +static const struct net_device_ops bxcan_netdev_ops = { + .ndo_open = bxcan_open, + .ndo_stop = bxcan_stop, + .ndo_start_xmit = bxcan_start_xmit, +}; + +static const struct ethtool_ops bxcan_ethtool_ops = { + .get_ts_info = ethtool_op_get_ts_info, +}; + +static int bxcan_do_set_mode(struct net_device *ndev, enum can_mode mode) +{ + int err; + + switch (mode) { + case CAN_MODE_START: + err = bxcan_chip_start(ndev); + if (err) + return err; + + netif_wake_queue(ndev); + break; + + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int bxcan_get_berr_counter(const struct net_device *ndev, + struct can_berr_counter *bec) +{ + struct bxcan_priv *priv = netdev_priv(ndev); + struct bxcan_regs __iomem *regs = priv->regs; + u32 esr; + int err; + + err = clk_prepare_enable(priv->clk); + if (err) + return err; + + esr = readl(®s->esr); + bec->txerr = FIELD_GET(BXCAN_ESR_TEC_MASK, esr); + bec->rxerr = FIELD_GET(BXCAN_ESR_REC_MASK, esr); + clk_disable_unprepare(priv->clk); + return 0; +} + +static int bxcan_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct device *dev = 
&pdev->dev; + struct net_device *ndev; + struct bxcan_priv *priv; + struct clk *clk = NULL; + void __iomem *regs; + struct regmap *gcan; + enum bxcan_cfg cfg; + int err, rx_irq, tx_irq, sce_irq; + + regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(regs)) { + dev_err(dev, "failed to get base address\n"); + return PTR_ERR(regs); + } + + gcan = syscon_regmap_lookup_by_phandle(np, "st,gcan"); + if (IS_ERR(gcan)) { + dev_err(dev, "failed to get shared memory base address\n"); + return PTR_ERR(gcan); + } + + if (of_property_read_bool(np, "st,can-primary")) + cfg = BXCAN_CFG_DUAL_PRIMARY; + else if (of_property_read_bool(np, "st,can-secondary")) + cfg = BXCAN_CFG_DUAL_SECONDARY; + else + cfg = BXCAN_CFG_SINGLE; + + clk = devm_clk_get(dev, NULL); + if (IS_ERR(clk)) { + dev_err(dev, "failed to get clock\n"); + return PTR_ERR(clk); + } + + rx_irq = platform_get_irq_byname(pdev, "rx0"); + if (rx_irq < 0) + return rx_irq; + + tx_irq = platform_get_irq_byname(pdev, "tx"); + if (tx_irq < 0) + return tx_irq; + + sce_irq = platform_get_irq_byname(pdev, "sce"); + if (sce_irq < 0) + return sce_irq; + + ndev = alloc_candev(sizeof(struct bxcan_priv), BXCAN_TX_MB_NUM); + if (!ndev) { + dev_err(dev, "alloc_candev() failed\n"); + return -ENOMEM; + } + + priv = netdev_priv(ndev); + platform_set_drvdata(pdev, ndev); + SET_NETDEV_DEV(ndev, dev); + ndev->netdev_ops = &bxcan_netdev_ops; + ndev->ethtool_ops = &bxcan_ethtool_ops; + ndev->irq = rx_irq; + ndev->flags |= IFF_ECHO; + + priv->dev = dev; + priv->ndev = ndev; + priv->regs = regs; + priv->gcan = gcan; + priv->clk = clk; + priv->tx_irq = tx_irq; + priv->sce_irq = sce_irq; + priv->cfg = cfg; + priv->can.clock.freq = clk_get_rate(clk); + spin_lock_init(&priv->rmw_lock); + priv->tx_head = 0; + priv->tx_tail = 0; + priv->can.bittiming_const = &bxcan_bittiming_const; + priv->can.do_set_mode = bxcan_do_set_mode; + priv->can.do_get_berr_counter = bxcan_get_berr_counter; + priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | + CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING; + + priv->offload.mailbox_read = bxcan_mailbox_read; + err = can_rx_offload_add_fifo(ndev, &priv->offload, BXCAN_NAPI_WEIGHT); + if (err) { + dev_err(dev, "failed to add FIFO rx_offload\n"); + goto out_free_candev; + } + + err = register_candev(ndev); + if (err) { + dev_err(dev, "failed to register netdev\n"); + goto out_can_rx_offload_del; + } + + dev_info(dev, "clk: %d Hz, IRQs: %d, %d, %d\n", priv->can.clock.freq, + tx_irq, rx_irq, sce_irq); + return 0; + +out_can_rx_offload_del: + can_rx_offload_del(&priv->offload); +out_free_candev: + free_candev(ndev); + return err; +} + +static void bxcan_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct bxcan_priv *priv = netdev_priv(ndev); + + unregister_candev(ndev); + clk_disable_unprepare(priv->clk); + can_rx_offload_del(&priv->offload); + free_candev(ndev); +} + +static int __maybe_unused bxcan_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct bxcan_priv *priv = netdev_priv(ndev); + + if (!netif_running(ndev)) + return 0; + + netif_stop_queue(ndev); + netif_device_detach(ndev); + + bxcan_enter_sleep_mode(priv); + priv->can.state = CAN_STATE_SLEEPING; + clk_disable_unprepare(priv->clk); + return 0; +} + +static int __maybe_unused bxcan_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct bxcan_priv *priv = netdev_priv(ndev); + + if (!netif_running(ndev)) + return 0; + + clk_prepare_enable(priv->clk); + 
bxcan_leave_sleep_mode(priv); + priv->can.state = CAN_STATE_ERROR_ACTIVE; + + netif_device_attach(ndev); + netif_start_queue(ndev); + return 0; +} + +static SIMPLE_DEV_PM_OPS(bxcan_pm_ops, bxcan_suspend, bxcan_resume); + +static const struct of_device_id bxcan_of_match[] = { + {.compatible = "st,stm32f4-bxcan"}, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, bxcan_of_match); + +static struct platform_driver bxcan_driver = { + .driver = { + .name = KBUILD_MODNAME, + .pm = &bxcan_pm_ops, + .of_match_table = bxcan_of_match, + }, + .probe = bxcan_probe, + .remove = bxcan_remove, +}; + +module_platform_driver(bxcan_driver); + +MODULE_AUTHOR("Dario Binacchi <dario.binacchi@amarulasolutions.com>"); +MODULE_DESCRIPTION("STMicroelectronics Basic Extended CAN controller driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/can/c_can/Kconfig b/drivers/net/can/c_can/Kconfig index 61ffc12d8fd8..1f0e9acb69ec 100644 --- a/drivers/net/can/c_can/Kconfig +++ b/drivers/net/can/c_can/Kconfig @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only menuconfig CAN_C_CAN tristate "Bosch C_CAN/D_CAN devices" depends on HAS_IOMEM @@ -6,7 +7,7 @@ if CAN_C_CAN config CAN_C_CAN_PLATFORM tristate "Generic Platform Bus based C_CAN/D_CAN driver" - ---help--- + help This driver adds support for the C_CAN/D_CAN chips connected to the "platform bus" (Linux abstraction for directly to the processor attached devices) which can be found on various @@ -17,7 +18,8 @@ config CAN_C_CAN_PLATFORM config CAN_C_CAN_PCI tristate "Generic PCI Bus based C_CAN/D_CAN driver" depends on PCI - ---help--- + help This driver adds support for the C_CAN/D_CAN chips connected - to the PCI bus. + to the PCI bus. E.g. for the C_CAN controller IP inside the + Intel Atom E6xx series IOH (aka EG20T 'PCH CAN'). endif diff --git a/drivers/net/can/c_can/Makefile b/drivers/net/can/c_can/Makefile index 9fdc678b5b37..6fa3b2b9e4b9 100644 --- a/drivers/net/can/c_can/Makefile +++ b/drivers/net/can/c_can/Makefile @@ -1,7 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0-only # # Makefile for the Bosch C_CAN controller drivers. 
# obj-$(CONFIG_CAN_C_CAN) += c_can.o + +c_can-objs := +c_can-objs += c_can_ethtool.o +c_can-objs += c_can_main.o + obj-$(CONFIG_CAN_C_CAN_PLATFORM) += c_can_platform.o obj-$(CONFIG_CAN_C_CAN_PCI) += c_can_pci.o diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h index 8acdc7fa4792..029cd8194ed5 100644 --- a/drivers/net/can/c_can/c_can.h +++ b/drivers/net/can/c_can/c_can.h @@ -22,23 +22,6 @@ #ifndef C_CAN_H #define C_CAN_H -/* message object split */ -#define C_CAN_NO_OF_OBJECTS 32 -#define C_CAN_MSG_OBJ_RX_NUM 16 -#define C_CAN_MSG_OBJ_TX_NUM 16 - -#define C_CAN_MSG_OBJ_RX_FIRST 1 -#define C_CAN_MSG_OBJ_RX_LAST (C_CAN_MSG_OBJ_RX_FIRST + \ - C_CAN_MSG_OBJ_RX_NUM - 1) - -#define C_CAN_MSG_OBJ_TX_FIRST (C_CAN_MSG_OBJ_RX_LAST + 1) -#define C_CAN_MSG_OBJ_TX_LAST (C_CAN_MSG_OBJ_TX_FIRST + \ - C_CAN_MSG_OBJ_TX_NUM - 1) - -#define C_CAN_MSG_OBJ_RX_SPLIT 9 -#define C_CAN_MSG_RX_LOW_LAST (C_CAN_MSG_OBJ_RX_SPLIT - 1) -#define RECEIVE_OBJECT_BITS 0x0000ffff - enum reg { C_CAN_CTRL_REG = 0, C_CAN_CTRL_EX_REG, @@ -76,12 +59,13 @@ enum reg { C_CAN_NEWDAT2_REG, C_CAN_INTPND1_REG, C_CAN_INTPND2_REG, + C_CAN_INTPND3_REG, C_CAN_MSGVAL1_REG, C_CAN_MSGVAL2_REG, C_CAN_FUNCTION_REG, }; -static const u16 reg_map_c_can[] = { +static const u16 __maybe_unused reg_map_c_can[] = { [C_CAN_CTRL_REG] = 0x00, [C_CAN_STS_REG] = 0x02, [C_CAN_ERR_CNT_REG] = 0x04, @@ -121,7 +105,7 @@ static const u16 reg_map_c_can[] = { [C_CAN_MSGVAL2_REG] = 0xB2, }; -static const u16 reg_map_d_can[] = { +static const u16 __maybe_unused reg_map_d_can[] = { [C_CAN_CTRL_REG] = 0x00, [C_CAN_CTRL_EX_REG] = 0x02, [C_CAN_STS_REG] = 0x04, @@ -137,6 +121,7 @@ static const u16 reg_map_d_can[] = { [C_CAN_NEWDAT2_REG] = 0x9E, [C_CAN_INTPND1_REG] = 0xB0, [C_CAN_INTPND2_REG] = 0xB2, + [C_CAN_INTPND3_REG] = 0xB4, [C_CAN_MSGVAL1_REG] = 0xC4, [C_CAN_MSGVAL2_REG] = 0xC6, [C_CAN_IF1_COMREQ_REG] = 0x100, @@ -164,7 +149,6 @@ static const u16 reg_map_d_can[] = { }; enum c_can_dev_id { - BOSCH_C_CAN_PLATFORM, BOSCH_C_CAN, BOSCH_D_CAN, }; @@ -176,6 +160,7 @@ struct raminit_bits { struct c_can_driver_data { enum c_can_dev_id id; + unsigned int msg_obj_num; /* RAMINIT register description. Optional. 
*/ const struct raminit_bits *raminit_bits; /* Array of START/DONE bit positions */ @@ -191,31 +176,44 @@ struct c_can_raminit { bool needs_pulse; }; +/* c_can tx ring structure */ +struct c_can_tx_ring { + unsigned int head; + unsigned int tail; + unsigned int obj_num; +}; + /* c_can private data structure */ struct c_can_priv { struct can_priv can; /* must be the first member */ struct napi_struct napi; struct net_device *dev; struct device *device; - atomic_t tx_active; + unsigned int msg_obj_num; + unsigned int msg_obj_rx_num; + unsigned int msg_obj_tx_num; + unsigned int msg_obj_rx_first; + unsigned int msg_obj_rx_last; + unsigned int msg_obj_tx_first; + unsigned int msg_obj_tx_last; + u32 msg_obj_rx_mask; + atomic_t sie_pending; unsigned long tx_dir; int last_status; - u16 (*read_reg) (const struct c_can_priv *priv, enum reg index); - void (*write_reg) (const struct c_can_priv *priv, enum reg index, u16 val); - u32 (*read_reg32) (const struct c_can_priv *priv, enum reg index); - void (*write_reg32) (const struct c_can_priv *priv, enum reg index, u32 val); + struct c_can_tx_ring tx; + u16 (*read_reg)(const struct c_can_priv *priv, enum reg index); + void (*write_reg)(const struct c_can_priv *priv, enum reg index, u16 val); + u32 (*read_reg32)(const struct c_can_priv *priv, enum reg index); + void (*write_reg32)(const struct c_can_priv *priv, enum reg index, u32 val); void __iomem *base; const u16 *regs; - void *priv; /* for board-specific data */ enum c_can_dev_id type; struct c_can_raminit raminit_sys; /* RAMINIT via syscon regmap */ - void (*raminit) (const struct c_can_priv *priv, bool enable); + void (*raminit)(const struct c_can_priv *priv, bool enable); u32 comm_rcv_high; - u32 rxmasked; - u32 dlc[C_CAN_MSG_OBJ_TX_NUM]; }; -struct net_device *alloc_c_can_dev(void); +struct net_device *alloc_c_can_dev(int msg_obj_num); void free_c_can_dev(struct net_device *dev); int register_c_can_dev(struct net_device *dev); void unregister_c_can_dev(struct net_device *dev); @@ -225,4 +223,34 @@ int c_can_power_up(struct net_device *dev); int c_can_power_down(struct net_device *dev); #endif +extern const struct ethtool_ops c_can_ethtool_ops; + +static inline u8 c_can_get_tx_head(const struct c_can_tx_ring *ring) +{ + return ring->head & (ring->obj_num - 1); +} + +static inline u8 c_can_get_tx_tail(const struct c_can_tx_ring *ring) +{ + return ring->tail & (ring->obj_num - 1); +} + +static inline u8 c_can_get_tx_free(const struct c_can_priv *priv, + const struct c_can_tx_ring *ring) +{ + u8 head = c_can_get_tx_head(ring); + u8 tail = c_can_get_tx_tail(ring); + + if (priv->type == BOSCH_D_CAN) + return ring->obj_num - (ring->head - ring->tail); + + /* This is not a FIFO. C/D_CAN sends out the buffers + * prioritized. The lowest buffer number wins. 
+ */ + if (head < tail) + return 0; + + return ring->obj_num - head; +} + #endif /* C_CAN_H */ diff --git a/drivers/net/can/c_can/c_can_ethtool.c b/drivers/net/can/c_can/c_can_ethtool.c new file mode 100644 index 000000000000..e41167eda673 --- /dev/null +++ b/drivers/net/can/c_can/c_can_ethtool.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2021, Dario Binacchi <dariobin@libero.it> + */ + +#include <linux/ethtool.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/netdevice.h> +#include <linux/can/dev.h> + +#include "c_can.h" + +static void c_can_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct c_can_priv *priv = netdev_priv(netdev); + + ring->rx_max_pending = priv->msg_obj_num; + ring->tx_max_pending = priv->msg_obj_num; + ring->rx_pending = priv->msg_obj_rx_num; + ring->tx_pending = priv->msg_obj_tx_num; +} + +const struct ethtool_ops c_can_ethtool_ops = { + .get_ringparam = c_can_get_ringparam, + .get_ts_info = ethtool_op_get_ts_info, +}; diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can_main.c index 606b7d8ffe13..3702cac7fbf0 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can_main.c @@ -40,7 +40,6 @@ #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> -#include <linux/can/led.h> #include "c_can.h" @@ -52,6 +51,7 @@ #define CONTROL_EX_PDR BIT(8) /* control register */ +#define CONTROL_SWR BIT(15) #define CONTROL_TEST BIT(7) #define CONTROL_CCE BIT(6) #define CONTROL_DISABLE_AR BIT(5) @@ -97,6 +97,9 @@ #define BTR_TSEG2_SHIFT 12 #define BTR_TSEG2_MASK (0x7 << BTR_TSEG2_SHIFT) +/* interrupt register */ +#define INT_STS_PENDING 0x8000 + /* brp extension register */ #define BRP_EXT_BRPE_MASK 0x0f #define BRP_EXT_BRPE_SHIFT 0 @@ -128,7 +131,6 @@ /* For the high buffers we clear the interrupt bit and newdat */ #define IF_COMM_RCV_HIGH (IF_COMM_RCV_LOW | IF_COMM_CLR_NEWDAT) - /* Receive setup of message objects */ #define IF_COMM_RCV_SETUP (IF_COMM_MASK | IF_COMM_ARB | IF_COMM_CONTROL) @@ -157,10 +159,8 @@ #define IF_MCONT_TX (IF_MCONT_TXIE | IF_MCONT_EOB) -/* - * Use IF1 for RX and IF2 for TX - */ -#define IF_RX 0 +/* Use IF1 in NAPI path and IF2 in TX path */ +#define IF_NAPI 0 #define IF_TX 1 /* minimum timeout for checking BUSY status */ @@ -169,9 +169,6 @@ /* Wait for ~1 sec for INIT bit */ #define INIT_WAIT_MS 1000 -/* napi related */ -#define C_CAN_NAPI_WEIGHT C_CAN_MSG_OBJ_RX_NUM - /* c_can lec values */ enum c_can_lec_type { LEC_NO_ERROR = 0, @@ -185,8 +182,7 @@ enum c_can_lec_type { LEC_MASK = LEC_UNUSED, }; -/* - * c_can error types: +/* c_can error types: * Bus errors (BUS_OFF, ERROR_WARNING, ERROR_PASSIVE) are supported */ enum c_can_bus_error_types { @@ -208,18 +204,6 @@ static const struct can_bittiming_const c_can_bittiming_const = { .brp_inc = 1, }; -static inline void c_can_pm_runtime_enable(const struct c_can_priv *priv) -{ - if (priv->device) - pm_runtime_enable(priv->device); -} - -static inline void c_can_pm_runtime_disable(const struct c_can_priv *priv) -{ - if (priv->device) - pm_runtime_disable(priv->device); -} - static inline void c_can_pm_runtime_get_sync(const struct c_can_priv *priv) { if (priv->device) @@ -261,7 +245,6 @@ static void c_can_obj_update(struct net_device *dev, int iface, u32 cmd, u32 obj udelay(1); } netdev_err(dev, "Updating object timed out\n"); - } static inline void c_can_object_get(struct 
net_device *dev, int iface, @@ -276,8 +259,7 @@ static inline void c_can_object_put(struct net_device *dev, int iface, c_can_obj_update(dev, iface, cmd | IF_COMM_WR, obj); } -/* - * Note: According to documentation clearing TXIE while MSGVAL is set +/* Note: According to documentation clearing TXIE while MSGVAL is set * is not allowed, but works nicely on C/DCAN. And that lowers the I/O * load significantly. */ @@ -293,8 +275,7 @@ static void c_can_inval_msg_object(struct net_device *dev, int iface, int obj) { struct c_can_priv *priv = netdev_priv(dev); - priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 0); - priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 0); + priv->write_reg32(priv, C_CAN_IFACE(ARB1_REG, iface), 0); c_can_inval_tx_object(dev, iface, obj); } @@ -302,7 +283,7 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface, struct can_frame *frame, int idx) { struct c_can_priv *priv = netdev_priv(dev); - u16 ctrl = IF_MCONT_TX | frame->can_dlc; + u16 ctrl = IF_MCONT_TX | frame->len; bool rtr = frame->can_id & CAN_RTR_FLAG; u32 arb = IF_ARB_MSGVAL; int i; @@ -317,12 +298,11 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface, if (!rtr) arb |= IF_ARB_TRANSMIT; - /* - * If we change the DIR bit, we need to invalidate the buffer + /* If we change the DIR bit, we need to invalidate the buffer * first, i.e. clear the MSGVAL flag in the arbiter. */ if (rtr != (bool)test_bit(idx, &priv->tx_dir)) { - u32 obj = idx + C_CAN_MSG_OBJ_TX_FIRST; + u32 obj = idx + priv->msg_obj_tx_first; c_can_inval_msg_object(dev, iface, obj); change_bit(idx, &priv->tx_dir); @@ -335,7 +315,7 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface, if (priv->type == BOSCH_D_CAN) { u32 data = 0, dreg = C_CAN_IFACE(DATA1_REG, iface); - for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) { + for (i = 0; i < frame->len; i += 4, dreg += 2) { data = (u32)frame->data[i]; data |= (u32)frame->data[i + 1] << 8; data |= (u32)frame->data[i + 2] << 16; @@ -343,7 +323,7 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface, priv->write_reg32(priv, dreg, data); } } else { - for (i = 0; i < frame->can_dlc; i += 2) { + for (i = 0; i < frame->len; i += 2) { priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2, frame->data[i] | @@ -352,15 +332,6 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface, } } -static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev, - int iface) -{ - int i; - - for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++) - c_can_object_get(dev, iface, i, IF_COMM_CLR_NEWDAT); -} - static int c_can_handle_lost_msg_obj(struct net_device *dev, int iface, int objno, u32 ctrl) { @@ -402,7 +373,7 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl) return -ENOMEM; } - frame->can_dlc = get_can_dlc(ctrl & 0x0F); + frame->len = can_cc_dlc2len(ctrl & 0x0F); arb = priv->read_reg32(priv, C_CAN_IFACE(ARB1_REG, iface)); @@ -417,7 +388,7 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl) int i, dreg = C_CAN_IFACE(DATA1_REG, iface); if (priv->type == BOSCH_D_CAN) { - for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) { + for (i = 0; i < frame->len; i += 4, dreg += 2) { data = priv->read_reg32(priv, dreg); frame->data[i] = data; frame->data[i + 1] = data >> 8; @@ -425,16 +396,16 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl) frame->data[i + 3] = data >> 24; } } else { - for (i = 0; i < frame->can_dlc; i += 2, 
dreg++) { + for (i = 0; i < frame->len; i += 2, dreg++) { data = priv->read_reg(priv, dreg); frame->data[i] = data; frame->data[i + 1] = data >> 8; } } - } + stats->rx_bytes += frame->len; + } stats->rx_packets++; - stats->rx_bytes += frame->can_dlc; netif_receive_skb(skb); return 0; @@ -455,38 +426,59 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface, c_can_object_put(dev, iface, obj, IF_COMM_RCV_SETUP); } +static bool c_can_tx_busy(const struct c_can_priv *priv, + const struct c_can_tx_ring *tx_ring) +{ + if (c_can_get_tx_free(priv, tx_ring) > 0) + return false; + + netif_stop_queue(priv->dev); + + /* Memory barrier before checking tx_free (head and tail) */ + smp_mb(); + + if (c_can_get_tx_free(priv, tx_ring) == 0) { + netdev_dbg(priv->dev, + "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n", + tx_ring->head, tx_ring->tail, + tx_ring->head - tx_ring->tail); + return true; + } + + netif_start_queue(priv->dev); + return false; +} + static netdev_tx_t c_can_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct can_frame *frame = (struct can_frame *)skb->data; struct c_can_priv *priv = netdev_priv(dev); - u32 idx, obj; + struct c_can_tx_ring *tx_ring = &priv->tx; + u32 idx, obj, cmd = IF_COMM_TX; - if (can_dropped_invalid_skb(dev, skb)) + if (can_dev_dropped_skb(dev, skb)) return NETDEV_TX_OK; - /* - * This is not a FIFO. C/D_CAN sends out the buffers - * prioritized. The lowest buffer number wins. - */ - idx = fls(atomic_read(&priv->tx_active)); - obj = idx + C_CAN_MSG_OBJ_TX_FIRST; - /* If this is the last buffer, stop the xmit queue */ - if (idx == C_CAN_MSG_OBJ_TX_NUM - 1) + if (c_can_tx_busy(priv, tx_ring)) + return NETDEV_TX_BUSY; + + idx = c_can_get_tx_head(tx_ring); + tx_ring->head++; + if (c_can_get_tx_free(priv, tx_ring) == 0) netif_stop_queue(dev); - /* - * Store the message in the interface so we can call + + if (idx < c_can_get_tx_tail(tx_ring)) + cmd &= ~IF_COMM_TXRQST; /* Cache the message */ + + /* Store the message in the interface so we can call * can_put_echo_skb(). We must do this before we enable * transmit as we might race against do_tx(). */ c_can_setup_tx_object(dev, IF_TX, frame, idx); - priv->dlc[idx] = frame->can_dlc; - can_put_echo_skb(skb, dev, idx); - - /* Update the active bits */ - atomic_add((1 << idx), &priv->tx_active); - /* Start transmission */ - c_can_object_put(dev, IF_TX, obj, IF_COMM_TX); + can_put_echo_skb(skb, dev, idx, 0); + obj = idx + priv->msg_obj_tx_first; + c_can_object_put(dev, IF_TX, obj, cmd); return NETDEV_TX_OK; } @@ -528,7 +520,7 @@ static int c_can_set_bittiming(struct net_device *dev) reg_brpe = brpe & BRP_EXT_BRPE_MASK; netdev_info(dev, - "setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe); + "setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe); ctrl_save = priv->read_reg(priv, C_CAN_CTRL_REG); ctrl_save &= ~CONTROL_INIT; @@ -544,8 +536,7 @@ static int c_can_set_bittiming(struct net_device *dev) return c_can_wait_for_ctrl_init(dev, priv, 0); } -/* - * Configure C_CAN message objects for Tx and Rx purposes: +/* Configure C_CAN message objects for Tx and Rx purposes: * C_CAN provides a total of 32 message objects that can be configured * either for Tx or Rx purposes. Here the first 16 message objects are used as * a reception FIFO. 
The end of reception FIFO is signified by the EoB bit @@ -555,22 +546,42 @@ static int c_can_set_bittiming(struct net_device *dev) */ static void c_can_configure_msg_objects(struct net_device *dev) { + struct c_can_priv *priv = netdev_priv(dev); int i; /* first invalidate all message objects */ - for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_NO_OF_OBJECTS; i++) - c_can_inval_msg_object(dev, IF_RX, i); + for (i = priv->msg_obj_rx_first; i <= priv->msg_obj_num; i++) + c_can_inval_msg_object(dev, IF_NAPI, i); /* setup receive message objects */ - for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++) - c_can_setup_receive_object(dev, IF_RX, i, 0, 0, IF_MCONT_RCV); + for (i = priv->msg_obj_rx_first; i < priv->msg_obj_rx_last; i++) + c_can_setup_receive_object(dev, IF_NAPI, i, 0, 0, IF_MCONT_RCV); - c_can_setup_receive_object(dev, IF_RX, C_CAN_MSG_OBJ_RX_LAST, 0, 0, + c_can_setup_receive_object(dev, IF_NAPI, priv->msg_obj_rx_last, 0, 0, IF_MCONT_RCV_EOB); } -/* - * Configure C_CAN chip: +static int c_can_software_reset(struct net_device *dev) +{ + struct c_can_priv *priv = netdev_priv(dev); + int retry = 0; + + if (priv->type != BOSCH_D_CAN) + return 0; + + priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_SWR | CONTROL_INIT); + while (priv->read_reg(priv, C_CAN_CTRL_REG) & CONTROL_SWR) { + msleep(20); + if (retry++ > 100) { + netdev_err(dev, "CCTRL: software reset failed\n"); + return -EIO; + } + } + + return 0; +} + +/* Configure C_CAN chip: * - enable/disable auto-retransmission * - set operating mode * - configure message objects @@ -578,6 +589,12 @@ static void c_can_configure_msg_objects(struct net_device *dev) static int c_can_chip_config(struct net_device *dev) { struct c_can_priv *priv = netdev_priv(dev); + struct c_can_tx_ring *tx_ring = &priv->tx; + int err; + + err = c_can_software_reset(dev); + if (err) + return err; /* enable automatic retransmission */ priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_ENABLE_AR); @@ -604,8 +621,8 @@ static int c_can_chip_config(struct net_device *dev) priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED); /* Clear all internal status */ - atomic_set(&priv->tx_active, 0); - priv->rxmasked = 0; + tx_ring->head = 0; + tx_ring->tail = 0; priv->tx_dir = 0; /* set bittiming params */ @@ -703,48 +720,69 @@ static int c_can_get_berr_counter(const struct net_device *dev, static void c_can_do_tx(struct net_device *dev) { struct c_can_priv *priv = netdev_priv(dev); + struct c_can_tx_ring *tx_ring = &priv->tx; struct net_device_stats *stats = &dev->stats; - u32 idx, obj, pkts = 0, bytes = 0, pend, clr; + u32 idx, obj, pkts = 0, bytes = 0, pend; + u8 tail; - clr = pend = priv->read_reg(priv, C_CAN_INTPND2_REG); + if (priv->msg_obj_tx_last > 32) + pend = priv->read_reg32(priv, C_CAN_INTPND3_REG); + else + pend = priv->read_reg(priv, C_CAN_INTPND2_REG); while ((idx = ffs(pend))) { idx--; - pend &= ~(1 << idx); - obj = idx + C_CAN_MSG_OBJ_TX_FIRST; - c_can_inval_tx_object(dev, IF_RX, obj); - can_get_echo_skb(dev, idx); - bytes += priv->dlc[idx]; + pend &= ~BIT(idx); + obj = idx + priv->msg_obj_tx_first; + + /* We use IF_NAPI interface instead of IF_TX because we + * are called from c_can_poll(), which runs inside + * NAPI. We are not transmitting. 
+ */ + c_can_inval_tx_object(dev, IF_NAPI, obj); + bytes += can_get_echo_skb(dev, idx, NULL); pkts++; } - /* Clear the bits in the tx_active mask */ - atomic_sub(clr, &priv->tx_active); + if (!pkts) + return; - if (clr & (1 << (C_CAN_MSG_OBJ_TX_NUM - 1))) - netif_wake_queue(dev); + tx_ring->tail += pkts; + if (c_can_get_tx_free(priv, tx_ring)) { + /* Make sure that anybody stopping the queue after + * this sees the new tx_ring->tail. + */ + smp_mb(); + netif_wake_queue(priv->dev); + } - if (pkts) { - stats->tx_bytes += bytes; - stats->tx_packets += pkts; - can_led_event(dev, CAN_LED_EVENT_TX); + stats->tx_bytes += bytes; + stats->tx_packets += pkts; + + tail = c_can_get_tx_tail(tx_ring); + if (priv->type == BOSCH_D_CAN && tail == 0) { + u8 head = c_can_get_tx_head(tx_ring); + + /* Start transmission for all cached messages */ + for (idx = tail; idx < head; idx++) { + obj = idx + priv->msg_obj_tx_first; + c_can_object_put(dev, IF_NAPI, obj, IF_COMM_TXRQST); + } } } -/* - * If we have a gap in the pending bits, that means we either +/* If we have a gap in the pending bits, that means we either * raced with the hardware or failed to readout all upper * objects in the last run due to quota limit. */ -static u32 c_can_adjust_pending(u32 pend) +static u32 c_can_adjust_pending(u32 pend, u32 rx_mask) { u32 weight, lasts; - if (pend == RECEIVE_OBJECT_BITS) + if (pend == rx_mask) return pend; - /* - * If the last set bit is larger than the number of pending + /* If the last set bit is larger than the number of pending * bits we have a gap. */ weight = hweight32(pend); @@ -754,26 +792,26 @@ static u32 c_can_adjust_pending(u32 pend) if (lasts == weight) return pend; - /* - * Find the first set bit after the gap. We walk backwards + /* Find the first set bit after the gap. We walk backwards * from the last set bit. */ - for (lasts--; pend & (1 << (lasts - 1)); lasts--); + for (lasts--; pend & BIT(lasts - 1); lasts--) + ; - return pend & ~((1 << lasts) - 1); + return pend & ~GENMASK(lasts - 1, 0); } static inline void c_can_rx_object_get(struct net_device *dev, struct c_can_priv *priv, u32 obj) { - c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high); + c_can_object_get(dev, IF_NAPI, obj, priv->comm_rcv_high); } static inline void c_can_rx_finalize(struct net_device *dev, struct c_can_priv *priv, u32 obj) { if (priv->type != BOSCH_D_CAN) - c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT); + c_can_object_get(dev, IF_NAPI, obj, IF_COMM_CLR_NEWDAT); } static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv, @@ -785,18 +823,19 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv, pend &= ~BIT(obj - 1); c_can_rx_object_get(dev, priv, obj); - ctrl = priv->read_reg(priv, C_CAN_IFACE(MSGCTRL_REG, IF_RX)); + ctrl = priv->read_reg(priv, C_CAN_IFACE(MSGCTRL_REG, IF_NAPI)); if (ctrl & IF_MCONT_MSGLST) { - int n = c_can_handle_lost_msg_obj(dev, IF_RX, obj, ctrl); + int n; + + n = c_can_handle_lost_msg_obj(dev, IF_NAPI, obj, ctrl); pkts += n; quota -= n; continue; } - /* - * This really should not happen, but this covers some + /* This really should not happen, but this covers some * odd HW behaviour. Do not remove that unless you * want to brick your machine. 
*/ @@ -804,7 +843,7 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv, continue; /* read the data from the message object */ - c_can_read_msg_object(dev, IF_RX, ctrl); + c_can_read_msg_object(dev, IF_NAPI, ctrl); c_can_rx_finalize(dev, priv, obj); @@ -817,19 +856,22 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv, static inline u32 c_can_get_pending(struct c_can_priv *priv) { - u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG); + u32 pend; + + if (priv->msg_obj_rx_last > 16) + pend = priv->read_reg32(priv, C_CAN_NEWDAT1_REG); + else + pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG); return pend; } -/* - * theory of operation: +/* theory of operation: * * c_can core saves a received CAN message into the first free message * object it finds free (starting with the lowest). Bits NEWDAT and * INTPND are set for this message object indicating that a new message - * has arrived. To work-around this issue, we keep two groups of message - * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT. + * has arrived. * * We clear the newdat bit right away. * @@ -840,23 +882,16 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota) struct c_can_priv *priv = netdev_priv(dev); u32 pkts = 0, pend = 0, toread, n; - /* - * It is faster to read only one 16bit register. This is only possible - * for a maximum number of 16 objects. - */ - BUILD_BUG_ON_MSG(C_CAN_MSG_OBJ_RX_LAST > 16, - "Implementation does not support more message objects than 16"); - while (quota > 0) { if (!pend) { pend = c_can_get_pending(priv); if (!pend) break; - /* - * If the pending field has a gap, handle the + /* If the pending field has a gap, handle the * bits above the gap first. */ - toread = c_can_adjust_pending(pend); + toread = c_can_adjust_pending(pend, + priv->msg_obj_rx_mask); } else { toread = pend; } @@ -868,24 +903,23 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota) quota -= n; } - if (pkts) - can_led_event(dev, CAN_LED_EVENT_RX); - return pkts; } static int c_can_handle_state_change(struct net_device *dev, - enum c_can_bus_error_types error_type) + enum c_can_bus_error_types error_type) { unsigned int reg_err_counter; unsigned int rx_err_passive; struct c_can_priv *priv = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; struct can_frame *cf; struct sk_buff *skb; struct can_berr_counter bec; switch (error_type) { + case C_CAN_NO_ERROR: + priv->can.state = CAN_STATE_ERROR_ACTIVE; + break; case C_CAN_ERROR_WARNING: /* error warning state */ priv->can.can_stats.error_warning++; @@ -916,9 +950,15 @@ static int c_can_handle_state_change(struct net_device *dev, ERR_CNT_RP_SHIFT; switch (error_type) { + case C_CAN_NO_ERROR: + cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT; + cf->data[1] = CAN_ERR_CRTL_ACTIVE; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + break; case C_CAN_ERROR_WARNING: /* error warning state */ - cf->can_id |= CAN_ERR_CRTL; + cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT; cf->data[1] = (bec.txerr > bec.rxerr) ? 
CAN_ERR_CRTL_TX_WARNING : CAN_ERR_CRTL_RX_WARNING; @@ -928,7 +968,7 @@ static int c_can_handle_state_change(struct net_device *dev, break; case C_CAN_ERROR_PASSIVE: /* error passive state */ - cf->can_id |= CAN_ERR_CRTL; + cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT; if (rx_err_passive) cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; if (bec.txerr > 127) @@ -946,8 +986,6 @@ static int c_can_handle_state_change(struct net_device *dev, break; } - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; netif_receive_skb(skb); return 1; @@ -961,8 +999,7 @@ static int c_can_handle_bus_err(struct net_device *dev, struct can_frame *cf; struct sk_buff *skb; - /* - * early exit if no lec update or no error. + /* early exit if no lec update or no error. * no lec update means that no CAN bus event has been detected * since CPU wrote 0x7 value to status reg. */ @@ -974,50 +1011,60 @@ static int c_can_handle_bus_err(struct net_device *dev, /* common for all type of bus errors */ priv->can.can_stats.bus_error++; - stats->rx_errors++; /* propagate the error condition to the CAN stack */ skb = alloc_can_err_skb(dev, &cf); - if (unlikely(!skb)) - return 0; - /* - * check for 'last error code' which tells us the + /* check for 'last error code' which tells us the * type of the last error to occur on the CAN bus */ - cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + if (likely(skb)) + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; switch (lec_type) { case LEC_STUFF_ERROR: netdev_dbg(dev, "stuff error\n"); - cf->data[2] |= CAN_ERR_PROT_STUFF; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_STUFF; + stats->rx_errors++; break; case LEC_FORM_ERROR: netdev_dbg(dev, "form error\n"); - cf->data[2] |= CAN_ERR_PROT_FORM; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_FORM; + stats->rx_errors++; break; case LEC_ACK_ERROR: netdev_dbg(dev, "ack error\n"); - cf->data[3] = CAN_ERR_PROT_LOC_ACK; + if (likely(skb)) + cf->data[3] = CAN_ERR_PROT_LOC_ACK; + stats->tx_errors++; break; case LEC_BIT1_ERROR: netdev_dbg(dev, "bit1 error\n"); - cf->data[2] |= CAN_ERR_PROT_BIT1; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_BIT1; + stats->tx_errors++; break; case LEC_BIT0_ERROR: netdev_dbg(dev, "bit0 error\n"); - cf->data[2] |= CAN_ERR_PROT_BIT0; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_BIT0; + stats->tx_errors++; break; case LEC_CRC_ERROR: netdev_dbg(dev, "CRC error\n"); - cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; + if (likely(skb)) + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; + stats->rx_errors++; break; default: break; } - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + if (unlikely(!skb)) + return 0; + netif_receive_skb(skb); return 1; } @@ -1029,10 +1076,17 @@ static int c_can_poll(struct napi_struct *napi, int quota) u16 curr, last = priv->last_status; int work_done = 0; - priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG); - /* Ack status on C_CAN. D_CAN is self clearing */ - if (priv->type != BOSCH_D_CAN) - priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED); + /* Only read the status register if a status interrupt was pending */ + if (atomic_xchg(&priv->sie_pending, 0)) { + priv->last_status = priv->read_reg(priv, C_CAN_STS_REG); + curr = priv->last_status; + /* Ack status on C_CAN. D_CAN is self clearing */ + if (priv->type != BOSCH_D_CAN) + priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED); + } else { + /* no change detected ... 
*/ + curr = last; + } /* handle state changes */ if ((curr & STATUS_EWARN) && (!(last & STATUS_EWARN))) { @@ -1054,11 +1108,17 @@ static int c_can_poll(struct napi_struct *napi, int quota) /* handle bus recovery events */ if ((!(curr & STATUS_BOFF)) && (last & STATUS_BOFF)) { netdev_dbg(dev, "left bus off state\n"); - priv->can.state = CAN_STATE_ERROR_ACTIVE; + work_done += c_can_handle_state_change(dev, C_CAN_ERROR_PASSIVE); } + if ((!(curr & STATUS_EPASS)) && (last & STATUS_EPASS)) { netdev_dbg(dev, "left error passive state\n"); - priv->can.state = CAN_STATE_ERROR_ACTIVE; + work_done += c_can_handle_state_change(dev, C_CAN_ERROR_WARNING); + } + + if ((!(curr & STATUS_EWARN)) && (last & STATUS_EWARN)) { + netdev_dbg(dev, "left error warning state\n"); + work_done += c_can_handle_state_change(dev, C_CAN_NO_ERROR); } /* handle lec errors on the bus */ @@ -1083,10 +1143,16 @@ static irqreturn_t c_can_isr(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; struct c_can_priv *priv = netdev_priv(dev); + int reg_int; - if (!priv->read_reg(priv, C_CAN_INT_REG)) + reg_int = priv->read_reg(priv, C_CAN_INT_REG); + if (!reg_int) return IRQ_NONE; + /* save for later use */ + if (reg_int & INT_STS_PENDING) + atomic_set(&priv->sie_pending, 1); + /* disable all interrupts and schedule the NAPI */ c_can_irq_control(priv, false); napi_schedule(&priv->napi); @@ -1111,7 +1177,7 @@ static int c_can_open(struct net_device *dev) /* register interrupt handler */ err = request_irq(dev->irq, &c_can_isr, IRQF_SHARED, dev->name, - dev); + dev); if (err < 0) { netdev_err(dev, "failed to request interrupt\n"); goto exit_irq_fail; @@ -1122,8 +1188,6 @@ static int c_can_open(struct net_device *dev) if (err) goto exit_start_fail; - can_led_event(dev, CAN_LED_EVENT_OPEN); - napi_enable(&priv->napi); /* enable status change, error and module interrupts */ c_can_irq_control(priv, true); @@ -1154,22 +1218,38 @@ static int c_can_close(struct net_device *dev) c_can_reset_ram(priv, false); c_can_pm_runtime_put_sync(priv); - can_led_event(dev, CAN_LED_EVENT_STOP); - return 0; } -struct net_device *alloc_c_can_dev(void) +struct net_device *alloc_c_can_dev(int msg_obj_num) { struct net_device *dev; struct c_can_priv *priv; + int msg_obj_tx_num = msg_obj_num / 2; - dev = alloc_candev(sizeof(struct c_can_priv), C_CAN_MSG_OBJ_TX_NUM); + dev = alloc_candev(sizeof(*priv), msg_obj_tx_num); if (!dev) return NULL; priv = netdev_priv(dev); - netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT); + priv->msg_obj_num = msg_obj_num; + priv->msg_obj_rx_num = msg_obj_num - msg_obj_tx_num; + priv->msg_obj_rx_first = 1; + priv->msg_obj_rx_last = + priv->msg_obj_rx_first + priv->msg_obj_rx_num - 1; + priv->msg_obj_rx_mask = GENMASK(priv->msg_obj_rx_num - 1, 0); + + priv->msg_obj_tx_num = msg_obj_tx_num; + priv->msg_obj_tx_first = priv->msg_obj_rx_last + 1; + priv->msg_obj_tx_last = + priv->msg_obj_tx_first + priv->msg_obj_tx_num - 1; + + priv->tx.head = 0; + priv->tx.tail = 0; + priv->tx.obj_num = msg_obj_tx_num; + + netif_napi_add_weight(dev, &priv->napi, c_can_poll, + priv->msg_obj_rx_num); priv->dev = dev; priv->can.bittiming_const = &c_can_bittiming_const; @@ -1203,7 +1283,7 @@ int c_can_power_down(struct net_device *dev) /* Wait for the PDA bit to get set */ time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS); while (!(priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) && - time_after(time_out, jiffies)) + time_after(time_out, jiffies)) cpu_relax(); if (time_after(jiffies, time_out)) @@ -1244,15 +1324,25 
@@ int c_can_power_up(struct net_device *dev) /* Wait for the PDA bit to get clear */ time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS); while ((priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) && - time_after(time_out, jiffies)) + time_after(time_out, jiffies)) cpu_relax(); - if (time_after(jiffies, time_out)) - return -ETIMEDOUT; + if (time_after(jiffies, time_out)) { + ret = -ETIMEDOUT; + goto err_out; + } ret = c_can_start(dev); - if (!ret) - c_can_irq_control(priv, true); + if (ret) + goto err_out; + + c_can_irq_control(priv, true); + + return 0; + +err_out: + c_can_reset_ram(priv, false); + c_can_pm_runtime_put_sync(priv); return ret; } @@ -1272,14 +1362,10 @@ static const struct net_device_ops c_can_netdev_ops = { .ndo_open = c_can_open, .ndo_stop = c_can_close, .ndo_start_xmit = c_can_start_xmit, - .ndo_change_mtu = can_change_mtu, }; int register_c_can_dev(struct net_device *dev) { - struct c_can_priv *priv = netdev_priv(dev); - int err; - /* Deactivate pins to prevent DRA7 DCAN IP from being * stuck in transition when module is disabled. * Pins are activated in c_can_start() and deactivated @@ -1287,28 +1373,17 @@ int register_c_can_dev(struct net_device *dev) */ pinctrl_pm_select_sleep_state(dev->dev.parent); - c_can_pm_runtime_enable(priv); - dev->flags |= IFF_ECHO; /* we support local echo */ dev->netdev_ops = &c_can_netdev_ops; + dev->ethtool_ops = &c_can_ethtool_ops; - err = register_candev(dev); - if (err) - c_can_pm_runtime_disable(priv); - else - devm_can_led_init(dev); - - return err; + return register_candev(dev); } EXPORT_SYMBOL_GPL(register_c_can_dev); void unregister_c_can_dev(struct net_device *dev) { - struct c_can_priv *priv = netdev_priv(dev); - unregister_candev(dev); - - c_can_pm_runtime_disable(priv); } EXPORT_SYMBOL_GPL(unregister_c_can_dev); diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c index 406b4847e5dc..093bea597f4e 100644 --- a/drivers/net/can/c_can/c_can_pci.c +++ b/drivers/net/can/c_can/c_can_pci.c @@ -31,6 +31,8 @@ enum c_can_pci_reg_align { struct c_can_pci_data { /* Specify if is C_CAN or D_CAN */ enum c_can_dev_id type; + /* Number of message objects */ + unsigned int msg_obj_num; /* Set the register alignment in the memory */ enum c_can_pci_reg_align reg_align; /* Set the frequency */ @@ -41,32 +43,31 @@ struct c_can_pci_data { void (*init)(const struct c_can_priv *priv, bool enable); }; -/* - * 16-bit c_can registers can be arranged differently in the memory +/* 16-bit c_can registers can be arranged differently in the memory * architecture of different implementations. For example: 16-bit * registers can be aligned to a 16-bit boundary or 32-bit boundary etc. * Handle the same by providing a common read/write interface. 
*/ static u16 c_can_pci_read_reg_aligned_to_16bit(const struct c_can_priv *priv, - enum reg index) + enum reg index) { return readw(priv->base + priv->regs[index]); } static void c_can_pci_write_reg_aligned_to_16bit(const struct c_can_priv *priv, - enum reg index, u16 val) + enum reg index, u16 val) { writew(val, priv->base + priv->regs[index]); } static u16 c_can_pci_read_reg_aligned_to_32bit(const struct c_can_priv *priv, - enum reg index) + enum reg index) { return readw(priv->base + 2 * priv->regs[index]); } static void c_can_pci_write_reg_aligned_to_32bit(const struct c_can_priv *priv, - enum reg index, u16 val) + enum reg index, u16 val) { writew(val, priv->base + 2 * priv->regs[index]); } @@ -88,13 +89,13 @@ static u32 c_can_pci_read_reg32(const struct c_can_priv *priv, enum reg index) u32 val; val = priv->read_reg(priv, index); - val |= ((u32) priv->read_reg(priv, index + 1)) << 16; + val |= ((u32)priv->read_reg(priv, index + 1)) << 16; return val; } static void c_can_pci_write_reg32(const struct c_can_priv *priv, enum reg index, - u32 val) + u32 val) { priv->write_reg(priv, index + 1, val >> 16); priv->write_reg(priv, index, val); @@ -142,14 +143,13 @@ static int c_can_pci_probe(struct pci_dev *pdev, pci_resource_len(pdev, c_can_pci_data->bar)); if (!addr) { dev_err(&pdev->dev, - "device has no PCI memory resources, " - "failing adapter\n"); + "device has no PCI memory resources, failing adapter\n"); ret = -ENOMEM; goto out_release_regions; } /* allocate the c_can device */ - dev = alloc_c_can_dev(); + dev = alloc_c_can_dev(c_can_pci_data->msg_obj_num); if (!dev) { ret = -ENOMEM; goto out_iounmap; @@ -217,7 +217,7 @@ static int c_can_pci_probe(struct pci_dev *pdev, } dev_dbg(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n", - KBUILD_MODNAME, priv->regs, dev->irq); + KBUILD_MODNAME, priv->regs, dev->irq); return 0; @@ -227,7 +227,6 @@ out_iounmap: pci_iounmap(pdev, addr); out_release_regions: pci_disable_msi(pdev); - pci_clear_master(pdev); pci_release_regions(pdev); out_disable_device: pci_disable_device(pdev); @@ -239,20 +238,21 @@ static void c_can_pci_remove(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct c_can_priv *priv = netdev_priv(dev); + void __iomem *addr = priv->base; unregister_c_can_dev(dev); free_c_can_dev(dev); - pci_iounmap(pdev, priv->base); + pci_iounmap(pdev, addr); pci_disable_msi(pdev); - pci_clear_master(pdev); pci_release_regions(pdev); pci_disable_device(pdev); } -static const struct c_can_pci_data c_can_sta2x11= { +static const struct c_can_pci_data c_can_sta2x11 = { .type = BOSCH_C_CAN, + .msg_obj_num = 32, .reg_align = C_CAN_REG_ALIGN_32, .freq = 52000000, /* 52 Mhz */ .bar = 0, @@ -260,6 +260,7 @@ static const struct c_can_pci_data c_can_sta2x11= { static const struct c_can_pci_data c_can_pch = { .type = BOSCH_C_CAN, + .msg_obj_num = 32, .reg_align = C_CAN_REG_32, .freq = 50000000, /* 50 MHz */ .init = c_can_pci_reset_pch, @@ -268,7 +269,7 @@ static const struct c_can_pci_data c_can_pch = { #define C_CAN_ID(_vend, _dev, _driverdata) { \ PCI_DEVICE(_vend, _dev), \ - .driver_data = (unsigned long)&_driverdata, \ + .driver_data = (unsigned long)&(_driverdata), \ } static const struct pci_device_id c_can_pci_tbl[] = { @@ -278,6 +279,7 @@ static const struct pci_device_id c_can_pci_tbl[] = { c_can_pch), {}, }; + static struct pci_driver c_can_pci_driver = { .name = KBUILD_MODNAME, .id_table = c_can_pci_tbl, diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index 
b5145a7f874c..19c86b94a40e 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c @@ -29,9 +29,10 @@ #include <linux/list.h> #include <linux/io.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/property.h> #include <linux/clk.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/mfd/syscon.h> #include <linux/regmap.h> @@ -39,10 +40,11 @@ #include "c_can.h" -#define DCAN_RAM_INIT_BIT (1 << 3) +#define DCAN_RAM_INIT_BIT BIT(3) + static DEFINE_SPINLOCK(raminit_lock); -/* - * 16-bit c_can registers can be arranged differently in the memory + +/* 16-bit c_can registers can be arranged differently in the memory * architecture of different implementations. For example: 16-bit * registers can be aligned to a 16-bit boundary or 32-bit boundary etc. * Handle the same by providing a common read/write interface. @@ -54,7 +56,7 @@ static u16 c_can_plat_read_reg_aligned_to_16bit(const struct c_can_priv *priv, } static void c_can_plat_write_reg_aligned_to_16bit(const struct c_can_priv *priv, - enum reg index, u16 val) + enum reg index, u16 val) { writew(val, priv->base + priv->regs[index]); } @@ -66,7 +68,7 @@ static u16 c_can_plat_read_reg_aligned_to_32bit(const struct c_can_priv *priv, } static void c_can_plat_write_reg_aligned_to_32bit(const struct c_can_priv *priv, - enum reg index, u16 val) + enum reg index, u16 val) { writew(val, priv->base + 2 * priv->regs[index]); } @@ -144,13 +146,13 @@ static u32 c_can_plat_read_reg32(const struct c_can_priv *priv, enum reg index) u32 val; val = priv->read_reg(priv, index); - val |= ((u32) priv->read_reg(priv, index + 1)) << 16; + val |= ((u32)priv->read_reg(priv, index + 1)) << 16; return val; } -static void c_can_plat_write_reg32(const struct c_can_priv *priv, enum reg index, - u32 val) +static void c_can_plat_write_reg32(const struct c_can_priv *priv, + enum reg index, u32 val) { priv->write_reg(priv, index + 1, val >> 16); priv->write_reg(priv, index, val); @@ -161,8 +163,8 @@ static u32 d_can_plat_read_reg32(const struct c_can_priv *priv, enum reg index) return readl(priv->base + priv->regs[index]); } -static void d_can_plat_write_reg32(const struct c_can_priv *priv, enum reg index, - u32 val) +static void d_can_plat_write_reg32(const struct c_can_priv *priv, + enum reg index, u32 val) { writel(val, priv->base + priv->regs[index]); } @@ -191,10 +193,12 @@ static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable) static const struct c_can_driver_data c_can_drvdata = { .id = BOSCH_C_CAN, + .msg_obj_num = 32, }; static const struct c_can_driver_data d_can_drvdata = { .id = BOSCH_D_CAN, + .msg_obj_num = 32, }; static const struct raminit_bits dra7_raminit_bits[] = { @@ -204,6 +208,7 @@ static const struct raminit_bits dra7_raminit_bits[] = { static const struct c_can_driver_data dra7_dcan_drvdata = { .id = BOSCH_D_CAN, + .msg_obj_num = 64, .raminit_num = ARRAY_SIZE(dra7_raminit_bits), .raminit_bits = dra7_raminit_bits, .raminit_pulse = true, @@ -216,6 +221,7 @@ static const struct raminit_bits am3352_raminit_bits[] = { static const struct c_can_driver_data am3352_dcan_drvdata = { .id = BOSCH_D_CAN, + .msg_obj_num = 64, .raminit_num = ARRAY_SIZE(am3352_raminit_bits), .raminit_bits = am3352_raminit_bits, }; @@ -253,50 +259,32 @@ static int c_can_plat_probe(struct platform_device *pdev) void __iomem *addr; struct net_device *dev; struct c_can_priv *priv; - const struct of_device_id *match; struct resource *mem; int irq; struct clk *clk; const struct 
c_can_driver_data *drvdata; struct device_node *np = pdev->dev.of_node; - match = of_match_device(c_can_of_table, &pdev->dev); - if (match) { - drvdata = match->data; - } else if (pdev->id_entry->driver_data) { - drvdata = (struct c_can_driver_data *) - platform_get_device_id(pdev)->driver_data; - } else { - return -ENODEV; - } + drvdata = device_get_match_data(&pdev->dev); /* get the appropriate clk */ clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(clk)) { - ret = PTR_ERR(clk); - goto exit; - } + if (IS_ERR(clk)) + return PTR_ERR(clk); /* get the platform data */ irq = platform_get_irq(pdev, 0); - if (irq <= 0) { - ret = -ENODEV; - goto exit; - } + if (irq < 0) + return irq; - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - addr = devm_ioremap_resource(&pdev->dev, mem); - if (IS_ERR(addr)) { - ret = PTR_ERR(addr); - goto exit; - } + addr = devm_platform_get_and_ioremap_resource(pdev, 0, &mem); + if (IS_ERR(addr)) + return PTR_ERR(addr); /* allocate the c_can device */ - dev = alloc_c_can_dev(); - if (!dev) { - ret = -ENOMEM; - goto exit; - } + dev = alloc_c_can_dev(drvdata->msg_obj_num); + if (!dev) + return -ENOMEM; priv = netdev_priv(dev); switch (drvdata->id) { @@ -328,33 +316,22 @@ static int c_can_plat_probe(struct platform_device *pdev) /* Check if we need custom RAMINIT via syscon. Mostly for TI * platforms. Only supported with DT boot. */ - if (np && of_property_read_bool(np, "syscon-raminit")) { + if (np && of_property_present(np, "syscon-raminit")) { + unsigned int args[2]; u32 id; struct c_can_raminit *raminit = &priv->raminit_sys; ret = -EINVAL; - raminit->syscon = syscon_regmap_lookup_by_phandle(np, - "syscon-raminit"); + raminit->syscon = syscon_regmap_lookup_by_phandle_args(np, + "syscon-raminit", + 2, args); if (IS_ERR(raminit->syscon)) { - /* can fail with -EPROBE_DEFER */ ret = PTR_ERR(raminit->syscon); - free_c_can_dev(dev); - return ret; - } - - if (of_property_read_u32_index(np, "syscon-raminit", 1, - &raminit->reg)) { - dev_err(&pdev->dev, - "couldn't get the RAMINIT reg. 
offset!\n"); goto exit_free_device; } - if (of_property_read_u32_index(np, "syscon-raminit", 2, - &id)) { - dev_err(&pdev->dev, - "couldn't get the CAN instance ID\n"); - goto exit_free_device; - } + raminit->reg = args[0]; + id = args[1]; if (id >= drvdata->raminit_num) { dev_err(&pdev->dev, @@ -379,40 +356,39 @@ static int c_can_plat_probe(struct platform_device *pdev) priv->base = addr; priv->device = &pdev->dev; priv->can.clock.freq = clk_get_rate(clk); - priv->priv = clk; priv->type = drvdata->id; platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); + pm_runtime_enable(priv->device); ret = register_c_can_dev(dev); if (ret) { dev_err(&pdev->dev, "registering %s failed (err=%d)\n", KBUILD_MODNAME, ret); - goto exit_free_device; + goto exit_pm_runtime; } dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n", KBUILD_MODNAME, priv->base, dev->irq); return 0; +exit_pm_runtime: + pm_runtime_disable(priv->device); exit_free_device: free_c_can_dev(dev); -exit: - dev_err(&pdev->dev, "probe failed\n"); return ret; } -static int c_can_plat_remove(struct platform_device *pdev) +static void c_can_plat_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); + struct c_can_priv *priv = netdev_priv(dev); unregister_c_can_dev(dev); - + pm_runtime_disable(priv->device); free_c_can_dev(dev); - - return 0; } #ifdef CONFIG_PM diff --git a/drivers/net/can/can327.c b/drivers/net/can/can327.c new file mode 100644 index 000000000000..b66fc16aedd2 --- /dev/null +++ b/drivers/net/can/can327.c @@ -0,0 +1,1141 @@ +// SPDX-License-Identifier: GPL-2.0 +/* ELM327 based CAN interface driver (tty line discipline) + * + * This driver started as a derivative of linux/drivers/net/can/slcan.c + * and my thanks go to the original authors for their inspiration. + * + * can327.c Author : Max Staudt <max-linux@enpas.org> + * slcan.c Author : Oliver Hartkopp <socketcan@hartkopp.net> + * slip.c Authors : Laurence Culhane <loz@holmes.demon.co.uk> + * Fred N. 
van Kempen <waltje@uwalt.nl.mugnet.org> + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/init.h> +#include <linux/module.h> + +#include <linux/bitops.h> +#include <linux/ctype.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/lockdep.h> +#include <linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include <linux/tty.h> +#include <linux/tty_ldisc.h> +#include <linux/workqueue.h> + +#include <uapi/linux/tty.h> + +#include <linux/can.h> +#include <linux/can/dev.h> +#include <linux/can/error.h> +#include <linux/can/rx-offload.h> + +#define CAN327_NAPI_WEIGHT 4 + +#define CAN327_SIZE_TXBUF 32 +#define CAN327_SIZE_RXBUF 1024 + +#define CAN327_CAN_CONFIG_SEND_SFF 0x8000 +#define CAN327_CAN_CONFIG_VARIABLE_DLC 0x4000 +#define CAN327_CAN_CONFIG_RECV_BOTH_SFF_EFF 0x2000 +#define CAN327_CAN_CONFIG_BAUDRATE_MULT_8_7 0x1000 + +#define CAN327_DUMMY_CHAR 'y' +#define CAN327_DUMMY_STRING "y" +#define CAN327_READY_CHAR '>' + +/* Bits in elm->cmds_todo */ +enum can327_tx_do { + CAN327_TX_DO_CAN_DATA = 0, + CAN327_TX_DO_CANID_11BIT, + CAN327_TX_DO_CANID_29BIT_LOW, + CAN327_TX_DO_CANID_29BIT_HIGH, + CAN327_TX_DO_CAN_CONFIG_PART2, + CAN327_TX_DO_CAN_CONFIG, + CAN327_TX_DO_RESPONSES, + CAN327_TX_DO_SILENT_MONITOR, + CAN327_TX_DO_INIT, +}; + +struct can327 { + /* This must be the first member when using alloc_candev() */ + struct can_priv can; + + struct can_rx_offload offload; + + /* TTY buffers */ + u8 txbuf[CAN327_SIZE_TXBUF]; + u8 rxbuf[CAN327_SIZE_RXBUF]; + + /* Per-channel lock */ + spinlock_t lock; + + /* TTY and netdev devices that we're bridging */ + struct tty_struct *tty; + struct net_device *dev; + + /* TTY buffer accounting */ + struct work_struct tx_work; /* Flushes TTY TX buffer */ + u8 *txhead; /* Next TX byte */ + size_t txleft; /* Bytes left to TX */ + int rxfill; /* Bytes already RX'd in buffer */ + + /* State machine */ + enum { + CAN327_STATE_NOTINIT = 0, + CAN327_STATE_GETDUMMYCHAR, + CAN327_STATE_GETPROMPT, + CAN327_STATE_RECEIVING, + } state; + + /* Things we have yet to send */ + char **next_init_cmd; + unsigned long cmds_todo; + + /* The CAN frame and config the ELM327 is sending/using, + * or will send/use after finishing all cmds_todo + */ + struct can_frame can_frame_to_send; + u16 can_config; + u8 can_bitrate_divisor; + + /* Parser state */ + bool drop_next_line; + + /* Stop the channel on UART side hardware failure, e.g. stray + * characters or neverending lines. This may be caused by bad + * UART wiring, a bad ELM327, a bad UART bridge... + * Once this is true, nothing will be sent to the TTY. + */ + bool uart_side_failure; +}; + +static inline void can327_uart_side_failure(struct can327 *elm); + +static void can327_send(struct can327 *elm, const void *buf, size_t len) +{ + int written; + + lockdep_assert_held(&elm->lock); + + if (elm->uart_side_failure) + return; + + memcpy(elm->txbuf, buf, len); + + /* Order of next two lines is *very* important. + * When we are sending a little amount of data, + * the transfer may be completed inside the ops->write() + * routine, because it's running with interrupts enabled. + * In this case we *never* got WRITE_WAKEUP event, + * if we did not request it before write operation. + * 14 Oct 1994 Dmitry Gorodchanin. 
+ */ + set_bit(TTY_DO_WRITE_WAKEUP, &elm->tty->flags); + written = elm->tty->ops->write(elm->tty, elm->txbuf, len); + if (written < 0) { + netdev_err(elm->dev, "Failed to write to tty %s.\n", + elm->tty->name); + can327_uart_side_failure(elm); + return; + } + + elm->txleft = len - written; + elm->txhead = elm->txbuf + written; +} + +/* Take the ELM327 out of almost any state and back into command mode. + * We send CAN327_DUMMY_CHAR which will either abort any running + * operation, or be echoed back to us in case we're already in command + * mode. + */ +static void can327_kick_into_cmd_mode(struct can327 *elm) +{ + lockdep_assert_held(&elm->lock); + + if (elm->state != CAN327_STATE_GETDUMMYCHAR && + elm->state != CAN327_STATE_GETPROMPT) { + can327_send(elm, CAN327_DUMMY_STRING, 1); + + elm->state = CAN327_STATE_GETDUMMYCHAR; + } +} + +/* Schedule a CAN frame and necessary config changes to be sent to the TTY. */ +static void can327_send_frame(struct can327 *elm, struct can_frame *frame) +{ + lockdep_assert_held(&elm->lock); + + /* Schedule any necessary changes in ELM327's CAN configuration */ + if (elm->can_frame_to_send.can_id != frame->can_id) { + /* Set the new CAN ID for transmission. */ + if ((frame->can_id ^ elm->can_frame_to_send.can_id) + & CAN_EFF_FLAG) { + elm->can_config = + (frame->can_id & CAN_EFF_FLAG ? 0 : CAN327_CAN_CONFIG_SEND_SFF) | + CAN327_CAN_CONFIG_VARIABLE_DLC | + CAN327_CAN_CONFIG_RECV_BOTH_SFF_EFF | + elm->can_bitrate_divisor; + + set_bit(CAN327_TX_DO_CAN_CONFIG, &elm->cmds_todo); + } + + if (frame->can_id & CAN_EFF_FLAG) { + clear_bit(CAN327_TX_DO_CANID_11BIT, &elm->cmds_todo); + set_bit(CAN327_TX_DO_CANID_29BIT_LOW, &elm->cmds_todo); + set_bit(CAN327_TX_DO_CANID_29BIT_HIGH, &elm->cmds_todo); + } else { + set_bit(CAN327_TX_DO_CANID_11BIT, &elm->cmds_todo); + clear_bit(CAN327_TX_DO_CANID_29BIT_LOW, + &elm->cmds_todo); + clear_bit(CAN327_TX_DO_CANID_29BIT_HIGH, + &elm->cmds_todo); + } + } + + /* Schedule the CAN frame itself. */ + elm->can_frame_to_send = *frame; + set_bit(CAN327_TX_DO_CAN_DATA, &elm->cmds_todo); + + can327_kick_into_cmd_mode(elm); +} + +/* ELM327 initialisation sequence. + * The line length is limited by the buffer in can327_handle_prompt(). + */ +static char *can327_init_script[] = { + "AT WS\r", /* v1.0: Warm Start */ + "AT PP FF OFF\r", /* v1.0: All Programmable Parameters Off */ + "AT M0\r", /* v1.0: Memory Off */ + "AT AL\r", /* v1.0: Allow Long messages */ + "AT BI\r", /* v1.0: Bypass Initialisation */ + "AT CAF0\r", /* v1.0: CAN Auto Formatting Off */ + "AT CFC0\r", /* v1.0: CAN Flow Control Off */ + "AT CF 000\r", /* v1.0: Reset CAN ID Filter */ + "AT CM 000\r", /* v1.0: Reset CAN ID Mask */ + "AT E1\r", /* v1.0: Echo On */ + "AT H1\r", /* v1.0: Headers On */ + "AT L0\r", /* v1.0: Linefeeds Off */ + "AT SH 7DF\r", /* v1.0: Set CAN sending ID to 0x7df */ + "AT ST FF\r", /* v1.0: Set maximum Timeout for response after TX */ + "AT AT0\r", /* v1.2: Adaptive Timing Off */ + "AT D1\r", /* v1.3: Print DLC On */ + "AT S1\r", /* v1.3: Spaces On */ + "AT TP B\r", /* v1.0: Try Protocol B */ + NULL +}; + +static void can327_init_device(struct can327 *elm) +{ + lockdep_assert_held(&elm->lock); + + elm->state = CAN327_STATE_NOTINIT; + elm->can_frame_to_send.can_id = 0x7df; /* ELM327 HW default */ + elm->rxfill = 0; + elm->drop_next_line = 0; + + /* We can only set the bitrate as a fraction of 500000. + * The bitrates listed in can327_bitrate_const will + * limit the user to the right values. 
+ */ + elm->can_bitrate_divisor = 500000 / elm->can.bittiming.bitrate; + elm->can_config = + CAN327_CAN_CONFIG_SEND_SFF | CAN327_CAN_CONFIG_VARIABLE_DLC | + CAN327_CAN_CONFIG_RECV_BOTH_SFF_EFF | elm->can_bitrate_divisor; + + /* Configure ELM327 and then start monitoring */ + elm->next_init_cmd = &can327_init_script[0]; + set_bit(CAN327_TX_DO_INIT, &elm->cmds_todo); + set_bit(CAN327_TX_DO_SILENT_MONITOR, &elm->cmds_todo); + set_bit(CAN327_TX_DO_RESPONSES, &elm->cmds_todo); + set_bit(CAN327_TX_DO_CAN_CONFIG, &elm->cmds_todo); + + can327_kick_into_cmd_mode(elm); +} + +static void can327_feed_frame_to_netdev(struct can327 *elm, struct sk_buff *skb) +{ + lockdep_assert_held(&elm->lock); + + if (!netif_running(elm->dev)) { + kfree_skb(skb); + return; + } + + /* Queue for NAPI pickup. + * rx-offload will update stats and LEDs for us. + */ + if (can_rx_offload_queue_tail(&elm->offload, skb)) + elm->dev->stats.rx_fifo_errors++; + + /* Wake NAPI */ + can_rx_offload_irq_finish(&elm->offload); +} + +/* Called when we're out of ideas and just want it all to end. */ +static inline void can327_uart_side_failure(struct can327 *elm) +{ + struct can_frame *frame; + struct sk_buff *skb; + + lockdep_assert_held(&elm->lock); + + elm->uart_side_failure = true; + + clear_bit(TTY_DO_WRITE_WAKEUP, &elm->tty->flags); + + elm->can.can_stats.bus_off++; + netif_stop_queue(elm->dev); + elm->can.state = CAN_STATE_BUS_OFF; + can_bus_off(elm->dev); + + netdev_err(elm->dev, + "ELM327 misbehaved. Blocking further communication.\n"); + + skb = alloc_can_err_skb(elm->dev, &frame); + if (!skb) + return; + + frame->can_id |= CAN_ERR_BUSOFF; + can327_feed_frame_to_netdev(elm, skb); +} + +/* Compares a byte buffer (non-NUL terminated) to the payload part of + * a string, and returns true iff the buffer (content *and* length) is + * exactly that string, without the terminating NUL byte. + * + * Example: If reference is "BUS ERROR", then this returns true iff nbytes == 9 + * and !memcmp(buf, "BUS ERROR", 9). + * + * The reason to use strings is so we can easily include them in the C + * code, and to avoid hardcoding lengths. + */ +static inline bool can327_rxbuf_cmp(const u8 *buf, size_t nbytes, + const char *reference) +{ + size_t ref_len = strlen(reference); + + return (nbytes == ref_len) && !memcmp(buf, reference, ref_len); +} + +static void can327_parse_error(struct can327 *elm, size_t len) +{ + struct can_frame *frame; + struct sk_buff *skb; + + lockdep_assert_held(&elm->lock); + + skb = alloc_can_err_skb(elm->dev, &frame); + if (!skb) + /* It's okay to return here: + * The outer parsing loop will drop this UART buffer. + */ + return; + + /* Filter possible error messages based on length of RX'd line */ + if (can327_rxbuf_cmp(elm->rxbuf, len, "UNABLE TO CONNECT")) { + netdev_err(elm->dev, + "ELM327 reported UNABLE TO CONNECT. Please check your setup.\n"); + } else if (can327_rxbuf_cmp(elm->rxbuf, len, "BUFFER FULL")) { + /* This will only happen if the last data line was complete. + * Otherwise, can327_parse_frame() will heuristically + * emit this kind of error frame instead. 
+ */ + frame->can_id |= CAN_ERR_CRTL; + frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + } else if (can327_rxbuf_cmp(elm->rxbuf, len, "BUS ERROR")) { + frame->can_id |= CAN_ERR_BUSERROR; + } else if (can327_rxbuf_cmp(elm->rxbuf, len, "CAN ERROR")) { + frame->can_id |= CAN_ERR_PROT; + } else if (can327_rxbuf_cmp(elm->rxbuf, len, "<RX ERROR")) { + frame->can_id |= CAN_ERR_PROT; + } else if (can327_rxbuf_cmp(elm->rxbuf, len, "BUS BUSY")) { + frame->can_id |= CAN_ERR_PROT; + frame->data[2] = CAN_ERR_PROT_OVERLOAD; + } else if (can327_rxbuf_cmp(elm->rxbuf, len, "FB ERROR")) { + frame->can_id |= CAN_ERR_PROT; + frame->data[2] = CAN_ERR_PROT_TX; + } else if (len == 5 && !memcmp(elm->rxbuf, "ERR", 3)) { + /* ERR is followed by two digits, hence line length 5 */ + netdev_err(elm->dev, "ELM327 reported an ERR%c%c. Please power it off and on again.\n", + elm->rxbuf[3], elm->rxbuf[4]); + frame->can_id |= CAN_ERR_CRTL; + } else { + /* Something else has happened. + * Maybe garbage on the UART line. + * Emit a generic error frame. + */ + } + + can327_feed_frame_to_netdev(elm, skb); +} + +/* Parse CAN frames coming as ASCII from ELM327. + * They can be of various formats: + * + * 29-bit ID (EFF): 12 34 56 78 D PL PL PL PL PL PL PL PL + * 11-bit ID (!EFF): 123 D PL PL PL PL PL PL PL PL + * + * where D = DLC, PL = payload byte + * + * Instead of a payload, RTR indicates a remote request. + * + * We will use the spaces and line length to guess the format. + */ +static int can327_parse_frame(struct can327 *elm, size_t len) +{ + struct can_frame *frame; + struct sk_buff *skb; + int hexlen; + int datastart; + int i; + + lockdep_assert_held(&elm->lock); + + skb = alloc_can_skb(elm->dev, &frame); + if (!skb) + return -ENOMEM; + + /* Find first non-hex and non-space character: + * - In the simplest case, there is none. + * - For RTR frames, 'R' is the first non-hex character. + * - An error message may replace the end of the data line. + */ + for (hexlen = 0; hexlen <= len; hexlen++) { + if (hex_to_bin(elm->rxbuf[hexlen]) < 0 && + elm->rxbuf[hexlen] != ' ') { + break; + } + } + + /* Sanity check whether the line is really a clean hexdump, + * or terminated by an error message, or contains garbage. + */ + if (hexlen < len && !isdigit(elm->rxbuf[hexlen]) && + !isupper(elm->rxbuf[hexlen]) && '<' != elm->rxbuf[hexlen] && + ' ' != elm->rxbuf[hexlen]) { + /* The line is likely garbled anyway, so bail. + * The main code will restart listening. + */ + kfree_skb(skb); + return -ENODATA; + } + + /* Use spaces in CAN ID to distinguish 29 or 11 bit address length. + * No out-of-bounds access: + * We use the fact that we can always read from elm->rxbuf. + */ + if (elm->rxbuf[2] == ' ' && elm->rxbuf[5] == ' ' && + elm->rxbuf[8] == ' ' && elm->rxbuf[11] == ' ' && + elm->rxbuf[13] == ' ') { + frame->can_id = CAN_EFF_FLAG; + datastart = 14; + } else if (elm->rxbuf[3] == ' ' && elm->rxbuf[5] == ' ') { + datastart = 6; + } else { + /* This is not a well-formatted data line. + * Assume it's an error message. + */ + kfree_skb(skb); + return -ENODATA; + } + + if (hexlen < datastart) { + /* The line is too short to be a valid frame hex dump. + * Something interrupted the hex dump or it is invalid. + */ + kfree_skb(skb); + return -ENODATA; + } + + /* From here on all chars up to buf[hexlen] are hex or spaces, + * at well-defined offsets. 
+ */ + + /* Read CAN data length */ + frame->len = (hex_to_bin(elm->rxbuf[datastart - 2]) << 0); + + /* Read CAN ID */ + if (frame->can_id & CAN_EFF_FLAG) { + frame->can_id |= (hex_to_bin(elm->rxbuf[0]) << 28) | + (hex_to_bin(elm->rxbuf[1]) << 24) | + (hex_to_bin(elm->rxbuf[3]) << 20) | + (hex_to_bin(elm->rxbuf[4]) << 16) | + (hex_to_bin(elm->rxbuf[6]) << 12) | + (hex_to_bin(elm->rxbuf[7]) << 8) | + (hex_to_bin(elm->rxbuf[9]) << 4) | + (hex_to_bin(elm->rxbuf[10]) << 0); + } else { + frame->can_id |= (hex_to_bin(elm->rxbuf[0]) << 8) | + (hex_to_bin(elm->rxbuf[1]) << 4) | + (hex_to_bin(elm->rxbuf[2]) << 0); + } + + /* Check for RTR frame */ + if (elm->rxfill >= hexlen + 3 && + !memcmp(&elm->rxbuf[hexlen], "RTR", 3)) { + frame->can_id |= CAN_RTR_FLAG; + } + + /* Is the line long enough to hold the advertised payload? + * Note: RTR frames have a DLC, but no actual payload. + */ + if (!(frame->can_id & CAN_RTR_FLAG) && + (hexlen < frame->len * 3 + datastart)) { + /* Incomplete frame. + * Probably the ELM327's RS232 TX buffer was full. + * Emit an error frame and exit. + */ + frame->can_id = CAN_ERR_FLAG | CAN_ERR_CRTL; + frame->len = CAN_ERR_DLC; + frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + can327_feed_frame_to_netdev(elm, skb); + + /* Signal failure to parse. + * The line will be re-parsed as an error line, which will fail. + * However, this will correctly drop the state machine back into + * command mode. + */ + return -ENODATA; + } + + /* Parse the data nibbles. */ + for (i = 0; i < frame->len; i++) { + frame->data[i] = + (hex_to_bin(elm->rxbuf[datastart + 3 * i]) << 4) | + (hex_to_bin(elm->rxbuf[datastart + 3 * i + 1])); + } + + /* Feed the frame to the network layer. */ + can327_feed_frame_to_netdev(elm, skb); + + return 0; +} + +static void can327_parse_line(struct can327 *elm, size_t len) +{ + lockdep_assert_held(&elm->lock); + + /* Skip empty lines */ + if (!len) + return; + + /* Skip echo lines */ + if (elm->drop_next_line) { + elm->drop_next_line = 0; + return; + } else if (!memcmp(elm->rxbuf, "AT", 2)) { + return; + } + + /* Regular parsing */ + if (elm->state == CAN327_STATE_RECEIVING && + can327_parse_frame(elm, len)) { + /* Parse an error line. */ + can327_parse_error(elm, len); + + /* Start afresh. */ + can327_kick_into_cmd_mode(elm); + } +} + +static void can327_handle_prompt(struct can327 *elm) +{ + struct can_frame *frame = &elm->can_frame_to_send; + /* Size this buffer for the largest ELM327 line we may generate, + * which is currently an 8 byte CAN frame's payload hexdump. + * Items in can327_init_script must fit here, too! + */ + char local_txbuf[sizeof("0102030405060708\r")]; + + lockdep_assert_held(&elm->lock); + + if (!elm->cmds_todo) { + /* Enter CAN monitor mode */ + can327_send(elm, "ATMA\r", 5); + elm->state = CAN327_STATE_RECEIVING; + + /* We will be in the default state once this command is + * sent, so enable the TX packet queue. + */ + netif_wake_queue(elm->dev); + + return; + } + + /* Reconfigure ELM327 step by step as indicated by elm->cmds_todo */ + if (test_bit(CAN327_TX_DO_INIT, &elm->cmds_todo)) { + snprintf(local_txbuf, sizeof(local_txbuf), "%s", + *elm->next_init_cmd); + + elm->next_init_cmd++; + if (!(*elm->next_init_cmd)) { + clear_bit(CAN327_TX_DO_INIT, &elm->cmds_todo); + /* Init finished. 
*/ + } + + } else if (test_and_clear_bit(CAN327_TX_DO_SILENT_MONITOR, &elm->cmds_todo)) { + snprintf(local_txbuf, sizeof(local_txbuf), + "ATCSM%i\r", + !!(elm->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)); + + } else if (test_and_clear_bit(CAN327_TX_DO_RESPONSES, &elm->cmds_todo)) { + snprintf(local_txbuf, sizeof(local_txbuf), + "ATR%i\r", + !(elm->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)); + + } else if (test_and_clear_bit(CAN327_TX_DO_CAN_CONFIG, &elm->cmds_todo)) { + snprintf(local_txbuf, sizeof(local_txbuf), + "ATPC\r"); + set_bit(CAN327_TX_DO_CAN_CONFIG_PART2, &elm->cmds_todo); + + } else if (test_and_clear_bit(CAN327_TX_DO_CAN_CONFIG_PART2, &elm->cmds_todo)) { + snprintf(local_txbuf, sizeof(local_txbuf), + "ATPB%04X\r", + elm->can_config); + + } else if (test_and_clear_bit(CAN327_TX_DO_CANID_29BIT_HIGH, &elm->cmds_todo)) { + snprintf(local_txbuf, sizeof(local_txbuf), + "ATCP%02X\r", + (frame->can_id & CAN_EFF_MASK) >> 24); + + } else if (test_and_clear_bit(CAN327_TX_DO_CANID_29BIT_LOW, &elm->cmds_todo)) { + snprintf(local_txbuf, sizeof(local_txbuf), + "ATSH%06X\r", + frame->can_id & CAN_EFF_MASK & ((1 << 24) - 1)); + + } else if (test_and_clear_bit(CAN327_TX_DO_CANID_11BIT, &elm->cmds_todo)) { + snprintf(local_txbuf, sizeof(local_txbuf), + "ATSH%03X\r", + frame->can_id & CAN_SFF_MASK); + + } else if (test_and_clear_bit(CAN327_TX_DO_CAN_DATA, &elm->cmds_todo)) { + if (frame->can_id & CAN_RTR_FLAG) { + /* Send an RTR frame. Their DLC is fixed. + * Some chips don't send them at all. + */ + snprintf(local_txbuf, sizeof(local_txbuf), "ATRTR\r"); + } else { + /* Send a regular CAN data frame */ + int i; + + for (i = 0; i < frame->len; i++) { + snprintf(&local_txbuf[2 * i], + sizeof(local_txbuf), "%02X", + frame->data[i]); + } + + snprintf(&local_txbuf[2 * i], sizeof(local_txbuf), + "\r"); + } + + elm->drop_next_line = 1; + elm->state = CAN327_STATE_RECEIVING; + + /* We will be in the default state once this command is + * sent, so enable the TX packet queue. + */ + netif_wake_queue(elm->dev); + } + + can327_send(elm, local_txbuf, strlen(local_txbuf)); +} + +static bool can327_is_ready_char(char c) +{ + /* Bits 0xc0 are sometimes set (randomly), hence the mask. + * Probably bad hardware. + */ + return (c & 0x3f) == CAN327_READY_CHAR; +} + +static void can327_drop_bytes(struct can327 *elm, size_t i) +{ + lockdep_assert_held(&elm->lock); + + memmove(&elm->rxbuf[0], &elm->rxbuf[i], CAN327_SIZE_RXBUF - i); + elm->rxfill -= i; +} + +static void can327_parse_rxbuf(struct can327 *elm, size_t first_new_char_idx) +{ + size_t len, pos; + + lockdep_assert_held(&elm->lock); + + switch (elm->state) { + case CAN327_STATE_NOTINIT: + elm->rxfill = 0; + break; + + case CAN327_STATE_GETDUMMYCHAR: + /* Wait for 'y' or '>' */ + for (pos = 0; pos < elm->rxfill; pos++) { + if (elm->rxbuf[pos] == CAN327_DUMMY_CHAR) { + can327_send(elm, "\r", 1); + elm->state = CAN327_STATE_GETPROMPT; + pos++; + break; + } else if (can327_is_ready_char(elm->rxbuf[pos])) { + can327_send(elm, CAN327_DUMMY_STRING, 1); + pos++; + break; + } + } + + can327_drop_bytes(elm, pos); + break; + + case CAN327_STATE_GETPROMPT: + /* Wait for '>' */ + if (can327_is_ready_char(elm->rxbuf[elm->rxfill - 1])) + can327_handle_prompt(elm); + + elm->rxfill = 0; + break; + + case CAN327_STATE_RECEIVING: + /* Find <CR> delimiting feedback lines. */ + len = first_new_char_idx; + while (len < elm->rxfill && elm->rxbuf[len] != '\r') + len++; + + if (len == CAN327_SIZE_RXBUF) { + /* Assume the buffer ran full with garbage. 
+ * Did we even connect at the right baud rate? + */ + netdev_err(elm->dev, + "RX buffer overflow. Faulty ELM327 or UART?\n"); + can327_uart_side_failure(elm); + } else if (len == elm->rxfill) { + if (can327_is_ready_char(elm->rxbuf[elm->rxfill - 1])) { + /* The ELM327's AT ST response timeout ran out, + * so we got a prompt. + * Clear RX buffer and restart listening. + */ + elm->rxfill = 0; + + can327_handle_prompt(elm); + } + + /* No <CR> found - we haven't received a full line yet. + * Wait for more data. + */ + } else { + /* We have a full line to parse. */ + can327_parse_line(elm, len); + + /* Remove parsed data from RX buffer. */ + can327_drop_bytes(elm, len + 1); + + /* More data to parse? */ + if (elm->rxfill) + can327_parse_rxbuf(elm, 0); + } + } +} + +static int can327_netdev_open(struct net_device *dev) +{ + struct can327 *elm = netdev_priv(dev); + int err; + + spin_lock_bh(&elm->lock); + + if (!elm->tty) { + spin_unlock_bh(&elm->lock); + return -ENODEV; + } + + if (elm->uart_side_failure) + netdev_warn(elm->dev, + "Reopening netdev after a UART side fault has been detected.\n"); + + /* Clear TTY buffers */ + elm->rxfill = 0; + elm->txleft = 0; + + /* open_candev() checks for elm->can.bittiming.bitrate != 0 */ + err = open_candev(dev); + if (err) { + spin_unlock_bh(&elm->lock); + return err; + } + + can327_init_device(elm); + spin_unlock_bh(&elm->lock); + + err = can_rx_offload_add_manual(dev, &elm->offload, CAN327_NAPI_WEIGHT); + if (err) { + close_candev(dev); + return err; + } + + can_rx_offload_enable(&elm->offload); + + elm->can.state = CAN_STATE_ERROR_ACTIVE; + netif_start_queue(dev); + + return 0; +} + +static int can327_netdev_close(struct net_device *dev) +{ + struct can327 *elm = netdev_priv(dev); + + /* Interrupt whatever the ELM327 is doing right now */ + spin_lock_bh(&elm->lock); + can327_send(elm, CAN327_DUMMY_STRING, 1); + spin_unlock_bh(&elm->lock); + + netif_stop_queue(dev); + + /* We don't flush the UART TX queue here, as we want final stop + * commands (like the above dummy char) to be flushed out. + */ + + can_rx_offload_disable(&elm->offload); + elm->can.state = CAN_STATE_STOPPED; + can_rx_offload_del(&elm->offload); + close_candev(dev); + + return 0; +} + +/* Send a can_frame to a TTY. */ +static netdev_tx_t can327_netdev_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct can327 *elm = netdev_priv(dev); + struct can_frame *frame = (struct can_frame *)skb->data; + + if (can_dev_dropped_skb(dev, skb)) + return NETDEV_TX_OK; + + /* We shouldn't get here after a hardware fault: + * can_bus_off() calls netif_carrier_off() + */ + if (elm->uart_side_failure) { + WARN_ON_ONCE(elm->uart_side_failure); + goto out; + } + + netif_stop_queue(dev); + + /* BHs are already disabled, so no spin_lock_bh(). + * See Documentation/networking/netdevices.rst + */ + spin_lock(&elm->lock); + can327_send_frame(elm, frame); + spin_unlock(&elm->lock); + + dev->stats.tx_packets++; + dev->stats.tx_bytes += frame->can_id & CAN_RTR_FLAG ? 
0 : frame->len; + + skb_tx_timestamp(skb); + +out: + kfree_skb(skb); + return NETDEV_TX_OK; +} + +static const struct net_device_ops can327_netdev_ops = { + .ndo_open = can327_netdev_open, + .ndo_stop = can327_netdev_close, + .ndo_start_xmit = can327_netdev_start_xmit, +}; + +static const struct ethtool_ops can327_ethtool_ops = { + .get_ts_info = ethtool_op_get_ts_info, +}; + +static bool can327_is_valid_rx_char(u8 c) +{ + static const bool lut_char_is_valid['z'] = { + ['\r'] = true, + [' '] = true, + ['.'] = true, + ['0'] = true, true, true, true, true, + ['5'] = true, true, true, true, true, + ['<'] = true, + [CAN327_READY_CHAR] = true, + ['?'] = true, + ['A'] = true, true, true, true, true, true, true, + ['H'] = true, true, true, true, true, true, true, + ['O'] = true, true, true, true, true, true, true, + ['V'] = true, true, true, true, true, + ['a'] = true, + ['b'] = true, + ['v'] = true, + [CAN327_DUMMY_CHAR] = true, + }; + BUILD_BUG_ON(CAN327_DUMMY_CHAR >= 'z'); + + return (c < ARRAY_SIZE(lut_char_is_valid) && lut_char_is_valid[c]); +} + +/* Handle incoming ELM327 ASCII data. + * This will not be re-entered while running, but other ldisc + * functions may be called in parallel. + */ +static void can327_ldisc_rx(struct tty_struct *tty, const u8 *cp, + const u8 *fp, size_t count) +{ + struct can327 *elm = tty->disc_data; + size_t first_new_char_idx; + + if (elm->uart_side_failure) + return; + + spin_lock_bh(&elm->lock); + + /* Store old rxfill, so can327_parse_rxbuf() will have + * the option of skipping already checked characters. + */ + first_new_char_idx = elm->rxfill; + + while (count--) { + if (elm->rxfill >= CAN327_SIZE_RXBUF) { + netdev_err(elm->dev, + "Receive buffer overflowed. Bad chip or wiring? count = %zu", + count); + goto uart_failure; + } + if (fp && *fp++) { + netdev_err(elm->dev, + "Error in received character stream. Check your wiring."); + goto uart_failure; + } + + /* Ignore NUL characters, which the PIC microcontroller may + * inadvertently insert due to a known hardware bug. + * See ELM327 documentation, which refers to a Microchip PIC + * bug description. + */ + if (*cp) { + /* Check for stray characters on the UART line. + * Likely caused by bad hardware. + */ + if (!can327_is_valid_rx_char(*cp)) { + netdev_err(elm->dev, + "Received illegal character %02x.\n", + *cp); + goto uart_failure; + } + + elm->rxbuf[elm->rxfill++] = *cp; + } + + cp++; + } + + can327_parse_rxbuf(elm, first_new_char_idx); + spin_unlock_bh(&elm->lock); + + return; +uart_failure: + can327_uart_side_failure(elm); + spin_unlock_bh(&elm->lock); +} + +/* Write out remaining transmit buffer. + * Scheduled when TTY is writable. + */ +static void can327_ldisc_tx_worker(struct work_struct *work) +{ + struct can327 *elm = container_of(work, struct can327, tx_work); + ssize_t written; + + if (elm->uart_side_failure) + return; + + spin_lock_bh(&elm->lock); + + if (elm->txleft) { + written = elm->tty->ops->write(elm->tty, elm->txhead, + elm->txleft); + if (written < 0) { + netdev_err(elm->dev, "Failed to write to tty %s.\n", + elm->tty->name); + can327_uart_side_failure(elm); + + spin_unlock_bh(&elm->lock); + return; + } + + elm->txleft -= written; + elm->txhead += written; + } + + if (!elm->txleft) + clear_bit(TTY_DO_WRITE_WAKEUP, &elm->tty->flags); + + spin_unlock_bh(&elm->lock); +} + +/* Called by the driver when there's room for more data. 
*/ +static void can327_ldisc_tx_wakeup(struct tty_struct *tty) +{ + struct can327 *elm = tty->disc_data; + + schedule_work(&elm->tx_work); +} + +/* ELM327 can only handle bitrates that are integer divisors of 500 kHz, + * or 7/8 of that. Divisors are 1 to 64. + * Currently we don't implement support for 7/8 rates. + */ +static const u32 can327_bitrate_const[] = { + 7812, 7936, 8064, 8196, 8333, 8474, 8620, 8771, + 8928, 9090, 9259, 9433, 9615, 9803, 10000, 10204, + 10416, 10638, 10869, 11111, 11363, 11627, 11904, 12195, + 12500, 12820, 13157, 13513, 13888, 14285, 14705, 15151, + 15625, 16129, 16666, 17241, 17857, 18518, 19230, 20000, + 20833, 21739, 22727, 23809, 25000, 26315, 27777, 29411, + 31250, 33333, 35714, 38461, 41666, 45454, 50000, 55555, + 62500, 71428, 83333, 100000, 125000, 166666, 250000, 500000 +}; + +static int can327_ldisc_open(struct tty_struct *tty) +{ + struct net_device *dev; + struct can327 *elm; + int err; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (!tty->ops->write) + return -EOPNOTSUPP; + + dev = alloc_candev(sizeof(struct can327), 0); + if (!dev) + return -ENFILE; + elm = netdev_priv(dev); + + /* Configure TTY interface */ + tty->receive_room = 65536; /* We don't flow control */ + spin_lock_init(&elm->lock); + INIT_WORK(&elm->tx_work, can327_ldisc_tx_worker); + + /* Configure CAN metadata */ + elm->can.bitrate_const = can327_bitrate_const; + elm->can.bitrate_const_cnt = ARRAY_SIZE(can327_bitrate_const); + elm->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY; + + /* Configure netdev interface */ + elm->dev = dev; + dev->netdev_ops = &can327_netdev_ops; + dev->ethtool_ops = &can327_ethtool_ops; + + /* Mark ldisc channel as alive */ + elm->tty = tty; + tty->disc_data = elm; + + /* Let 'er rip */ + err = register_candev(elm->dev); + if (err) { + free_candev(elm->dev); + return err; + } + + netdev_info(elm->dev, "can327 on %s.\n", tty->name); + + return 0; +} + +/* Close down a can327 channel. + * This means flushing out any pending queues, and then returning. + * This call is serialized against other ldisc functions: + * Once this is called, no other ldisc function of ours is entered. + * + * We also use this function for a hangup event. + */ +static void can327_ldisc_close(struct tty_struct *tty) +{ + struct can327 *elm = tty->disc_data; + + /* unregister_netdev() calls .ndo_stop() so we don't have to. */ + unregister_candev(elm->dev); + + /* Give UART one final chance to flush. + * No need to clear TTY_DO_WRITE_WAKEUP since .write_wakeup() is + * serialised against .close() and will not be called once we return. 
+ */ + flush_work(&elm->tx_work); + + /* Mark channel as dead */ + spin_lock_bh(&elm->lock); + tty->disc_data = NULL; + elm->tty = NULL; + spin_unlock_bh(&elm->lock); + + netdev_info(elm->dev, "can327 off %s.\n", tty->name); + + free_candev(elm->dev); +} + +static int can327_ldisc_ioctl(struct tty_struct *tty, unsigned int cmd, + unsigned long arg) +{ + struct can327 *elm = tty->disc_data; + unsigned int tmp; + + switch (cmd) { + case SIOCGIFNAME: + tmp = strnlen(elm->dev->name, IFNAMSIZ - 1) + 1; + if (copy_to_user((void __user *)arg, elm->dev->name, tmp)) + return -EFAULT; + return 0; + + case SIOCSIFHWADDR: + return -EINVAL; + + default: + return tty_mode_ioctl(tty, cmd, arg); + } +} + +static struct tty_ldisc_ops can327_ldisc = { + .owner = THIS_MODULE, + .name = KBUILD_MODNAME, + .num = N_CAN327, + .receive_buf = can327_ldisc_rx, + .write_wakeup = can327_ldisc_tx_wakeup, + .open = can327_ldisc_open, + .close = can327_ldisc_close, + .ioctl = can327_ldisc_ioctl, +}; + +static int __init can327_init(void) +{ + int status; + + status = tty_register_ldisc(&can327_ldisc); + if (status) + pr_err("Can't register line discipline\n"); + + return status; +} + +static void __exit can327_exit(void) +{ + /* This will only be called when all channels have been closed by + * userspace - tty_ldisc.c takes care of the module's refcount. + */ + tty_unregister_ldisc(&can327_ldisc); +} + +module_init(can327_init); +module_exit(can327_exit); + +MODULE_ALIAS_LDISC(N_CAN327); +MODULE_DESCRIPTION("ELM327 based CAN interface"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Max Staudt <max@enpas.org>"); diff --git a/drivers/net/can/cc770/Kconfig b/drivers/net/can/cc770/Kconfig index 6a9a5ba79220..aae25c2f849e 100644 --- a/drivers/net/can/cc770/Kconfig +++ b/drivers/net/can/cc770/Kconfig @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only menuconfig CAN_CC770 tristate "Bosch CC770 and Intel AN82527 devices" depends on HAS_IOMEM @@ -6,14 +7,15 @@ if CAN_CC770 config CAN_CC770_ISA tristate "ISA Bus based legacy CC770 driver" - ---help--- + depends on HAS_IOPORT + help This driver adds legacy support for CC770 and AN82527 chips connected to the ISA bus using I/O port, memory mapped or indirect access. config CAN_CC770_PLATFORM tristate "Generic Platform Bus based CC770 driver" - ---help--- + help This driver adds support for the CC770 and AN82527 chips connected to the "platform bus" (Linux abstraction for directly to the processor attached devices). diff --git a/drivers/net/can/cc770/Makefile b/drivers/net/can/cc770/Makefile index 8657f879ae19..65e8549f2e45 100644 --- a/drivers/net/can/cc770/Makefile +++ b/drivers/net/can/cc770/Makefile @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only # # Makefile for the Bosch CC770 CAN controller drivers. # diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c index da636a22c542..8d5abd643c06 100644 --- a/drivers/net/can/cc770/cc770.c +++ b/drivers/net/can/cc770/cc770.c @@ -1,16 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Core driver for the CC770 and AN82527 CAN controllers * * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the version 2 of the GNU General Public License - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -25,6 +17,7 @@ #include <linux/ptrace.h> #include <linux/string.h> #include <linux/errno.h> +#include <linux/ethtool.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/if_ether.h> @@ -398,7 +391,7 @@ static void cc770_tx(struct net_device *dev, int mo) u32 id; int i; - dlc = cf->can_dlc; + dlc = cf->len; id = cf->can_id; rtr = cf->can_id & CAN_RTR_FLAG ? 0 : MSGCFG_DIR; @@ -436,7 +429,7 @@ static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev) struct cc770_priv *priv = netdev_priv(dev); unsigned int mo = obj2msgobj(CC770_OBJ_TX); - if (can_dropped_invalid_skb(dev, skb)) + if (can_dev_dropped_skb(dev, skb)) return NETDEV_TX_OK; netif_stop_queue(dev); @@ -478,7 +471,7 @@ static void cc770_rx(struct net_device *dev, unsigned int mo, u8 ctrl1) cf->can_id = CAN_RTR_FLAG; if (config & MSGCFG_XTD) cf->can_id |= CAN_EFF_FLAG; - cf->can_dlc = 0; + cf->len = 0; } else { if (config & MSGCFG_XTD) { id = cc770_read_reg(priv, msgobj[mo].id[3]); @@ -494,20 +487,20 @@ static void cc770_rx(struct net_device *dev, unsigned int mo, u8 ctrl1) } cf->can_id = id; - cf->can_dlc = get_can_dlc((config & 0xf0) >> 4); - for (i = 0; i < cf->can_dlc; i++) + cf->len = can_cc_dlc2len((config & 0xf0) >> 4); + for (i = 0; i < cf->len; i++) cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]); - } + stats->rx_bytes += cf->len; + } stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + netif_rx(skb); } static int cc770_err(struct net_device *dev, u8 status) { struct cc770_priv *priv = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; struct can_frame *cf; struct sk_buff *skb; u8 lec; @@ -520,6 +513,7 @@ static int cc770_err(struct net_device *dev, u8 status) /* Use extended functions of the CC770 */ if (priv->control_normal_mode & CTRL_EAF) { + cf->can_id |= CAN_ERR_CNT; cf->data[6] = cc770_read_reg(priv, tx_error_counter); cf->data[7] = cc770_read_reg(priv, rx_error_counter); } @@ -546,7 +540,7 @@ static int cc770_err(struct net_device *dev, u8 status) priv->can.can_stats.error_warning++; } } else { - /* Back to error avtive */ + /* Back to error active */ cf->can_id |= CAN_ERR_PROT; cf->data[2] = CAN_ERR_PROT_ACTIVE; priv->can.state = CAN_STATE_ERROR_ACTIVE; @@ -579,8 +573,6 @@ static int cc770_err(struct net_device *dev, u8 status) } - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; netif_rx(skb); return 0; @@ -674,7 +666,6 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o) struct cc770_priv *priv = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; unsigned int mo = obj2msgobj(o); - struct can_frame *cf; u8 ctrl1; ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1); @@ -706,12 +697,9 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o) return; } - cf = (struct can_frame *)priv->tx_skb->data; - stats->tx_bytes += cf->can_dlc; + can_put_echo_skb(priv->tx_skb, dev, 0, 0); + stats->tx_bytes += can_get_echo_skb(dev, 0, NULL); stats->tx_packets++; - - can_put_echo_skb(priv->tx_skb, dev, 0); - can_get_echo_skb(dev, 0); priv->tx_skb = NULL; netif_wake_queue(dev); @@ -846,7 +834,10 @@ static const struct net_device_ops cc770_netdev_ops = { .ndo_open = cc770_open, .ndo_stop = cc770_close, .ndo_start_xmit = cc770_start_xmit, - .ndo_change_mtu = can_change_mtu, +}; + +static const struct ethtool_ops cc770_ethtool_ops = { + .get_ts_info = ethtool_op_get_ts_info, }; int register_cc770dev(struct 
net_device *dev) @@ -859,6 +850,7 @@ int register_cc770dev(struct net_device *dev) return err; dev->netdev_ops = &cc770_netdev_ops; + dev->ethtool_ops = &cc770_ethtool_ops; dev->flags |= IFF_ECHO; /* we support local echo */ diff --git a/drivers/net/can/cc770/cc770.h b/drivers/net/can/cc770/cc770.h index 95752e1d1283..0628fd9e1980 100644 --- a/drivers/net/can/cc770/cc770.h +++ b/drivers/net/can/cc770/cc770.h @@ -1,16 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * Core driver for the CC770 and AN82527 CAN controllers * * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the version 2 of the GNU General Public License - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #ifndef CC770_DEV_H @@ -192,7 +184,7 @@ struct cc770_priv { u8 control_normal_mode; /* Control register for normal mode */ u8 cpu_interface; /* CPU interface register */ u8 clkout; /* Clock out register */ - u8 bus_config; /* Bus conffiguration register */ + u8 bus_config; /* Bus configuration register */ struct sk_buff *tx_skb; }; diff --git a/drivers/net/can/cc770/cc770_isa.c b/drivers/net/can/cc770/cc770_isa.c index fcd34698074f..d06762817153 100644 --- a/drivers/net/can/cc770/cc770_isa.c +++ b/drivers/net/can/cc770/cc770_isa.c @@ -1,16 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Driver for CC770 and AN82527 CAN controllers on the legacy ISA bus * * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the version 2 of the GNU General Public License - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
*/ /* @@ -183,7 +175,7 @@ static int cc770_isa_probe(struct platform_device *pdev) err = -EBUSY; goto exit; } - base = ioremap_nocache(mem[idx], iosize); + base = ioremap(mem[idx], iosize); if (!base) { err = -ENOMEM; goto exit_release; @@ -272,26 +264,28 @@ static int cc770_isa_probe(struct platform_device *pdev) if (err) { dev_err(&pdev->dev, "couldn't register device (err=%d)\n", err); - goto exit_unmap; + goto exit_free; } dev_info(&pdev->dev, "device registered (reg_base=0x%p, irq=%d)\n", priv->reg_base, dev->irq); return 0; - exit_unmap: +exit_free: + free_cc770dev(dev); +exit_unmap: if (mem[idx]) iounmap(base); - exit_release: +exit_release: if (mem[idx]) release_mem_region(mem[idx], iosize); else release_region(port[idx], iosize); - exit: +exit: return err; } -static int cc770_isa_remove(struct platform_device *pdev) +static void cc770_isa_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct cc770_priv *priv = netdev_priv(dev); @@ -309,8 +303,6 @@ static int cc770_isa_remove(struct platform_device *pdev) release_region(port[idx], CC770_IOSIZE); } free_cc770dev(dev); - - return 0; } static struct platform_driver cc770_isa_driver = { diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c index 866e5e12fdd2..b6c4f02ffb97 100644 --- a/drivers/net/can/cc770/cc770_platform.c +++ b/drivers/net/can/cc770/cc770_platform.c @@ -1,16 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Driver for CC770 and AN82527 CAN controllers on the platform bus * * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the version 2 of the GNU General Public License - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
*/ /* @@ -78,17 +70,10 @@ static void cc770_platform_write_reg(const struct cc770_priv *priv, int reg, static int cc770_get_of_node_data(struct platform_device *pdev, struct cc770_priv *priv) { + u32 clkext = CC770_PLATFORM_CAN_CLOCK, clkout = 0; struct device_node *np = pdev->dev.of_node; - const u32 *prop; - int prop_size; - u32 clkext; - - prop = of_get_property(np, "bosch,external-clock-frequency", - &prop_size); - if (prop && (prop_size == sizeof(u32))) - clkext = *prop; - else - clkext = CC770_PLATFORM_CAN_CLOCK; /* default */ + + of_property_read_u32(np, "bosch,external-clock-frequency", &clkext); priv->can.clock.freq = clkext; /* The system clock may not exceed 10 MHz */ @@ -101,41 +86,38 @@ static int cc770_get_of_node_data(struct platform_device *pdev, if (priv->can.clock.freq > 8000000) priv->cpu_interface |= CPUIF_DMC; - if (of_get_property(np, "bosch,divide-memory-clock", NULL)) + if (of_property_read_bool(np, "bosch,divide-memory-clock")) priv->cpu_interface |= CPUIF_DMC; - if (of_get_property(np, "bosch,iso-low-speed-mux", NULL)) + if (of_property_read_bool(np, "bosch,iso-low-speed-mux")) priv->cpu_interface |= CPUIF_MUX; - if (!of_get_property(np, "bosch,no-comperator-bypass", NULL)) + if (!of_property_read_bool(np, "bosch,no-comperator-bypass")) priv->bus_config |= BUSCFG_CBY; - if (of_get_property(np, "bosch,disconnect-rx0-input", NULL)) + if (of_property_read_bool(np, "bosch,disconnect-rx0-input")) priv->bus_config |= BUSCFG_DR0; - if (of_get_property(np, "bosch,disconnect-rx1-input", NULL)) + if (of_property_read_bool(np, "bosch,disconnect-rx1-input")) priv->bus_config |= BUSCFG_DR1; - if (of_get_property(np, "bosch,disconnect-tx1-output", NULL)) + if (of_property_read_bool(np, "bosch,disconnect-tx1-output")) priv->bus_config |= BUSCFG_DT1; - if (of_get_property(np, "bosch,polarity-dominant", NULL)) + if (of_property_read_bool(np, "bosch,polarity-dominant")) priv->bus_config |= BUSCFG_POL; - prop = of_get_property(np, "bosch,clock-out-frequency", &prop_size); - if (prop && (prop_size == sizeof(u32)) && *prop > 0) { - u32 cdv = clkext / *prop; - int slew; + of_property_read_u32(np, "bosch,clock-out-frequency", &clkout); + if (clkout > 0) { + u32 cdv = clkext / clkout; if (cdv > 0 && cdv < 16) { + u32 slew; + priv->cpu_interface |= CPUIF_CEN; priv->clkout |= (cdv - 1) & CLKOUT_CD_MASK; - prop = of_get_property(np, "bosch,slew-rate", - &prop_size); - if (prop && (prop_size == sizeof(u32))) { - slew = *prop; - } else { + if (of_property_read_u32(np, "bosch,slew-rate", &slew)) { /* Determine default slew rate */ slew = (CLKOUT_SL_MASK >> CLKOUT_SL_SHIFT) - ((cdv * clkext - 1) / 8000000); - if (slew < 0) + if (slew > (CLKOUT_SL_MASK >> CLKOUT_SL_SHIFT)) slew = 0; } priv->clkout |= (slew << CLKOUT_SL_SHIFT) & @@ -238,7 +220,7 @@ exit_release_mem: return err; } -static int cc770_platform_remove(struct platform_device *pdev) +static void cc770_platform_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct cc770_priv *priv = netdev_priv(dev); @@ -250,8 +232,6 @@ static int cc770_platform_remove(struct platform_device *pdev) mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(mem->start, resource_size(mem)); - - return 0; } static const struct of_device_id cc770_platform_table[] = { diff --git a/drivers/net/can/ctucanfd/Kconfig b/drivers/net/can/ctucanfd/Kconfig new file mode 100644 index 000000000000..f52407f5c5d8 --- /dev/null +++ b/drivers/net/can/ctucanfd/Kconfig @@ -0,0 +1,34 @@ +config CAN_CTUCANFD + tristate 
"CTU CAN-FD IP core" if COMPILE_TEST + help + This driver adds support for the CTU CAN FD open-source IP core. + More documentation and core sources at project page + (https://gitlab.fel.cvut.cz/canbus/ctucanfd_ip_core). + The core integration to Xilinx Zynq system as platform driver + is available (https://gitlab.fel.cvut.cz/canbus/zynq/zynq-can-sja1000-top). + Implementation on Intel FPGA-based PCI Express board is available + from project (https://gitlab.fel.cvut.cz/canbus/pcie-ctucanfd) and + on Intel SoC from project (https://gitlab.fel.cvut.cz/canbus/intel-soc-ctucanfd). + Guidepost CTU FEE CAN bus projects page https://canbus.pages.fel.cvut.cz/ . + +config CAN_CTUCANFD_PCI + tristate "CTU CAN-FD IP core PCI/PCIe driver" + depends on PCI + select CAN_CTUCANFD + help + This driver adds PCI/PCIe support for CTU CAN-FD IP core. + The project providing FPGA design for Intel EP4CGX15 based DB4CGX15 + PCIe board with PiKRON.com designed transceiver riser shield is available + at https://gitlab.fel.cvut.cz/canbus/pcie-ctucanfd . + +config CAN_CTUCANFD_PLATFORM + tristate "CTU CAN-FD IP core platform (FPGA, SoC) driver" + depends on HAS_IOMEM && OF + select CAN_CTUCANFD + help + The core has been tested together with OpenCores SJA1000 + modified to be CAN FD frames tolerant on MicroZed Zynq based + MZ_APO education kits designed by Petr Porazil from PiKRON.com + company. FPGA design https://gitlab.fel.cvut.cz/canbus/zynq/zynq-can-sja1000-top. + The kit description at the Computer Architectures course pages + https://cw.fel.cvut.cz/wiki/courses/b35apo/documentation/mz_apo/start . diff --git a/drivers/net/can/ctucanfd/Makefile b/drivers/net/can/ctucanfd/Makefile new file mode 100644 index 000000000000..8078f1f2c30f --- /dev/null +++ b/drivers/net/can/ctucanfd/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +# +# Makefile for the CTU CAN-FD IP module drivers +# + +obj-$(CONFIG_CAN_CTUCANFD) := ctucanfd.o +ctucanfd-y := ctucanfd_base.o + +obj-$(CONFIG_CAN_CTUCANFD_PCI) += ctucanfd_pci.o +obj-$(CONFIG_CAN_CTUCANFD_PLATFORM) += ctucanfd_platform.o diff --git a/drivers/net/can/ctucanfd/ctucanfd.h b/drivers/net/can/ctucanfd/ctucanfd.h new file mode 100644 index 000000000000..0e9904f6a05d --- /dev/null +++ b/drivers/net/can/ctucanfd/ctucanfd.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/******************************************************************************* + * + * CTU CAN FD IP Core + * + * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU + * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded + * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU + * Copyright (C) 2018-2021 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded + * + * Project advisors: + * Jiri Novak <jnovak@fel.cvut.cz> + * Pavel Pisa <pisa@cmp.felk.cvut.cz> + * + * Department of Measurement (http://meas.fel.cvut.cz/) + * Faculty of Electrical Engineering (http://www.fel.cvut.cz) + * Czech Technical University (http://www.cvut.cz/) + ******************************************************************************/ + +#ifndef __CTUCANFD__ +#define __CTUCANFD__ + +#include <linux/netdevice.h> +#include <linux/can/dev.h> +#include <linux/list.h> + +enum ctu_can_fd_can_registers; + +struct ctucan_priv { + struct can_priv can; /* must be first member! 
*/ + + void __iomem *mem_base; + u32 (*read_reg)(struct ctucan_priv *priv, + enum ctu_can_fd_can_registers reg); + void (*write_reg)(struct ctucan_priv *priv, + enum ctu_can_fd_can_registers reg, u32 val); + + unsigned int txb_head; + unsigned int txb_tail; + u32 txb_prio; + unsigned int ntxbufs; + spinlock_t tx_lock; /* spinlock to serialize allocation and processing of TX buffers */ + + struct napi_struct napi; + struct device *dev; + struct clk *can_clk; + + int irq_flags; + unsigned long drv_flags; + + u32 rxfrm_first_word; + + struct list_head peers_on_pdev; +}; + +/** + * ctucan_probe_common - Device type independent registration call + * + * This function does all the memory allocation and registration for the CAN + * device. + * + * @dev: Handle to the generic device structure + * @addr: Base address of CTU CAN FD core address + * @irq: Interrupt number + * @ntxbufs: Number of implemented Tx buffers + * @can_clk_rate: Clock rate, if 0 then clock are taken from device node + * @pm_enable_call: Whether pm_runtime_enable should be called + * @set_drvdata_fnc: Function to set network driver data for physical device + * + * Return: 0 on success and failure value on error + */ +int ctucan_probe_common(struct device *dev, void __iomem *addr, + int irq, unsigned int ntxbufs, + unsigned long can_clk_rate, + int pm_enable_call, + void (*set_drvdata_fnc)(struct device *dev, + struct net_device *ndev)); + +int ctucan_suspend(struct device *dev) __maybe_unused; +int ctucan_resume(struct device *dev) __maybe_unused; + +#endif /*__CTUCANFD__*/ diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c new file mode 100644 index 000000000000..1e6b9e3dc2fe --- /dev/null +++ b/drivers/net/can/ctucanfd/ctucanfd_base.c @@ -0,0 +1,1460 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * + * CTU CAN FD IP Core + * + * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU + * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded + * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU + * Copyright (C) 2018-2022 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded + * + * Project advisors: + * Jiri Novak <jnovak@fel.cvut.cz> + * Pavel Pisa <pisa@cmp.felk.cvut.cz> + * + * Department of Measurement (http://meas.fel.cvut.cz/) + * Faculty of Electrical Engineering (http://www.fel.cvut.cz) + * Czech Technical University (http://www.cvut.cz/) + ******************************************************************************/ + +#include <linux/clk.h> +#include <linux/errno.h> +#include <linux/ethtool.h> +#include <linux/init.h> +#include <linux/bitfield.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/skbuff.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/can/error.h> +#include <linux/pm_runtime.h> + +#include "ctucanfd.h" +#include "ctucanfd_kregs.h" +#include "ctucanfd_kframe.h" + +#ifdef DEBUG +#define ctucan_netdev_dbg(ndev, args...) \ + netdev_dbg(ndev, args) +#else +#define ctucan_netdev_dbg(...) 
do { } while (0) +#endif + +#define CTUCANFD_ID 0xCAFD + +/* TX buffer rotation: + * - when a buffer transitions to empty state, rotate order and priorities + * - if more buffers seem to transition at the same time, rotate by the number of buffers + * - it may be assumed that buffers transition to empty state in FIFO order (because we manage + * priorities that way) + * - at frame filling, do not rotate anything, just increment buffer modulo counter + */ + +#define CTUCANFD_FLAG_RX_FFW_BUFFERED 1 + +#define CTUCAN_STATE_TO_TEXT_ENTRY(st) \ + [st] = #st + +enum ctucan_txtb_status { + TXT_NOT_EXIST = 0x0, + TXT_RDY = 0x1, + TXT_TRAN = 0x2, + TXT_ABTP = 0x3, + TXT_TOK = 0x4, + TXT_ERR = 0x6, + TXT_ABT = 0x7, + TXT_ETY = 0x8, +}; + +enum ctucan_txtb_command { + TXT_CMD_SET_EMPTY = 0x01, + TXT_CMD_SET_READY = 0x02, + TXT_CMD_SET_ABORT = 0x04 +}; + +static const struct can_bittiming_const ctu_can_fd_bit_timing_max = { + .name = "ctu_can_fd", + .tseg1_min = 2, + .tseg1_max = 190, + .tseg2_min = 1, + .tseg2_max = 63, + .sjw_max = 31, + .brp_min = 1, + .brp_max = 8, + .brp_inc = 1, +}; + +static const struct can_bittiming_const ctu_can_fd_bit_timing_data_max = { + .name = "ctu_can_fd", + .tseg1_min = 2, + .tseg1_max = 94, + .tseg2_min = 1, + .tseg2_max = 31, + .sjw_max = 31, + .brp_min = 1, + .brp_max = 2, + .brp_inc = 1, +}; + +static const char * const ctucan_state_strings[CAN_STATE_MAX] = { + CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_ERROR_ACTIVE), + CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_ERROR_WARNING), + CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_ERROR_PASSIVE), + CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_BUS_OFF), + CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_STOPPED), + CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_SLEEPING) +}; + +static void ctucan_write32_le(struct ctucan_priv *priv, + enum ctu_can_fd_can_registers reg, u32 val) +{ + iowrite32(val, priv->mem_base + reg); +} + +static void ctucan_write32_be(struct ctucan_priv *priv, + enum ctu_can_fd_can_registers reg, u32 val) +{ + iowrite32be(val, priv->mem_base + reg); +} + +static u32 ctucan_read32_le(struct ctucan_priv *priv, + enum ctu_can_fd_can_registers reg) +{ + return ioread32(priv->mem_base + reg); +} + +static u32 ctucan_read32_be(struct ctucan_priv *priv, + enum ctu_can_fd_can_registers reg) +{ + return ioread32be(priv->mem_base + reg); +} + +static void ctucan_write32(struct ctucan_priv *priv, enum ctu_can_fd_can_registers reg, u32 val) +{ + priv->write_reg(priv, reg, val); +} + +static u32 ctucan_read32(struct ctucan_priv *priv, enum ctu_can_fd_can_registers reg) +{ + return priv->read_reg(priv, reg); +} + +static void ctucan_write_txt_buf(struct ctucan_priv *priv, enum ctu_can_fd_can_registers buf_base, + u32 offset, u32 val) +{ + priv->write_reg(priv, buf_base + offset, val); +} + +#define CTU_CAN_FD_TXTNF(priv) (!!FIELD_GET(REG_STATUS_TXNF, ctucan_read32(priv, CTUCANFD_STATUS))) +#define CTU_CAN_FD_ENABLED(priv) (!!FIELD_GET(REG_MODE_ENA, ctucan_read32(priv, CTUCANFD_MODE))) + +/** + * ctucan_state_to_str() - Converts CAN controller state code to corresponding text + * @state: CAN controller state code + * + * Return: Pointer to string representation of the error state + */ +static const char *ctucan_state_to_str(enum can_state state) +{ + const char *txt = NULL; + + if (state >= 0 && state < CAN_STATE_MAX) + txt = ctucan_state_strings[state]; + return txt ? 
txt : "UNKNOWN"; +} + +/** + * ctucan_reset() - Issues software reset request to CTU CAN FD + * @ndev: Pointer to net_device structure + * + * Return: 0 for success, -%ETIMEDOUT if CAN controller does not leave reset + */ +static int ctucan_reset(struct net_device *ndev) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + int i = 100; + + ctucan_write32(priv, CTUCANFD_MODE, REG_MODE_RST); + clear_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags); + + do { + u16 device_id = FIELD_GET(REG_DEVICE_ID_DEVICE_ID, + ctucan_read32(priv, CTUCANFD_DEVICE_ID)); + + if (device_id == 0xCAFD) + return 0; + if (!i--) { + netdev_warn(ndev, "device did not leave reset\n"); + return -ETIMEDOUT; + } + usleep_range(100, 200); + } while (1); +} + +/** + * ctucan_set_btr() - Sets CAN bus bit timing in CTU CAN FD + * @ndev: Pointer to net_device structure + * @bt: Pointer to Bit timing structure + * @nominal: True - Nominal bit timing, False - Data bit timing + * + * Return: 0 - OK, -%EPERM if controller is enabled + */ +static int ctucan_set_btr(struct net_device *ndev, struct can_bittiming *bt, bool nominal) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + int max_ph1_len = 31; + u32 btr = 0; + u32 prop_seg = bt->prop_seg; + u32 phase_seg1 = bt->phase_seg1; + + if (CTU_CAN_FD_ENABLED(priv)) { + netdev_err(ndev, "BUG! Cannot set bittiming - CAN is enabled\n"); + return -EPERM; + } + + if (nominal) + max_ph1_len = 63; + + /* The timing calculation functions have only constraints on tseg1, which is prop_seg + + * phase1_seg combined. tseg1 is then split in half and stored into prog_seg and phase_seg1. + * In CTU CAN FD, PROP is 6/7 bits wide but PH1 only 6/5, so we must re-distribute the + * values here. + */ + if (phase_seg1 > max_ph1_len) { + prop_seg += phase_seg1 - max_ph1_len; + phase_seg1 = max_ph1_len; + bt->prop_seg = prop_seg; + bt->phase_seg1 = phase_seg1; + } + + if (nominal) { + btr = FIELD_PREP(REG_BTR_PROP, prop_seg); + btr |= FIELD_PREP(REG_BTR_PH1, phase_seg1); + btr |= FIELD_PREP(REG_BTR_PH2, bt->phase_seg2); + btr |= FIELD_PREP(REG_BTR_BRP, bt->brp); + btr |= FIELD_PREP(REG_BTR_SJW, bt->sjw); + + ctucan_write32(priv, CTUCANFD_BTR, btr); + } else { + btr = FIELD_PREP(REG_BTR_FD_PROP_FD, prop_seg); + btr |= FIELD_PREP(REG_BTR_FD_PH1_FD, phase_seg1); + btr |= FIELD_PREP(REG_BTR_FD_PH2_FD, bt->phase_seg2); + btr |= FIELD_PREP(REG_BTR_FD_BRP_FD, bt->brp); + btr |= FIELD_PREP(REG_BTR_FD_SJW_FD, bt->sjw); + + ctucan_write32(priv, CTUCANFD_BTR_FD, btr); + } + + return 0; +} + +/** + * ctucan_set_bittiming() - CAN set nominal bit timing routine + * @ndev: Pointer to net_device structure + * + * Return: 0 on success, -%EPERM on error + */ +static int ctucan_set_bittiming(struct net_device *ndev) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + struct can_bittiming *bt = &priv->can.bittiming; + + /* Note that bt may be modified here */ + return ctucan_set_btr(ndev, bt, true); +} + +/** + * ctucan_set_data_bittiming() - CAN set data bit timing routine + * @ndev: Pointer to net_device structure + * + * Return: 0 on success, -%EPERM on error + */ +static int ctucan_set_data_bittiming(struct net_device *ndev) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + struct can_bittiming *dbt = &priv->can.fd.data_bittiming; + + /* Note that dbt may be modified here */ + return ctucan_set_btr(ndev, dbt, false); +} + +/** + * ctucan_set_secondary_sample_point() - Sets secondary sample point in CTU CAN FD + * @ndev: Pointer to net_device structure + * + * Return: 0 on success, -%EPERM if controller is 
enabled + */ +static int ctucan_set_secondary_sample_point(struct net_device *ndev) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + struct can_bittiming *dbt = &priv->can.fd.data_bittiming; + int ssp_offset = 0; + u32 ssp_cfg = 0; /* No SSP by default */ + + if (CTU_CAN_FD_ENABLED(priv)) { + netdev_err(ndev, "BUG! Cannot set SSP - CAN is enabled\n"); + return -EPERM; + } + + /* Use SSP for bit-rates above 1 Mbits/s */ + if (dbt->bitrate > 1000000) { + /* Calculate SSP in minimal time quanta */ + ssp_offset = (priv->can.clock.freq / 1000) * dbt->sample_point / dbt->bitrate; + + if (ssp_offset > 127) { + netdev_warn(ndev, "SSP offset saturated to 127\n"); + ssp_offset = 127; + } + + ssp_cfg = FIELD_PREP(REG_TRV_DELAY_SSP_OFFSET, ssp_offset); + ssp_cfg |= FIELD_PREP(REG_TRV_DELAY_SSP_SRC, 0x1); + } + + ctucan_write32(priv, CTUCANFD_TRV_DELAY, ssp_cfg); + + return 0; +} + +/** + * ctucan_set_mode() - Sets CTU CAN FDs mode + * @priv: Pointer to private data + * @mode: Pointer to controller modes to be set + */ +static void ctucan_set_mode(struct ctucan_priv *priv, const struct can_ctrlmode *mode) +{ + u32 mode_reg = ctucan_read32(priv, CTUCANFD_MODE); + + mode_reg = (mode->flags & CAN_CTRLMODE_LOOPBACK) ? + (mode_reg | REG_MODE_ILBP) : + (mode_reg & ~REG_MODE_ILBP); + + mode_reg = (mode->flags & CAN_CTRLMODE_LISTENONLY) ? + (mode_reg | REG_MODE_BMM) : + (mode_reg & ~REG_MODE_BMM); + + mode_reg = (mode->flags & CAN_CTRLMODE_FD) ? + (mode_reg | REG_MODE_FDE) : + (mode_reg & ~REG_MODE_FDE); + + mode_reg = (mode->flags & CAN_CTRLMODE_PRESUME_ACK) ? + (mode_reg | REG_MODE_ACF) : + (mode_reg & ~REG_MODE_ACF); + + mode_reg = (mode->flags & CAN_CTRLMODE_FD_NON_ISO) ? + (mode_reg | REG_MODE_NISOFD) : + (mode_reg & ~REG_MODE_NISOFD); + + /* One shot mode supported indirectly via Retransmit limit */ + mode_reg &= ~FIELD_PREP(REG_MODE_RTRTH, 0xF); + mode_reg = (mode->flags & CAN_CTRLMODE_ONE_SHOT) ? + (mode_reg | REG_MODE_RTRLE) : + (mode_reg & ~REG_MODE_RTRLE); + + /* Some bits fixed: + * TSTM - Off, User shall not be able to change REC/TEC by hand during operation + */ + mode_reg &= ~REG_MODE_TSTM; + + ctucan_write32(priv, CTUCANFD_MODE, mode_reg); +} + +/** + * ctucan_chip_start() - This routine starts the driver + * @ndev: Pointer to net_device structure + * + * Routine expects that chip is in reset state. It setups initial + * Tx buffers for FIFO priorities, sets bittiming, enables interrupts, + * switches core to operational mode and changes controller + * state to %CAN_STATE_STOPPED. 
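To make the secondary sample point arithmetic in ctucan_set_secondary_sample_point() above concrete, here is a minimal illustrative sketch; the clock rate, data bitrate and sample point below are hypothetical assumptions, not values taken from this patch, and can_bittiming.sample_point is given in tenths of a percent:

/* Illustrative sketch only: SSP offset in clock cycles (minimal time quanta) */
static u32 example_ssp_offset(void)
{
	u32 freq = 100000000;	/* assumed 100 MHz core clock */
	u32 bitrate = 4000000;	/* assumed 4 Mbit/s data-phase bitrate */
	u32 sample_point = 750;	/* assumed 75.0 % secondary sample point */

	/* (100000000 / 1000) * 750 / 4000000 = 18 cycles into the bit,
	 * comfortably below the 127-cycle saturation applied by the driver
	 */
	return (freq / 1000) * sample_point / bitrate;
}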
+ * + * Return: 0 on success and failure value on error + */ +static int ctucan_chip_start(struct net_device *ndev) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + u32 int_ena, int_msk; + u32 mode_reg; + int err; + struct can_ctrlmode mode; + + priv->txb_prio = 0x01234567; + priv->txb_head = 0; + priv->txb_tail = 0; + ctucan_write32(priv, CTUCANFD_TX_PRIORITY, priv->txb_prio); + + /* Configure bit-rates and ssp */ + err = ctucan_set_bittiming(ndev); + if (err < 0) + return err; + + err = ctucan_set_data_bittiming(ndev); + if (err < 0) + return err; + + err = ctucan_set_secondary_sample_point(ndev); + if (err < 0) + return err; + + /* Configure modes */ + mode.flags = priv->can.ctrlmode; + mode.mask = 0xFFFFFFFF; + ctucan_set_mode(priv, &mode); + + /* Configure interrupts */ + int_ena = REG_INT_STAT_RBNEI | + REG_INT_STAT_TXBHCI | + REG_INT_STAT_EWLI | + REG_INT_STAT_FCSI; + + /* Bus error reporting -> Allow Error/Arb.lost interrupts */ + if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) { + int_ena |= REG_INT_STAT_ALI | + REG_INT_STAT_BEI; + } + + int_msk = ~int_ena; /* Mask all disabled interrupts */ + + /* It's after reset, so there is no need to clear anything */ + ctucan_write32(priv, CTUCANFD_INT_MASK_SET, int_msk); + ctucan_write32(priv, CTUCANFD_INT_ENA_SET, int_ena); + + /* Controller enters ERROR_ACTIVE on initial FCSI */ + priv->can.state = CAN_STATE_STOPPED; + + /* Enable the controller */ + mode_reg = ctucan_read32(priv, CTUCANFD_MODE); + mode_reg |= REG_MODE_ENA; + ctucan_write32(priv, CTUCANFD_MODE, mode_reg); + + return 0; +} + +/** + * ctucan_do_set_mode() - Sets mode of the driver + * @ndev: Pointer to net_device structure + * @mode: Tells the mode of the driver + * + * This checks the driver state and calls the routine corresponding to the requested mode.
+ * + * Return: 0 on success and failure value on error + */ +static int ctucan_do_set_mode(struct net_device *ndev, enum can_mode mode) +{ + int ret; + + switch (mode) { + case CAN_MODE_START: + ret = ctucan_reset(ndev); + if (ret < 0) + return ret; + ret = ctucan_chip_start(ndev); + if (ret < 0) { + netdev_err(ndev, "ctucan_chip_start failed!\n"); + return ret; + } + netif_wake_queue(ndev); + break; + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +/** + * ctucan_get_tx_status() - Gets status of TXT buffer + * @priv: Pointer to private data + * @buf: Buffer index (0-based) + * + * Return: Status of TXT buffer + */ +static enum ctucan_txtb_status ctucan_get_tx_status(struct ctucan_priv *priv, u8 buf) +{ + u32 tx_status = ctucan_read32(priv, CTUCANFD_TX_STATUS); + enum ctucan_txtb_status status = (tx_status >> (buf * 4)) & 0x7; + + return status; +} + +/** + * ctucan_is_txt_buf_writable() - Checks if frame can be inserted to TXT Buffer + * @priv: Pointer to private data + * @buf: Buffer index (0-based) + * + * Return: True - Frame can be inserted to TXT Buffer, False - If attempted, frame will not be + * inserted to TXT Buffer + */ +static bool ctucan_is_txt_buf_writable(struct ctucan_priv *priv, u8 buf) +{ + enum ctucan_txtb_status buf_status; + + buf_status = ctucan_get_tx_status(priv, buf); + if (buf_status == TXT_RDY || buf_status == TXT_TRAN || buf_status == TXT_ABTP) + return false; + + return true; +} + +/** + * ctucan_insert_frame() - Inserts frame to TXT buffer + * @priv: Pointer to private data + * @cf: Pointer to CAN frame to be inserted + * @buf: TXT Buffer index to which frame is inserted (0-based) + * @isfdf: True - CAN FD Frame, False - CAN 2.0 Frame + * + * Return: + * * True - Frame inserted successfully + * * False - Frame was not inserted due to one of: + * 1. TXT Buffer is not writable (it is in wrong state) + * 2. Invalid TXT buffer index + * 3. 
Invalid frame length + */ +static bool ctucan_insert_frame(struct ctucan_priv *priv, const struct canfd_frame *cf, u8 buf, + bool isfdf) +{ + u32 buf_base; + u32 ffw = 0; + u32 idw = 0; + unsigned int i; + + if (buf >= priv->ntxbufs) + return false; + + if (!ctucan_is_txt_buf_writable(priv, buf)) + return false; + + if (cf->len > CANFD_MAX_DLEN) + return false; + + /* Prepare Frame format */ + if (cf->can_id & CAN_RTR_FLAG) + ffw |= REG_FRAME_FORMAT_W_RTR; + + if (cf->can_id & CAN_EFF_FLAG) + ffw |= REG_FRAME_FORMAT_W_IDE; + + if (isfdf) { + ffw |= REG_FRAME_FORMAT_W_FDF; + if (cf->flags & CANFD_BRS) + ffw |= REG_FRAME_FORMAT_W_BRS; + } + + ffw |= FIELD_PREP(REG_FRAME_FORMAT_W_DLC, can_fd_len2dlc(cf->len)); + + /* Prepare identifier */ + if (cf->can_id & CAN_EFF_FLAG) + idw = cf->can_id & CAN_EFF_MASK; + else + idw = FIELD_PREP(REG_IDENTIFIER_W_IDENTIFIER_BASE, cf->can_id & CAN_SFF_MASK); + + /* Write ID, Frame format, Don't write timestamp -> Time triggered transmission disabled */ + buf_base = (buf + 1) * 0x100; + ctucan_write_txt_buf(priv, buf_base, CTUCANFD_FRAME_FORMAT_W, ffw); + ctucan_write_txt_buf(priv, buf_base, CTUCANFD_IDENTIFIER_W, idw); + + /* Write Data payload */ + if (!(cf->can_id & CAN_RTR_FLAG)) { + for (i = 0; i < cf->len; i += 4) { + u32 data = le32_to_cpu(*(__le32 *)(cf->data + i)); + + ctucan_write_txt_buf(priv, buf_base, CTUCANFD_DATA_1_4_W + i, data); + } + } + + return true; +} + +/** + * ctucan_give_txtb_cmd() - Applies command on TXT buffer + * @priv: Pointer to private data + * @cmd: Command to give + * @buf: Buffer index (0-based) + */ +static void ctucan_give_txtb_cmd(struct ctucan_priv *priv, enum ctucan_txtb_command cmd, u8 buf) +{ + u32 tx_cmd = cmd; + + tx_cmd |= 1 << (buf + 8); + ctucan_write32(priv, CTUCANFD_TX_COMMAND, tx_cmd); +} + +/** + * ctucan_start_xmit() - Starts the transmission + * @skb: sk_buff pointer that contains data to be Txed + * @ndev: Pointer to net_device structure + * + * Invoked from upper layers to initiate transmission. Uses the next available free TXT Buffer and + * populates its fields to start the transmission. + * + * Return: %NETDEV_TX_OK on success, %NETDEV_TX_BUSY when no free TXT buffer is available, + * negative return values reserved for error cases + */ +static netdev_tx_t ctucan_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + struct canfd_frame *cf = (struct canfd_frame *)skb->data; + u32 txtb_id; + bool ok; + unsigned long flags; + + if (can_dev_dropped_skb(ndev, skb)) + return NETDEV_TX_OK; + + if (unlikely(!CTU_CAN_FD_TXTNF(priv))) { + netif_stop_queue(ndev); + netdev_err(ndev, "BUG!, no TXB free when queue awake!\n"); + return NETDEV_TX_BUSY; + } + + txtb_id = priv->txb_head % priv->ntxbufs; + ctucan_netdev_dbg(ndev, "%s: using TXB#%u\n", __func__, txtb_id); + ok = ctucan_insert_frame(priv, cf, txtb_id, can_is_canfd_skb(skb)); + + if (!ok) { + netdev_err(ndev, "BUG! TXNF set but cannot insert frame into TXTB! 
HW Bug?"); + kfree_skb(skb); + ndev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + can_put_echo_skb(skb, ndev, txtb_id, 0); + + spin_lock_irqsave(&priv->tx_lock, flags); + ctucan_give_txtb_cmd(priv, TXT_CMD_SET_READY, txtb_id); + priv->txb_head++; + + /* Check if all TX buffers are full */ + if (!CTU_CAN_FD_TXTNF(priv)) + netif_stop_queue(ndev); + + spin_unlock_irqrestore(&priv->tx_lock, flags); + + return NETDEV_TX_OK; +} + +/** + * ctucan_read_rx_frame() - Reads frame from RX FIFO + * @priv: Pointer to CTU CAN FD's private data + * @cf: Pointer to CAN frame struct + * @ffw: Previously read frame format word + * + * Note: Frame format word must be read separately and provided in 'ffw'. + */ +static void ctucan_read_rx_frame(struct ctucan_priv *priv, struct canfd_frame *cf, u32 ffw) +{ + u32 idw; + unsigned int i; + unsigned int wc; + unsigned int len; + + idw = ctucan_read32(priv, CTUCANFD_RX_DATA); + if (FIELD_GET(REG_FRAME_FORMAT_W_IDE, ffw)) + cf->can_id = (idw & CAN_EFF_MASK) | CAN_EFF_FLAG; + else + cf->can_id = (idw >> 18) & CAN_SFF_MASK; + + /* BRS, ESI, RTR Flags */ + if (FIELD_GET(REG_FRAME_FORMAT_W_FDF, ffw)) { + if (FIELD_GET(REG_FRAME_FORMAT_W_BRS, ffw)) + cf->flags |= CANFD_BRS; + if (FIELD_GET(REG_FRAME_FORMAT_W_ESI_RSV, ffw)) + cf->flags |= CANFD_ESI; + } else if (FIELD_GET(REG_FRAME_FORMAT_W_RTR, ffw)) { + cf->can_id |= CAN_RTR_FLAG; + } + + wc = FIELD_GET(REG_FRAME_FORMAT_W_RWCNT, ffw) - 3; + + /* DLC */ + if (FIELD_GET(REG_FRAME_FORMAT_W_DLC, ffw) <= 8) { + len = FIELD_GET(REG_FRAME_FORMAT_W_DLC, ffw); + } else { + if (FIELD_GET(REG_FRAME_FORMAT_W_FDF, ffw)) + len = wc << 2; + else + len = 8; + } + cf->len = len; + if (unlikely(len > wc * 4)) + len = wc * 4; + + /* Timestamp - Read and throw away */ + ctucan_read32(priv, CTUCANFD_RX_DATA); + ctucan_read32(priv, CTUCANFD_RX_DATA); + + /* Data */ + for (i = 0; i < len; i += 4) { + u32 data = ctucan_read32(priv, CTUCANFD_RX_DATA); + *(__le32 *)(cf->data + i) = cpu_to_le32(data); + } + while (unlikely(i < wc * 4)) { + ctucan_read32(priv, CTUCANFD_RX_DATA); + i += 4; + } +} + +/** + * ctucan_rx() - Called from CAN ISR to complete the received frame processing + * @ndev: Pointer to net_device structure + * + * This function is invoked from the CAN ISR (NAPI poll) to process the Rx frames. It does minimal + * processing and invokes "netif_receive_skb" to complete further processing. + * Return: 1 when a frame is passed to the network layer, 0 when the first frame word is read but + * the system is temporarily out of free SKBs (the word is buffered and the SKB allocation is retried later), + * -%EAGAIN when the Rx FIFO is empty.
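As a minimal sketch of the payload length decoding performed by ctucan_read_rx_frame() above, assuming (as the code's arithmetic implies) that RWCNT counts the words that follow the format word, i.e. one identifier word, two timestamp words and the data words; the helper name and the example numbers are illustrative only:

/* Illustrative sketch only: payload length from FRAME_FORMAT word fields */
static unsigned int example_rx_len(unsigned int rwcnt, unsigned int dlc, bool fdf)
{
	unsigned int wc = rwcnt - 3;	/* number of 32-bit data words */

	if (dlc <= 8)
		return dlc;		/* Classical CAN lengths map directly */
	return fdf ? wc * 4 : 8;	/* e.g. rwcnt = 7 gives 16 bytes for a CAN FD frame */
}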
+ */ +static int ctucan_rx(struct net_device *ndev) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + struct canfd_frame *cf; + struct sk_buff *skb; + u32 ffw; + + if (test_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags)) { + ffw = priv->rxfrm_first_word; + clear_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags); + } else { + ffw = ctucan_read32(priv, CTUCANFD_RX_DATA); + } + + if (!FIELD_GET(REG_FRAME_FORMAT_W_RWCNT, ffw)) + return -EAGAIN; + + if (FIELD_GET(REG_FRAME_FORMAT_W_FDF, ffw)) + skb = alloc_canfd_skb(ndev, &cf); + else + skb = alloc_can_skb(ndev, (struct can_frame **)&cf); + + if (unlikely(!skb)) { + priv->rxfrm_first_word = ffw; + set_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags); + return 0; + } + + ctucan_read_rx_frame(priv, cf, ffw); + + stats->rx_bytes += cf->len; + stats->rx_packets++; + netif_receive_skb(skb); + + return 1; +} + +/** + * ctucan_read_fault_state() - Reads CTU CAN FDs fault confinement state. + * @priv: Pointer to private data + * + * Returns: Fault confinement state of controller + */ +static enum can_state ctucan_read_fault_state(struct ctucan_priv *priv) +{ + u32 fs; + u32 rec_tec; + u32 ewl; + + fs = ctucan_read32(priv, CTUCANFD_EWL); + rec_tec = ctucan_read32(priv, CTUCANFD_REC); + ewl = FIELD_GET(REG_EWL_EW_LIMIT, fs); + + if (FIELD_GET(REG_EWL_ERA, fs)) { + if (ewl > FIELD_GET(REG_REC_REC_VAL, rec_tec) && + ewl > FIELD_GET(REG_REC_TEC_VAL, rec_tec)) + return CAN_STATE_ERROR_ACTIVE; + else + return CAN_STATE_ERROR_WARNING; + } else if (FIELD_GET(REG_EWL_ERP, fs)) { + return CAN_STATE_ERROR_PASSIVE; + } else if (FIELD_GET(REG_EWL_BOF, fs)) { + return CAN_STATE_BUS_OFF; + } + + WARN(true, "Invalid error state"); + return CAN_STATE_ERROR_PASSIVE; +} + +/** + * ctucan_get_rec_tec() - Reads REC/TEC counter values from controller + * @priv: Pointer to private data + * @bec: Pointer to Error counter structure + */ +static void ctucan_get_rec_tec(struct ctucan_priv *priv, struct can_berr_counter *bec) +{ + u32 err_ctrs = ctucan_read32(priv, CTUCANFD_REC); + + bec->rxerr = FIELD_GET(REG_REC_REC_VAL, err_ctrs); + bec->txerr = FIELD_GET(REG_REC_TEC_VAL, err_ctrs); +} + +/** + * ctucan_err_interrupt() - Error frame ISR + * @ndev: net_device pointer + * @isr: interrupt status register value + * + * This is the CAN error interrupt and it will check the type of error and forward the error + * frame to upper layers. 
*/ +static void ctucan_err_interrupt(struct net_device *ndev, u32 isr) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + struct can_frame *cf; + struct sk_buff *skb; + enum can_state state; + struct can_berr_counter bec; + u32 err_capt_alc; + int dologerr = net_ratelimit(); + + ctucan_get_rec_tec(priv, &bec); + state = ctucan_read_fault_state(priv); + err_capt_alc = ctucan_read32(priv, CTUCANFD_ERR_CAPT); + + if (dologerr) + netdev_info(ndev, "%s: ISR = 0x%08x, rxerr %d, txerr %d, error type %lu, pos %lu, ALC id_field %lu, bit %lu\n", + __func__, isr, bec.rxerr, bec.txerr, + FIELD_GET(REG_ERR_CAPT_ERR_TYPE, err_capt_alc), + FIELD_GET(REG_ERR_CAPT_ERR_POS, err_capt_alc), + FIELD_GET(REG_ERR_CAPT_ALC_ID_FIELD, err_capt_alc), + FIELD_GET(REG_ERR_CAPT_ALC_BIT, err_capt_alc)); + + skb = alloc_can_err_skb(ndev, &cf); + + /* EWLI: error warning limit condition met + * FCSI: fault confinement state changed + * ALI: arbitration lost (just informative) + * BEI: bus error interrupt + */ + if (FIELD_GET(REG_INT_STAT_FCSI, isr) || FIELD_GET(REG_INT_STAT_EWLI, isr)) { + netdev_info(ndev, "state changes from %s to %s\n", + ctucan_state_to_str(priv->can.state), + ctucan_state_to_str(state)); + + if (priv->can.state == state) + netdev_warn(ndev, + "current and previous state is the same! (missed interrupt?)\n"); + + priv->can.state = state; + switch (state) { + case CAN_STATE_BUS_OFF: + priv->can.can_stats.bus_off++; + can_bus_off(ndev); + if (skb) + cf->can_id |= CAN_ERR_BUSOFF; + break; + case CAN_STATE_ERROR_PASSIVE: + priv->can.can_stats.error_passive++; + if (skb) { + cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT; + cf->data[1] = (bec.rxerr > 127) ? + CAN_ERR_CRTL_RX_PASSIVE : + CAN_ERR_CRTL_TX_PASSIVE; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + } + break; + case CAN_STATE_ERROR_WARNING: + priv->can.can_stats.error_warning++; + if (skb) { + cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT; + cf->data[1] |= (bec.txerr > bec.rxerr) ? + CAN_ERR_CRTL_TX_WARNING : + CAN_ERR_CRTL_RX_WARNING; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + } + break; + case CAN_STATE_ERROR_ACTIVE: + if (skb) { + cf->can_id |= CAN_ERR_CNT; + cf->data[1] = CAN_ERR_CRTL_ACTIVE; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + } + break; + default: + netdev_warn(ndev, "unhandled error state (%d:%s)!\n", + state, ctucan_state_to_str(state)); + break; + } + } + + /* Check for Arbitration Lost interrupt */ + if (FIELD_GET(REG_INT_STAT_ALI, isr)) { + if (dologerr) + netdev_info(ndev, "arbitration lost\n"); + priv->can.can_stats.arbitration_lost++; + if (skb) { + cf->can_id |= CAN_ERR_LOSTARB; + cf->data[0] = CAN_ERR_LOSTARB_UNSPEC; + } + } + + /* Check for Bus Error interrupt */ + if (FIELD_GET(REG_INT_STAT_BEI, isr)) { + netdev_info(ndev, "bus error\n"); + priv->can.can_stats.bus_error++; + stats->rx_errors++; + if (skb) { + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + cf->data[2] = CAN_ERR_PROT_UNSPEC; + cf->data[3] = CAN_ERR_PROT_LOC_UNSPEC; + } + } + + if (skb) { + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_rx(skb); + } +} + +/** + * ctucan_rx_poll() - Poll routine for rx packets (NAPI) + * @napi: NAPI structure pointer + * @quota: Max number of rx packets to be processed. + * + * This is the poll routine for the Rx part. It will process at most quota packets.
+ * + * Return: Number of packets received + */ +static int ctucan_rx_poll(struct napi_struct *napi, int quota) +{ + struct net_device *ndev = napi->dev; + struct ctucan_priv *priv = netdev_priv(ndev); + int work_done = 0; + u32 status; + u32 framecnt; + int res = 1; + + framecnt = FIELD_GET(REG_RX_STATUS_RXFRC, ctucan_read32(priv, CTUCANFD_RX_STATUS)); + while (framecnt && work_done < quota && res > 0) { + res = ctucan_rx(ndev); + work_done++; + framecnt = FIELD_GET(REG_RX_STATUS_RXFRC, ctucan_read32(priv, CTUCANFD_RX_STATUS)); + } + + /* Check for RX FIFO Overflow */ + status = ctucan_read32(priv, CTUCANFD_STATUS); + if (FIELD_GET(REG_STATUS_DOR, status)) { + struct net_device_stats *stats = &ndev->stats; + struct can_frame *cf; + struct sk_buff *skb; + + netdev_info(ndev, "rx_poll: rx fifo overflow\n"); + stats->rx_over_errors++; + stats->rx_errors++; + skb = alloc_can_err_skb(ndev, &cf); + if (skb) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_rx(skb); + } + + /* Clear Data Overrun */ + ctucan_write32(priv, CTUCANFD_COMMAND, REG_COMMAND_CDO); + } + + if (!framecnt && res != 0) { + if (napi_complete_done(napi, work_done)) { + /* Clear and enable RBNEI. It is level-triggered, so + * there is no race condition. + */ + ctucan_write32(priv, CTUCANFD_INT_STAT, REG_INT_STAT_RBNEI); + ctucan_write32(priv, CTUCANFD_INT_MASK_CLR, REG_INT_STAT_RBNEI); + } + } + + return work_done; +} + +/** + * ctucan_rotate_txb_prio() - Rotates priorities of TXT Buffers + * @ndev: net_device pointer + */ +static void ctucan_rotate_txb_prio(struct net_device *ndev) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + u32 prio = priv->txb_prio; + + prio = (prio << 4) | ((prio >> ((priv->ntxbufs - 1) * 4)) & 0xF); + ctucan_netdev_dbg(ndev, "%s: from 0x%08x to 0x%08x\n", __func__, priv->txb_prio, prio); + priv->txb_prio = prio; + ctucan_write32(priv, CTUCANFD_TX_PRIORITY, prio); +} + +/** + * ctucan_tx_interrupt() - Tx done Isr + * @ndev: net_device pointer + */ +static void ctucan_tx_interrupt(struct net_device *ndev) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + bool first = true; + bool some_buffers_processed; + unsigned long flags; + enum ctucan_txtb_status txtb_status; + u32 txtb_id; + + /* read tx_status + * if txb[n].finished (bit 2) + * if ok -> echo + * if error / aborted -> ?? (find how to handle oneshot mode) + * txb_tail++ + */ + do { + spin_lock_irqsave(&priv->tx_lock, flags); + + some_buffers_processed = false; + while ((int)(priv->txb_head - priv->txb_tail) > 0) { + txtb_id = priv->txb_tail % priv->ntxbufs; + txtb_status = ctucan_get_tx_status(priv, txtb_id); + + ctucan_netdev_dbg(ndev, "TXI: TXB#%u: status 0x%x\n", txtb_id, txtb_status); + + switch (txtb_status) { + case TXT_TOK: + ctucan_netdev_dbg(ndev, "TXT_OK\n"); + stats->tx_bytes += can_get_echo_skb(ndev, txtb_id, NULL); + stats->tx_packets++; + break; + case TXT_ERR: + /* This indicated that retransmit limit has been reached. Obviously + * we should not echo the frame, but also not indicate any kind of + * error. If desired, it was already reported (possible multiple + * times) on each arbitration lost. + */ + netdev_warn(ndev, "TXB in Error state\n"); + can_free_echo_skb(ndev, txtb_id, NULL); + stats->tx_dropped++; + break; + case TXT_ABT: + /* Same as for TXT_ERR, only with different cause. We *could* + * re-queue the frame, but multiqueue/abort is not supported yet + * anyway. 
+ */ + netdev_warn(ndev, "TXB in Aborted state\n"); + can_free_echo_skb(ndev, txtb_id, NULL); + stats->tx_dropped++; + break; + default: + /* Bug only if the first buffer is not finished, otherwise it is + * pretty much expected. + */ + if (first) { + netdev_err(ndev, + "BUG: TXB#%u not in a finished state (0x%x)!\n", + txtb_id, txtb_status); + spin_unlock_irqrestore(&priv->tx_lock, flags); + /* do not clear nor wake */ + return; + } + goto clear; + } + priv->txb_tail++; + first = false; + some_buffers_processed = true; + /* Adjust priorities *before* marking the buffer as empty. */ + ctucan_rotate_txb_prio(ndev); + ctucan_give_txtb_cmd(priv, TXT_CMD_SET_EMPTY, txtb_id); + } +clear: + spin_unlock_irqrestore(&priv->tx_lock, flags); + + /* If no buffers were processed this time, we cannot clear - that would introduce + * a race condition. + */ + if (some_buffers_processed) { + /* Clear the interrupt again. We do not want to receive again interrupt for + * the buffer already handled. If it is the last finished one then it would + * cause log of spurious interrupt. + */ + ctucan_write32(priv, CTUCANFD_INT_STAT, REG_INT_STAT_TXBHCI); + } + } while (some_buffers_processed); + + spin_lock_irqsave(&priv->tx_lock, flags); + + /* Check if at least one TX buffer is free */ + if (CTU_CAN_FD_TXTNF(priv)) + netif_wake_queue(ndev); + + spin_unlock_irqrestore(&priv->tx_lock, flags); +} + +/** + * ctucan_interrupt() - CAN Isr + * @irq: irq number + * @dev_id: device id pointer + * + * This is the CTU CAN FD ISR. It checks for the type of interrupt + * and invokes the corresponding ISR. + * + * Return: + * IRQ_NONE - If CAN device is in sleep mode, IRQ_HANDLED otherwise + */ +static irqreturn_t ctucan_interrupt(int irq, void *dev_id) +{ + struct net_device *ndev = (struct net_device *)dev_id; + struct ctucan_priv *priv = netdev_priv(ndev); + u32 isr, icr; + u32 imask; + int irq_loops; + + for (irq_loops = 0; irq_loops < 10000; irq_loops++) { + /* Get the interrupt status */ + isr = ctucan_read32(priv, CTUCANFD_INT_STAT); + + if (!isr) + return irq_loops ? IRQ_HANDLED : IRQ_NONE; + + /* Receive Buffer Not Empty Interrupt */ + if (FIELD_GET(REG_INT_STAT_RBNEI, isr)) { + ctucan_netdev_dbg(ndev, "RXBNEI\n"); + /* Mask RXBNEI the first, then clear interrupt and schedule NAPI. Even if + * another IRQ fires, RBNEI will always be 0 (masked). 
+ */ + icr = REG_INT_STAT_RBNEI; + ctucan_write32(priv, CTUCANFD_INT_MASK_SET, icr); + ctucan_write32(priv, CTUCANFD_INT_STAT, icr); + napi_schedule(&priv->napi); + } + + /* TXT Buffer HW Command Interrupt */ + if (FIELD_GET(REG_INT_STAT_TXBHCI, isr)) { + ctucan_netdev_dbg(ndev, "TXBHCI\n"); + /* Cleared inside */ + ctucan_tx_interrupt(ndev); + } + + /* Error interrupts */ + if (FIELD_GET(REG_INT_STAT_EWLI, isr) || + FIELD_GET(REG_INT_STAT_FCSI, isr) || + FIELD_GET(REG_INT_STAT_ALI, isr)) { + icr = isr & (REG_INT_STAT_EWLI | REG_INT_STAT_FCSI | REG_INT_STAT_ALI); + + ctucan_netdev_dbg(ndev, "some ERR interrupt: clearing 0x%08x\n", icr); + ctucan_write32(priv, CTUCANFD_INT_STAT, icr); + ctucan_err_interrupt(ndev, isr); + } + /* Ignore RI, TI, LFI, RFI, BSI */ + } + + netdev_err(ndev, "%s: stuck interrupt (isr=0x%08x), stopping\n", __func__, isr); + + if (FIELD_GET(REG_INT_STAT_TXBHCI, isr)) { + int i; + + netdev_err(ndev, "txb_head=0x%08x txb_tail=0x%08x\n", + priv->txb_head, priv->txb_tail); + for (i = 0; i < priv->ntxbufs; i++) { + u32 status = ctucan_get_tx_status(priv, i); + + netdev_err(ndev, "txb[%d] txb status=0x%08x\n", i, status); + } + } + + imask = 0xffffffff; + ctucan_write32(priv, CTUCANFD_INT_ENA_CLR, imask); + ctucan_write32(priv, CTUCANFD_INT_MASK_SET, imask); + + return IRQ_HANDLED; +} + +/** + * ctucan_chip_stop() - Driver stop routine + * @ndev: Pointer to net_device structure + * + * This is the drivers stop routine. It will disable the + * interrupts and disable the controller. + */ +static void ctucan_chip_stop(struct net_device *ndev) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + u32 mask = 0xffffffff; + u32 mode; + + /* Disable interrupts and disable CAN */ + ctucan_write32(priv, CTUCANFD_INT_ENA_CLR, mask); + ctucan_write32(priv, CTUCANFD_INT_MASK_SET, mask); + mode = ctucan_read32(priv, CTUCANFD_MODE); + mode &= ~REG_MODE_ENA; + ctucan_write32(priv, CTUCANFD_MODE, mode); + + priv->can.state = CAN_STATE_STOPPED; +} + +/** + * ctucan_open() - Driver open routine + * @ndev: Pointer to net_device structure + * + * This is the driver open routine. 
+ * Return: 0 on success and failure value on error + */ +static int ctucan_open(struct net_device *ndev) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + int ret; + + ret = pm_runtime_get_sync(priv->dev); + if (ret < 0) { + netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", + __func__, ret); + pm_runtime_put_noidle(priv->dev); + return ret; + } + + ret = ctucan_reset(ndev); + if (ret < 0) + goto err_reset; + + /* Common open */ + ret = open_candev(ndev); + if (ret) { + netdev_warn(ndev, "open_candev failed!\n"); + goto err_open; + } + + ret = request_irq(ndev->irq, ctucan_interrupt, priv->irq_flags, ndev->name, ndev); + if (ret < 0) { + netdev_err(ndev, "irq allocation for CAN failed\n"); + goto err_irq; + } + + ret = ctucan_chip_start(ndev); + if (ret < 0) { + netdev_err(ndev, "ctucan_chip_start failed!\n"); + goto err_chip_start; + } + + netdev_info(ndev, "ctu_can_fd device registered\n"); + napi_enable(&priv->napi); + netif_start_queue(ndev); + + return 0; + +err_chip_start: + free_irq(ndev->irq, ndev); +err_irq: + close_candev(ndev); +err_open: +err_reset: + pm_runtime_put(priv->dev); + + return ret; +} + +/** + * ctucan_close() - Driver close routine + * @ndev: Pointer to net_device structure + * + * Return: 0 always + */ +static int ctucan_close(struct net_device *ndev) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + + netif_stop_queue(ndev); + napi_disable(&priv->napi); + ctucan_chip_stop(ndev); + free_irq(ndev->irq, ndev); + close_candev(ndev); + + pm_runtime_put(priv->dev); + + return 0; +} + +/** + * ctucan_get_berr_counter() - error counter routine + * @ndev: Pointer to net_device structure + * @bec: Pointer to can_berr_counter structure + * + * This is the driver error counter routine. + * Return: 0 on success and failure value on error + */ +static int ctucan_get_berr_counter(const struct net_device *ndev, struct can_berr_counter *bec) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + int ret; + + ret = pm_runtime_get_sync(priv->dev); + if (ret < 0) { + netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", __func__, ret); + pm_runtime_put_noidle(priv->dev); + return ret; + } + + ctucan_get_rec_tec(priv, bec); + pm_runtime_put(priv->dev); + + return 0; +} + +static const struct net_device_ops ctucan_netdev_ops = { + .ndo_open = ctucan_open, + .ndo_stop = ctucan_close, + .ndo_start_xmit = ctucan_start_xmit, +}; + +static const struct ethtool_ops ctucan_ethtool_ops = { + .get_ts_info = ethtool_op_get_ts_info, +}; + +int ctucan_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct ctucan_priv *priv = netdev_priv(ndev); + + if (netif_running(ndev)) { + netif_stop_queue(ndev); + netif_device_detach(ndev); + } + + priv->can.state = CAN_STATE_SLEEPING; + + return 0; +} +EXPORT_SYMBOL(ctucan_suspend); + +int ctucan_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct ctucan_priv *priv = netdev_priv(ndev); + + priv->can.state = CAN_STATE_ERROR_ACTIVE; + + if (netif_running(ndev)) { + netif_device_attach(ndev); + netif_start_queue(ndev); + } + + return 0; +} +EXPORT_SYMBOL(ctucan_resume); + +int ctucan_probe_common(struct device *dev, void __iomem *addr, int irq, unsigned int ntxbufs, + unsigned long can_clk_rate, int pm_enable_call, + void (*set_drvdata_fnc)(struct device *dev, struct net_device *ndev)) +{ + struct ctucan_priv *priv; + struct net_device *ndev; + int ret; + + /* Create a CAN device instance */ + ndev = alloc_candev(sizeof(struct ctucan_priv), ntxbufs); + if (!ndev) + return -ENOMEM; + + 
priv = netdev_priv(ndev); + spin_lock_init(&priv->tx_lock); + INIT_LIST_HEAD(&priv->peers_on_pdev); + priv->ntxbufs = ntxbufs; + priv->dev = dev; + priv->can.bittiming_const = &ctu_can_fd_bit_timing_max; + priv->can.fd.data_bittiming_const = &ctu_can_fd_bit_timing_data_max; + priv->can.do_set_mode = ctucan_do_set_mode; + + /* Needed for timing adjustment to be performed as soon as possible */ + priv->can.do_set_bittiming = ctucan_set_bittiming; + priv->can.fd.do_set_data_bittiming = ctucan_set_data_bittiming; + + priv->can.do_get_berr_counter = ctucan_get_berr_counter; + priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK + | CAN_CTRLMODE_LISTENONLY + | CAN_CTRLMODE_FD + | CAN_CTRLMODE_PRESUME_ACK + | CAN_CTRLMODE_BERR_REPORTING + | CAN_CTRLMODE_FD_NON_ISO + | CAN_CTRLMODE_ONE_SHOT; + priv->mem_base = addr; + + /* Get IRQ for the device */ + ndev->irq = irq; + ndev->flags |= IFF_ECHO; /* We support local echo */ + + if (set_drvdata_fnc) + set_drvdata_fnc(dev, ndev); + SET_NETDEV_DEV(ndev, dev); + ndev->netdev_ops = &ctucan_netdev_ops; + ndev->ethtool_ops = &ctucan_ethtool_ops; + + /* Getting the can_clk info */ + if (!can_clk_rate) { + priv->can_clk = devm_clk_get(dev, NULL); + if (IS_ERR(priv->can_clk)) { + dev_err(dev, "Device clock not found.\n"); + ret = PTR_ERR(priv->can_clk); + goto err_free; + } + can_clk_rate = clk_get_rate(priv->can_clk); + } + + priv->write_reg = ctucan_write32_le; + priv->read_reg = ctucan_read32_le; + + if (pm_enable_call) + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", + __func__, ret); + pm_runtime_put_noidle(priv->dev); + goto err_pmdisable; + } + + /* Check for big-endianity and set according IO-accessors */ + if ((ctucan_read32(priv, CTUCANFD_DEVICE_ID) & 0xFFFF) != CTUCANFD_ID) { + priv->write_reg = ctucan_write32_be; + priv->read_reg = ctucan_read32_be; + if ((ctucan_read32(priv, CTUCANFD_DEVICE_ID) & 0xFFFF) != CTUCANFD_ID) { + netdev_err(ndev, "CTU_CAN_FD signature not found\n"); + ret = -ENODEV; + goto err_deviceoff; + } + } + + ret = ctucan_reset(ndev); + if (ret < 0) + goto err_deviceoff; + + priv->can.clock.freq = can_clk_rate; + + netif_napi_add(ndev, &priv->napi, ctucan_rx_poll); + + ret = register_candev(ndev); + if (ret) { + dev_err(dev, "fail to register failed (err=%d)\n", ret); + goto err_deviceoff; + } + + pm_runtime_put(dev); + + netdev_dbg(ndev, "mem_base=0x%p irq=%d clock=%d, no. 
of txt buffers:%d\n", + priv->mem_base, ndev->irq, priv->can.clock.freq, priv->ntxbufs); + + return 0; + +err_deviceoff: + pm_runtime_put(priv->dev); +err_pmdisable: + if (pm_enable_call) + pm_runtime_disable(dev); +err_free: + list_del_init(&priv->peers_on_pdev); + free_candev(ndev); + return ret; +} +EXPORT_SYMBOL(ctucan_probe_common); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Martin Jerabek <martin.jerabek01@gmail.com>"); +MODULE_AUTHOR("Pavel Pisa <pisa@cmp.felk.cvut.cz>"); +MODULE_AUTHOR("Ondrej Ille <ondrej.ille@gmail.com>"); +MODULE_DESCRIPTION("CTU CAN FD interface"); diff --git a/drivers/net/can/ctucanfd/ctucanfd_kframe.h b/drivers/net/can/ctucanfd/ctucanfd_kframe.h new file mode 100644 index 000000000000..3491299eaac2 --- /dev/null +++ b/drivers/net/can/ctucanfd/ctucanfd_kframe.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/******************************************************************************* + * + * CTU CAN FD IP Core + * + * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU + * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded + * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU + * Copyright (C) 2018-2021 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded + * + * Project advisors: + * Jiri Novak <jnovak@fel.cvut.cz> + * Pavel Pisa <pisa@cmp.felk.cvut.cz> + * + * Department of Measurement (http://meas.fel.cvut.cz/) + * Faculty of Electrical Engineering (http://www.fel.cvut.cz) + * Czech Technical University (http://www.cvut.cz/) + ******************************************************************************/ + +/* This file is autogenerated, DO NOT EDIT! */ + +#ifndef __CTU_CAN_FD_CAN_FD_FRAME_FORMAT__ +#define __CTU_CAN_FD_CAN_FD_FRAME_FORMAT__ + +#include <linux/bits.h> + +/* CAN_Frame_format memory map */ +enum ctu_can_fd_can_frame_format { + CTUCANFD_FRAME_FORMAT_W = 0x0, + CTUCANFD_IDENTIFIER_W = 0x4, + CTUCANFD_TIMESTAMP_L_W = 0x8, + CTUCANFD_TIMESTAMP_U_W = 0xc, + CTUCANFD_DATA_1_4_W = 0x10, + CTUCANFD_DATA_5_8_W = 0x14, + CTUCANFD_DATA_61_64_W = 0x4c, +}; + +/* CAN_FD_Frame_format memory region */ + +/* FRAME_FORMAT_W registers */ +#define REG_FRAME_FORMAT_W_DLC GENMASK(3, 0) +#define REG_FRAME_FORMAT_W_RTR BIT(5) +#define REG_FRAME_FORMAT_W_IDE BIT(6) +#define REG_FRAME_FORMAT_W_FDF BIT(7) +#define REG_FRAME_FORMAT_W_BRS BIT(9) +#define REG_FRAME_FORMAT_W_ESI_RSV BIT(10) +#define REG_FRAME_FORMAT_W_RWCNT GENMASK(15, 11) + +/* IDENTIFIER_W registers */ +#define REG_IDENTIFIER_W_IDENTIFIER_EXT GENMASK(17, 0) +#define REG_IDENTIFIER_W_IDENTIFIER_BASE GENMASK(28, 18) + +/* TIMESTAMP_L_W registers */ +#define REG_TIMESTAMP_L_W_TIME_STAMP_L_W GENMASK(31, 0) + +/* TIMESTAMP_U_W registers */ +#define REG_TIMESTAMP_U_W_TIMESTAMP_U_W GENMASK(31, 0) + +/* DATA_1_4_W registers */ +#define REG_DATA_1_4_W_DATA_1 GENMASK(7, 0) +#define REG_DATA_1_4_W_DATA_2 GENMASK(15, 8) +#define REG_DATA_1_4_W_DATA_3 GENMASK(23, 16) +#define REG_DATA_1_4_W_DATA_4 GENMASK(31, 24) + +/* DATA_5_8_W registers */ +#define REG_DATA_5_8_W_DATA_5 GENMASK(7, 0) +#define REG_DATA_5_8_W_DATA_6 GENMASK(15, 8) +#define REG_DATA_5_8_W_DATA_7 GENMASK(23, 16) +#define REG_DATA_5_8_W_DATA_8 GENMASK(31, 24) + +/* DATA_61_64_W registers */ +#define REG_DATA_61_64_W_DATA_61 GENMASK(7, 0) +#define REG_DATA_61_64_W_DATA_62 GENMASK(15, 8) +#define REG_DATA_61_64_W_DATA_63 GENMASK(23, 16) +#define REG_DATA_61_64_W_DATA_64 GENMASK(31, 24) + +#endif diff --git a/drivers/net/can/ctucanfd/ctucanfd_kregs.h 
b/drivers/net/can/ctucanfd/ctucanfd_kregs.h new file mode 100644 index 000000000000..0c181ab51bf8 --- /dev/null +++ b/drivers/net/can/ctucanfd/ctucanfd_kregs.h @@ -0,0 +1,349 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/******************************************************************************* + * + * CTU CAN FD IP Core + * + * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU + * Copyright (C) 2018-2022 Ondrej Ille <ondrej.ille@gmail.com> self-funded + * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU + * Copyright (C) 2018-2022 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded + * + * Project advisors: + * Jiri Novak <jnovak@fel.cvut.cz> + * Pavel Pisa <pisa@cmp.felk.cvut.cz> + * + * Department of Measurement (http://meas.fel.cvut.cz/) + * Faculty of Electrical Engineering (http://www.fel.cvut.cz) + * Czech Technical University (http://www.cvut.cz/) + ******************************************************************************/ + +/* This file is autogenerated, DO NOT EDIT! */ + +#ifndef __CTU_CAN_FD_CAN_FD_REGISTER_MAP__ +#define __CTU_CAN_FD_CAN_FD_REGISTER_MAP__ + +#include <linux/bits.h> + +/* CAN_Registers memory map */ +enum ctu_can_fd_can_registers { + CTUCANFD_DEVICE_ID = 0x0, + CTUCANFD_VERSION = 0x2, + CTUCANFD_MODE = 0x4, + CTUCANFD_SETTINGS = 0x6, + CTUCANFD_STATUS = 0x8, + CTUCANFD_COMMAND = 0xc, + CTUCANFD_INT_STAT = 0x10, + CTUCANFD_INT_ENA_SET = 0x14, + CTUCANFD_INT_ENA_CLR = 0x18, + CTUCANFD_INT_MASK_SET = 0x1c, + CTUCANFD_INT_MASK_CLR = 0x20, + CTUCANFD_BTR = 0x24, + CTUCANFD_BTR_FD = 0x28, + CTUCANFD_EWL = 0x2c, + CTUCANFD_ERP = 0x2d, + CTUCANFD_FAULT_STATE = 0x2e, + CTUCANFD_REC = 0x30, + CTUCANFD_TEC = 0x32, + CTUCANFD_ERR_NORM = 0x34, + CTUCANFD_ERR_FD = 0x36, + CTUCANFD_CTR_PRES = 0x38, + CTUCANFD_FILTER_A_MASK = 0x3c, + CTUCANFD_FILTER_A_VAL = 0x40, + CTUCANFD_FILTER_B_MASK = 0x44, + CTUCANFD_FILTER_B_VAL = 0x48, + CTUCANFD_FILTER_C_MASK = 0x4c, + CTUCANFD_FILTER_C_VAL = 0x50, + CTUCANFD_FILTER_RAN_LOW = 0x54, + CTUCANFD_FILTER_RAN_HIGH = 0x58, + CTUCANFD_FILTER_CONTROL = 0x5c, + CTUCANFD_FILTER_STATUS = 0x5e, + CTUCANFD_RX_MEM_INFO = 0x60, + CTUCANFD_RX_POINTERS = 0x64, + CTUCANFD_RX_STATUS = 0x68, + CTUCANFD_RX_SETTINGS = 0x6a, + CTUCANFD_RX_DATA = 0x6c, + CTUCANFD_TX_STATUS = 0x70, + CTUCANFD_TX_COMMAND = 0x74, + CTUCANFD_TXTB_INFO = 0x76, + CTUCANFD_TX_PRIORITY = 0x78, + CTUCANFD_ERR_CAPT = 0x7c, + CTUCANFD_RETR_CTR = 0x7d, + CTUCANFD_ALC = 0x7e, + CTUCANFD_TS_INFO = 0x7f, + CTUCANFD_TRV_DELAY = 0x80, + CTUCANFD_SSP_CFG = 0x82, + CTUCANFD_RX_FR_CTR = 0x84, + CTUCANFD_TX_FR_CTR = 0x88, + CTUCANFD_DEBUG_REGISTER = 0x8c, + CTUCANFD_YOLO_REG = 0x90, + CTUCANFD_TIMESTAMP_LOW = 0x94, + CTUCANFD_TIMESTAMP_HIGH = 0x98, + CTUCANFD_TXTB1_DATA_1 = 0x100, + CTUCANFD_TXTB1_DATA_2 = 0x104, + CTUCANFD_TXTB1_DATA_20 = 0x14c, + CTUCANFD_TXTB2_DATA_1 = 0x200, + CTUCANFD_TXTB2_DATA_2 = 0x204, + CTUCANFD_TXTB2_DATA_20 = 0x24c, + CTUCANFD_TXTB3_DATA_1 = 0x300, + CTUCANFD_TXTB3_DATA_2 = 0x304, + CTUCANFD_TXTB3_DATA_20 = 0x34c, + CTUCANFD_TXTB4_DATA_1 = 0x400, + CTUCANFD_TXTB4_DATA_2 = 0x404, + CTUCANFD_TXTB4_DATA_20 = 0x44c, +}; + +/* Control_registers memory region */ + +/* DEVICE_ID VERSION registers */ +#define REG_DEVICE_ID_DEVICE_ID GENMASK(15, 0) +#define REG_DEVICE_ID_VER_MINOR GENMASK(23, 16) +#define REG_DEVICE_ID_VER_MAJOR GENMASK(31, 24) + +/* MODE SETTINGS registers */ +#define REG_MODE_RST BIT(0) +#define REG_MODE_BMM BIT(1) +#define REG_MODE_STM BIT(2) +#define REG_MODE_AFM BIT(3) +#define REG_MODE_FDE 
BIT(4) +#define REG_MODE_TTTM BIT(5) +#define REG_MODE_ROM BIT(6) +#define REG_MODE_ACF BIT(7) +#define REG_MODE_TSTM BIT(8) +#define REG_MODE_RXBAM BIT(9) +#define REG_MODE_SAM BIT(11) +#define REG_MODE_RTRLE BIT(16) +#define REG_MODE_RTRTH GENMASK(20, 17) +#define REG_MODE_ILBP BIT(21) +#define REG_MODE_ENA BIT(22) +#define REG_MODE_NISOFD BIT(23) +#define REG_MODE_PEX BIT(24) +#define REG_MODE_TBFBO BIT(25) +#define REG_MODE_FDRF BIT(26) + +/* STATUS registers */ +#define REG_STATUS_RXNE BIT(0) +#define REG_STATUS_DOR BIT(1) +#define REG_STATUS_TXNF BIT(2) +#define REG_STATUS_EFT BIT(3) +#define REG_STATUS_RXS BIT(4) +#define REG_STATUS_TXS BIT(5) +#define REG_STATUS_EWL BIT(6) +#define REG_STATUS_IDLE BIT(7) +#define REG_STATUS_PEXS BIT(8) +#define REG_STATUS_STCNT BIT(16) + +/* COMMAND registers */ +#define REG_COMMAND_RXRPMV BIT(1) +#define REG_COMMAND_RRB BIT(2) +#define REG_COMMAND_CDO BIT(3) +#define REG_COMMAND_ERCRST BIT(4) +#define REG_COMMAND_RXFCRST BIT(5) +#define REG_COMMAND_TXFCRST BIT(6) +#define REG_COMMAND_CPEXS BIT(7) + +/* INT_STAT registers */ +#define REG_INT_STAT_RXI BIT(0) +#define REG_INT_STAT_TXI BIT(1) +#define REG_INT_STAT_EWLI BIT(2) +#define REG_INT_STAT_DOI BIT(3) +#define REG_INT_STAT_FCSI BIT(4) +#define REG_INT_STAT_ALI BIT(5) +#define REG_INT_STAT_BEI BIT(6) +#define REG_INT_STAT_OFI BIT(7) +#define REG_INT_STAT_RXFI BIT(8) +#define REG_INT_STAT_BSI BIT(9) +#define REG_INT_STAT_RBNEI BIT(10) +#define REG_INT_STAT_TXBHCI BIT(11) + +/* INT_ENA_SET registers */ +#define REG_INT_ENA_SET_INT_ENA_SET GENMASK(11, 0) + +/* INT_ENA_CLR registers */ +#define REG_INT_ENA_CLR_INT_ENA_CLR GENMASK(11, 0) + +/* INT_MASK_SET registers */ +#define REG_INT_MASK_SET_INT_MASK_SET GENMASK(11, 0) + +/* INT_MASK_CLR registers */ +#define REG_INT_MASK_CLR_INT_MASK_CLR GENMASK(11, 0) + +/* BTR registers */ +#define REG_BTR_PROP GENMASK(6, 0) +#define REG_BTR_PH1 GENMASK(12, 7) +#define REG_BTR_PH2 GENMASK(18, 13) +#define REG_BTR_BRP GENMASK(26, 19) +#define REG_BTR_SJW GENMASK(31, 27) + +/* BTR_FD registers */ +#define REG_BTR_FD_PROP_FD GENMASK(5, 0) +#define REG_BTR_FD_PH1_FD GENMASK(11, 7) +#define REG_BTR_FD_PH2_FD GENMASK(17, 13) +#define REG_BTR_FD_BRP_FD GENMASK(26, 19) +#define REG_BTR_FD_SJW_FD GENMASK(31, 27) + +/* EWL ERP FAULT_STATE registers */ +#define REG_EWL_EW_LIMIT GENMASK(7, 0) +#define REG_EWL_ERP_LIMIT GENMASK(15, 8) +#define REG_EWL_ERA BIT(16) +#define REG_EWL_ERP BIT(17) +#define REG_EWL_BOF BIT(18) + +/* REC TEC registers */ +#define REG_REC_REC_VAL GENMASK(8, 0) +#define REG_REC_TEC_VAL GENMASK(24, 16) + +/* ERR_NORM ERR_FD registers */ +#define REG_ERR_NORM_ERR_NORM_VAL GENMASK(15, 0) +#define REG_ERR_NORM_ERR_FD_VAL GENMASK(31, 16) + +/* CTR_PRES registers */ +#define REG_CTR_PRES_CTPV GENMASK(8, 0) +#define REG_CTR_PRES_PTX BIT(9) +#define REG_CTR_PRES_PRX BIT(10) +#define REG_CTR_PRES_ENORM BIT(11) +#define REG_CTR_PRES_EFD BIT(12) + +/* FILTER_A_MASK registers */ +#define REG_FILTER_A_MASK_BIT_MASK_A_VAL GENMASK(28, 0) + +/* FILTER_A_VAL registers */ +#define REG_FILTER_A_VAL_BIT_VAL_A_VAL GENMASK(28, 0) + +/* FILTER_B_MASK registers */ +#define REG_FILTER_B_MASK_BIT_MASK_B_VAL GENMASK(28, 0) + +/* FILTER_B_VAL registers */ +#define REG_FILTER_B_VAL_BIT_VAL_B_VAL GENMASK(28, 0) + +/* FILTER_C_MASK registers */ +#define REG_FILTER_C_MASK_BIT_MASK_C_VAL GENMASK(28, 0) + +/* FILTER_C_VAL registers */ +#define REG_FILTER_C_VAL_BIT_VAL_C_VAL GENMASK(28, 0) + +/* FILTER_RAN_LOW registers */ +#define REG_FILTER_RAN_LOW_BIT_RAN_LOW_VAL GENMASK(28, 0) + 
+/* FILTER_RAN_HIGH registers */ +#define REG_FILTER_RAN_HIGH_BIT_RAN_HIGH_VAL GENMASK(28, 0) + +/* FILTER_CONTROL FILTER_STATUS registers */ +#define REG_FILTER_CONTROL_FANB BIT(0) +#define REG_FILTER_CONTROL_FANE BIT(1) +#define REG_FILTER_CONTROL_FAFB BIT(2) +#define REG_FILTER_CONTROL_FAFE BIT(3) +#define REG_FILTER_CONTROL_FBNB BIT(4) +#define REG_FILTER_CONTROL_FBNE BIT(5) +#define REG_FILTER_CONTROL_FBFB BIT(6) +#define REG_FILTER_CONTROL_FBFE BIT(7) +#define REG_FILTER_CONTROL_FCNB BIT(8) +#define REG_FILTER_CONTROL_FCNE BIT(9) +#define REG_FILTER_CONTROL_FCFB BIT(10) +#define REG_FILTER_CONTROL_FCFE BIT(11) +#define REG_FILTER_CONTROL_FRNB BIT(12) +#define REG_FILTER_CONTROL_FRNE BIT(13) +#define REG_FILTER_CONTROL_FRFB BIT(14) +#define REG_FILTER_CONTROL_FRFE BIT(15) +#define REG_FILTER_CONTROL_SFA BIT(16) +#define REG_FILTER_CONTROL_SFB BIT(17) +#define REG_FILTER_CONTROL_SFC BIT(18) +#define REG_FILTER_CONTROL_SFR BIT(19) + +/* RX_MEM_INFO registers */ +#define REG_RX_MEM_INFO_RX_BUFF_SIZE GENMASK(12, 0) +#define REG_RX_MEM_INFO_RX_MEM_FREE GENMASK(28, 16) + +/* RX_POINTERS registers */ +#define REG_RX_POINTERS_RX_WPP GENMASK(11, 0) +#define REG_RX_POINTERS_RX_RPP GENMASK(27, 16) + +/* RX_STATUS RX_SETTINGS registers */ +#define REG_RX_STATUS_RXE BIT(0) +#define REG_RX_STATUS_RXF BIT(1) +#define REG_RX_STATUS_RXMOF BIT(2) +#define REG_RX_STATUS_RXFRC GENMASK(14, 4) +#define REG_RX_STATUS_RTSOP BIT(16) + +/* RX_DATA registers */ +#define REG_RX_DATA_RX_DATA GENMASK(31, 0) + +/* TX_STATUS registers */ +#define REG_TX_STATUS_TX1S GENMASK(3, 0) +#define REG_TX_STATUS_TX2S GENMASK(7, 4) +#define REG_TX_STATUS_TX3S GENMASK(11, 8) +#define REG_TX_STATUS_TX4S GENMASK(15, 12) +#define REG_TX_STATUS_TX5S GENMASK(19, 16) +#define REG_TX_STATUS_TX6S GENMASK(23, 20) +#define REG_TX_STATUS_TX7S GENMASK(27, 24) +#define REG_TX_STATUS_TX8S GENMASK(31, 28) + +/* TX_COMMAND TXTB_INFO registers */ +#define REG_TX_COMMAND_TXCE BIT(0) +#define REG_TX_COMMAND_TXCR BIT(1) +#define REG_TX_COMMAND_TXCA BIT(2) +#define REG_TX_COMMAND_TXB1 BIT(8) +#define REG_TX_COMMAND_TXB2 BIT(9) +#define REG_TX_COMMAND_TXB3 BIT(10) +#define REG_TX_COMMAND_TXB4 BIT(11) +#define REG_TX_COMMAND_TXB5 BIT(12) +#define REG_TX_COMMAND_TXB6 BIT(13) +#define REG_TX_COMMAND_TXB7 BIT(14) +#define REG_TX_COMMAND_TXB8 BIT(15) +#define REG_TX_COMMAND_TXT_BUFFER_COUNT GENMASK(19, 16) + +/* TX_PRIORITY registers */ +#define REG_TX_PRIORITY_TXT1P GENMASK(2, 0) +#define REG_TX_PRIORITY_TXT2P GENMASK(6, 4) +#define REG_TX_PRIORITY_TXT3P GENMASK(10, 8) +#define REG_TX_PRIORITY_TXT4P GENMASK(14, 12) +#define REG_TX_PRIORITY_TXT5P GENMASK(18, 16) +#define REG_TX_PRIORITY_TXT6P GENMASK(22, 20) +#define REG_TX_PRIORITY_TXT7P GENMASK(26, 24) +#define REG_TX_PRIORITY_TXT8P GENMASK(30, 28) + +/* ERR_CAPT RETR_CTR ALC TS_INFO registers */ +#define REG_ERR_CAPT_ERR_POS GENMASK(4, 0) +#define REG_ERR_CAPT_ERR_TYPE GENMASK(7, 5) +#define REG_ERR_CAPT_RETR_CTR_VAL GENMASK(11, 8) +#define REG_ERR_CAPT_ALC_BIT GENMASK(20, 16) +#define REG_ERR_CAPT_ALC_ID_FIELD GENMASK(23, 21) +#define REG_ERR_CAPT_TS_BITS GENMASK(29, 24) + +/* TRV_DELAY SSP_CFG registers */ +#define REG_TRV_DELAY_TRV_DELAY_VALUE GENMASK(6, 0) +#define REG_TRV_DELAY_SSP_OFFSET GENMASK(23, 16) +#define REG_TRV_DELAY_SSP_SRC GENMASK(25, 24) + +/* RX_FR_CTR registers */ +#define REG_RX_FR_CTR_RX_FR_CTR_VAL GENMASK(31, 0) + +/* TX_FR_CTR registers */ +#define REG_TX_FR_CTR_TX_FR_CTR_VAL GENMASK(31, 0) + +/* DEBUG_REGISTER registers */ +#define REG_DEBUG_REGISTER_STUFF_COUNT GENMASK(2, 0) 
+#define REG_DEBUG_REGISTER_DESTUFF_COUNT GENMASK(5, 3) +#define REG_DEBUG_REGISTER_PC_ARB BIT(6) +#define REG_DEBUG_REGISTER_PC_CON BIT(7) +#define REG_DEBUG_REGISTER_PC_DAT BIT(8) +#define REG_DEBUG_REGISTER_PC_STC BIT(9) +#define REG_DEBUG_REGISTER_PC_CRC BIT(10) +#define REG_DEBUG_REGISTER_PC_CRCD BIT(11) +#define REG_DEBUG_REGISTER_PC_ACK BIT(12) +#define REG_DEBUG_REGISTER_PC_ACKD BIT(13) +#define REG_DEBUG_REGISTER_PC_EOF BIT(14) +#define REG_DEBUG_REGISTER_PC_INT BIT(15) +#define REG_DEBUG_REGISTER_PC_SUSP BIT(16) +#define REG_DEBUG_REGISTER_PC_OVR BIT(17) +#define REG_DEBUG_REGISTER_PC_SOF BIT(18) + +/* YOLO_REG registers */ +#define REG_YOLO_REG_YOLO_VAL GENMASK(31, 0) + +/* TIMESTAMP_LOW registers */ +#define REG_TIMESTAMP_LOW_TIMESTAMP_LOW GENMASK(31, 0) + +/* TIMESTAMP_HIGH registers */ +#define REG_TIMESTAMP_HIGH_TIMESTAMP_HIGH GENMASK(31, 0) + +#endif diff --git a/drivers/net/can/ctucanfd/ctucanfd_pci.c b/drivers/net/can/ctucanfd/ctucanfd_pci.c new file mode 100644 index 000000000000..9da09e7dd63a --- /dev/null +++ b/drivers/net/can/ctucanfd/ctucanfd_pci.c @@ -0,0 +1,290 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * + * CTU CAN FD IP Core + * + * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU + * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded + * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU + * Copyright (C) 2018-2022 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded + * + * Project advisors: + * Jiri Novak <jnovak@fel.cvut.cz> + * Pavel Pisa <pisa@cmp.felk.cvut.cz> + * + * Department of Measurement (http://meas.fel.cvut.cz/) + * Faculty of Electrical Engineering (http://www.fel.cvut.cz) + * Czech Technical University (http://www.cvut.cz/) + ******************************************************************************/ + +#include <linux/module.h> +#include <linux/pci.h> + +#include "ctucanfd.h" + +#ifndef PCI_DEVICE_DATA +#define PCI_DEVICE_DATA(vend, dev, data) \ +.vendor = PCI_VENDOR_ID_##vend, \ +.device = PCI_DEVICE_ID_##vend##_##dev, \ +.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, \ +.driver_data = (kernel_ulong_t)(data) +#endif + +#ifndef PCI_VENDOR_ID_TEDIA +#define PCI_VENDOR_ID_TEDIA 0x1760 +#endif + +#ifndef PCI_DEVICE_ID_TEDIA_CTUCAN_VER21 +#define PCI_DEVICE_ID_TEDIA_CTUCAN_VER21 0xff00 +#endif + +#define CTUCAN_BAR0_CTUCAN_ID 0x0000 +#define CTUCAN_BAR0_CRA_BASE 0x4000 +#define CYCLONE_IV_CRA_A2P_IE (0x0050) + +#define CTUCAN_WITHOUT_CTUCAN_ID 0 +#define CTUCAN_WITH_CTUCAN_ID 1 + +struct ctucan_pci_board_data { + void __iomem *bar0_base; + void __iomem *cra_base; + void __iomem *bar1_base; + struct list_head ndev_list_head; + int use_msi; +}; + +static struct ctucan_pci_board_data *ctucan_pci_get_bdata(struct pci_dev *pdev) +{ + return (struct ctucan_pci_board_data *)pci_get_drvdata(pdev); +} + +static void ctucan_pci_set_drvdata(struct device *dev, + struct net_device *ndev) +{ + struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); + struct ctucan_priv *priv = netdev_priv(ndev); + struct ctucan_pci_board_data *bdata = ctucan_pci_get_bdata(pdev); + + list_add(&priv->peers_on_pdev, &bdata->ndev_list_head); + priv->irq_flags = IRQF_SHARED; +} + +/** + * ctucan_pci_probe - PCI registration call + * @pdev: Handle to the pci device structure + * @ent: Pointer to the entry from ctucan_pci_tbl + * + * This function does all the memory allocation and registration for the CAN + * 
device. + * + * Return: 0 on success and failure value on error + */ +static int ctucan_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct device *dev = &pdev->dev; + unsigned long driver_data = ent->driver_data; + struct ctucan_pci_board_data *bdata; + void __iomem *addr; + void __iomem *cra_addr; + void __iomem *bar0_base; + u32 cra_a2p_ie; + u32 ctucan_id = 0; + int ret; + unsigned int ntxbufs; + unsigned int num_cores = 1; + unsigned int core_i = 0; + int irq; + int msi_ok = 0; + + ret = pci_enable_device(pdev); + if (ret) { + dev_err(dev, "pci_enable_device FAILED\n"); + goto err; + } + + ret = pci_request_regions(pdev, KBUILD_MODNAME); + if (ret) { + dev_err(dev, "pci_request_regions FAILED\n"); + goto err_disable_device; + } + + ret = pci_enable_msi(pdev); + if (!ret) { + dev_info(dev, "MSI enabled\n"); + pci_set_master(pdev); + msi_ok = 1; + } + + dev_info(dev, "ctucan BAR0 0x%08llx 0x%08llx\n", + (long long)pci_resource_start(pdev, 0), + (long long)pci_resource_len(pdev, 0)); + + dev_info(dev, "ctucan BAR1 0x%08llx 0x%08llx\n", + (long long)pci_resource_start(pdev, 1), + (long long)pci_resource_len(pdev, 1)); + + addr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1)); + if (!addr) { + dev_err(dev, "PCI BAR 1 cannot be mapped\n"); + ret = -ENOMEM; + goto err_release_regions; + } + + /* Cyclone IV PCI Express Control Registers Area */ + bar0_base = pci_iomap(pdev, 0, pci_resource_len(pdev, 0)); + if (!bar0_base) { + dev_err(dev, "PCI BAR 0 cannot be mapped\n"); + ret = -EIO; + goto err_pci_iounmap_bar1; + } + + if (driver_data == CTUCAN_WITHOUT_CTUCAN_ID) { + cra_addr = bar0_base; + num_cores = 2; + } else { + cra_addr = bar0_base + CTUCAN_BAR0_CRA_BASE; + ctucan_id = ioread32(bar0_base + CTUCAN_BAR0_CTUCAN_ID); + dev_info(dev, "ctucan_id 0x%08lx\n", (unsigned long)ctucan_id); + num_cores = ctucan_id & 0xf; + } + + irq = pdev->irq; + + ntxbufs = 4; + + bdata = kzalloc(sizeof(*bdata), GFP_KERNEL); + if (!bdata) { + ret = -ENOMEM; + goto err_pci_iounmap_bar0; + } + + INIT_LIST_HEAD(&bdata->ndev_list_head); + bdata->bar0_base = bar0_base; + bdata->cra_base = cra_addr; + bdata->bar1_base = addr; + bdata->use_msi = msi_ok; + + pci_set_drvdata(pdev, bdata); + + ret = ctucan_probe_common(dev, addr, irq, ntxbufs, 100000000, + 0, ctucan_pci_set_drvdata); + if (ret < 0) + goto err_free_board; + + core_i++; + + while (core_i < num_cores) { + addr += 0x4000; + ret = ctucan_probe_common(dev, addr, irq, ntxbufs, 100000000, + 0, ctucan_pci_set_drvdata); + if (ret < 0) { + dev_info(dev, "CTU CAN FD core %d initialization failed\n", + core_i); + break; + } + core_i++; + } + + /* enable interrupt in + * Avalon-MM to PCI Express Interrupt Enable Register + */ + cra_a2p_ie = ioread32(cra_addr + CYCLONE_IV_CRA_A2P_IE); + dev_info(dev, "cra_a2p_ie 0x%08x\n", cra_a2p_ie); + cra_a2p_ie |= 1; + iowrite32(cra_a2p_ie, cra_addr + CYCLONE_IV_CRA_A2P_IE); + cra_a2p_ie = ioread32(cra_addr + CYCLONE_IV_CRA_A2P_IE); + dev_info(dev, "cra_a2p_ie 0x%08x\n", cra_a2p_ie); + + return 0; + +err_free_board: + pci_set_drvdata(pdev, NULL); + kfree(bdata); +err_pci_iounmap_bar0: + pci_iounmap(pdev, cra_addr); +err_pci_iounmap_bar1: + pci_iounmap(pdev, addr); +err_release_regions: + if (msi_ok) + pci_disable_msi(pdev); + pci_release_regions(pdev); +err_disable_device: + pci_disable_device(pdev); +err: + return ret; +} + +/** + * ctucan_pci_remove - Unregister the device after releasing the resources + * @pdev: Handle to the pci device structure + * + * This function frees all the resources allocated 
to the device. + * Return: 0 always + */ +static void ctucan_pci_remove(struct pci_dev *pdev) +{ + struct net_device *ndev; + struct ctucan_priv *priv = NULL; + struct ctucan_pci_board_data *bdata = ctucan_pci_get_bdata(pdev); + + dev_dbg(&pdev->dev, "ctucan_remove"); + + if (!bdata) { + dev_err(&pdev->dev, "%s: no list of devices\n", __func__); + return; + } + + /* disable interrupt in + * Avalon-MM to PCI Express Interrupt Enable Register + */ + if (bdata->cra_base) + iowrite32(0, bdata->cra_base + CYCLONE_IV_CRA_A2P_IE); + + while ((priv = list_first_entry_or_null(&bdata->ndev_list_head, struct ctucan_priv, + peers_on_pdev)) != NULL) { + ndev = priv->can.dev; + + unregister_candev(ndev); + + netif_napi_del(&priv->napi); + + list_del_init(&priv->peers_on_pdev); + free_candev(ndev); + } + + pci_iounmap(pdev, bdata->bar1_base); + + if (bdata->use_msi) + pci_disable_msi(pdev); + + pci_release_regions(pdev); + pci_disable_device(pdev); + + pci_iounmap(pdev, bdata->bar0_base); + + pci_set_drvdata(pdev, NULL); + kfree(bdata); +} + +static SIMPLE_DEV_PM_OPS(ctucan_pci_pm_ops, ctucan_suspend, ctucan_resume); + +static const struct pci_device_id ctucan_pci_tbl[] = { + {PCI_DEVICE_DATA(TEDIA, CTUCAN_VER21, + CTUCAN_WITH_CTUCAN_ID)}, + {}, +}; + +static struct pci_driver ctucan_pci_driver = { + .name = KBUILD_MODNAME, + .id_table = ctucan_pci_tbl, + .probe = ctucan_pci_probe, + .remove = ctucan_pci_remove, + .driver.pm = &ctucan_pci_pm_ops, +}; + +module_pci_driver(ctucan_pci_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Pavel Pisa <pisa@cmp.felk.cvut.cz>"); +MODULE_DESCRIPTION("CTU CAN FD for PCI bus"); diff --git a/drivers/net/can/ctucanfd/ctucanfd_platform.c b/drivers/net/can/ctucanfd/ctucanfd_platform.c new file mode 100644 index 000000000000..70e2577c8541 --- /dev/null +++ b/drivers/net/can/ctucanfd/ctucanfd_platform.c @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * + * CTU CAN FD IP Core + * + * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU + * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded + * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU + * Copyright (C) 2018-2022 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded + * + * Project advisors: + * Jiri Novak <jnovak@fel.cvut.cz> + * Pavel Pisa <pisa@cmp.felk.cvut.cz> + * + * Department of Measurement (http://meas.fel.cvut.cz/) + * Faculty of Electrical Engineering (http://www.fel.cvut.cz) + * Czech Technical University (http://www.cvut.cz/) + ******************************************************************************/ + +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> + +#include "ctucanfd.h" + +#define DRV_NAME "ctucanfd" + +static void ctucan_platform_set_drvdata(struct device *dev, + struct net_device *ndev) +{ + struct platform_device *pdev = container_of(dev, struct platform_device, + dev); + + platform_set_drvdata(pdev, ndev); +} + +/** + * ctucan_platform_probe - Platform registration call + * @pdev: Handle to the platform device structure + * + * This function does all the memory allocation and registration for the CAN + * device. 
+ * + * Return: 0 on success and failure value on error + */ +static int ctucan_platform_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + void __iomem *addr; + int ret; + unsigned int ntxbufs; + int irq; + + /* Get the virtual base address for the device */ + addr = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(addr)) { + ret = PTR_ERR(addr); + goto err; + } + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + ret = irq; + goto err; + } + + /* Number of tx bufs might be change in HW for future. If so, + * it will be passed as property via device tree + */ + ntxbufs = 4; + ret = ctucan_probe_common(dev, addr, irq, ntxbufs, 0, + 1, ctucan_platform_set_drvdata); + + if (ret < 0) + platform_set_drvdata(pdev, NULL); + +err: + return ret; +} + +/** + * ctucan_platform_remove - Unregister the device after releasing the resources + * @pdev: Handle to the platform device structure + * + * This function frees all the resources allocated to the device. + * Return: 0 always + */ +static void ctucan_platform_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct ctucan_priv *priv = netdev_priv(ndev); + + netdev_dbg(ndev, "ctucan_remove"); + + unregister_candev(ndev); + pm_runtime_disable(&pdev->dev); + netif_napi_del(&priv->napi); + free_candev(ndev); +} + +static SIMPLE_DEV_PM_OPS(ctucan_platform_pm_ops, ctucan_suspend, ctucan_resume); + +/* Match table for OF platform binding */ +static const struct of_device_id ctucan_of_match[] = { + { .compatible = "ctu,ctucanfd-2", }, + { .compatible = "ctu,ctucanfd", }, + { /* end of list */ }, +}; +MODULE_DEVICE_TABLE(of, ctucan_of_match); + +static struct platform_driver ctucanfd_driver = { + .probe = ctucan_platform_probe, + .remove = ctucan_platform_remove, + .driver = { + .name = DRV_NAME, + .pm = &ctucan_platform_pm_ops, + .of_match_table = ctucan_of_match, + }, +}; + +module_platform_driver(ctucanfd_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Martin Jerabek"); +MODULE_DESCRIPTION("CTU CAN FD for platform"); diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c deleted file mode 100644 index 3b3f88ffab53..000000000000 --- a/drivers/net/can/dev.c +++ /dev/null @@ -1,1312 +0,0 @@ -/* - * Copyright (C) 2005 Marc Kleine-Budde, Pengutronix - * Copyright (C) 2006 Andrey Volkov, Varma Electronics - * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the version 2 of the GNU General Public License - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. 
- */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/netdevice.h> -#include <linux/if_arp.h> -#include <linux/workqueue.h> -#include <linux/can.h> -#include <linux/can/dev.h> -#include <linux/can/skb.h> -#include <linux/can/netlink.h> -#include <linux/can/led.h> -#include <linux/of.h> -#include <net/rtnetlink.h> - -#define MOD_DESC "CAN device driver interface" - -MODULE_DESCRIPTION(MOD_DESC); -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>"); - -/* CAN DLC to real data length conversion helpers */ - -static const u8 dlc2len[] = {0, 1, 2, 3, 4, 5, 6, 7, - 8, 12, 16, 20, 24, 32, 48, 64}; - -/* get data length from can_dlc with sanitized can_dlc */ -u8 can_dlc2len(u8 can_dlc) -{ - return dlc2len[can_dlc & 0x0F]; -} -EXPORT_SYMBOL_GPL(can_dlc2len); - -static const u8 len2dlc[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, /* 0 - 8 */ - 9, 9, 9, 9, /* 9 - 12 */ - 10, 10, 10, 10, /* 13 - 16 */ - 11, 11, 11, 11, /* 17 - 20 */ - 12, 12, 12, 12, /* 21 - 24 */ - 13, 13, 13, 13, 13, 13, 13, 13, /* 25 - 32 */ - 14, 14, 14, 14, 14, 14, 14, 14, /* 33 - 40 */ - 14, 14, 14, 14, 14, 14, 14, 14, /* 41 - 48 */ - 15, 15, 15, 15, 15, 15, 15, 15, /* 49 - 56 */ - 15, 15, 15, 15, 15, 15, 15, 15}; /* 57 - 64 */ - -/* map the sanitized data length to an appropriate data length code */ -u8 can_len2dlc(u8 len) -{ - if (unlikely(len > 64)) - return 0xF; - - return len2dlc[len]; -} -EXPORT_SYMBOL_GPL(can_len2dlc); - -#ifdef CONFIG_CAN_CALC_BITTIMING -#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */ -#define CAN_CALC_SYNC_SEG 1 - -/* - * Bit-timing calculation derived from: - * - * Code based on LinCAN sources and H8S2638 project - * Copyright 2004-2006 Pavel Pisa - DCE FELK CVUT cz - * Copyright 2005 Stanislav Marek - * email: pisa@cmp.felk.cvut.cz - * - * Calculates proper bit-timing parameters for a specified bit-rate - * and sample-point, which can then be used to set the bit-timing - * registers of the CAN controller. You can find more information - * in the header file linux/can/netlink.h. 
- */ -static int can_update_sample_point(const struct can_bittiming_const *btc, - unsigned int sample_point_nominal, unsigned int tseg, - unsigned int *tseg1_ptr, unsigned int *tseg2_ptr, - unsigned int *sample_point_error_ptr) -{ - unsigned int sample_point_error, best_sample_point_error = UINT_MAX; - unsigned int sample_point, best_sample_point = 0; - unsigned int tseg1, tseg2; - int i; - - for (i = 0; i <= 1; i++) { - tseg2 = tseg + CAN_CALC_SYNC_SEG - (sample_point_nominal * (tseg + CAN_CALC_SYNC_SEG)) / 1000 - i; - tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max); - tseg1 = tseg - tseg2; - if (tseg1 > btc->tseg1_max) { - tseg1 = btc->tseg1_max; - tseg2 = tseg - tseg1; - } - - sample_point = 1000 * (tseg + CAN_CALC_SYNC_SEG - tseg2) / (tseg + CAN_CALC_SYNC_SEG); - sample_point_error = abs(sample_point_nominal - sample_point); - - if ((sample_point <= sample_point_nominal) && (sample_point_error < best_sample_point_error)) { - best_sample_point = sample_point; - best_sample_point_error = sample_point_error; - *tseg1_ptr = tseg1; - *tseg2_ptr = tseg2; - } - } - - if (sample_point_error_ptr) - *sample_point_error_ptr = best_sample_point_error; - - return best_sample_point; -} - -static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, - const struct can_bittiming_const *btc) -{ - struct can_priv *priv = netdev_priv(dev); - unsigned int bitrate; /* current bitrate */ - unsigned int bitrate_error; /* difference between current and nominal value */ - unsigned int best_bitrate_error = UINT_MAX; - unsigned int sample_point_error; /* difference between current and nominal value */ - unsigned int best_sample_point_error = UINT_MAX; - unsigned int sample_point_nominal; /* nominal sample point */ - unsigned int best_tseg = 0; /* current best value for tseg */ - unsigned int best_brp = 0; /* current best value for brp */ - unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0; - u64 v64; - - /* Use CiA recommended sample points */ - if (bt->sample_point) { - sample_point_nominal = bt->sample_point; - } else { - if (bt->bitrate > 800000) - sample_point_nominal = 750; - else if (bt->bitrate > 500000) - sample_point_nominal = 800; - else - sample_point_nominal = 875; - } - - /* tseg even = round down, odd = round up */ - for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1; - tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) { - tsegall = CAN_CALC_SYNC_SEG + tseg / 2; - - /* Compute all possible tseg choices (tseg=tseg1+tseg2) */ - brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2; - - /* choose brp step which is possible in system */ - brp = (brp / btc->brp_inc) * btc->brp_inc; - if ((brp < btc->brp_min) || (brp > btc->brp_max)) - continue; - - bitrate = priv->clock.freq / (brp * tsegall); - bitrate_error = abs(bt->bitrate - bitrate); - - /* tseg brp biterror */ - if (bitrate_error > best_bitrate_error) - continue; - - /* reset sample point error if we have a better bitrate */ - if (bitrate_error < best_bitrate_error) - best_sample_point_error = UINT_MAX; - - can_update_sample_point(btc, sample_point_nominal, tseg / 2, &tseg1, &tseg2, &sample_point_error); - if (sample_point_error > best_sample_point_error) - continue; - - best_sample_point_error = sample_point_error; - best_bitrate_error = bitrate_error; - best_tseg = tseg / 2; - best_brp = brp; - - if (bitrate_error == 0 && sample_point_error == 0) - break; - } - - if (best_bitrate_error) { - /* Error in one-tenth of a percent */ - v64 = (u64)best_bitrate_error * 1000; - do_div(v64, bt->bitrate); - 
bitrate_error = (u32)v64; - if (bitrate_error > CAN_CALC_MAX_ERROR) { - netdev_err(dev, - "bitrate error %d.%d%% too high\n", - bitrate_error / 10, bitrate_error % 10); - return -EDOM; - } - netdev_warn(dev, "bitrate error %d.%d%%\n", - bitrate_error / 10, bitrate_error % 10); - } - - /* real sample point */ - bt->sample_point = can_update_sample_point(btc, sample_point_nominal, best_tseg, - &tseg1, &tseg2, NULL); - - v64 = (u64)best_brp * 1000 * 1000 * 1000; - do_div(v64, priv->clock.freq); - bt->tq = (u32)v64; - bt->prop_seg = tseg1 / 2; - bt->phase_seg1 = tseg1 - bt->prop_seg; - bt->phase_seg2 = tseg2; - - /* check for sjw user settings */ - if (!bt->sjw || !btc->sjw_max) { - bt->sjw = 1; - } else { - /* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */ - if (bt->sjw > btc->sjw_max) - bt->sjw = btc->sjw_max; - /* bt->sjw must not be higher than tseg2 */ - if (tseg2 < bt->sjw) - bt->sjw = tseg2; - } - - bt->brp = best_brp; - - /* real bitrate */ - bt->bitrate = priv->clock.freq / (bt->brp * (CAN_CALC_SYNC_SEG + tseg1 + tseg2)); - - return 0; -} -#else /* !CONFIG_CAN_CALC_BITTIMING */ -static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, - const struct can_bittiming_const *btc) -{ - netdev_err(dev, "bit-timing calculation not available\n"); - return -EINVAL; -} -#endif /* CONFIG_CAN_CALC_BITTIMING */ - -/* - * Checks the validity of the specified bit-timing parameters prop_seg, - * phase_seg1, phase_seg2 and sjw and tries to determine the bitrate - * prescaler value brp. You can find more information in the header - * file linux/can/netlink.h. - */ -static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt, - const struct can_bittiming_const *btc) -{ - struct can_priv *priv = netdev_priv(dev); - int tseg1, alltseg; - u64 brp64; - - tseg1 = bt->prop_seg + bt->phase_seg1; - if (!bt->sjw) - bt->sjw = 1; - if (bt->sjw > btc->sjw_max || - tseg1 < btc->tseg1_min || tseg1 > btc->tseg1_max || - bt->phase_seg2 < btc->tseg2_min || bt->phase_seg2 > btc->tseg2_max) - return -ERANGE; - - brp64 = (u64)priv->clock.freq * (u64)bt->tq; - if (btc->brp_inc > 1) - do_div(brp64, btc->brp_inc); - brp64 += 500000000UL - 1; - do_div(brp64, 1000000000UL); /* the practicable BRP */ - if (btc->brp_inc > 1) - brp64 *= btc->brp_inc; - bt->brp = (u32)brp64; - - if (bt->brp < btc->brp_min || bt->brp > btc->brp_max) - return -EINVAL; - - alltseg = bt->prop_seg + bt->phase_seg1 + bt->phase_seg2 + 1; - bt->bitrate = priv->clock.freq / (bt->brp * alltseg); - bt->sample_point = ((tseg1 + 1) * 1000) / alltseg; - - return 0; -} - -/* Checks the validity of predefined bitrate settings */ -static int can_validate_bitrate(struct net_device *dev, struct can_bittiming *bt, - const u32 *bitrate_const, - const unsigned int bitrate_const_cnt) -{ - struct can_priv *priv = netdev_priv(dev); - unsigned int i; - - for (i = 0; i < bitrate_const_cnt; i++) { - if (bt->bitrate == bitrate_const[i]) - break; - } - - if (i >= priv->bitrate_const_cnt) - return -EINVAL; - - return 0; -} - -static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt, - const struct can_bittiming_const *btc, - const u32 *bitrate_const, - const unsigned int bitrate_const_cnt) -{ - int err; - - /* - * Depending on the given can_bittiming parameter structure the CAN - * timing parameters are calculated based on the provided bitrate OR - * alternatively the CAN timing parameters (tq, prop_seg, etc.) are - * provided directly which are then checked and fixed up. 
- */ - if (!bt->tq && bt->bitrate && btc) - err = can_calc_bittiming(dev, bt, btc); - else if (bt->tq && !bt->bitrate && btc) - err = can_fixup_bittiming(dev, bt, btc); - else if (!bt->tq && bt->bitrate && bitrate_const) - err = can_validate_bitrate(dev, bt, bitrate_const, - bitrate_const_cnt); - else - err = -EINVAL; - - return err; -} - -static void can_update_state_error_stats(struct net_device *dev, - enum can_state new_state) -{ - struct can_priv *priv = netdev_priv(dev); - - if (new_state <= priv->state) - return; - - switch (new_state) { - case CAN_STATE_ERROR_WARNING: - priv->can_stats.error_warning++; - break; - case CAN_STATE_ERROR_PASSIVE: - priv->can_stats.error_passive++; - break; - case CAN_STATE_BUS_OFF: - priv->can_stats.bus_off++; - break; - default: - break; - } -} - -static int can_tx_state_to_frame(struct net_device *dev, enum can_state state) -{ - switch (state) { - case CAN_STATE_ERROR_ACTIVE: - return CAN_ERR_CRTL_ACTIVE; - case CAN_STATE_ERROR_WARNING: - return CAN_ERR_CRTL_TX_WARNING; - case CAN_STATE_ERROR_PASSIVE: - return CAN_ERR_CRTL_TX_PASSIVE; - default: - return 0; - } -} - -static int can_rx_state_to_frame(struct net_device *dev, enum can_state state) -{ - switch (state) { - case CAN_STATE_ERROR_ACTIVE: - return CAN_ERR_CRTL_ACTIVE; - case CAN_STATE_ERROR_WARNING: - return CAN_ERR_CRTL_RX_WARNING; - case CAN_STATE_ERROR_PASSIVE: - return CAN_ERR_CRTL_RX_PASSIVE; - default: - return 0; - } -} - -void can_change_state(struct net_device *dev, struct can_frame *cf, - enum can_state tx_state, enum can_state rx_state) -{ - struct can_priv *priv = netdev_priv(dev); - enum can_state new_state = max(tx_state, rx_state); - - if (unlikely(new_state == priv->state)) { - netdev_warn(dev, "%s: oops, state did not change", __func__); - return; - } - - netdev_dbg(dev, "New error state: %d\n", new_state); - - can_update_state_error_stats(dev, new_state); - priv->state = new_state; - - if (!cf) - return; - - if (unlikely(new_state == CAN_STATE_BUS_OFF)) { - cf->can_id |= CAN_ERR_BUSOFF; - return; - } - - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] |= tx_state >= rx_state ? - can_tx_state_to_frame(dev, tx_state) : 0; - cf->data[1] |= tx_state <= rx_state ? - can_rx_state_to_frame(dev, rx_state) : 0; -} -EXPORT_SYMBOL_GPL(can_change_state); - -/* - * Local echo of CAN messages - * - * CAN network devices *should* support a local echo functionality - * (see Documentation/networking/can.rst). To test the handling of CAN - * interfaces that do not support the local echo both driver types are - * implemented. In the case that the driver does not support the echo - * the IFF_ECHO remains clear in dev->flags. This causes the PF_CAN core - * to perform the echo as a fallback solution. - */ -static void can_flush_echo_skb(struct net_device *dev) -{ - struct can_priv *priv = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; - int i; - - for (i = 0; i < priv->echo_skb_max; i++) { - if (priv->echo_skb[i]) { - kfree_skb(priv->echo_skb[i]); - priv->echo_skb[i] = NULL; - stats->tx_dropped++; - stats->tx_aborted_errors++; - } - } -} - -/* - * Put the skb on the stack to be looped backed locally lateron - * - * The function is typically called in the start_xmit function - * of the device driver. The driver must protect access to - * priv->echo_skb, if necessary. 
- */ -void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, - unsigned int idx) -{ - struct can_priv *priv = netdev_priv(dev); - - BUG_ON(idx >= priv->echo_skb_max); - - /* check flag whether this packet has to be looped back */ - if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK || - (skb->protocol != htons(ETH_P_CAN) && - skb->protocol != htons(ETH_P_CANFD))) { - kfree_skb(skb); - return; - } - - if (!priv->echo_skb[idx]) { - - skb = can_create_echo_skb(skb); - if (!skb) - return; - - /* make settings for echo to reduce code in irq context */ - skb->pkt_type = PACKET_BROADCAST; - skb->ip_summed = CHECKSUM_UNNECESSARY; - skb->dev = dev; - - /* save this skb for tx interrupt echo handling */ - priv->echo_skb[idx] = skb; - } else { - /* locking problem with netif_stop_queue() ?? */ - netdev_err(dev, "%s: BUG! echo_skb is occupied!\n", __func__); - kfree_skb(skb); - } -} -EXPORT_SYMBOL_GPL(can_put_echo_skb); - -struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr) -{ - struct can_priv *priv = netdev_priv(dev); - struct sk_buff *skb = priv->echo_skb[idx]; - struct canfd_frame *cf; - - if (idx >= priv->echo_skb_max) { - netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n", - __func__, idx, priv->echo_skb_max); - return NULL; - } - - if (!skb) { - netdev_err(dev, "%s: BUG! Trying to echo non existing skb: can_priv::echo_skb[%u]\n", - __func__, idx); - return NULL; - } - - /* Using "struct canfd_frame::len" for the frame - * length is supported on both CAN and CANFD frames. - */ - cf = (struct canfd_frame *)skb->data; - *len_ptr = cf->len; - priv->echo_skb[idx] = NULL; - - return skb; -} - -/* - * Get the skb from the stack and loop it back locally - * - * The function is typically called when the TX done interrupt - * is handled in the device driver. The driver must protect - * access to priv->echo_skb, if necessary. - */ -unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx) -{ - struct sk_buff *skb; - u8 len; - - skb = __can_get_echo_skb(dev, idx, &len); - if (!skb) - return 0; - - netif_rx(skb); - - return len; -} -EXPORT_SYMBOL_GPL(can_get_echo_skb); - -/* - * Remove the skb from the stack and free it. - * - * The function is typically called when TX failed. - */ -void can_free_echo_skb(struct net_device *dev, unsigned int idx) -{ - struct can_priv *priv = netdev_priv(dev); - - BUG_ON(idx >= priv->echo_skb_max); - - if (priv->echo_skb[idx]) { - dev_kfree_skb_any(priv->echo_skb[idx]); - priv->echo_skb[idx] = NULL; - } -} -EXPORT_SYMBOL_GPL(can_free_echo_skb); - -/* - * CAN device restart for bus-off recovery - */ -static void can_restart(struct net_device *dev) -{ - struct can_priv *priv = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; - struct sk_buff *skb; - struct can_frame *cf; - int err; - - BUG_ON(netif_carrier_ok(dev)); - - /* - * No synchronization needed because the device is bus-off and - * no messages can come in or go out. 
- */ - can_flush_echo_skb(dev); - - /* send restart message upstream */ - skb = alloc_can_err_skb(dev, &cf); - if (skb == NULL) { - err = -ENOMEM; - goto restart; - } - cf->can_id |= CAN_ERR_RESTARTED; - - netif_rx(skb); - - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; - -restart: - netdev_dbg(dev, "restarted\n"); - priv->can_stats.restarts++; - - /* Now restart the device */ - err = priv->do_set_mode(dev, CAN_MODE_START); - - netif_carrier_on(dev); - if (err) - netdev_err(dev, "Error %d during restart", err); -} - -static void can_restart_work(struct work_struct *work) -{ - struct delayed_work *dwork = to_delayed_work(work); - struct can_priv *priv = container_of(dwork, struct can_priv, restart_work); - - can_restart(priv->dev); -} - -int can_restart_now(struct net_device *dev) -{ - struct can_priv *priv = netdev_priv(dev); - - /* - * A manual restart is only permitted if automatic restart is - * disabled and the device is in the bus-off state - */ - if (priv->restart_ms) - return -EINVAL; - if (priv->state != CAN_STATE_BUS_OFF) - return -EBUSY; - - cancel_delayed_work_sync(&priv->restart_work); - can_restart(dev); - - return 0; -} - -/* - * CAN bus-off - * - * This functions should be called when the device goes bus-off to - * tell the netif layer that no more packets can be sent or received. - * If enabled, a timer is started to trigger bus-off recovery. - */ -void can_bus_off(struct net_device *dev) -{ - struct can_priv *priv = netdev_priv(dev); - - netdev_info(dev, "bus-off\n"); - - netif_carrier_off(dev); - - if (priv->restart_ms) - schedule_delayed_work(&priv->restart_work, - msecs_to_jiffies(priv->restart_ms)); -} -EXPORT_SYMBOL_GPL(can_bus_off); - -static void can_setup(struct net_device *dev) -{ - dev->type = ARPHRD_CAN; - dev->mtu = CAN_MTU; - dev->hard_header_len = 0; - dev->addr_len = 0; - dev->tx_queue_len = 10; - - /* New-style flags. 
*/ - dev->flags = IFF_NOARP; - dev->features = NETIF_F_HW_CSUM; -} - -struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf) -{ - struct sk_buff *skb; - - skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) + - sizeof(struct can_frame)); - if (unlikely(!skb)) - return NULL; - - skb->protocol = htons(ETH_P_CAN); - skb->pkt_type = PACKET_BROADCAST; - skb->ip_summed = CHECKSUM_UNNECESSARY; - - skb_reset_mac_header(skb); - skb_reset_network_header(skb); - skb_reset_transport_header(skb); - - can_skb_reserve(skb); - can_skb_prv(skb)->ifindex = dev->ifindex; - can_skb_prv(skb)->skbcnt = 0; - - *cf = skb_put_zero(skb, sizeof(struct can_frame)); - - return skb; -} -EXPORT_SYMBOL_GPL(alloc_can_skb); - -struct sk_buff *alloc_canfd_skb(struct net_device *dev, - struct canfd_frame **cfd) -{ - struct sk_buff *skb; - - skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) + - sizeof(struct canfd_frame)); - if (unlikely(!skb)) - return NULL; - - skb->protocol = htons(ETH_P_CANFD); - skb->pkt_type = PACKET_BROADCAST; - skb->ip_summed = CHECKSUM_UNNECESSARY; - - skb_reset_mac_header(skb); - skb_reset_network_header(skb); - skb_reset_transport_header(skb); - - can_skb_reserve(skb); - can_skb_prv(skb)->ifindex = dev->ifindex; - can_skb_prv(skb)->skbcnt = 0; - - *cfd = skb_put_zero(skb, sizeof(struct canfd_frame)); - - return skb; -} -EXPORT_SYMBOL_GPL(alloc_canfd_skb); - -struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf) -{ - struct sk_buff *skb; - - skb = alloc_can_skb(dev, cf); - if (unlikely(!skb)) - return NULL; - - (*cf)->can_id = CAN_ERR_FLAG; - (*cf)->can_dlc = CAN_ERR_DLC; - - return skb; -} -EXPORT_SYMBOL_GPL(alloc_can_err_skb); - -/* - * Allocate and setup space for the CAN network device - */ -struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max, - unsigned int txqs, unsigned int rxqs) -{ - struct net_device *dev; - struct can_priv *priv; - int size; - - if (echo_skb_max) - size = ALIGN(sizeof_priv, sizeof(struct sk_buff *)) + - echo_skb_max * sizeof(struct sk_buff *); - else - size = sizeof_priv; - - dev = alloc_netdev_mqs(size, "can%d", NET_NAME_UNKNOWN, can_setup, - txqs, rxqs); - if (!dev) - return NULL; - - priv = netdev_priv(dev); - priv->dev = dev; - - if (echo_skb_max) { - priv->echo_skb_max = echo_skb_max; - priv->echo_skb = (void *)priv + - ALIGN(sizeof_priv, sizeof(struct sk_buff *)); - } - - priv->state = CAN_STATE_STOPPED; - - INIT_DELAYED_WORK(&priv->restart_work, can_restart_work); - - return dev; -} -EXPORT_SYMBOL_GPL(alloc_candev_mqs); - -/* - * Free space of the CAN network device - */ -void free_candev(struct net_device *dev) -{ - free_netdev(dev); -} -EXPORT_SYMBOL_GPL(free_candev); - -/* - * changing MTU and control mode for CAN/CANFD devices - */ -int can_change_mtu(struct net_device *dev, int new_mtu) -{ - struct can_priv *priv = netdev_priv(dev); - - /* Do not allow changing the MTU while running */ - if (dev->flags & IFF_UP) - return -EBUSY; - - /* allow change of MTU according to the CANFD ability of the device */ - switch (new_mtu) { - case CAN_MTU: - /* 'CANFD-only' controllers can not switch to CAN_MTU */ - if (priv->ctrlmode_static & CAN_CTRLMODE_FD) - return -EINVAL; - - priv->ctrlmode &= ~CAN_CTRLMODE_FD; - break; - - case CANFD_MTU: - /* check for potential CANFD ability */ - if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) && - !(priv->ctrlmode_static & CAN_CTRLMODE_FD)) - return -EINVAL; - - priv->ctrlmode |= CAN_CTRLMODE_FD; - break; - - default: - return -EINVAL; - } - - 
dev->mtu = new_mtu; - return 0; -} -EXPORT_SYMBOL_GPL(can_change_mtu); - -/* - * Common open function when the device gets opened. - * - * This function should be called in the open function of the device - * driver. - */ -int open_candev(struct net_device *dev) -{ - struct can_priv *priv = netdev_priv(dev); - - if (!priv->bittiming.bitrate) { - netdev_err(dev, "bit-timing not yet defined\n"); - return -EINVAL; - } - - /* For CAN FD the data bitrate has to be >= the arbitration bitrate */ - if ((priv->ctrlmode & CAN_CTRLMODE_FD) && - (!priv->data_bittiming.bitrate || - (priv->data_bittiming.bitrate < priv->bittiming.bitrate))) { - netdev_err(dev, "incorrect/missing data bit-timing\n"); - return -EINVAL; - } - - /* Switch carrier on if device was stopped while in bus-off state */ - if (!netif_carrier_ok(dev)) - netif_carrier_on(dev); - - return 0; -} -EXPORT_SYMBOL_GPL(open_candev); - -#ifdef CONFIG_OF -/* Common function that can be used to understand the limitation of - * a transceiver when it provides no means to determine these limitations - * at runtime. - */ -void of_can_transceiver(struct net_device *dev) -{ - struct device_node *dn; - struct can_priv *priv = netdev_priv(dev); - struct device_node *np = dev->dev.parent->of_node; - int ret; - - dn = of_get_child_by_name(np, "can-transceiver"); - if (!dn) - return; - - ret = of_property_read_u32(dn, "max-bitrate", &priv->bitrate_max); - if ((ret && ret != -EINVAL) || (!ret && !priv->bitrate_max)) - netdev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit.\n"); -} -EXPORT_SYMBOL_GPL(of_can_transceiver); -#endif - -/* - * Common close function for cleanup before the device gets closed. - * - * This function should be called in the close function of the device - * driver. 
- */ -void close_candev(struct net_device *dev) -{ - struct can_priv *priv = netdev_priv(dev); - - cancel_delayed_work_sync(&priv->restart_work); - can_flush_echo_skb(dev); -} -EXPORT_SYMBOL_GPL(close_candev); - -/* - * CAN netlink interface - */ -static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = { - [IFLA_CAN_STATE] = { .type = NLA_U32 }, - [IFLA_CAN_CTRLMODE] = { .len = sizeof(struct can_ctrlmode) }, - [IFLA_CAN_RESTART_MS] = { .type = NLA_U32 }, - [IFLA_CAN_RESTART] = { .type = NLA_U32 }, - [IFLA_CAN_BITTIMING] = { .len = sizeof(struct can_bittiming) }, - [IFLA_CAN_BITTIMING_CONST] - = { .len = sizeof(struct can_bittiming_const) }, - [IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) }, - [IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) }, - [IFLA_CAN_DATA_BITTIMING] - = { .len = sizeof(struct can_bittiming) }, - [IFLA_CAN_DATA_BITTIMING_CONST] - = { .len = sizeof(struct can_bittiming_const) }, -}; - -static int can_validate(struct nlattr *tb[], struct nlattr *data[], - struct netlink_ext_ack *extack) -{ - bool is_can_fd = false; - - /* Make sure that valid CAN FD configurations always consist of - * - nominal/arbitration bittiming - * - data bittiming - * - control mode with CAN_CTRLMODE_FD set - */ - - if (!data) - return 0; - - if (data[IFLA_CAN_CTRLMODE]) { - struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]); - - is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD; - } - - if (is_can_fd) { - if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING]) - return -EOPNOTSUPP; - } - - if (data[IFLA_CAN_DATA_BITTIMING]) { - if (!is_can_fd || !data[IFLA_CAN_BITTIMING]) - return -EOPNOTSUPP; - } - - return 0; -} - -static int can_changelink(struct net_device *dev, struct nlattr *tb[], - struct nlattr *data[], - struct netlink_ext_ack *extack) -{ - struct can_priv *priv = netdev_priv(dev); - int err; - - /* We need synchronization with dev->stop() */ - ASSERT_RTNL(); - - if (data[IFLA_CAN_BITTIMING]) { - struct can_bittiming bt; - - /* Do not allow changing bittiming while running */ - if (dev->flags & IFF_UP) - return -EBUSY; - - /* Calculate bittiming parameters based on - * bittiming_const if set, otherwise pass bitrate - * directly via do_set_bitrate(). Bail out if neither - * is given. 
- */ - if (!priv->bittiming_const && !priv->do_set_bittiming) - return -EOPNOTSUPP; - - memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt)); - err = can_get_bittiming(dev, &bt, - priv->bittiming_const, - priv->bitrate_const, - priv->bitrate_const_cnt); - if (err) - return err; - - if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) { - netdev_err(dev, "arbitration bitrate surpasses transceiver capabilities of %d bps\n", - priv->bitrate_max); - return -EINVAL; - } - - memcpy(&priv->bittiming, &bt, sizeof(bt)); - - if (priv->do_set_bittiming) { - /* Finally, set the bit-timing registers */ - err = priv->do_set_bittiming(dev); - if (err) - return err; - } - } - - if (data[IFLA_CAN_CTRLMODE]) { - struct can_ctrlmode *cm; - u32 ctrlstatic; - u32 maskedflags; - - /* Do not allow changing controller mode while running */ - if (dev->flags & IFF_UP) - return -EBUSY; - cm = nla_data(data[IFLA_CAN_CTRLMODE]); - ctrlstatic = priv->ctrlmode_static; - maskedflags = cm->flags & cm->mask; - - /* check whether provided bits are allowed to be passed */ - if (cm->mask & ~(priv->ctrlmode_supported | ctrlstatic)) - return -EOPNOTSUPP; - - /* do not check for static fd-non-iso if 'fd' is disabled */ - if (!(maskedflags & CAN_CTRLMODE_FD)) - ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO; - - /* make sure static options are provided by configuration */ - if ((maskedflags & ctrlstatic) != ctrlstatic) - return -EOPNOTSUPP; - - /* clear bits to be modified and copy the flag values */ - priv->ctrlmode &= ~cm->mask; - priv->ctrlmode |= maskedflags; - - /* CAN_CTRLMODE_FD can only be set when driver supports FD */ - if (priv->ctrlmode & CAN_CTRLMODE_FD) - dev->mtu = CANFD_MTU; - else - dev->mtu = CAN_MTU; - } - - if (data[IFLA_CAN_RESTART_MS]) { - /* Do not allow changing restart delay while running */ - if (dev->flags & IFF_UP) - return -EBUSY; - priv->restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]); - } - - if (data[IFLA_CAN_RESTART]) { - /* Do not allow a restart while not running */ - if (!(dev->flags & IFF_UP)) - return -EINVAL; - err = can_restart_now(dev); - if (err) - return err; - } - - if (data[IFLA_CAN_DATA_BITTIMING]) { - struct can_bittiming dbt; - - /* Do not allow changing bittiming while running */ - if (dev->flags & IFF_UP) - return -EBUSY; - - /* Calculate bittiming parameters based on - * data_bittiming_const if set, otherwise pass bitrate - * directly via do_set_bitrate(). Bail out if neither - * is given. 
- */ - if (!priv->data_bittiming_const && !priv->do_set_data_bittiming) - return -EOPNOTSUPP; - - memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]), - sizeof(dbt)); - err = can_get_bittiming(dev, &dbt, - priv->data_bittiming_const, - priv->data_bitrate_const, - priv->data_bitrate_const_cnt); - if (err) - return err; - - if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) { - netdev_err(dev, "canfd data bitrate surpasses transceiver capabilities of %d bps\n", - priv->bitrate_max); - return -EINVAL; - } - - memcpy(&priv->data_bittiming, &dbt, sizeof(dbt)); - - if (priv->do_set_data_bittiming) { - /* Finally, set the bit-timing registers */ - err = priv->do_set_data_bittiming(dev); - if (err) - return err; - } - } - - if (data[IFLA_CAN_TERMINATION]) { - const u16 termval = nla_get_u16(data[IFLA_CAN_TERMINATION]); - const unsigned int num_term = priv->termination_const_cnt; - unsigned int i; - - if (!priv->do_set_termination) - return -EOPNOTSUPP; - - /* check whether given value is supported by the interface */ - for (i = 0; i < num_term; i++) { - if (termval == priv->termination_const[i]) - break; - } - if (i >= num_term) - return -EINVAL; - - /* Finally, set the termination value */ - err = priv->do_set_termination(dev, termval); - if (err) - return err; - - priv->termination = termval; - } - - return 0; -} - -static size_t can_get_size(const struct net_device *dev) -{ - struct can_priv *priv = netdev_priv(dev); - size_t size = 0; - - if (priv->bittiming.bitrate) /* IFLA_CAN_BITTIMING */ - size += nla_total_size(sizeof(struct can_bittiming)); - if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */ - size += nla_total_size(sizeof(struct can_bittiming_const)); - size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */ - size += nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */ - size += nla_total_size(sizeof(struct can_ctrlmode)); /* IFLA_CAN_CTRLMODE */ - size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */ - if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */ - size += nla_total_size(sizeof(struct can_berr_counter)); - if (priv->data_bittiming.bitrate) /* IFLA_CAN_DATA_BITTIMING */ - size += nla_total_size(sizeof(struct can_bittiming)); - if (priv->data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */ - size += nla_total_size(sizeof(struct can_bittiming_const)); - if (priv->termination_const) { - size += nla_total_size(sizeof(priv->termination)); /* IFLA_CAN_TERMINATION */ - size += nla_total_size(sizeof(*priv->termination_const) * /* IFLA_CAN_TERMINATION_CONST */ - priv->termination_const_cnt); - } - if (priv->bitrate_const) /* IFLA_CAN_BITRATE_CONST */ - size += nla_total_size(sizeof(*priv->bitrate_const) * - priv->bitrate_const_cnt); - if (priv->data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */ - size += nla_total_size(sizeof(*priv->data_bitrate_const) * - priv->data_bitrate_const_cnt); - size += sizeof(priv->bitrate_max); /* IFLA_CAN_BITRATE_MAX */ - - return size; -} - -static int can_fill_info(struct sk_buff *skb, const struct net_device *dev) -{ - struct can_priv *priv = netdev_priv(dev); - struct can_ctrlmode cm = {.flags = priv->ctrlmode}; - struct can_berr_counter bec; - enum can_state state = priv->state; - - if (priv->do_get_state) - priv->do_get_state(dev, &state); - - if ((priv->bittiming.bitrate && - nla_put(skb, IFLA_CAN_BITTIMING, - sizeof(priv->bittiming), &priv->bittiming)) || - - (priv->bittiming_const && - nla_put(skb, IFLA_CAN_BITTIMING_CONST, - sizeof(*priv->bittiming_const), priv->bittiming_const)) || - - 
nla_put(skb, IFLA_CAN_CLOCK, sizeof(priv->clock), &priv->clock) || - nla_put_u32(skb, IFLA_CAN_STATE, state) || - nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) || - nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) || - - (priv->do_get_berr_counter && - !priv->do_get_berr_counter(dev, &bec) && - nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) || - - (priv->data_bittiming.bitrate && - nla_put(skb, IFLA_CAN_DATA_BITTIMING, - sizeof(priv->data_bittiming), &priv->data_bittiming)) || - - (priv->data_bittiming_const && - nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST, - sizeof(*priv->data_bittiming_const), - priv->data_bittiming_const)) || - - (priv->termination_const && - (nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) || - nla_put(skb, IFLA_CAN_TERMINATION_CONST, - sizeof(*priv->termination_const) * - priv->termination_const_cnt, - priv->termination_const))) || - - (priv->bitrate_const && - nla_put(skb, IFLA_CAN_BITRATE_CONST, - sizeof(*priv->bitrate_const) * - priv->bitrate_const_cnt, - priv->bitrate_const)) || - - (priv->data_bitrate_const && - nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST, - sizeof(*priv->data_bitrate_const) * - priv->data_bitrate_const_cnt, - priv->data_bitrate_const)) || - - (nla_put(skb, IFLA_CAN_BITRATE_MAX, - sizeof(priv->bitrate_max), - &priv->bitrate_max)) - ) - - return -EMSGSIZE; - - return 0; -} - -static size_t can_get_xstats_size(const struct net_device *dev) -{ - return sizeof(struct can_device_stats); -} - -static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev) -{ - struct can_priv *priv = netdev_priv(dev); - - if (nla_put(skb, IFLA_INFO_XSTATS, - sizeof(priv->can_stats), &priv->can_stats)) - goto nla_put_failure; - return 0; - -nla_put_failure: - return -EMSGSIZE; -} - -static int can_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], - struct netlink_ext_ack *extack) -{ - return -EOPNOTSUPP; -} - -static void can_dellink(struct net_device *dev, struct list_head *head) -{ - return; -} - -static struct rtnl_link_ops can_link_ops __read_mostly = { - .kind = "can", - .maxtype = IFLA_CAN_MAX, - .policy = can_policy, - .setup = can_setup, - .validate = can_validate, - .newlink = can_newlink, - .changelink = can_changelink, - .dellink = can_dellink, - .get_size = can_get_size, - .fill_info = can_fill_info, - .get_xstats_size = can_get_xstats_size, - .fill_xstats = can_fill_xstats, -}; - -/* - * Register the CAN network device - */ -int register_candev(struct net_device *dev) -{ - struct can_priv *priv = netdev_priv(dev); - - /* Ensure termination_const, termination_const_cnt and - * do_set_termination consistency. All must be either set or - * unset. - */ - if ((!priv->termination_const != !priv->termination_const_cnt) || - (!priv->termination_const != !priv->do_set_termination)) - return -EINVAL; - - if (!priv->bitrate_const != !priv->bitrate_const_cnt) - return -EINVAL; - - if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt) - return -EINVAL; - - dev->rtnl_link_ops = &can_link_ops; - return register_netdev(dev); -} -EXPORT_SYMBOL_GPL(register_candev); - -/* - * Unregister the CAN network device - */ -void unregister_candev(struct net_device *dev) -{ - unregister_netdev(dev); -} -EXPORT_SYMBOL_GPL(unregister_candev); - -/* - * Test if a network device is a candev based device - * and return the can_priv* if so. 
- */ -struct can_priv *safe_candev_priv(struct net_device *dev) -{ - if ((dev->type != ARPHRD_CAN) || (dev->rtnl_link_ops != &can_link_ops)) - return NULL; - - return netdev_priv(dev); -} -EXPORT_SYMBOL_GPL(safe_candev_priv); - -static __init int can_dev_init(void) -{ - int err; - - can_led_notifier_init(); - - err = rtnl_link_register(&can_link_ops); - if (!err) - printk(KERN_INFO MOD_DESC "\n"); - - return err; -} -module_init(can_dev_init); - -static __exit void can_dev_exit(void) -{ - rtnl_link_unregister(&can_link_ops); - - can_led_notifier_exit(); -} -module_exit(can_dev_exit); - -MODULE_ALIAS_RTNL_LINK("can"); diff --git a/drivers/net/can/dev/Makefile b/drivers/net/can/dev/Makefile new file mode 100644 index 000000000000..633687d6b6c0 --- /dev/null +++ b/drivers/net/can/dev/Makefile @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_CAN_DEV) += can-dev.o + +can-dev-y += skb.o + +can-dev-$(CONFIG_CAN_CALC_BITTIMING) += calc_bittiming.o +can-dev-$(CONFIG_CAN_NETLINK) += bittiming.o +can-dev-$(CONFIG_CAN_NETLINK) += dev.o +can-dev-$(CONFIG_CAN_NETLINK) += length.o +can-dev-$(CONFIG_CAN_NETLINK) += netlink.o +can-dev-$(CONFIG_CAN_RX_OFFLOAD) += rx-offload.o diff --git a/drivers/net/can/dev/bittiming.c b/drivers/net/can/dev/bittiming.c new file mode 100644 index 000000000000..8f82418230ce --- /dev/null +++ b/drivers/net/can/dev/bittiming.c @@ -0,0 +1,216 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix + * Copyright (C) 2006 Andrey Volkov, Varma Electronics + * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com> + * Copyright (c) 2025 Vincent Mailhol <mailhol@kernel.org> + */ + +#include <linux/can/dev.h> + +void can_sjw_set_default(struct can_bittiming *bt) +{ + if (bt->sjw) + return; + + /* If user space provides no sjw, use sane default of phase_seg2 / 2 */ + bt->sjw = max(1U, min(bt->phase_seg1, bt->phase_seg2 / 2)); +} + +int can_sjw_check(const struct net_device *dev, const struct can_bittiming *bt, + const struct can_bittiming_const *btc, struct netlink_ext_ack *extack) +{ + if (bt->sjw > btc->sjw_max) { + NL_SET_ERR_MSG_FMT(extack, "sjw: %u greater than max sjw: %u", + bt->sjw, btc->sjw_max); + return -EINVAL; + } + + if (bt->sjw > bt->phase_seg1) { + NL_SET_ERR_MSG_FMT(extack, + "sjw: %u greater than phase-seg1: %u", + bt->sjw, bt->phase_seg1); + return -EINVAL; + } + + if (bt->sjw > bt->phase_seg2) { + NL_SET_ERR_MSG_FMT(extack, + "sjw: %u greater than phase-seg2: %u", + bt->sjw, bt->phase_seg2); + return -EINVAL; + } + + return 0; +} + +/* Checks the validity of the specified bit-timing parameters prop_seg, + * phase_seg1, phase_seg2 and sjw and tries to determine the bitrate + * prescaler value brp. You can find more information in the header + * file linux/can/netlink.h. 
+ */ +static int can_fixup_bittiming(const struct net_device *dev, struct can_bittiming *bt, + const struct can_bittiming_const *btc, + struct netlink_ext_ack *extack) +{ + const unsigned int tseg1 = bt->prop_seg + bt->phase_seg1; + const struct can_priv *priv = netdev_priv(dev); + u64 brp64; + int err; + + if (tseg1 < btc->tseg1_min) { + NL_SET_ERR_MSG_FMT(extack, "prop-seg + phase-seg1: %u less than tseg1-min: %u", + tseg1, btc->tseg1_min); + return -EINVAL; + } + if (tseg1 > btc->tseg1_max) { + NL_SET_ERR_MSG_FMT(extack, "prop-seg + phase-seg1: %u greater than tseg1-max: %u", + tseg1, btc->tseg1_max); + return -EINVAL; + } + if (bt->phase_seg2 < btc->tseg2_min) { + NL_SET_ERR_MSG_FMT(extack, "phase-seg2: %u less than tseg2-min: %u", + bt->phase_seg2, btc->tseg2_min); + return -EINVAL; + } + if (bt->phase_seg2 > btc->tseg2_max) { + NL_SET_ERR_MSG_FMT(extack, "phase-seg2: %u greater than tseg2-max: %u", + bt->phase_seg2, btc->tseg2_max); + return -EINVAL; + } + + can_sjw_set_default(bt); + + err = can_sjw_check(dev, bt, btc, extack); + if (err) + return err; + + brp64 = (u64)priv->clock.freq * (u64)bt->tq; + if (btc->brp_inc > 1) + do_div(brp64, btc->brp_inc); + brp64 += 500000000UL - 1; + do_div(brp64, 1000000000UL); /* the practicable BRP */ + if (btc->brp_inc > 1) + brp64 *= btc->brp_inc; + bt->brp = (u32)brp64; + + if (bt->brp < btc->brp_min) { + NL_SET_ERR_MSG_FMT(extack, "resulting brp: %u less than brp-min: %u", + bt->brp, btc->brp_min); + return -EINVAL; + } + if (bt->brp > btc->brp_max) { + NL_SET_ERR_MSG_FMT(extack, "resulting brp: %u greater than brp-max: %u", + bt->brp, btc->brp_max); + return -EINVAL; + } + + bt->bitrate = priv->clock.freq / (bt->brp * can_bit_time(bt)); + bt->sample_point = ((CAN_SYNC_SEG + tseg1) * 1000) / can_bit_time(bt); + bt->tq = DIV_U64_ROUND_CLOSEST(mul_u32_u32(bt->brp, NSEC_PER_SEC), + priv->clock.freq); + + return 0; +} + +/* Checks the validity of predefined bitrate settings */ +static int +can_validate_bitrate(const struct net_device *dev, const struct can_bittiming *bt, + const u32 *bitrate_const, + const unsigned int bitrate_const_cnt, + struct netlink_ext_ack *extack) +{ + unsigned int i; + + for (i = 0; i < bitrate_const_cnt; i++) { + if (bt->bitrate == bitrate_const[i]) + return 0; + } + + NL_SET_ERR_MSG_FMT(extack, "bitrate %u bps not supported", + bt->brp); + + return -EINVAL; +} + +int can_get_bittiming(const struct net_device *dev, struct can_bittiming *bt, + const struct can_bittiming_const *btc, + const u32 *bitrate_const, + const unsigned int bitrate_const_cnt, + struct netlink_ext_ack *extack) +{ + /* Depending on the given can_bittiming parameter structure the CAN + * timing parameters are calculated based on the provided bitrate OR + * alternatively the CAN timing parameters (tq, prop_seg, etc.) are + * provided directly which are then checked and fixed up. 
+ */ + if (!bt->tq && bt->bitrate && btc) + return can_calc_bittiming(dev, bt, btc, extack); + if (bt->tq && !bt->bitrate && btc) + return can_fixup_bittiming(dev, bt, btc, extack); + if (!bt->tq && bt->bitrate && bitrate_const) + return can_validate_bitrate(dev, bt, bitrate_const, + bitrate_const_cnt, extack); + + return -EINVAL; +} + +int can_validate_pwm_bittiming(const struct net_device *dev, + const struct can_pwm *pwm, + struct netlink_ext_ack *extack) +{ + const struct can_priv *priv = netdev_priv(dev); + u32 xl_bit_time_tqmin = can_bit_time_tqmin(&priv->xl.data_bittiming); + u32 nom_bit_time_tqmin = can_bit_time_tqmin(&priv->bittiming); + u32 pwms_ns = can_tqmin_to_ns(pwm->pwms, priv->clock.freq); + u32 pwml_ns = can_tqmin_to_ns(pwm->pwml, priv->clock.freq); + + if (pwms_ns + pwml_ns > CAN_PWM_NS_MAX) { + NL_SET_ERR_MSG_FMT(extack, + "The PWM symbol duration: %u ns may not exceed %u ns", + pwms_ns + pwml_ns, CAN_PWM_NS_MAX); + return -EINVAL; + } + + if (pwms_ns < CAN_PWM_DECODE_NS) { + NL_SET_ERR_MSG_FMT(extack, + "PWMS: %u ns shall be at least %u ns", + pwms_ns, CAN_PWM_DECODE_NS); + return -EINVAL; + } + + if (pwm->pwms >= pwm->pwml) { + NL_SET_ERR_MSG_FMT(extack, + "PWMS: %u tqmin shall be smaller than PWML: %u tqmin", + pwm->pwms, pwm->pwml); + return -EINVAL; + } + + if (pwml_ns - pwms_ns < 2 * CAN_PWM_DECODE_NS) { + NL_SET_ERR_MSG_FMT(extack, + "At least %u ns shall separate PWMS: %u ns from PMWL: %u ns", + 2 * CAN_PWM_DECODE_NS, pwms_ns, pwml_ns); + return -EINVAL; + } + + if (xl_bit_time_tqmin % (pwm->pwms + pwm->pwml) != 0) { + NL_SET_ERR_MSG_FMT(extack, + "PWM duration: %u tqmin does not divide XL's bit time: %u tqmin", + pwm->pwms + pwm->pwml, xl_bit_time_tqmin); + return -EINVAL; + } + + if (pwm->pwmo >= pwm->pwms + pwm->pwml) { + NL_SET_ERR_MSG_FMT(extack, + "PWMO: %u tqmin can not be greater than PWMS + PWML: %u tqmin", + pwm->pwmo, pwm->pwms + pwm->pwml); + return -EINVAL; + } + + if (nom_bit_time_tqmin % (pwm->pwms + pwm->pwml) != pwm->pwmo) { + NL_SET_ERR_MSG_FMT(extack, + "Can not assemble nominal bit time: %u tqmin out of PWMS + PMWL and PWMO", + nom_bit_time_tqmin); + return -EINVAL; + } + + return 0; +} diff --git a/drivers/net/can/dev/calc_bittiming.c b/drivers/net/can/dev/calc_bittiming.c new file mode 100644 index 000000000000..cc4022241553 --- /dev/null +++ b/drivers/net/can/dev/calc_bittiming.c @@ -0,0 +1,262 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix + * Copyright (C) 2006 Andrey Volkov, Varma Electronics + * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com> + * Copyright (C) 2021-2025 Vincent Mailhol <mailhol@kernel.org> + */ + +#include <linux/units.h> +#include <linux/can/dev.h> + +#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */ + +/* CiA recommended sample points for Non Return to Zero encoding. */ +static int can_calc_sample_point_nrz(const struct can_bittiming *bt) +{ + if (bt->bitrate > 800 * KILO /* BPS */) + return 750; + + if (bt->bitrate > 500 * KILO /* BPS */) + return 800; + + return 875; +} + +/* Sample points for Pulse-Width Modulation encoding. 
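+ * These are used for the CAN XL data phase when transceiver mode
+ * switching (TMS) is enabled, see can_calc_bittiming() below.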
*/ +static int can_calc_sample_point_pwm(const struct can_bittiming *bt) +{ + if (bt->bitrate > 15 * MEGA /* BPS */) + return 625; + + if (bt->bitrate > 9 * MEGA /* BPS */) + return 600; + + if (bt->bitrate > 4 * MEGA /* BPS */) + return 560; + + return 520; +} + +/* Bit-timing calculation derived from: + * + * Code based on LinCAN sources and H8S2638 project + * Copyright 2004-2006 Pavel Pisa - DCE FELK CVUT cz + * Copyright 2005 Stanislav Marek + * email: pisa@cmp.felk.cvut.cz + * + * Calculates proper bit-timing parameters for a specified bit-rate + * and sample-point, which can then be used to set the bit-timing + * registers of the CAN controller. You can find more information + * in the header file linux/can/netlink.h. + */ +static int +can_update_sample_point(const struct can_bittiming_const *btc, + const unsigned int sample_point_reference, const unsigned int tseg, + unsigned int *tseg1_ptr, unsigned int *tseg2_ptr, + unsigned int *sample_point_error_ptr) +{ + unsigned int sample_point_error, best_sample_point_error = UINT_MAX; + unsigned int sample_point, best_sample_point = 0; + unsigned int tseg1, tseg2; + int i; + + for (i = 0; i <= 1; i++) { + tseg2 = tseg + CAN_SYNC_SEG - + (sample_point_reference * (tseg + CAN_SYNC_SEG)) / + 1000 - i; + tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max); + tseg1 = tseg - tseg2; + if (tseg1 > btc->tseg1_max) { + tseg1 = btc->tseg1_max; + tseg2 = tseg - tseg1; + } + + sample_point = 1000 * (tseg + CAN_SYNC_SEG - tseg2) / + (tseg + CAN_SYNC_SEG); + sample_point_error = abs(sample_point_reference - sample_point); + + if (sample_point <= sample_point_reference && + sample_point_error < best_sample_point_error) { + best_sample_point = sample_point; + best_sample_point_error = sample_point_error; + *tseg1_ptr = tseg1; + *tseg2_ptr = tseg2; + } + } + + if (sample_point_error_ptr) + *sample_point_error_ptr = best_sample_point_error; + + return best_sample_point; +} + +int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt, + const struct can_bittiming_const *btc, struct netlink_ext_ack *extack) +{ + struct can_priv *priv = netdev_priv(dev); + unsigned int bitrate; /* current bitrate */ + unsigned int bitrate_error; /* diff between calculated and reference value */ + unsigned int best_bitrate_error = UINT_MAX; + unsigned int sample_point_error; /* diff between calculated and reference value */ + unsigned int best_sample_point_error = UINT_MAX; + unsigned int sample_point_reference; /* reference sample point */ + unsigned int best_tseg = 0; /* current best value for tseg */ + unsigned int best_brp = 0; /* current best value for brp */ + unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0; + u64 v64; + int err; + + if (bt->sample_point) + sample_point_reference = bt->sample_point; + else if (btc == priv->xl.data_bittiming_const && + (priv->ctrlmode & CAN_CTRLMODE_XL_TMS)) + sample_point_reference = can_calc_sample_point_pwm(bt); + else + sample_point_reference = can_calc_sample_point_nrz(bt); + + /* tseg even = round down, odd = round up */ + for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1; + tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) { + tsegall = CAN_SYNC_SEG + tseg / 2; + + /* Compute all possible tseg choices (tseg=tseg1+tseg2) */ + brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2; + + /* choose brp step which is possible in system */ + brp = (brp / btc->brp_inc) * btc->brp_inc; + if (brp < btc->brp_min || brp > btc->brp_max) + continue; + + bitrate = priv->clock.freq / (brp * tsegall); + 
bitrate_error = abs(bt->bitrate - bitrate); + + /* tseg brp biterror */ + if (bitrate_error > best_bitrate_error) + continue; + + /* reset sample point error if we have a better bitrate */ + if (bitrate_error < best_bitrate_error) + best_sample_point_error = UINT_MAX; + + can_update_sample_point(btc, sample_point_reference, tseg / 2, + &tseg1, &tseg2, &sample_point_error); + if (sample_point_error >= best_sample_point_error) + continue; + + best_sample_point_error = sample_point_error; + best_bitrate_error = bitrate_error; + best_tseg = tseg / 2; + best_brp = brp; + + if (bitrate_error == 0 && sample_point_error == 0) + break; + } + + if (best_bitrate_error) { + /* Error in one-hundredth of a percent */ + v64 = (u64)best_bitrate_error * 10000; + do_div(v64, bt->bitrate); + bitrate_error = (u32)v64; + /* print at least 0.01% if the error is smaller */ + bitrate_error = max(bitrate_error, 1U); + if (bitrate_error > CAN_CALC_MAX_ERROR) { + NL_SET_ERR_MSG_FMT(extack, + "bitrate error: %u.%02u%% too high", + bitrate_error / 100, + bitrate_error % 100); + return -EINVAL; + } + NL_SET_ERR_MSG_FMT(extack, + "bitrate error: %u.%02u%%", + bitrate_error / 100, bitrate_error % 100); + } + + /* real sample point */ + bt->sample_point = can_update_sample_point(btc, sample_point_reference, + best_tseg, &tseg1, &tseg2, + NULL); + + v64 = (u64)best_brp * 1000 * 1000 * 1000; + do_div(v64, priv->clock.freq); + bt->tq = (u32)v64; + bt->prop_seg = tseg1 / 2; + bt->phase_seg1 = tseg1 - bt->prop_seg; + bt->phase_seg2 = tseg2; + + can_sjw_set_default(bt); + + err = can_sjw_check(dev, bt, btc, extack); + if (err) + return err; + + bt->brp = best_brp; + + /* real bitrate */ + bt->bitrate = priv->clock.freq / + (bt->brp * can_bit_time(bt)); + + return 0; +} + +void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const, + const struct can_bittiming *dbt, + u32 tdc_mask, u32 *ctrlmode, u32 ctrlmode_supported) + +{ + u32 tdc_auto = tdc_mask & CAN_CTRLMODE_TDC_AUTO_MASK; + + if (!tdc_const || !(ctrlmode_supported & tdc_auto)) + return; + + *ctrlmode &= ~tdc_mask; + + /* As specified in ISO 11898-1 section 11.3.3 "Transmitter + * delay compensation" (TDC) is only applicable if data BRP is + * one or two. 
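+	 * When it applies, TDCO is set to the data phase sample point
+	 * expressed in CAN clock periods, clamped to tdco_max; if that value
+	 * is below tdco_min, TDC is left disabled.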
+ */ + if (dbt->brp == 1 || dbt->brp == 2) { + /* Sample point in clock periods */ + u32 sample_point_in_tc = (CAN_SYNC_SEG + dbt->prop_seg + + dbt->phase_seg1) * dbt->brp; + + if (sample_point_in_tc < tdc_const->tdco_min) + return; + tdc->tdco = min(sample_point_in_tc, tdc_const->tdco_max); + *ctrlmode |= tdc_auto; + } +} + +int can_calc_pwm(struct net_device *dev, struct netlink_ext_ack *extack) +{ + struct can_priv *priv = netdev_priv(dev); + const struct can_pwm_const *pwm_const = priv->xl.pwm_const; + struct can_pwm *pwm = &priv->xl.pwm; + u32 xl_tqmin = can_bit_time_tqmin(&priv->xl.data_bittiming); + u32 xl_ns = can_tqmin_to_ns(xl_tqmin, priv->clock.freq); + u32 nom_tqmin = can_bit_time_tqmin(&priv->bittiming); + int pwm_per_bit_max = xl_tqmin / (pwm_const->pwms_min + pwm_const->pwml_min); + int pwm_per_bit; + u32 pwm_tqmin; + + /* For 5 MB/s databitrate or greater, xl_ns < CAN_PWM_NS_MAX + * giving us a pwm_per_bit of 1 and the loop immediately breaks + */ + for (pwm_per_bit = DIV_ROUND_UP(xl_ns, CAN_PWM_NS_MAX); + pwm_per_bit <= pwm_per_bit_max; pwm_per_bit++) + if (xl_tqmin % pwm_per_bit == 0) + break; + + if (pwm_per_bit > pwm_per_bit_max) { + NL_SET_ERR_MSG_FMT(extack, + "Can not divide the XL data phase's bit time: %u tqmin into multiple PWM symbols", + xl_tqmin); + return -EINVAL; + } + + pwm_tqmin = xl_tqmin / pwm_per_bit; + pwm->pwms = DIV_ROUND_UP_POW2(pwm_tqmin, 4); + pwm->pwml = pwm_tqmin - pwm->pwms; + pwm->pwmo = nom_tqmin % pwm_tqmin; + + return 0; +} diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c new file mode 100644 index 000000000000..091f30e94c61 --- /dev/null +++ b/drivers/net/can/dev/dev.c @@ -0,0 +1,661 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix + * Copyright (C) 2006 Andrey Volkov, Varma Electronics + * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com> + */ + +#include <linux/can.h> +#include <linux/can/can-ml.h> +#include <linux/can/dev.h> +#include <linux/can/skb.h> +#include <linux/gpio/consumer.h> +#include <linux/if_arp.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/of.h> +#include <linux/slab.h> +#include <linux/workqueue.h> + +static void can_update_state_error_stats(struct net_device *dev, + enum can_state new_state) +{ + struct can_priv *priv = netdev_priv(dev); + + if (new_state <= priv->state) + return; + + switch (new_state) { + case CAN_STATE_ERROR_WARNING: + priv->can_stats.error_warning++; + break; + case CAN_STATE_ERROR_PASSIVE: + priv->can_stats.error_passive++; + break; + case CAN_STATE_BUS_OFF: + priv->can_stats.bus_off++; + break; + default: + break; + } +} + +static int can_tx_state_to_frame(struct net_device *dev, enum can_state state) +{ + switch (state) { + case CAN_STATE_ERROR_ACTIVE: + return CAN_ERR_CRTL_ACTIVE; + case CAN_STATE_ERROR_WARNING: + return CAN_ERR_CRTL_TX_WARNING; + case CAN_STATE_ERROR_PASSIVE: + return CAN_ERR_CRTL_TX_PASSIVE; + default: + return 0; + } +} + +static int can_rx_state_to_frame(struct net_device *dev, enum can_state state) +{ + switch (state) { + case CAN_STATE_ERROR_ACTIVE: + return CAN_ERR_CRTL_ACTIVE; + case CAN_STATE_ERROR_WARNING: + return CAN_ERR_CRTL_RX_WARNING; + case CAN_STATE_ERROR_PASSIVE: + return CAN_ERR_CRTL_RX_PASSIVE; + default: + return 0; + } +} + +const char *can_get_state_str(const enum can_state state) +{ + switch (state) { + case CAN_STATE_ERROR_ACTIVE: + return "Error Active"; + case CAN_STATE_ERROR_WARNING: + return "Error Warning"; + case CAN_STATE_ERROR_PASSIVE: 
+ return "Error Passive"; + case CAN_STATE_BUS_OFF: + return "Bus Off"; + case CAN_STATE_STOPPED: + return "Stopped"; + case CAN_STATE_SLEEPING: + return "Sleeping"; + default: + return "<unknown>"; + } +} +EXPORT_SYMBOL_GPL(can_get_state_str); + +const char *can_get_ctrlmode_str(u32 ctrlmode) +{ + switch (ctrlmode & ~(ctrlmode - 1)) { + case 0: + return "(none)"; + case CAN_CTRLMODE_LOOPBACK: + return "LOOPBACK"; + case CAN_CTRLMODE_LISTENONLY: + return "LISTEN-ONLY"; + case CAN_CTRLMODE_3_SAMPLES: + return "TRIPLE-SAMPLING"; + case CAN_CTRLMODE_ONE_SHOT: + return "ONE-SHOT"; + case CAN_CTRLMODE_BERR_REPORTING: + return "BERR-REPORTING"; + case CAN_CTRLMODE_FD: + return "FD"; + case CAN_CTRLMODE_PRESUME_ACK: + return "PRESUME-ACK"; + case CAN_CTRLMODE_FD_NON_ISO: + return "FD-NON-ISO"; + case CAN_CTRLMODE_CC_LEN8_DLC: + return "CC-LEN8-DLC"; + case CAN_CTRLMODE_TDC_AUTO: + return "TDC-AUTO"; + case CAN_CTRLMODE_TDC_MANUAL: + return "TDC-MANUAL"; + case CAN_CTRLMODE_RESTRICTED: + return "RESTRICTED"; + case CAN_CTRLMODE_XL: + return "XL"; + case CAN_CTRLMODE_XL_TDC_AUTO: + return "XL-TDC-AUTO"; + case CAN_CTRLMODE_XL_TDC_MANUAL: + return "XL-TDC-MANUAL"; + case CAN_CTRLMODE_XL_TMS: + return "TMS"; + default: + return "<unknown>"; + } +} +EXPORT_SYMBOL_GPL(can_get_ctrlmode_str); + +static enum can_state can_state_err_to_state(u16 err) +{ + if (err < CAN_ERROR_WARNING_THRESHOLD) + return CAN_STATE_ERROR_ACTIVE; + if (err < CAN_ERROR_PASSIVE_THRESHOLD) + return CAN_STATE_ERROR_WARNING; + if (err < CAN_BUS_OFF_THRESHOLD) + return CAN_STATE_ERROR_PASSIVE; + + return CAN_STATE_BUS_OFF; +} + +void can_state_get_by_berr_counter(const struct net_device *dev, + const struct can_berr_counter *bec, + enum can_state *tx_state, + enum can_state *rx_state) +{ + *tx_state = can_state_err_to_state(bec->txerr); + *rx_state = can_state_err_to_state(bec->rxerr); +} +EXPORT_SYMBOL_GPL(can_state_get_by_berr_counter); + +void can_change_state(struct net_device *dev, struct can_frame *cf, + enum can_state tx_state, enum can_state rx_state) +{ + struct can_priv *priv = netdev_priv(dev); + enum can_state new_state = max(tx_state, rx_state); + + if (unlikely(new_state == priv->state)) { + netdev_warn(dev, "%s: oops, state did not change", __func__); + return; + } + + netdev_dbg(dev, "Controller changed from %s State (%d) into %s State (%d).\n", + can_get_state_str(priv->state), priv->state, + can_get_state_str(new_state), new_state); + + can_update_state_error_stats(dev, new_state); + priv->state = new_state; + + if (!cf) + return; + + if (unlikely(new_state == CAN_STATE_BUS_OFF)) { + cf->can_id |= CAN_ERR_BUSOFF; + return; + } + + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] |= tx_state >= rx_state ? + can_tx_state_to_frame(dev, tx_state) : 0; + cf->data[1] |= tx_state <= rx_state ? + can_rx_state_to_frame(dev, rx_state) : 0; +} +EXPORT_SYMBOL_GPL(can_change_state); + +/* CAN device restart for bus-off recovery */ +static int can_restart(struct net_device *dev) +{ + struct can_priv *priv = netdev_priv(dev); + struct sk_buff *skb; + struct can_frame *cf; + int err; + + if (!priv->do_set_mode) + return -EOPNOTSUPP; + + if (netif_carrier_ok(dev)) + netdev_err(dev, "Attempt to restart for bus-off recovery, but carrier is OK?\n"); + + /* No synchronization needed because the device is bus-off and + * no messages can come in or go out. 
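+	 * Any echo skbs left over from before the bus-off are stale and are
+	 * flushed before the restart message is passed up.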
+ */ + can_flush_echo_skb(dev); + + /* send restart message upstream */ + skb = alloc_can_err_skb(dev, &cf); + if (skb) { + cf->can_id |= CAN_ERR_RESTARTED; + netif_rx(skb); + } + + /* Now restart the device */ + netif_carrier_on(dev); + err = priv->do_set_mode(dev, CAN_MODE_START); + if (err) { + netdev_err(dev, "Restart failed, error %pe\n", ERR_PTR(err)); + netif_carrier_off(dev); + + return err; + } else { + netdev_dbg(dev, "Restarted\n"); + priv->can_stats.restarts++; + } + + return 0; +} + +static void can_restart_work(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct can_priv *priv = container_of(dwork, struct can_priv, + restart_work); + + can_restart(priv->dev); +} + +int can_restart_now(struct net_device *dev) +{ + struct can_priv *priv = netdev_priv(dev); + + /* A manual restart is only permitted if automatic restart is + * disabled and the device is in the bus-off state + */ + if (priv->restart_ms) + return -EINVAL; + if (priv->state != CAN_STATE_BUS_OFF) + return -EBUSY; + + cancel_delayed_work_sync(&priv->restart_work); + + return can_restart(dev); +} + +/* CAN bus-off + * + * This functions should be called when the device goes bus-off to + * tell the netif layer that no more packets can be sent or received. + * If enabled, a timer is started to trigger bus-off recovery. + */ +void can_bus_off(struct net_device *dev) +{ + struct can_priv *priv = netdev_priv(dev); + + if (priv->restart_ms) + netdev_info(dev, "bus-off, scheduling restart in %d ms\n", + priv->restart_ms); + else + netdev_info(dev, "bus-off\n"); + + netif_carrier_off(dev); + + if (priv->restart_ms) + schedule_delayed_work(&priv->restart_work, + msecs_to_jiffies(priv->restart_ms)); +} +EXPORT_SYMBOL_GPL(can_bus_off); + +void can_setup(struct net_device *dev) +{ + dev->type = ARPHRD_CAN; + dev->mtu = CAN_MTU; + dev->min_mtu = CAN_MTU; + dev->max_mtu = CAN_MTU; + dev->hard_header_len = 0; + dev->addr_len = 0; + dev->tx_queue_len = 10; + + /* New-style flags. */ + dev->flags = IFF_NOARP; + dev->features = NETIF_F_HW_CSUM; +} + +/* Allocate and setup space for the CAN network device */ +struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max, + unsigned int txqs, unsigned int rxqs) +{ + struct can_ml_priv *can_ml; + struct net_device *dev; + struct can_priv *priv; + int size; + + /* We put the driver's priv, the CAN mid layer priv and the + * echo skb into the netdevice's priv. 
The memory layout for + * the netdev_priv is like this: + * + * +-------------------------+ + * | driver's priv | + * +-------------------------+ + * | struct can_ml_priv | + * +-------------------------+ + * | array of struct sk_buff | + * +-------------------------+ + */ + + size = ALIGN(sizeof_priv, NETDEV_ALIGN) + sizeof(struct can_ml_priv); + + if (echo_skb_max) + size = ALIGN(size, sizeof(struct sk_buff *)) + + echo_skb_max * sizeof(struct sk_buff *); + + dev = alloc_netdev_mqs(size, "can%d", NET_NAME_UNKNOWN, can_setup, + txqs, rxqs); + if (!dev) + return NULL; + + priv = netdev_priv(dev); + priv->dev = dev; + + can_ml = (void *)priv + ALIGN(sizeof_priv, NETDEV_ALIGN); + can_set_ml_priv(dev, can_ml); + + if (echo_skb_max) { + priv->echo_skb_max = echo_skb_max; + priv->echo_skb = (void *)priv + + (size - echo_skb_max * sizeof(struct sk_buff *)); + } + + priv->state = CAN_STATE_STOPPED; + + INIT_DELAYED_WORK(&priv->restart_work, can_restart_work); + + return dev; +} +EXPORT_SYMBOL_GPL(alloc_candev_mqs); + +/* Free space of the CAN network device */ +void free_candev(struct net_device *dev) +{ + free_netdev(dev); +} +EXPORT_SYMBOL_GPL(free_candev); + +void can_set_default_mtu(struct net_device *dev) +{ + struct can_priv *priv = netdev_priv(dev); + + if (priv->ctrlmode & CAN_CTRLMODE_XL) { + if (can_is_canxl_dev_mtu(dev->mtu)) + return; + dev->mtu = CANXL_MTU; + dev->min_mtu = CANXL_MIN_MTU; + dev->max_mtu = CANXL_MAX_MTU; + } else if (priv->ctrlmode & CAN_CTRLMODE_FD) { + dev->mtu = CANFD_MTU; + dev->min_mtu = CANFD_MTU; + dev->max_mtu = CANFD_MTU; + } else { + dev->mtu = CAN_MTU; + dev->min_mtu = CAN_MTU; + dev->max_mtu = CAN_MTU; + } +} + +/* helper to define static CAN controller features at device creation time */ +int can_set_static_ctrlmode(struct net_device *dev, u32 static_mode) +{ + struct can_priv *priv = netdev_priv(dev); + + /* alloc_candev() succeeded => netdev_priv() is valid at this point */ + if (priv->ctrlmode_supported & static_mode) { + netdev_warn(dev, + "Controller features can not be supported and static at the same time\n"); + return -EINVAL; + } + priv->ctrlmode = static_mode; + + /* override MTU which was set by default in can_setup()? 
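+	 * can_set_default_mtu() selects the Classical CAN, CAN FD or CAN XL
+	 * MTU according to the static mode flags that were just applied.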
*/ + can_set_default_mtu(dev); + + return 0; +} +EXPORT_SYMBOL_GPL(can_set_static_ctrlmode); + +/* generic implementation of netdev_ops::ndo_hwtstamp_get for CAN devices + * supporting hardware timestamps + */ +int can_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *cfg) +{ + cfg->tx_type = HWTSTAMP_TX_ON; + cfg->rx_filter = HWTSTAMP_FILTER_ALL; + + return 0; +} +EXPORT_SYMBOL(can_hwtstamp_get); + +/* generic implementation of netdev_ops::ndo_hwtstamp_set for CAN devices + * supporting hardware timestamps + */ +int can_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack) +{ + if (cfg->tx_type == HWTSTAMP_TX_ON && + cfg->rx_filter == HWTSTAMP_FILTER_ALL) + return 0; + NL_SET_ERR_MSG_MOD(extack, "Only TX on and RX all packets filter supported"); + return -ERANGE; +} +EXPORT_SYMBOL(can_hwtstamp_set); + +/* generic implementation of ethtool_ops::get_ts_info for CAN devices + * supporting hardware timestamps + */ +int can_ethtool_op_get_ts_info_hwts(struct net_device *dev, + struct kernel_ethtool_ts_info *info) +{ + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + info->tx_types = BIT(HWTSTAMP_TX_ON); + info->rx_filters = BIT(HWTSTAMP_FILTER_ALL); + + return 0; +} +EXPORT_SYMBOL(can_ethtool_op_get_ts_info_hwts); + +/* Common open function when the device gets opened. + * + * This function should be called in the open function of the device + * driver. + */ +int open_candev(struct net_device *dev) +{ + struct can_priv *priv = netdev_priv(dev); + + if (!priv->bittiming.bitrate) { + netdev_err(dev, "bit-timing not yet defined\n"); + return -EINVAL; + } + + /* For CAN FD the data bitrate has to be >= the arbitration bitrate */ + if ((priv->ctrlmode & CAN_CTRLMODE_FD) && + (!priv->fd.data_bittiming.bitrate || + priv->fd.data_bittiming.bitrate < priv->bittiming.bitrate)) { + netdev_err(dev, "incorrect/missing data bit-timing\n"); + return -EINVAL; + } + + /* Switch carrier on if device was stopped while in bus-off state */ + if (!netif_carrier_ok(dev)) + netif_carrier_on(dev); + + return 0; +} +EXPORT_SYMBOL_GPL(open_candev); + +#ifdef CONFIG_OF +/* Common function that can be used to understand the limitation of + * a transceiver when it provides no means to determine these limitations + * at runtime. + */ +void of_can_transceiver(struct net_device *dev) +{ + struct device_node *dn; + struct can_priv *priv = netdev_priv(dev); + struct device_node *np = dev->dev.parent->of_node; + int ret; + + dn = of_get_child_by_name(np, "can-transceiver"); + if (!dn) + return; + + ret = of_property_read_u32(dn, "max-bitrate", &priv->bitrate_max); + of_node_put(dn); + if ((ret && ret != -EINVAL) || (!ret && !priv->bitrate_max)) + netdev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit.\n"); +} +EXPORT_SYMBOL_GPL(of_can_transceiver); +#endif + +/* Common close function for cleanup before the device gets closed. + * + * This function should be called in the close function of the device + * driver. 
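+ *
+ * It cancels any pending bus-off restart work and drops the echo skbs
+ * still owned by the driver.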
+ */ +void close_candev(struct net_device *dev) +{ + struct can_priv *priv = netdev_priv(dev); + + cancel_delayed_work_sync(&priv->restart_work); + can_flush_echo_skb(dev); +} +EXPORT_SYMBOL_GPL(close_candev); + +static int can_set_termination(struct net_device *ndev, u16 term) +{ + struct can_priv *priv = netdev_priv(ndev); + int set; + + if (term == priv->termination_gpio_ohms[CAN_TERMINATION_GPIO_ENABLED]) + set = 1; + else + set = 0; + + gpiod_set_value_cansleep(priv->termination_gpio, set); + + return 0; +} + +static int can_get_termination(struct net_device *ndev) +{ + struct can_priv *priv = netdev_priv(ndev); + struct device *dev = ndev->dev.parent; + struct gpio_desc *gpio; + u32 term; + int ret; + + /* Disabling termination by default is the safe choice: Else if many + * bus participants enable it, no communication is possible at all. + */ + gpio = devm_gpiod_get_optional(dev, "termination", GPIOD_OUT_LOW); + if (IS_ERR(gpio)) + return dev_err_probe(dev, PTR_ERR(gpio), + "Cannot get termination-gpios\n"); + + if (!gpio) + return 0; + + ret = device_property_read_u32(dev, "termination-ohms", &term); + if (ret) { + netdev_err(ndev, "Cannot get termination-ohms: %pe\n", + ERR_PTR(ret)); + return ret; + } + + if (term > U16_MAX) { + netdev_err(ndev, "Invalid termination-ohms value (%u > %u)\n", + term, U16_MAX); + return -EINVAL; + } + + priv->termination_const_cnt = ARRAY_SIZE(priv->termination_gpio_ohms); + priv->termination_const = priv->termination_gpio_ohms; + priv->termination_gpio = gpio; + priv->termination_gpio_ohms[CAN_TERMINATION_GPIO_DISABLED] = + CAN_TERMINATION_DISABLED; + priv->termination_gpio_ohms[CAN_TERMINATION_GPIO_ENABLED] = term; + priv->do_set_termination = can_set_termination; + + return 0; +} + +static bool +can_bittiming_const_valid(const struct can_bittiming_const *btc) +{ + if (!btc) + return true; + + if (!btc->sjw_max) + return false; + + return true; +} + +/* Register the CAN network device */ +int register_candev(struct net_device *dev) +{ + struct can_priv *priv = netdev_priv(dev); + int err; + + /* Ensure termination_const, termination_const_cnt and + * do_set_termination consistency. All must be either set or + * unset. + */ + if ((!priv->termination_const != !priv->termination_const_cnt) || + (!priv->termination_const != !priv->do_set_termination)) + return -EINVAL; + + if (!priv->bitrate_const != !priv->bitrate_const_cnt) + return -EINVAL; + + if (!priv->fd.data_bitrate_const != !priv->fd.data_bitrate_const_cnt) + return -EINVAL; + + /* We only support either fixed bit rates or bit timing const. */ + if ((priv->bitrate_const || priv->fd.data_bitrate_const) && + (priv->bittiming_const || priv->fd.data_bittiming_const)) + return -EINVAL; + + if (!can_bittiming_const_valid(priv->bittiming_const) || + !can_bittiming_const_valid(priv->fd.data_bittiming_const)) + return -EINVAL; + + if (!priv->termination_const) { + err = can_get_termination(dev); + if (err) + return err; + } + + dev->rtnl_link_ops = &can_link_ops; + netif_carrier_off(dev); + + return register_netdev(dev); +} +EXPORT_SYMBOL_GPL(register_candev); + +/* Unregister the CAN network device */ +void unregister_candev(struct net_device *dev) +{ + unregister_netdev(dev); +} +EXPORT_SYMBOL_GPL(unregister_candev); + +/* Test if a network device is a candev based device + * and return the can_priv* if so. 
+ */ +struct can_priv *safe_candev_priv(struct net_device *dev) +{ + if (dev->type != ARPHRD_CAN || dev->rtnl_link_ops != &can_link_ops) + return NULL; + + return netdev_priv(dev); +} +EXPORT_SYMBOL_GPL(safe_candev_priv); + +static __init int can_dev_init(void) +{ + int err; + + err = can_netlink_register(); + if (!err) + pr_info("CAN device driver interface\n"); + + return err; +} +module_init(can_dev_init); + +static __exit void can_dev_exit(void) +{ + can_netlink_unregister(); +} +module_exit(can_dev_exit); + +MODULE_ALIAS_RTNL_LINK("can"); diff --git a/drivers/net/can/dev/length.c b/drivers/net/can/dev/length.c new file mode 100644 index 000000000000..b7f4d76dd444 --- /dev/null +++ b/drivers/net/can/dev/length.c @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2012, 2020 Oliver Hartkopp <socketcan@hartkopp.net> + */ + +#include <linux/can/dev.h> + +/* CAN DLC to real data length conversion helpers */ + +static const u8 dlc2len[] = { + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 12, 16, 20, 24, 32, 48, 64 +}; + +/* get data length from raw data length code (DLC) */ +u8 can_fd_dlc2len(u8 dlc) +{ + return dlc2len[dlc & 0x0F]; +} +EXPORT_SYMBOL_GPL(can_fd_dlc2len); + +static const u8 len2dlc[] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, /* 0 - 8 */ + 9, 9, 9, 9, /* 9 - 12 */ + 10, 10, 10, 10, /* 13 - 16 */ + 11, 11, 11, 11, /* 17 - 20 */ + 12, 12, 12, 12, /* 21 - 24 */ + 13, 13, 13, 13, 13, 13, 13, 13, /* 25 - 32 */ + 14, 14, 14, 14, 14, 14, 14, 14, /* 33 - 40 */ + 14, 14, 14, 14, 14, 14, 14, 14, /* 41 - 48 */ + 15, 15, 15, 15, 15, 15, 15, 15, /* 49 - 56 */ + 15, 15, 15, 15, 15, 15, 15, 15 /* 57 - 64 */ +}; + +/* map the sanitized data length to an appropriate data length code */ +u8 can_fd_len2dlc(u8 len) +{ + /* check for length mapping table size at build time */ + BUILD_BUG_ON(ARRAY_SIZE(len2dlc) != CANFD_MAX_DLEN + 1); + + if (unlikely(len > CANFD_MAX_DLEN)) + return CANFD_MAX_DLC; + + return len2dlc[len]; +} +EXPORT_SYMBOL_GPL(can_fd_len2dlc); + +/** + * can_skb_get_frame_len() - Calculate the CAN Frame length in bytes + * of a given skb. + * @skb: socket buffer of a CAN message. + * + * Do a rough calculation: bit stuffing is ignored and length in bits + * is rounded up to a length in bytes. + * + * Rationale: this function is to be used for the BQL functions + * (netdev_sent_queue() and netdev_completed_queue()) which expect a + * value in bytes. Just using skb->len is insufficient because it will + * return the constant value of CAN(FD)_MTU. Doing the bit stuffing + * calculation would be too expensive in term of computing resources + * for no noticeable gain. + * + * Remarks: The payload of CAN FD frames with BRS flag are sent at a + * different bitrate. Currently, the can-utils canbusload tool does + * not support CAN-FD yet and so we could not run any benchmark to + * measure the impact. There might be possible improvement here. + * + * Return: length in bytes. 
+ */ +unsigned int can_skb_get_frame_len(const struct sk_buff *skb) +{ + const struct canfd_frame *cf = (const struct canfd_frame *)skb->data; + u8 len; + + if (can_is_canfd_skb(skb)) + len = canfd_sanitize_len(cf->len); + else if (cf->can_id & CAN_RTR_FLAG) + len = 0; + else + len = cf->len; + + return can_frame_bytes(can_is_canfd_skb(skb), cf->can_id & CAN_EFF_FLAG, + false, len); +} +EXPORT_SYMBOL_GPL(can_skb_get_frame_len); diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c new file mode 100644 index 000000000000..d6b0e686fb11 --- /dev/null +++ b/drivers/net/can/dev/netlink.c @@ -0,0 +1,1111 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix + * Copyright (C) 2006 Andrey Volkov, Varma Electronics + * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com> + * Copyright (C) 2021-2025 Vincent Mailhol <mailhol@kernel.org> + */ + +#include <linux/can/dev.h> +#include <net/rtnetlink.h> + +static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = { + [IFLA_CAN_STATE] = { .type = NLA_U32 }, + [IFLA_CAN_CTRLMODE] = { .len = sizeof(struct can_ctrlmode) }, + [IFLA_CAN_RESTART_MS] = { .type = NLA_U32 }, + [IFLA_CAN_RESTART] = { .type = NLA_U32 }, + [IFLA_CAN_BITTIMING] = { .len = sizeof(struct can_bittiming) }, + [IFLA_CAN_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) }, + [IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) }, + [IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) }, + [IFLA_CAN_DATA_BITTIMING] = { .len = sizeof(struct can_bittiming) }, + [IFLA_CAN_DATA_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) }, + [IFLA_CAN_TERMINATION] = { .type = NLA_U16 }, + [IFLA_CAN_TDC] = { .type = NLA_NESTED }, + [IFLA_CAN_CTRLMODE_EXT] = { .type = NLA_NESTED }, + [IFLA_CAN_XL_DATA_BITTIMING] = { .len = sizeof(struct can_bittiming) }, + [IFLA_CAN_XL_DATA_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) }, + [IFLA_CAN_XL_TDC] = { .type = NLA_NESTED }, + [IFLA_CAN_XL_PWM] = { .type = NLA_NESTED }, +}; + +static const struct nla_policy can_tdc_policy[IFLA_CAN_TDC_MAX + 1] = { + [IFLA_CAN_TDC_TDCV_MIN] = { .type = NLA_U32 }, + [IFLA_CAN_TDC_TDCV_MAX] = { .type = NLA_U32 }, + [IFLA_CAN_TDC_TDCO_MIN] = { .type = NLA_U32 }, + [IFLA_CAN_TDC_TDCO_MAX] = { .type = NLA_U32 }, + [IFLA_CAN_TDC_TDCF_MIN] = { .type = NLA_U32 }, + [IFLA_CAN_TDC_TDCF_MAX] = { .type = NLA_U32 }, + [IFLA_CAN_TDC_TDCV] = { .type = NLA_U32 }, + [IFLA_CAN_TDC_TDCO] = { .type = NLA_U32 }, + [IFLA_CAN_TDC_TDCF] = { .type = NLA_U32 }, +}; + +static const struct nla_policy can_pwm_policy[IFLA_CAN_PWM_MAX + 1] = { + [IFLA_CAN_PWM_PWMS_MIN] = { .type = NLA_U32 }, + [IFLA_CAN_PWM_PWMS_MAX] = { .type = NLA_U32 }, + [IFLA_CAN_PWM_PWML_MIN] = { .type = NLA_U32 }, + [IFLA_CAN_PWM_PWML_MAX] = { .type = NLA_U32 }, + [IFLA_CAN_PWM_PWMO_MIN] = { .type = NLA_U32 }, + [IFLA_CAN_PWM_PWMO_MAX] = { .type = NLA_U32 }, + [IFLA_CAN_PWM_PWMS] = { .type = NLA_U32 }, + [IFLA_CAN_PWM_PWML] = { .type = NLA_U32 }, + [IFLA_CAN_PWM_PWMO] = { .type = NLA_U32 }, +}; + +static int can_validate_bittiming(struct nlattr *data[], + struct netlink_ext_ack *extack, + int ifla_can_bittiming) +{ + struct can_bittiming *bt; + + if (!data[ifla_can_bittiming]) + return 0; + + static_assert(__alignof__(*bt) <= NLA_ALIGNTO); + bt = nla_data(data[ifla_can_bittiming]); + + /* sample point is in one-tenth of a percent */ + if (bt->sample_point >= 1000) { + NL_SET_ERR_MSG(extack, "sample point must be between 0 and 100%"); + return -EINVAL; + } 
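+	/* e.g. a sample point of 87.5% is passed by user space as 875 */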
+ + return 0; +} + +static int can_validate_tdc(struct nlattr *data_tdc, + struct netlink_ext_ack *extack, u32 tdc_flags) +{ + bool tdc_manual = tdc_flags & CAN_CTRLMODE_TDC_MANUAL_MASK; + bool tdc_auto = tdc_flags & CAN_CTRLMODE_TDC_AUTO_MASK; + int err; + + if (tdc_auto && tdc_manual) { + NL_SET_ERR_MSG(extack, + "TDC manual and auto modes are mutually exclusive"); + return -EOPNOTSUPP; + } + + /* If one of the CAN_CTRLMODE_{,XL}_TDC_* flags is set then TDC + * must be set and vice-versa + */ + if ((tdc_auto || tdc_manual) && !data_tdc) { + NL_SET_ERR_MSG(extack, "TDC parameters are missing"); + return -EOPNOTSUPP; + } + if (!(tdc_auto || tdc_manual) && data_tdc) { + NL_SET_ERR_MSG(extack, "TDC mode (auto or manual) is missing"); + return -EOPNOTSUPP; + } + + /* If providing TDC parameters, at least TDCO is needed. TDCV is + * needed if and only if CAN_CTRLMODE_{,XL}_TDC_MANUAL is set + */ + if (data_tdc) { + struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1]; + + err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX, + data_tdc, can_tdc_policy, extack); + if (err) + return err; + + if (tb_tdc[IFLA_CAN_TDC_TDCV]) { + if (tdc_auto) { + NL_SET_ERR_MSG(extack, + "TDCV is incompatible with TDC auto mode"); + return -EOPNOTSUPP; + } + } else { + if (tdc_manual) { + NL_SET_ERR_MSG(extack, + "TDC manual mode requires TDCV"); + return -EOPNOTSUPP; + } + } + + if (!tb_tdc[IFLA_CAN_TDC_TDCO]) { + NL_SET_ERR_MSG(extack, "TDCO is missing"); + return -EOPNOTSUPP; + } + } + + return 0; +} + +static int can_validate_pwm(struct nlattr *data[], + struct netlink_ext_ack *extack, u32 flags) +{ + struct nlattr *tb_pwm[IFLA_CAN_PWM_MAX + 1]; + int err; + + if (!data[IFLA_CAN_XL_PWM]) + return 0; + + if (!(flags & CAN_CTRLMODE_XL_TMS)) { + NL_SET_ERR_MSG(extack, "PWM requires TMS"); + return -EOPNOTSUPP; + } + + err = nla_parse_nested(tb_pwm, IFLA_CAN_PWM_MAX, data[IFLA_CAN_XL_PWM], + can_pwm_policy, extack); + if (err) + return err; + + if (!tb_pwm[IFLA_CAN_PWM_PWMS] != !tb_pwm[IFLA_CAN_PWM_PWML]) { + NL_SET_ERR_MSG(extack, + "Provide either both PWMS and PWML, or none for automatic calculation"); + return -EOPNOTSUPP; + } + + if (tb_pwm[IFLA_CAN_PWM_PWMO] && + (!tb_pwm[IFLA_CAN_PWM_PWMS] || !tb_pwm[IFLA_CAN_PWM_PWML])) { + NL_SET_ERR_MSG(extack, "PWMO requires both PWMS and PWML"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int can_validate_databittiming(struct nlattr *data[], + struct netlink_ext_ack *extack, + int ifla_can_data_bittiming, u32 flags) +{ + struct nlattr *data_tdc; + const char *type; + u32 tdc_flags; + bool is_on; + int err; + + /* Make sure that valid CAN FD/XL configurations always consist of + * - nominal/arbitration bittiming + * - data bittiming + * - control mode with CAN_CTRLMODE_{FD,XL} set + * - TDC parameters are coherent (details in can_validate_tdc()) + */ + + if (ifla_can_data_bittiming == IFLA_CAN_DATA_BITTIMING) { + data_tdc = data[IFLA_CAN_TDC]; + tdc_flags = flags & CAN_CTRLMODE_FD_TDC_MASK; + is_on = flags & CAN_CTRLMODE_FD; + type = "FD"; + } else { + data_tdc = data[IFLA_CAN_XL_TDC]; + tdc_flags = flags & CAN_CTRLMODE_XL_TDC_MASK; + is_on = flags & CAN_CTRLMODE_XL; + type = "XL"; + } + + if (is_on) { + if (!data[IFLA_CAN_BITTIMING] || !data[ifla_can_data_bittiming]) { + NL_SET_ERR_MSG_FMT(extack, + "Provide both nominal and %s data bittiming", + type); + return -EOPNOTSUPP; + } + } else { + if (data[ifla_can_data_bittiming]) { + NL_SET_ERR_MSG_FMT(extack, + "%s data bittiming requires CAN %s", + type, type); + return -EOPNOTSUPP; + } + if (data_tdc) { + 
NL_SET_ERR_MSG_FMT(extack, + "%s TDC requires CAN %s", + type, type); + return -EOPNOTSUPP; + } + } + + err = can_validate_bittiming(data, extack, ifla_can_data_bittiming); + if (err) + return err; + + err = can_validate_tdc(data_tdc, extack, tdc_flags); + if (err) + return err; + + return 0; +} + +static int can_validate_xl_flags(struct netlink_ext_ack *extack, + u32 masked_flags, u32 mask) +{ + if (masked_flags & CAN_CTRLMODE_XL) { + if (masked_flags & CAN_CTRLMODE_XL_TMS) { + const u32 tms_conflicts_mask = CAN_CTRLMODE_FD | + CAN_CTRLMODE_XL_TDC_MASK; + u32 tms_conflicts = masked_flags & tms_conflicts_mask; + + if (tms_conflicts) { + NL_SET_ERR_MSG_FMT(extack, + "TMS and %s are mutually exclusive", + can_get_ctrlmode_str(tms_conflicts)); + return -EOPNOTSUPP; + } + } + } else { + if (mask & CAN_CTRLMODE_XL_TMS) { + NL_SET_ERR_MSG(extack, "TMS requires CAN XL"); + return -EOPNOTSUPP; + } + } + + return 0; +} + +static int can_validate(struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + u32 flags = 0; + int err; + + if (!data) + return 0; + + if (data[IFLA_CAN_CTRLMODE]) { + struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]); + + flags = cm->flags & cm->mask; + + if ((flags & CAN_CTRLMODE_LISTENONLY) && + (flags & CAN_CTRLMODE_RESTRICTED)) { + NL_SET_ERR_MSG(extack, + "LISTEN-ONLY and RESTRICTED modes are mutually exclusive"); + return -EOPNOTSUPP; + } + + err = can_validate_xl_flags(extack, flags, cm->mask); + if (err) + return err; + } + + err = can_validate_bittiming(data, extack, IFLA_CAN_BITTIMING); + if (err) + return err; + + err = can_validate_databittiming(data, extack, + IFLA_CAN_DATA_BITTIMING, flags); + if (err) + return err; + + err = can_validate_databittiming(data, extack, + IFLA_CAN_XL_DATA_BITTIMING, flags); + if (err) + return err; + + err = can_validate_pwm(data, extack, flags); + if (err) + return err; + + return 0; +} + +static int can_ctrlmode_changelink(struct net_device *dev, + struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + struct can_priv *priv = netdev_priv(dev); + struct can_ctrlmode *cm; + u32 ctrlstatic, maskedflags, deactivated, notsupp, ctrlstatic_missing; + + if (!data[IFLA_CAN_CTRLMODE]) + return 0; + + /* Do not allow changing controller mode while running */ + if (dev->flags & IFF_UP) + return -EBUSY; + + cm = nla_data(data[IFLA_CAN_CTRLMODE]); + ctrlstatic = can_get_static_ctrlmode(priv); + maskedflags = cm->flags & cm->mask; + deactivated = ~cm->flags & cm->mask; + notsupp = maskedflags & ~(priv->ctrlmode_supported | ctrlstatic); + ctrlstatic_missing = (maskedflags & ctrlstatic) ^ ctrlstatic; + + if (notsupp) { + NL_SET_ERR_MSG_FMT(extack, + "requested control mode %s not supported", + can_get_ctrlmode_str(notsupp)); + return -EOPNOTSUPP; + } + + /* do not check for static fd-non-iso if 'fd' is disabled */ + if (!(maskedflags & CAN_CTRLMODE_FD)) + ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO; + + if (ctrlstatic_missing) { + NL_SET_ERR_MSG_FMT(extack, + "missing required %s static control mode", + can_get_ctrlmode_str(ctrlstatic_missing)); + return -EOPNOTSUPP; + } + + /* If FD was active and is not turned off, check for XL conflicts */ + if (priv->ctrlmode & CAN_CTRLMODE_FD & ~deactivated) { + if (maskedflags & CAN_CTRLMODE_XL_TMS) { + NL_SET_ERR_MSG(extack, + "TMS can not be activated while CAN FD is on"); + return -EOPNOTSUPP; + } + } + + /* If a top dependency flag is provided, reset all its dependencies */ + if (cm->mask & CAN_CTRLMODE_FD) + priv->ctrlmode &= ~CAN_CTRLMODE_FD_TDC_MASK; + if (cm->mask 
& CAN_CTRLMODE_XL) + priv->ctrlmode &= ~(CAN_CTRLMODE_XL_TDC_MASK | + CAN_CTRLMODE_XL_TMS); + + /* clear bits to be modified and copy the flag values */ + priv->ctrlmode &= ~cm->mask; + priv->ctrlmode |= maskedflags; + + /* Wipe potential leftovers from previous CAN FD/XL config */ + if (!(priv->ctrlmode & CAN_CTRLMODE_FD)) { + memset(&priv->fd.data_bittiming, 0, + sizeof(priv->fd.data_bittiming)); + priv->ctrlmode &= ~CAN_CTRLMODE_FD_TDC_MASK; + memset(&priv->fd.tdc, 0, sizeof(priv->fd.tdc)); + } + if (!(priv->ctrlmode & CAN_CTRLMODE_XL)) { + memset(&priv->xl.data_bittiming, 0, + sizeof(priv->fd.data_bittiming)); + priv->ctrlmode &= ~CAN_CTRLMODE_XL_TDC_MASK; + memset(&priv->xl.tdc, 0, sizeof(priv->xl.tdc)); + memset(&priv->xl.pwm, 0, sizeof(priv->xl.pwm)); + } + + can_set_default_mtu(dev); + + return 0; +} + +static int can_tdc_changelink(struct data_bittiming_params *dbt_params, + const struct nlattr *nla, + struct netlink_ext_ack *extack) +{ + struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1]; + struct can_tdc tdc = { 0 }; + const struct can_tdc_const *tdc_const = dbt_params->tdc_const; + int err; + + if (!tdc_const) { + NL_SET_ERR_MSG(extack, "The device does not support TDC"); + return -EOPNOTSUPP; + } + + err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX, nla, + can_tdc_policy, extack); + if (err) + return err; + + if (tb_tdc[IFLA_CAN_TDC_TDCV]) { + u32 tdcv = nla_get_u32(tb_tdc[IFLA_CAN_TDC_TDCV]); + + if (tdcv < tdc_const->tdcv_min || tdcv > tdc_const->tdcv_max) + return -EINVAL; + + tdc.tdcv = tdcv; + } + + if (tb_tdc[IFLA_CAN_TDC_TDCO]) { + u32 tdco = nla_get_u32(tb_tdc[IFLA_CAN_TDC_TDCO]); + + if (tdco < tdc_const->tdco_min || tdco > tdc_const->tdco_max) + return -EINVAL; + + tdc.tdco = tdco; + } + + if (tb_tdc[IFLA_CAN_TDC_TDCF]) { + u32 tdcf = nla_get_u32(tb_tdc[IFLA_CAN_TDC_TDCF]); + + if (tdcf < tdc_const->tdcf_min || tdcf > tdc_const->tdcf_max) + return -EINVAL; + + tdc.tdcf = tdcf; + } + + dbt_params->tdc = tdc; + + return 0; +} + +static int can_dbt_changelink(struct net_device *dev, struct nlattr *data[], + bool fd, struct netlink_ext_ack *extack) +{ + struct nlattr *data_bittiming, *data_tdc; + struct can_priv *priv = netdev_priv(dev); + struct data_bittiming_params *dbt_params; + struct can_bittiming dbt; + bool need_tdc_calc = false; + u32 tdc_mask; + int err; + + if (fd) { + data_bittiming = data[IFLA_CAN_DATA_BITTIMING]; + data_tdc = data[IFLA_CAN_TDC]; + dbt_params = &priv->fd; + tdc_mask = CAN_CTRLMODE_FD_TDC_MASK; + } else { + data_bittiming = data[IFLA_CAN_XL_DATA_BITTIMING]; + data_tdc = data[IFLA_CAN_XL_TDC]; + dbt_params = &priv->xl; + tdc_mask = CAN_CTRLMODE_XL_TDC_MASK; + } + + if (!data_bittiming) + return 0; + + /* Do not allow changing bittiming while running */ + if (dev->flags & IFF_UP) + return -EBUSY; + + /* Calculate bittiming parameters based on data_bittiming_const + * if set, otherwise pass bitrate directly via do_set_bitrate(). + * Bail out if neither is given. 
+ */ + if (!dbt_params->data_bittiming_const && !dbt_params->do_set_data_bittiming && + !dbt_params->data_bitrate_const) + return -EOPNOTSUPP; + + memcpy(&dbt, nla_data(data_bittiming), sizeof(dbt)); + err = can_get_bittiming(dev, &dbt, dbt_params->data_bittiming_const, + dbt_params->data_bitrate_const, + dbt_params->data_bitrate_const_cnt, extack); + if (err) + return err; + + if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) { + NL_SET_ERR_MSG_FMT(extack, + "CAN data bitrate %u bps surpasses transceiver capabilities of %u bps", + dbt.bitrate, priv->bitrate_max); + return -EINVAL; + } + + memset(&dbt_params->tdc, 0, sizeof(dbt_params->tdc)); + if (data[IFLA_CAN_CTRLMODE]) { + struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]); + + if (fd || !(priv->ctrlmode & CAN_CTRLMODE_XL_TMS)) + need_tdc_calc = !(cm->mask & tdc_mask); + } + if (data_tdc) { + /* TDC parameters are provided: use them */ + err = can_tdc_changelink(dbt_params, data_tdc, extack); + if (err) { + priv->ctrlmode &= ~tdc_mask; + return err; + } + } else if (need_tdc_calc) { + /* Neither of TDC parameters nor TDC flags are provided: + * do calculation + */ + can_calc_tdco(&dbt_params->tdc, dbt_params->tdc_const, &dbt, + tdc_mask, &priv->ctrlmode, priv->ctrlmode_supported); + } /* else: both CAN_CTRLMODE_{,XL}_TDC_{AUTO,MANUAL} are explicitly + * turned off. TDC is disabled: do nothing + */ + + memcpy(&dbt_params->data_bittiming, &dbt, sizeof(dbt)); + + if (dbt_params->do_set_data_bittiming) { + /* Finally, set the bit-timing registers */ + err = dbt_params->do_set_data_bittiming(dev); + if (err) + return err; + } + + return 0; +} + +static int can_pwm_changelink(struct net_device *dev, + const struct nlattr *pwm_nla, + struct netlink_ext_ack *extack) +{ + struct can_priv *priv = netdev_priv(dev); + const struct can_pwm_const *pwm_const = priv->xl.pwm_const; + struct nlattr *tb_pwm[IFLA_CAN_PWM_MAX + 1]; + struct can_pwm pwm = { 0 }; + int err; + + if (!(priv->ctrlmode & CAN_CTRLMODE_XL_TMS)) + return 0; + + if (!pwm_const) { + NL_SET_ERR_MSG(extack, "The device does not support PWM"); + return -EOPNOTSUPP; + } + + if (!pwm_nla) + return can_calc_pwm(dev, extack); + + err = nla_parse_nested(tb_pwm, IFLA_CAN_PWM_MAX, pwm_nla, + can_pwm_policy, extack); + if (err) + return err; + + if (tb_pwm[IFLA_CAN_PWM_PWMS]) { + pwm.pwms = nla_get_u32(tb_pwm[IFLA_CAN_PWM_PWMS]); + if (pwm.pwms < pwm_const->pwms_min || + pwm.pwms > pwm_const->pwms_max) { + NL_SET_ERR_MSG_FMT(extack, + "PWMS: %u tqmin is out of range: %u...%u", + pwm.pwms, pwm_const->pwms_min, + pwm_const->pwms_max); + return -EINVAL; + } + } + + if (tb_pwm[IFLA_CAN_PWM_PWML]) { + pwm.pwml = nla_get_u32(tb_pwm[IFLA_CAN_PWM_PWML]); + if (pwm.pwml < pwm_const->pwml_min || + pwm.pwml > pwm_const->pwml_max) { + NL_SET_ERR_MSG_FMT(extack, + "PWML: %u tqmin is out of range: %u...%u", + pwm.pwml, pwm_const->pwml_min, + pwm_const->pwml_max); + return -EINVAL; + } + } + + if (tb_pwm[IFLA_CAN_PWM_PWMO]) { + pwm.pwmo = nla_get_u32(tb_pwm[IFLA_CAN_PWM_PWMO]); + if (pwm.pwmo < pwm_const->pwmo_min || + pwm.pwmo > pwm_const->pwmo_max) { + NL_SET_ERR_MSG_FMT(extack, + "PWMO: %u tqmin is out of range: %u...%u", + pwm.pwmo, pwm_const->pwmo_min, + pwm_const->pwmo_max); + return -EINVAL; + } + } + + err = can_validate_pwm_bittiming(dev, &pwm, extack); + if (err) + return err; + + priv->xl.pwm = pwm; + return 0; +} + +static int can_changelink(struct net_device *dev, struct nlattr *tb[], + struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + struct can_priv *priv = 
netdev_priv(dev); + int err; + + /* We need synchronization with dev->stop() */ + ASSERT_RTNL(); + + can_ctrlmode_changelink(dev, data, extack); + + if (data[IFLA_CAN_BITTIMING]) { + struct can_bittiming bt; + + /* Do not allow changing bittiming while running */ + if (dev->flags & IFF_UP) + return -EBUSY; + + /* Calculate bittiming parameters based on + * bittiming_const if set, otherwise pass bitrate + * directly via do_set_bitrate(). Bail out if neither + * is given. + */ + if (!priv->bittiming_const && !priv->do_set_bittiming && + !priv->bitrate_const) + return -EOPNOTSUPP; + + memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt)); + err = can_get_bittiming(dev, &bt, + priv->bittiming_const, + priv->bitrate_const, + priv->bitrate_const_cnt, + extack); + if (err) + return err; + + if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) { + NL_SET_ERR_MSG_FMT(extack, + "arbitration bitrate %u bps surpasses transceiver capabilities of %u bps", + bt.bitrate, priv->bitrate_max); + return -EINVAL; + } + + memcpy(&priv->bittiming, &bt, sizeof(bt)); + + if (priv->do_set_bittiming) { + /* Finally, set the bit-timing registers */ + err = priv->do_set_bittiming(dev); + if (err) + return err; + } + } + + if (data[IFLA_CAN_RESTART_MS]) { + unsigned int restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]); + + if (restart_ms != 0 && !priv->do_set_mode) { + NL_SET_ERR_MSG(extack, + "Device doesn't support restart from Bus Off"); + return -EOPNOTSUPP; + } + + /* Do not allow changing restart delay while running */ + if (dev->flags & IFF_UP) + return -EBUSY; + priv->restart_ms = restart_ms; + } + + if (data[IFLA_CAN_RESTART]) { + if (!priv->do_set_mode) { + NL_SET_ERR_MSG(extack, + "Device doesn't support restart from Bus Off"); + return -EOPNOTSUPP; + } + + /* Do not allow a restart while not running */ + if (!(dev->flags & IFF_UP)) + return -EINVAL; + err = can_restart_now(dev); + if (err) + return err; + } + + /* CAN FD */ + err = can_dbt_changelink(dev, data, true, extack); + if (err) + return err; + + /* CAN XL */ + err = can_dbt_changelink(dev, data, false, extack); + if (err) + return err; + err = can_pwm_changelink(dev, data[IFLA_CAN_XL_PWM], extack); + if (err) + return err; + + if (data[IFLA_CAN_TERMINATION]) { + const u16 termval = nla_get_u16(data[IFLA_CAN_TERMINATION]); + const unsigned int num_term = priv->termination_const_cnt; + unsigned int i; + + if (!priv->do_set_termination) { + NL_SET_ERR_MSG(extack, + "Termination is not configurable on this device"); + return -EOPNOTSUPP; + } + + /* check whether given value is supported by the interface */ + for (i = 0; i < num_term; i++) { + if (termval == priv->termination_const[i]) + break; + } + if (i >= num_term) + return -EINVAL; + + /* Finally, set the termination value */ + err = priv->do_set_termination(dev, termval); + if (err) + return err; + + priv->termination = termval; + } + + return 0; +} + +static size_t can_tdc_get_size(struct data_bittiming_params *dbt_params, + u32 tdc_flags) +{ + bool tdc_manual = tdc_flags & CAN_CTRLMODE_TDC_MANUAL_MASK; + size_t size; + + if (!dbt_params->tdc_const) + return 0; + + size = nla_total_size(0); /* nest IFLA_CAN_TDC */ + if (tdc_manual) { + size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV_MIN */ + size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV_MAX */ + } + size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO_MIN */ + size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO_MAX */ + if (dbt_params->tdc_const->tdcf_max) { + size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF_MIN 
*/ + size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF_MAX */ + } + + if (tdc_flags) { + if (tdc_manual || dbt_params->do_get_auto_tdcv) + size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV */ + size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO */ + if (dbt_params->tdc_const->tdcf_max) + size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF */ + } + + return size; +} + +static size_t can_data_bittiming_get_size(struct data_bittiming_params *dbt_params, + u32 tdc_flags) +{ + size_t size = 0; + + if (dbt_params->data_bittiming.bitrate) /* IFLA_CAN_{,XL}_DATA_BITTIMING */ + size += nla_total_size(sizeof(dbt_params->data_bittiming)); + if (dbt_params->data_bittiming_const) /* IFLA_CAN_{,XL}_DATA_BITTIMING_CONST */ + size += nla_total_size(sizeof(*dbt_params->data_bittiming_const)); + if (dbt_params->data_bitrate_const) /* IFLA_CAN_{,XL}_DATA_BITRATE_CONST */ + size += nla_total_size(sizeof(*dbt_params->data_bitrate_const) * + dbt_params->data_bitrate_const_cnt); + size += can_tdc_get_size(dbt_params, tdc_flags);/* IFLA_CAN_{,XL}_TDC */ + + return size; +} + +static size_t can_ctrlmode_ext_get_size(void) +{ + return nla_total_size(0) + /* nest IFLA_CAN_CTRLMODE_EXT */ + nla_total_size(sizeof(u32)); /* IFLA_CAN_CTRLMODE_SUPPORTED */ +} + +static size_t can_pwm_get_size(const struct can_pwm_const *pwm_const, + bool pwm_on) +{ + size_t size; + + if (!pwm_const || !pwm_on) + return 0; + + size = nla_total_size(0); /* nest IFLA_CAN_PWM */ + + size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWMS_MIN */ + size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWMS_MAX */ + size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWML_MIN */ + size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWML_MAX */ + size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWMO_MIN */ + size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWMO_MAX */ + + size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWMS */ + size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWML */ + size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWMO */ + + return size; +} + +static size_t can_get_size(const struct net_device *dev) +{ + struct can_priv *priv = netdev_priv(dev); + size_t size = 0; + + if (priv->bittiming.bitrate) /* IFLA_CAN_BITTIMING */ + size += nla_total_size(sizeof(struct can_bittiming)); + if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */ + size += nla_total_size(sizeof(struct can_bittiming_const)); + size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */ + size += nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */ + size += nla_total_size(sizeof(struct can_ctrlmode)); /* IFLA_CAN_CTRLMODE */ + size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */ + if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */ + size += nla_total_size(sizeof(struct can_berr_counter)); + if (priv->termination_const) { + size += nla_total_size(sizeof(priv->termination)); /* IFLA_CAN_TERMINATION */ + size += nla_total_size(sizeof(*priv->termination_const) * /* IFLA_CAN_TERMINATION_CONST */ + priv->termination_const_cnt); + } + if (priv->bitrate_const) /* IFLA_CAN_BITRATE_CONST */ + size += nla_total_size(sizeof(*priv->bitrate_const) * + priv->bitrate_const_cnt); + size += sizeof(priv->bitrate_max); /* IFLA_CAN_BITRATE_MAX */ + size += can_ctrlmode_ext_get_size(); /* IFLA_CAN_CTRLMODE_EXT */ + + size += can_data_bittiming_get_size(&priv->fd, + priv->ctrlmode & CAN_CTRLMODE_FD_TDC_MASK); + + size += can_data_bittiming_get_size(&priv->xl, + priv->ctrlmode & CAN_CTRLMODE_XL_TDC_MASK); + size += 
can_pwm_get_size(priv->xl.pwm_const, /* IFLA_CAN_XL_PWM */ + priv->ctrlmode & CAN_CTRLMODE_XL_TMS); + + return size; +} + +static int can_bittiming_fill_info(struct sk_buff *skb, int ifla_can_bittiming, + struct can_bittiming *bittiming) +{ + return bittiming->bitrate != CAN_BITRATE_UNSET && + bittiming->bitrate != CAN_BITRATE_UNKNOWN && + nla_put(skb, ifla_can_bittiming, sizeof(*bittiming), bittiming); +} + +static int can_bittiming_const_fill_info(struct sk_buff *skb, + int ifla_can_bittiming_const, + const struct can_bittiming_const *bittiming_const) +{ + return bittiming_const && + nla_put(skb, ifla_can_bittiming_const, + sizeof(*bittiming_const), bittiming_const); +} + +static int can_bitrate_const_fill_info(struct sk_buff *skb, + int ifla_can_bitrate_const, + const u32 *bitrate_const, unsigned int cnt) +{ + return bitrate_const && + nla_put(skb, ifla_can_bitrate_const, + sizeof(*bitrate_const) * cnt, bitrate_const); +} + +static int can_tdc_fill_info(struct sk_buff *skb, const struct net_device *dev, + int ifla_can_tdc) +{ + struct can_priv *priv = netdev_priv(dev); + struct data_bittiming_params *dbt_params; + const struct can_tdc_const *tdc_const; + struct can_tdc *tdc; + struct nlattr *nest; + bool tdc_is_enabled, tdc_manual; + + if (ifla_can_tdc == IFLA_CAN_TDC) { + dbt_params = &priv->fd; + tdc_is_enabled = can_fd_tdc_is_enabled(priv); + tdc_manual = priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL; + } else { + dbt_params = &priv->xl; + tdc_is_enabled = can_xl_tdc_is_enabled(priv); + tdc_manual = priv->ctrlmode & CAN_CTRLMODE_XL_TDC_MANUAL; + } + tdc_const = dbt_params->tdc_const; + tdc = &dbt_params->tdc; + + if (!tdc_const) + return 0; + + nest = nla_nest_start(skb, ifla_can_tdc); + if (!nest) + return -EMSGSIZE; + + if (tdc_manual && + (nla_put_u32(skb, IFLA_CAN_TDC_TDCV_MIN, tdc_const->tdcv_min) || + nla_put_u32(skb, IFLA_CAN_TDC_TDCV_MAX, tdc_const->tdcv_max))) + goto err_cancel; + if (nla_put_u32(skb, IFLA_CAN_TDC_TDCO_MIN, tdc_const->tdco_min) || + nla_put_u32(skb, IFLA_CAN_TDC_TDCO_MAX, tdc_const->tdco_max)) + goto err_cancel; + if (tdc_const->tdcf_max && + (nla_put_u32(skb, IFLA_CAN_TDC_TDCF_MIN, tdc_const->tdcf_min) || + nla_put_u32(skb, IFLA_CAN_TDC_TDCF_MAX, tdc_const->tdcf_max))) + goto err_cancel; + + if (tdc_is_enabled) { + u32 tdcv; + int err = -EINVAL; + + if (tdc_manual) { + tdcv = tdc->tdcv; + err = 0; + } else if (dbt_params->do_get_auto_tdcv) { + err = dbt_params->do_get_auto_tdcv(dev, &tdcv); + } + if (!err && nla_put_u32(skb, IFLA_CAN_TDC_TDCV, tdcv)) + goto err_cancel; + if (nla_put_u32(skb, IFLA_CAN_TDC_TDCO, tdc->tdco)) + goto err_cancel; + if (tdc_const->tdcf_max && + nla_put_u32(skb, IFLA_CAN_TDC_TDCF, tdc->tdcf)) + goto err_cancel; + } + + nla_nest_end(skb, nest); + return 0; + +err_cancel: + nla_nest_cancel(skb, nest); + return -EMSGSIZE; +} + +static int can_pwm_fill_info(struct sk_buff *skb, const struct can_priv *priv) +{ + const struct can_pwm_const *pwm_const = priv->xl.pwm_const; + const struct can_pwm *pwm = &priv->xl.pwm; + struct nlattr *nest; + + if (!pwm_const) + return 0; + + nest = nla_nest_start(skb, IFLA_CAN_XL_PWM); + if (!nest) + return -EMSGSIZE; + + if (nla_put_u32(skb, IFLA_CAN_PWM_PWMS_MIN, pwm_const->pwms_min) || + nla_put_u32(skb, IFLA_CAN_PWM_PWMS_MAX, pwm_const->pwms_max) || + nla_put_u32(skb, IFLA_CAN_PWM_PWML_MIN, pwm_const->pwml_min) || + nla_put_u32(skb, IFLA_CAN_PWM_PWML_MAX, pwm_const->pwml_max) || + nla_put_u32(skb, IFLA_CAN_PWM_PWMO_MIN, pwm_const->pwmo_min) || + nla_put_u32(skb, IFLA_CAN_PWM_PWMO_MAX, pwm_const->pwmo_max)) 
+ goto err_cancel; + + if (priv->ctrlmode & CAN_CTRLMODE_XL_TMS) { + if (nla_put_u32(skb, IFLA_CAN_PWM_PWMS, pwm->pwms) || + nla_put_u32(skb, IFLA_CAN_PWM_PWML, pwm->pwml) || + nla_put_u32(skb, IFLA_CAN_PWM_PWMO, pwm->pwmo)) + goto err_cancel; + } + + nla_nest_end(skb, nest); + return 0; + +err_cancel: + nla_nest_cancel(skb, nest); + return -EMSGSIZE; +} + +static int can_ctrlmode_ext_fill_info(struct sk_buff *skb, + const struct can_priv *priv) +{ + struct nlattr *nest; + + nest = nla_nest_start(skb, IFLA_CAN_CTRLMODE_EXT); + if (!nest) + return -EMSGSIZE; + + if (nla_put_u32(skb, IFLA_CAN_CTRLMODE_SUPPORTED, + priv->ctrlmode_supported)) { + nla_nest_cancel(skb, nest); + return -EMSGSIZE; + } + + nla_nest_end(skb, nest); + return 0; +} + +static int can_fill_info(struct sk_buff *skb, const struct net_device *dev) +{ + struct can_priv *priv = netdev_priv(dev); + struct can_ctrlmode cm = {.flags = priv->ctrlmode}; + struct can_berr_counter bec = { }; + enum can_state state = priv->state; + + if (priv->do_get_state) + priv->do_get_state(dev, &state); + + if (can_bittiming_fill_info(skb, IFLA_CAN_BITTIMING, + &priv->bittiming) || + + can_bittiming_const_fill_info(skb, IFLA_CAN_BITTIMING_CONST, + priv->bittiming_const) || + + nla_put(skb, IFLA_CAN_CLOCK, sizeof(priv->clock), &priv->clock) || + nla_put_u32(skb, IFLA_CAN_STATE, state) || + nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) || + nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) || + + (priv->do_get_berr_counter && + !priv->do_get_berr_counter(dev, &bec) && + nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) || + + can_bittiming_fill_info(skb, IFLA_CAN_DATA_BITTIMING, + &priv->fd.data_bittiming) || + + can_bittiming_const_fill_info(skb, IFLA_CAN_DATA_BITTIMING_CONST, + priv->fd.data_bittiming_const) || + + (priv->termination_const && + (nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) || + nla_put(skb, IFLA_CAN_TERMINATION_CONST, + sizeof(*priv->termination_const) * + priv->termination_const_cnt, + priv->termination_const))) || + + can_bitrate_const_fill_info(skb, IFLA_CAN_BITRATE_CONST, + priv->bitrate_const, + priv->bitrate_const_cnt) || + + can_bitrate_const_fill_info(skb, IFLA_CAN_DATA_BITRATE_CONST, + priv->fd.data_bitrate_const, + priv->fd.data_bitrate_const_cnt) || + + (nla_put(skb, IFLA_CAN_BITRATE_MAX, + sizeof(priv->bitrate_max), + &priv->bitrate_max)) || + + can_tdc_fill_info(skb, dev, IFLA_CAN_TDC) || + + can_ctrlmode_ext_fill_info(skb, priv) || + + can_bittiming_fill_info(skb, IFLA_CAN_XL_DATA_BITTIMING, + &priv->xl.data_bittiming) || + + can_bittiming_const_fill_info(skb, IFLA_CAN_XL_DATA_BITTIMING_CONST, + priv->xl.data_bittiming_const) || + + can_bitrate_const_fill_info(skb, IFLA_CAN_XL_DATA_BITRATE_CONST, + priv->xl.data_bitrate_const, + priv->xl.data_bitrate_const_cnt) || + + can_tdc_fill_info(skb, dev, IFLA_CAN_XL_TDC) || + + can_pwm_fill_info(skb, priv) + ) + return -EMSGSIZE; + + return 0; +} + +static size_t can_get_xstats_size(const struct net_device *dev) +{ + return sizeof(struct can_device_stats); +} + +static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev) +{ + struct can_priv *priv = netdev_priv(dev); + + if (nla_put(skb, IFLA_INFO_XSTATS, + sizeof(priv->can_stats), &priv->can_stats)) + goto nla_put_failure; + return 0; + +nla_put_failure: + return -EMSGSIZE; +} + +static int can_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, + struct netlink_ext_ack *extack) +{ + return -EOPNOTSUPP; +} + +static void can_dellink(struct net_device *dev, 
struct list_head *head) +{ +} + +struct rtnl_link_ops can_link_ops __read_mostly = { + .kind = "can", + .netns_refund = true, + .maxtype = IFLA_CAN_MAX, + .policy = can_policy, + .setup = can_setup, + .validate = can_validate, + .newlink = can_newlink, + .changelink = can_changelink, + .dellink = can_dellink, + .get_size = can_get_size, + .fill_info = can_fill_info, + .get_xstats_size = can_get_xstats_size, + .fill_xstats = can_fill_xstats, +}; + +int can_netlink_register(void) +{ + return rtnl_link_register(&can_link_ops); +} + +void can_netlink_unregister(void) +{ + rtnl_link_unregister(&can_link_ops); +} diff --git a/drivers/net/can/dev/rx-offload.c b/drivers/net/can/dev/rx-offload.c new file mode 100644 index 000000000000..46e7b6db4a1e --- /dev/null +++ b/drivers/net/can/dev/rx-offload.c @@ -0,0 +1,427 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2014 Protonic Holland, + * David Jander + * Copyright (C) 2014-2021, 2023 Pengutronix, + * Marc Kleine-Budde <kernel@pengutronix.de> + */ + +#include <linux/can/dev.h> +#include <linux/can/rx-offload.h> + +struct can_rx_offload_cb { + u32 timestamp; +}; + +static inline struct can_rx_offload_cb * +can_rx_offload_get_cb(struct sk_buff *skb) +{ + BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb)); + + return (struct can_rx_offload_cb *)skb->cb; +} + +static inline bool +can_rx_offload_le(struct can_rx_offload *offload, + unsigned int a, unsigned int b) +{ + if (offload->inc) + return a <= b; + else + return a >= b; +} + +static inline unsigned int +can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val) +{ + if (offload->inc) + return (*val)++; + else + return (*val)--; +} + +static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota) +{ + struct can_rx_offload *offload = container_of(napi, + struct can_rx_offload, + napi); + struct net_device *dev = offload->dev; + struct net_device_stats *stats = &dev->stats; + struct sk_buff *skb; + int work_done = 0; + + while ((work_done < quota) && + (skb = skb_dequeue(&offload->skb_queue))) { + struct can_frame *cf = (struct can_frame *)skb->data; + + work_done++; + if (!(cf->can_id & CAN_ERR_FLAG)) { + stats->rx_packets++; + if (!(cf->can_id & CAN_RTR_FLAG)) + stats->rx_bytes += cf->len; + } + netif_receive_skb(skb); + } + + if (work_done < quota) { + napi_complete_done(napi, work_done); + + /* Check if there was another interrupt */ + if (!skb_queue_empty(&offload->skb_queue)) + napi_schedule(&offload->napi); + } + + return work_done; +} + +static inline void +__skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new, + int (*compare)(struct sk_buff *a, struct sk_buff *b)) +{ + struct sk_buff *pos, *insert = NULL; + + skb_queue_reverse_walk(head, pos) { + const struct can_rx_offload_cb *cb_pos, *cb_new; + + cb_pos = can_rx_offload_get_cb(pos); + cb_new = can_rx_offload_get_cb(new); + + netdev_dbg(new->dev, + "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n", + __func__, + cb_pos->timestamp, cb_new->timestamp, + cb_new->timestamp - cb_pos->timestamp, + skb_queue_len(head)); + + if (compare(pos, new) < 0) + continue; + insert = pos; + break; + } + if (!insert) + __skb_queue_head(head, new); + else + __skb_queue_after(head, insert, new); +} + +static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b) +{ + const struct can_rx_offload_cb *cb_a, *cb_b; + + cb_a = can_rx_offload_get_cb(a); + cb_b = can_rx_offload_get_cb(b); + + /* Subtract two u32 and return result as int, to keep + * difference steady around the u32 
overflow. + */ + return cb_b->timestamp - cb_a->timestamp; +} + +/** + * can_rx_offload_offload_one() - Read one CAN frame from HW + * @offload: pointer to rx_offload context + * @n: number of mailbox to read + * + * The task of this function is to read a CAN frame from mailbox @n + * from the device and return the mailbox's content as a struct + * sk_buff. + * + * If the struct can_rx_offload::skb_queue exceeds the maximal queue + * length (struct can_rx_offload::skb_queue_len_max) or no skb can be + * allocated, the mailbox contents is discarded by reading it into an + * overflow buffer. This way the mailbox is marked as free by the + * driver. + * + * Return: A pointer to skb containing the CAN frame on success. + * + * NULL if the mailbox @n is empty. + * + * ERR_PTR() in case of an error + */ +static struct sk_buff * +can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n) +{ + struct sk_buff *skb; + struct can_rx_offload_cb *cb; + bool drop = false; + u32 timestamp; + + /* If queue is full drop frame */ + if (unlikely(skb_queue_len(&offload->skb_queue) > + offload->skb_queue_len_max)) + drop = true; + + skb = offload->mailbox_read(offload, n, ×tamp, drop); + /* Mailbox was empty. */ + if (unlikely(!skb)) + return NULL; + + /* There was a problem reading the mailbox, propagate + * error value. + */ + if (IS_ERR(skb)) { + offload->dev->stats.rx_dropped++; + offload->dev->stats.rx_fifo_errors++; + + return skb; + } + + /* Mailbox was read. */ + cb = can_rx_offload_get_cb(skb); + cb->timestamp = timestamp; + + return skb; +} + +int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, + u64 pending) +{ + unsigned int i; + int received = 0; + + for (i = offload->mb_first; + can_rx_offload_le(offload, i, offload->mb_last); + can_rx_offload_inc(offload, &i)) { + struct sk_buff *skb; + + if (!(pending & BIT_ULL(i))) + continue; + + skb = can_rx_offload_offload_one(offload, i); + if (IS_ERR_OR_NULL(skb)) + continue; + + __skb_queue_add_sort(&offload->skb_irq_queue, skb, + can_rx_offload_compare); + received++; + } + + return received; +} +EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp); + +int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload) +{ + struct sk_buff *skb; + int received = 0; + + while (1) { + skb = can_rx_offload_offload_one(offload, 0); + if (IS_ERR(skb)) + continue; + if (!skb) + break; + + __skb_queue_tail(&offload->skb_irq_queue, skb); + received++; + } + + return received; +} +EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo); + +int can_rx_offload_queue_timestamp(struct can_rx_offload *offload, + struct sk_buff *skb, u32 timestamp) +{ + struct can_rx_offload_cb *cb; + + if (skb_queue_len(&offload->skb_queue) > + offload->skb_queue_len_max) { + dev_kfree_skb_any(skb); + return -ENOBUFS; + } + + cb = can_rx_offload_get_cb(skb); + cb->timestamp = timestamp; + + __skb_queue_add_sort(&offload->skb_irq_queue, skb, + can_rx_offload_compare); + + return 0; +} +EXPORT_SYMBOL_GPL(can_rx_offload_queue_timestamp); + +unsigned int +can_rx_offload_get_echo_skb_queue_timestamp(struct can_rx_offload *offload, + unsigned int idx, u32 timestamp, + unsigned int *frame_len_ptr) +{ + struct net_device *dev = offload->dev; + struct net_device_stats *stats = &dev->stats; + struct sk_buff *skb; + unsigned int len; + int err; + + skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr); + if (!skb) + return 0; + + err = can_rx_offload_queue_timestamp(offload, skb, timestamp); + if (err) { + stats->rx_errors++; + stats->tx_fifo_errors++; + } + 
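	/* Editor's aside (illustration only, not part of the patch): the sort
	 * order produced by can_rx_offload_compare() above relies on unsigned
	 * wrap-around arithmetic. For example, with a pre-wrap timestamp
	 * a = 0xfffffff0 and a post-wrap timestamp b = 0x00000010:
	 *
	 *	(int)(b - a) == (int)0x20 == 32 > 0
	 *
	 * so a frame captured just before the hardware counter wrapped still
	 * sorts ahead of one captured just after the wrap. This holds as long
	 * as the two timestamps are less than 2^31 ticks apart.
	 */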
+ return len; +} +EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb_queue_timestamp); + +int can_rx_offload_queue_tail(struct can_rx_offload *offload, + struct sk_buff *skb) +{ + if (skb_queue_len(&offload->skb_queue) > + offload->skb_queue_len_max) { + dev_kfree_skb_any(skb); + return -ENOBUFS; + } + + __skb_queue_tail(&offload->skb_irq_queue, skb); + + return 0; +} +EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail); + +unsigned int +can_rx_offload_get_echo_skb_queue_tail(struct can_rx_offload *offload, + unsigned int idx, + unsigned int *frame_len_ptr) +{ + struct net_device *dev = offload->dev; + struct net_device_stats *stats = &dev->stats; + struct sk_buff *skb; + unsigned int len; + int err; + + skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr); + if (!skb) + return 0; + + err = can_rx_offload_queue_tail(offload, skb); + if (err) { + stats->rx_errors++; + stats->tx_fifo_errors++; + } + + return len; +} +EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb_queue_tail); + +void can_rx_offload_irq_finish(struct can_rx_offload *offload) +{ + unsigned long flags; + int queue_len; + + if (skb_queue_empty_lockless(&offload->skb_irq_queue)) + return; + + spin_lock_irqsave(&offload->skb_queue.lock, flags); + skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue); + spin_unlock_irqrestore(&offload->skb_queue.lock, flags); + + queue_len = skb_queue_len(&offload->skb_queue); + if (queue_len > offload->skb_queue_len_max / 8) + netdev_dbg(offload->dev, "%s: queue_len=%d\n", + __func__, queue_len); + + napi_schedule(&offload->napi); +} +EXPORT_SYMBOL_GPL(can_rx_offload_irq_finish); + +void can_rx_offload_threaded_irq_finish(struct can_rx_offload *offload) +{ + unsigned long flags; + int queue_len; + + if (skb_queue_empty_lockless(&offload->skb_irq_queue)) + return; + + spin_lock_irqsave(&offload->skb_queue.lock, flags); + skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue); + spin_unlock_irqrestore(&offload->skb_queue.lock, flags); + + queue_len = skb_queue_len(&offload->skb_queue); + if (queue_len > offload->skb_queue_len_max / 8) + netdev_dbg(offload->dev, "%s: queue_len=%d\n", + __func__, queue_len); + + local_bh_disable(); + napi_schedule(&offload->napi); + local_bh_enable(); +} +EXPORT_SYMBOL_GPL(can_rx_offload_threaded_irq_finish); + +static int can_rx_offload_init_queue(struct net_device *dev, + struct can_rx_offload *offload, + unsigned int weight) +{ + offload->dev = dev; + + /* Limit queue len to 4x the weight (rounded to next power of two) */ + offload->skb_queue_len_max = 2 << fls(weight); + offload->skb_queue_len_max *= 4; + skb_queue_head_init(&offload->skb_queue); + __skb_queue_head_init(&offload->skb_irq_queue); + + netif_napi_add_weight(dev, &offload->napi, can_rx_offload_napi_poll, + weight); + + dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n", + __func__, offload->skb_queue_len_max); + + return 0; +} + +int can_rx_offload_add_timestamp(struct net_device *dev, + struct can_rx_offload *offload) +{ + unsigned int weight; + + if (offload->mb_first > BITS_PER_LONG_LONG || + offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read) + return -EINVAL; + + if (offload->mb_first < offload->mb_last) { + offload->inc = true; + weight = offload->mb_last - offload->mb_first; + } else { + offload->inc = false; + weight = offload->mb_first - offload->mb_last; + } + + return can_rx_offload_init_queue(dev, offload, weight); +} +EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp); + +int can_rx_offload_add_fifo(struct net_device *dev, + struct can_rx_offload 
*offload, unsigned int weight) +{ + if (!offload->mailbox_read) + return -EINVAL; + + return can_rx_offload_init_queue(dev, offload, weight); +} +EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo); + +int can_rx_offload_add_manual(struct net_device *dev, + struct can_rx_offload *offload, + unsigned int weight) +{ + if (offload->mailbox_read) + return -EINVAL; + + return can_rx_offload_init_queue(dev, offload, weight); +} +EXPORT_SYMBOL_GPL(can_rx_offload_add_manual); + +void can_rx_offload_enable(struct can_rx_offload *offload) +{ + napi_enable(&offload->napi); +} +EXPORT_SYMBOL_GPL(can_rx_offload_enable); + +void can_rx_offload_del(struct can_rx_offload *offload) +{ + netif_napi_del(&offload->napi); + skb_queue_purge(&offload->skb_queue); + __skb_queue_purge(&offload->skb_irq_queue); +} +EXPORT_SYMBOL_GPL(can_rx_offload_del); diff --git a/drivers/net/can/dev/skb.c b/drivers/net/can/dev/skb.c new file mode 100644 index 000000000000..3ebd4f779b9b --- /dev/null +++ b/drivers/net/can/dev/skb.c @@ -0,0 +1,374 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix + * Copyright (C) 2006 Andrey Volkov, Varma Electronics + * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com> + */ + +#include <linux/can/dev.h> +#include <linux/module.h> + +#define MOD_DESC "CAN device driver interface" + +MODULE_DESCRIPTION(MOD_DESC); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>"); + +/* Local echo of CAN messages + * + * CAN network devices *should* support a local echo functionality + * (see Documentation/networking/can.rst). To test the handling of CAN + * interfaces that do not support the local echo both driver types are + * implemented. In the case that the driver does not support the echo + * the IFF_ECHO remains clear in dev->flags. This causes the PF_CAN core + * to perform the echo as a fallback solution. + */ +void can_flush_echo_skb(struct net_device *dev) +{ + struct can_priv *priv = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + int i; + + for (i = 0; i < priv->echo_skb_max; i++) { + if (priv->echo_skb[i]) { + kfree_skb(priv->echo_skb[i]); + priv->echo_skb[i] = NULL; + stats->tx_dropped++; + stats->tx_aborted_errors++; + } + } +} + +/* Put the skb on the stack to be looped backed locally lateron + * + * The function is typically called in the start_xmit function + * of the device driver. The driver must protect access to + * priv->echo_skb, if necessary. + */ +int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, + unsigned int idx, unsigned int frame_len) +{ + struct can_priv *priv = netdev_priv(dev); + + if (idx >= priv->echo_skb_max) { + netdev_err(dev, "%s: BUG! 
Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n", + __func__, idx, priv->echo_skb_max); + return -EINVAL; + } + + /* check flag whether this packet has to be looped back */ + if (!(dev->flags & IFF_ECHO) || + (skb->protocol != htons(ETH_P_CAN) && + skb->protocol != htons(ETH_P_CANFD) && + skb->protocol != htons(ETH_P_CANXL))) { + kfree_skb(skb); + return 0; + } + + if (!priv->echo_skb[idx]) { + skb = can_create_echo_skb(skb); + if (!skb) + return -ENOMEM; + + /* make settings for echo to reduce code in irq context */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->dev = dev; + + /* save frame_len to reuse it when transmission is completed */ + can_skb_prv(skb)->frame_len = frame_len; + + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + + skb_tx_timestamp(skb); + + /* save this skb for tx interrupt echo handling */ + priv->echo_skb[idx] = skb; + } else { + /* locking problem with netif_stop_queue() ?? */ + netdev_err(dev, "%s: BUG! echo_skb %d is occupied!\n", __func__, idx); + kfree_skb(skb); + return -EBUSY; + } + + return 0; +} +EXPORT_SYMBOL_GPL(can_put_echo_skb); + +struct sk_buff * +__can_get_echo_skb(struct net_device *dev, unsigned int idx, + unsigned int *len_ptr, unsigned int *frame_len_ptr) +{ + struct can_priv *priv = netdev_priv(dev); + + if (idx >= priv->echo_skb_max) { + netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n", + __func__, idx, priv->echo_skb_max); + return NULL; + } + + if (priv->echo_skb[idx]) { + /* Using "struct canfd_frame::len" for the frame + * length is supported on both CAN and CANFD frames. + */ + struct sk_buff *skb = priv->echo_skb[idx]; + struct can_skb_priv *can_skb_priv = can_skb_prv(skb); + + if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) + skb_tstamp_tx(skb, skb_hwtstamps(skb)); + + /* get the real payload length for netdev statistics */ + *len_ptr = can_skb_get_data_len(skb); + + if (frame_len_ptr) + *frame_len_ptr = can_skb_priv->frame_len; + + priv->echo_skb[idx] = NULL; + + if (skb->pkt_type == PACKET_LOOPBACK) { + skb->pkt_type = PACKET_BROADCAST; + } else { + dev_consume_skb_any(skb); + return NULL; + } + + return skb; + } + + return NULL; +} + +/* Get the skb from the stack and loop it back locally + * + * The function is typically called when the TX done interrupt + * is handled in the device driver. The driver must protect + * access to priv->echo_skb, if necessary. + */ +unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx, + unsigned int *frame_len_ptr) +{ + struct sk_buff *skb; + unsigned int len; + + skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr); + if (!skb) + return 0; + + skb_get(skb); + if (netif_rx(skb) == NET_RX_SUCCESS) + dev_consume_skb_any(skb); + else + dev_kfree_skb_any(skb); + + return len; +} +EXPORT_SYMBOL_GPL(can_get_echo_skb); + +/* Remove the skb from the stack and free it. + * + * The function is typically called when TX failed. + */ +void can_free_echo_skb(struct net_device *dev, unsigned int idx, + unsigned int *frame_len_ptr) +{ + struct can_priv *priv = netdev_priv(dev); + + if (idx >= priv->echo_skb_max) { + netdev_err(dev, "%s: BUG! 
Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n", + __func__, idx, priv->echo_skb_max); + return; + } + + if (priv->echo_skb[idx]) { + struct sk_buff *skb = priv->echo_skb[idx]; + struct can_skb_priv *can_skb_priv = can_skb_prv(skb); + + if (frame_len_ptr) + *frame_len_ptr = can_skb_priv->frame_len; + + dev_kfree_skb_any(skb); + priv->echo_skb[idx] = NULL; + } +} +EXPORT_SYMBOL_GPL(can_free_echo_skb); + +/* fill common values for CAN sk_buffs */ +static void init_can_skb_reserve(struct sk_buff *skb) +{ + skb->pkt_type = PACKET_BROADCAST; + skb->ip_summed = CHECKSUM_UNNECESSARY; + + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb_reset_transport_header(skb); + + can_skb_reserve(skb); + can_skb_prv(skb)->skbcnt = 0; +} + +struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf) +{ + struct sk_buff *skb; + + skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) + + sizeof(struct can_frame)); + if (unlikely(!skb)) { + *cf = NULL; + + return NULL; + } + + skb->protocol = htons(ETH_P_CAN); + init_can_skb_reserve(skb); + can_skb_prv(skb)->ifindex = dev->ifindex; + + *cf = skb_put_zero(skb, sizeof(struct can_frame)); + + return skb; +} +EXPORT_SYMBOL_GPL(alloc_can_skb); + +struct sk_buff *alloc_canfd_skb(struct net_device *dev, + struct canfd_frame **cfd) +{ + struct sk_buff *skb; + + skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) + + sizeof(struct canfd_frame)); + if (unlikely(!skb)) { + *cfd = NULL; + + return NULL; + } + + skb->protocol = htons(ETH_P_CANFD); + init_can_skb_reserve(skb); + can_skb_prv(skb)->ifindex = dev->ifindex; + + *cfd = skb_put_zero(skb, sizeof(struct canfd_frame)); + + /* set CAN FD flag by default */ + (*cfd)->flags = CANFD_FDF; + + return skb; +} +EXPORT_SYMBOL_GPL(alloc_canfd_skb); + +struct sk_buff *alloc_canxl_skb(struct net_device *dev, + struct canxl_frame **cxl, + unsigned int data_len) +{ + struct sk_buff *skb; + + if (data_len < CANXL_MIN_DLEN || data_len > CANXL_MAX_DLEN) + goto out_error; + + skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) + + CANXL_HDR_SIZE + data_len); + if (unlikely(!skb)) + goto out_error; + + skb->protocol = htons(ETH_P_CANXL); + init_can_skb_reserve(skb); + can_skb_prv(skb)->ifindex = dev->ifindex; + + *cxl = skb_put_zero(skb, CANXL_HDR_SIZE + data_len); + + /* set CAN XL flag and length information by default */ + (*cxl)->flags = CANXL_XLF; + (*cxl)->len = data_len; + + return skb; + +out_error: + *cxl = NULL; + + return NULL; +} +EXPORT_SYMBOL_GPL(alloc_canxl_skb); + +struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf) +{ + struct sk_buff *skb; + + skb = alloc_can_skb(dev, cf); + if (unlikely(!skb)) + return NULL; + + (*cf)->can_id = CAN_ERR_FLAG; + (*cf)->len = CAN_ERR_DLC; + + return skb; +} +EXPORT_SYMBOL_GPL(alloc_can_err_skb); + +/* Check for outgoing skbs that have not been created by the CAN subsystem */ +static bool can_skb_headroom_valid(struct net_device *dev, struct sk_buff *skb) +{ + /* af_packet creates a headroom of HH_DATA_MOD bytes which is fine */ + if (WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct can_skb_priv))) + return false; + + /* af_packet does not apply CAN skb specific settings */ + if (skb->ip_summed == CHECKSUM_NONE) { + /* init headroom */ + can_skb_prv(skb)->ifindex = dev->ifindex; + can_skb_prv(skb)->skbcnt = 0; + + skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* perform proper loopback on capable devices */ + if (dev->flags & IFF_ECHO) + skb->pkt_type = PACKET_LOOPBACK; + else + skb->pkt_type = 
PACKET_HOST; + + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb_reset_transport_header(skb); + + /* set CANFD_FDF flag for CAN FD frames */ + if (can_is_canfd_skb(skb)) { + struct canfd_frame *cfd; + + cfd = (struct canfd_frame *)skb->data; + cfd->flags |= CANFD_FDF; + } + } + + return true; +} + +/* Drop a given socketbuffer if it does not contain a valid CAN frame. */ +bool can_dropped_invalid_skb(struct net_device *dev, struct sk_buff *skb) +{ + switch (ntohs(skb->protocol)) { + case ETH_P_CAN: + if (!can_is_can_skb(skb)) + goto inval_skb; + break; + + case ETH_P_CANFD: + if (!can_is_canfd_skb(skb)) + goto inval_skb; + break; + + case ETH_P_CANXL: + if (!can_is_canxl_skb(skb)) + goto inval_skb; + break; + + default: + goto inval_skb; + } + + if (!can_skb_headroom_valid(dev, skb)) + goto inval_skb; + + return false; + +inval_skb: + kfree_skb(skb); + dev->stats.tx_dropped++; + return true; +} +EXPORT_SYMBOL_GPL(can_dropped_invalid_skb); diff --git a/drivers/net/can/dummy_can.c b/drivers/net/can/dummy_can.c new file mode 100644 index 000000000000..41953655e3d3 --- /dev/null +++ b/drivers/net/can/dummy_can.c @@ -0,0 +1,285 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Copyright (c) 2025 Vincent Mailhol <mailhol@kernel.org> */ + +#include <linux/array_size.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/units.h> +#include <linux/string_choices.h> + +#include <linux/can.h> +#include <linux/can/bittiming.h> +#include <linux/can/dev.h> +#include <linux/can/skb.h> + +struct dummy_can { + struct can_priv can; + struct net_device *dev; +}; + +static struct dummy_can *dummy_can; + +static const struct can_bittiming_const dummy_can_bittiming_const = { + .name = "dummy_can CC", + .tseg1_min = 2, + .tseg1_max = 256, + .tseg2_min = 2, + .tseg2_max = 128, + .sjw_max = 128, + .brp_min = 1, + .brp_max = 512, + .brp_inc = 1 +}; + +static const struct can_bittiming_const dummy_can_fd_databittiming_const = { + .name = "dummy_can FD", + .tseg1_min = 2, + .tseg1_max = 256, + .tseg2_min = 2, + .tseg2_max = 128, + .sjw_max = 128, + .brp_min = 1, + .brp_max = 512, + .brp_inc = 1 +}; + +static const struct can_tdc_const dummy_can_fd_tdc_const = { + .tdcv_min = 0, + .tdcv_max = 0, /* Manual mode not supported. */ + .tdco_min = 0, + .tdco_max = 127, + .tdcf_min = 0, + .tdcf_max = 127 +}; + +static const struct can_bittiming_const dummy_can_xl_databittiming_const = { + .name = "dummy_can XL", + .tseg1_min = 2, + .tseg1_max = 256, + .tseg2_min = 2, + .tseg2_max = 128, + .sjw_max = 128, + .brp_min = 1, + .brp_max = 512, + .brp_inc = 1 +}; + +static const struct can_tdc_const dummy_can_xl_tdc_const = { + .tdcv_min = 0, + .tdcv_max = 0, /* Manual mode not supported. 
*/ + .tdco_min = 0, + .tdco_max = 127, + .tdcf_min = 0, + .tdcf_max = 127 +}; + +static const struct can_pwm_const dummy_can_pwm_const = { + .pwms_min = 1, + .pwms_max = 8, + .pwml_min = 2, + .pwml_max = 24, + .pwmo_min = 0, + .pwmo_max = 16, +}; + +static void dummy_can_print_bittiming(struct net_device *dev, + struct can_bittiming *bt) +{ + netdev_dbg(dev, "\tbitrate: %u\n", bt->bitrate); + netdev_dbg(dev, "\tsample_point: %u\n", bt->sample_point); + netdev_dbg(dev, "\ttq: %u\n", bt->tq); + netdev_dbg(dev, "\tprop_seg: %u\n", bt->prop_seg); + netdev_dbg(dev, "\tphase_seg1: %u\n", bt->phase_seg1); + netdev_dbg(dev, "\tphase_seg2: %u\n", bt->phase_seg2); + netdev_dbg(dev, "\tsjw: %u\n", bt->sjw); + netdev_dbg(dev, "\tbrp: %u\n", bt->brp); +} + +static void dummy_can_print_tdc(struct net_device *dev, struct can_tdc *tdc) +{ + netdev_dbg(dev, "\t\ttdcv: %u\n", tdc->tdcv); + netdev_dbg(dev, "\t\ttdco: %u\n", tdc->tdco); + netdev_dbg(dev, "\t\ttdcf: %u\n", tdc->tdcf); +} + +static void dummy_can_print_pwm(struct net_device *dev, struct can_pwm *pwm, + struct can_bittiming *dbt) +{ + netdev_dbg(dev, "\t\tpwms: %u\n", pwm->pwms); + netdev_dbg(dev, "\t\tpwml: %u\n", pwm->pwml); + netdev_dbg(dev, "\t\tpwmo: %u\n", pwm->pwmo); +} + +static void dummy_can_print_ctrlmode(struct net_device *dev) +{ + struct dummy_can *priv = netdev_priv(dev); + struct can_priv *can_priv = &priv->can; + unsigned long supported = can_priv->ctrlmode_supported; + u32 enabled = can_priv->ctrlmode; + + netdev_dbg(dev, "Control modes:\n"); + netdev_dbg(dev, "\tsupported: 0x%08x\n", (u32)supported); + netdev_dbg(dev, "\tenabled: 0x%08x\n", enabled); + + if (supported) { + int idx; + + netdev_dbg(dev, "\tlist:"); + for_each_set_bit(idx, &supported, BITS_PER_TYPE(u32)) + netdev_dbg(dev, "\t\t%s: %s\n", + can_get_ctrlmode_str(BIT(idx)), + enabled & BIT(idx) ? 
"on" : "off"); + } +} + +static void dummy_can_print_bittiming_info(struct net_device *dev) +{ + struct dummy_can *priv = netdev_priv(dev); + struct can_priv *can_priv = &priv->can; + + netdev_dbg(dev, "Clock frequency: %u\n", can_priv->clock.freq); + netdev_dbg(dev, "Maximum bitrate: %u\n", can_priv->bitrate_max); + netdev_dbg(dev, "MTU: %u\n", dev->mtu); + netdev_dbg(dev, "\n"); + + dummy_can_print_ctrlmode(dev); + netdev_dbg(dev, "\n"); + + netdev_dbg(dev, "Classical CAN nominal bittiming:\n"); + dummy_can_print_bittiming(dev, &can_priv->bittiming); + netdev_dbg(dev, "\n"); + + if (can_priv->ctrlmode & CAN_CTRLMODE_FD) { + netdev_dbg(dev, "CAN FD databittiming:\n"); + dummy_can_print_bittiming(dev, &can_priv->fd.data_bittiming); + if (can_fd_tdc_is_enabled(can_priv)) { + netdev_dbg(dev, "\tCAN FD TDC:\n"); + dummy_can_print_tdc(dev, &can_priv->fd.tdc); + } + } + netdev_dbg(dev, "\n"); + + if (can_priv->ctrlmode & CAN_CTRLMODE_XL) { + netdev_dbg(dev, "CAN XL databittiming:\n"); + dummy_can_print_bittiming(dev, &can_priv->xl.data_bittiming); + if (can_xl_tdc_is_enabled(can_priv)) { + netdev_dbg(dev, "\tCAN XL TDC:\n"); + dummy_can_print_tdc(dev, &can_priv->xl.tdc); + } + if (can_priv->ctrlmode & CAN_CTRLMODE_XL_TMS) { + netdev_dbg(dev, "\tCAN XL PWM:\n"); + dummy_can_print_pwm(dev, &can_priv->xl.pwm, + &can_priv->xl.data_bittiming); + } + } + netdev_dbg(dev, "\n"); +} + +static int dummy_can_netdev_open(struct net_device *dev) +{ + int ret; + struct can_priv *priv = netdev_priv(dev); + + dummy_can_print_bittiming_info(dev); + netdev_dbg(dev, "error-signalling is %s\n", + str_enabled_disabled(!can_dev_in_xl_only_mode(priv))); + + ret = open_candev(dev); + if (ret) + return ret; + netif_start_queue(dev); + netdev_dbg(dev, "dummy-can is up\n"); + + return 0; +} + +static int dummy_can_netdev_close(struct net_device *dev) +{ + netif_stop_queue(dev); + close_candev(dev); + netdev_dbg(dev, "dummy-can is down\n"); + + return 0; +} + +static netdev_tx_t dummy_can_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + if (can_dev_dropped_skb(dev, skb)) + return NETDEV_TX_OK; + + can_put_echo_skb(skb, dev, 0, 0); + dev->stats.tx_packets++; + dev->stats.tx_bytes += can_get_echo_skb(dev, 0, NULL); + + return NETDEV_TX_OK; +} + +static const struct net_device_ops dummy_can_netdev_ops = { + .ndo_open = dummy_can_netdev_open, + .ndo_stop = dummy_can_netdev_close, + .ndo_start_xmit = dummy_can_start_xmit, +}; + +static const struct ethtool_ops dummy_can_ethtool_ops = { + .get_ts_info = ethtool_op_get_ts_info, +}; + +static int __init dummy_can_init(void) +{ + struct net_device *dev; + struct dummy_can *priv; + int ret; + + dev = alloc_candev(sizeof(*priv), 1); + if (!dev) + return -ENOMEM; + + dev->netdev_ops = &dummy_can_netdev_ops; + dev->ethtool_ops = &dummy_can_ethtool_ops; + priv = netdev_priv(dev); + priv->can.bittiming_const = &dummy_can_bittiming_const; + priv->can.bitrate_max = 20 * MEGA /* BPS */; + priv->can.clock.freq = 160 * MEGA /* Hz */; + priv->can.fd.data_bittiming_const = &dummy_can_fd_databittiming_const; + priv->can.fd.tdc_const = &dummy_can_fd_tdc_const; + priv->can.xl.data_bittiming_const = &dummy_can_xl_databittiming_const; + priv->can.xl.tdc_const = &dummy_can_xl_tdc_const; + priv->can.xl.pwm_const = &dummy_can_pwm_const; + priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY | + CAN_CTRLMODE_FD | CAN_CTRLMODE_TDC_AUTO | + CAN_CTRLMODE_RESTRICTED | CAN_CTRLMODE_XL | + CAN_CTRLMODE_XL_TDC_AUTO | CAN_CTRLMODE_XL_TMS; + priv->dev = dev; + + ret = register_candev(priv->dev); 
+ if (ret) { + free_candev(priv->dev); + return ret; + } + + dummy_can = priv; + netdev_dbg(dev, "dummy-can ready\n"); + + return 0; +} + +static void __exit dummy_can_exit(void) +{ + struct net_device *dev = dummy_can->dev; + + netdev_dbg(dev, "dummy-can bye bye\n"); + unregister_candev(dev); + free_candev(dev); +} + +module_init(dummy_can_init); +module_exit(dummy_can_exit); + +MODULE_DESCRIPTION("A dummy CAN driver, mainly to test the netlink interface"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Vincent Mailhol <mailhol@kernel.org>"); diff --git a/drivers/net/can/esd/Kconfig b/drivers/net/can/esd/Kconfig new file mode 100644 index 000000000000..54bfc366634c --- /dev/null +++ b/drivers/net/can/esd/Kconfig @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0-only +config CAN_ESD_402_PCI + tristate "esd electronics gmbh CAN-PCI(e)/402 family" + depends on PCI && HAS_DMA + help + Support for C402 card family from esd electronics gmbh. + This card family is based on the ESDACC CAN controller and + available in several form factors: PCI, PCIe, PCIe Mini, + M.2 PCIe, CPCIserial, PMC, XMC (see https://esd.eu/en) + + This driver can also be built as a module. In this case the + module will be called esd_402_pci. diff --git a/drivers/net/can/esd/Makefile b/drivers/net/can/esd/Makefile new file mode 100644 index 000000000000..5dd2d470c286 --- /dev/null +++ b/drivers/net/can/esd/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for esd gmbh ESDACC controller driver +# +esd_402_pci-objs := esdacc.o esd_402_pci-core.o + +obj-$(CONFIG_CAN_ESD_402_PCI) += esd_402_pci.o diff --git a/drivers/net/can/esd/esd_402_pci-core.c b/drivers/net/can/esd/esd_402_pci-core.c new file mode 100644 index 000000000000..c826f00c551b --- /dev/null +++ b/drivers/net/can/esd/esd_402_pci-core.c @@ -0,0 +1,515 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2015 - 2016 Thomas Körper, esd electronic system design gmbh + * Copyright (C) 2017 - 2023 Stefan Mätje, esd electronics gmbh + */ + +#include <linux/can/dev.h> +#include <linux/can.h> +#include <linux/can/netlink.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/ethtool.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/pci.h> + +#include "esdacc.h" + +#define ESD_PCI_DEVICE_ID_PCIE402 0x0402 + +#define PCI402_FPGA_VER_MIN 0x003d +#define PCI402_MAX_CORES 6 +#define PCI402_BAR 0 +#define PCI402_IO_OV_OFFS 0 +#define PCI402_IO_PCIEP_OFFS 0x10000 +#define PCI402_IO_LEN_TOTAL 0x20000 +#define PCI402_IO_LEN_CORE 0x2000 +#define PCI402_PCICFG_MSICAP 0x50 + +#define PCI402_DMA_MASK DMA_BIT_MASK(32) +#define PCI402_DMA_SIZE ALIGN(0x10000, PAGE_SIZE) + +#define PCI402_PCIEP_OF_INT_ENABLE 0x0050 +#define PCI402_PCIEP_OF_BM_ADDR_LO 0x1000 +#define PCI402_PCIEP_OF_BM_ADDR_HI 0x1004 +#define PCI402_PCIEP_OF_MSI_ADDR_LO 0x1008 +#define PCI402_PCIEP_OF_MSI_ADDR_HI 0x100c + +struct pci402_card { + /* Actually mapped io space, all other iomem derived from this */ + void __iomem *addr; + void __iomem *addr_pciep; + + void *dma_buf; + dma_addr_t dma_hnd; + + struct acc_ov ov; + struct acc_core *cores; + + bool msi_enabled; +}; + +/* The BTR register capabilities described by the can_bittiming_const structures + * below are valid since esdACC version 0x0032. + */ + +/* Used if the esdACC FPGA is built as CAN-Classic version. 
*/ +static const struct can_bittiming_const pci402_bittiming_const = { + .name = "esd_402", + .tseg1_min = 1, + .tseg1_max = 16, + .tseg2_min = 1, + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 512, + .brp_inc = 1, +}; + +/* Used if the esdACC FPGA is built as CAN-FD version. */ +static const struct can_bittiming_const pci402_bittiming_const_canfd = { + .name = "esd_402fd", + .tseg1_min = 1, + .tseg1_max = 256, + .tseg2_min = 1, + .tseg2_max = 128, + .sjw_max = 128, + .brp_min = 1, + .brp_max = 256, + .brp_inc = 1, +}; + +static const struct net_device_ops pci402_acc_netdev_ops = { + .ndo_open = acc_open, + .ndo_stop = acc_close, + .ndo_start_xmit = acc_start_xmit, + .ndo_hwtstamp_get = can_hwtstamp_get, + .ndo_hwtstamp_set = can_hwtstamp_set, +}; + +static const struct ethtool_ops pci402_acc_ethtool_ops = { + .get_ts_info = can_ethtool_op_get_ts_info_hwts, +}; + +static irqreturn_t pci402_interrupt(int irq, void *dev_id) +{ + struct pci_dev *pdev = dev_id; + struct pci402_card *card = pci_get_drvdata(pdev); + irqreturn_t irq_status; + + irq_status = acc_card_interrupt(&card->ov, card->cores); + + return irq_status; +} + +static int pci402_set_msiconfig(struct pci_dev *pdev) +{ + struct pci402_card *card = pci_get_drvdata(pdev); + u32 addr_lo_offs = 0; + u32 addr_lo = 0; + u32 addr_hi = 0; + u32 data = 0; + u16 csr = 0; + int err; + + /* The FPGA hard IP PCIe core implements a 64-bit MSI Capability + * Register Format + */ + err = pci_read_config_word(pdev, PCI402_PCICFG_MSICAP + PCI_MSI_FLAGS, &csr); + if (err) + goto failed; + + err = pci_read_config_dword(pdev, PCI402_PCICFG_MSICAP + PCI_MSI_ADDRESS_LO, + &addr_lo); + if (err) + goto failed; + err = pci_read_config_dword(pdev, PCI402_PCICFG_MSICAP + PCI_MSI_ADDRESS_HI, + &addr_hi); + if (err) + goto failed; + + err = pci_read_config_dword(pdev, PCI402_PCICFG_MSICAP + PCI_MSI_DATA_64, + &data); + if (err) + goto failed; + + addr_lo_offs = addr_lo & 0x0000ffff; + addr_lo &= 0xffff0000; + + if (addr_hi) + addr_lo |= 1; /* To enable 64-Bit addressing in PCIe endpoint */ + + if (!(csr & PCI_MSI_FLAGS_ENABLE)) { + err = -EINVAL; + goto failed; + } + + iowrite32(addr_lo, card->addr_pciep + PCI402_PCIEP_OF_MSI_ADDR_LO); + iowrite32(addr_hi, card->addr_pciep + PCI402_PCIEP_OF_MSI_ADDR_HI); + acc_ov_write32(&card->ov, ACC_OV_OF_MSI_ADDRESSOFFSET, addr_lo_offs); + acc_ov_write32(&card->ov, ACC_OV_OF_MSI_DATA, data); + + return 0; + +failed: + pci_warn(pdev, "Error while setting MSI configuration:\n" + "CSR: 0x%.4x, addr: 0x%.8x%.8x, offs: 0x%.4x, data: 0x%.8x\n", + csr, addr_hi, addr_lo, addr_lo_offs, data); + + return err; +} + +static int pci402_init_card(struct pci_dev *pdev) +{ + struct pci402_card *card = pci_get_drvdata(pdev); + + card->ov.addr = card->addr + PCI402_IO_OV_OFFS; + card->addr_pciep = card->addr + PCI402_IO_PCIEP_OFFS; + + acc_reset_fpga(&card->ov); + acc_init_ov(&card->ov, &pdev->dev); + + if (card->ov.version < PCI402_FPGA_VER_MIN) { + pci_err(pdev, + "esdACC version (0x%.4x) outdated, please update\n", + card->ov.version); + return -EINVAL; + } + + if (card->ov.timestamp_frequency != ACC_TS_FREQ_80MHZ) { + pci_err(pdev, + "esdACC timestamp frequency of %uHz not supported by driver. Aborted.\n", + card->ov.timestamp_frequency); + return -EINVAL; + } + + if (card->ov.active_cores > PCI402_MAX_CORES) { + pci_err(pdev, + "Card with %u active cores not supported by driver. 
Aborted.\n", + card->ov.active_cores); + return -EINVAL; + } + card->cores = devm_kcalloc(&pdev->dev, card->ov.active_cores, + sizeof(struct acc_core), GFP_KERNEL); + if (!card->cores) + return -ENOMEM; + + if (card->ov.features & ACC_OV_REG_FEAT_MASK_CANFD) { + pci_warn(pdev, + "esdACC with CAN-FD feature detected. This driver doesn't support CAN-FD yet.\n"); + } + +#ifdef __LITTLE_ENDIAN + /* So card converts all busmastered data to LE for us: */ + acc_ov_set_bits(&card->ov, ACC_OV_OF_MODE, + ACC_OV_REG_MODE_MASK_ENDIAN_LITTLE); +#endif + + return 0; +} + +static int pci402_init_interrupt(struct pci_dev *pdev) +{ + struct pci402_card *card = pci_get_drvdata(pdev); + int err; + + err = pci_enable_msi(pdev); + if (!err) { + err = pci402_set_msiconfig(pdev); + if (!err) { + card->msi_enabled = true; + acc_ov_set_bits(&card->ov, ACC_OV_OF_MODE, + ACC_OV_REG_MODE_MASK_MSI_ENABLE); + pci_dbg(pdev, "MSI preparation done\n"); + } + } + + err = devm_request_irq(&pdev->dev, pdev->irq, pci402_interrupt, + IRQF_SHARED, dev_name(&pdev->dev), pdev); + if (err) + goto failure_msidis; + + iowrite32(1, card->addr_pciep + PCI402_PCIEP_OF_INT_ENABLE); + + return 0; + +failure_msidis: + if (card->msi_enabled) { + acc_ov_clear_bits(&card->ov, ACC_OV_OF_MODE, + ACC_OV_REG_MODE_MASK_MSI_ENABLE); + pci_disable_msi(pdev); + card->msi_enabled = false; + } + + return err; +} + +static void pci402_finish_interrupt(struct pci_dev *pdev) +{ + struct pci402_card *card = pci_get_drvdata(pdev); + + iowrite32(0, card->addr_pciep + PCI402_PCIEP_OF_INT_ENABLE); + devm_free_irq(&pdev->dev, pdev->irq, pdev); + + if (card->msi_enabled) { + acc_ov_clear_bits(&card->ov, ACC_OV_OF_MODE, + ACC_OV_REG_MODE_MASK_MSI_ENABLE); + pci_disable_msi(pdev); + card->msi_enabled = false; + } +} + +static int pci402_init_dma(struct pci_dev *pdev) +{ + struct pci402_card *card = pci_get_drvdata(pdev); + int err; + + err = dma_set_coherent_mask(&pdev->dev, PCI402_DMA_MASK); + if (err) { + pci_err(pdev, "DMA set mask failed!\n"); + return err; + } + + /* The esdACC DMA engine needs the DMA buffer aligned to a 64k + * boundary. The DMA API guarantees to align the returned buffer to the + * smallest PAGE_SIZE order which is greater than or equal to the + * requested size. With PCI402_DMA_SIZE == 64kB this suffices here. 
+ */ + card->dma_buf = dma_alloc_coherent(&pdev->dev, PCI402_DMA_SIZE, + &card->dma_hnd, GFP_KERNEL); + if (!card->dma_buf) + return -ENOMEM; + + acc_init_bm_ptr(&card->ov, card->cores, card->dma_buf); + + iowrite32(card->dma_hnd, + card->addr_pciep + PCI402_PCIEP_OF_BM_ADDR_LO); + iowrite32(0, card->addr_pciep + PCI402_PCIEP_OF_BM_ADDR_HI); + + pci_set_master(pdev); + + acc_ov_set_bits(&card->ov, ACC_OV_OF_MODE, + ACC_OV_REG_MODE_MASK_BM_ENABLE); + + return 0; +} + +static void pci402_finish_dma(struct pci_dev *pdev) +{ + struct pci402_card *card = pci_get_drvdata(pdev); + int i; + + acc_ov_clear_bits(&card->ov, ACC_OV_OF_MODE, + ACC_OV_REG_MODE_MASK_BM_ENABLE); + + pci_clear_master(pdev); + + iowrite32(0, card->addr_pciep + PCI402_PCIEP_OF_BM_ADDR_LO); + iowrite32(0, card->addr_pciep + PCI402_PCIEP_OF_BM_ADDR_HI); + + card->ov.bmfifo.messages = NULL; + card->ov.bmfifo.irq_cnt = NULL; + for (i = 0; i < card->ov.active_cores; i++) { + struct acc_core *core = &card->cores[i]; + + core->bmfifo.messages = NULL; + core->bmfifo.irq_cnt = NULL; + } + + dma_free_coherent(&pdev->dev, PCI402_DMA_SIZE, card->dma_buf, + card->dma_hnd); + card->dma_buf = NULL; +} + +static void pci402_unregister_core(struct acc_core *core) +{ + netdev_info(core->netdev, "unregister\n"); + unregister_candev(core->netdev); + + free_candev(core->netdev); + core->netdev = NULL; +} + +static int pci402_init_cores(struct pci_dev *pdev) +{ + struct pci402_card *card = pci_get_drvdata(pdev); + int err; + int i; + + for (i = 0; i < card->ov.active_cores; i++) { + struct acc_core *core = &card->cores[i]; + struct acc_net_priv *priv; + struct net_device *netdev; + u32 fifo_config; + + core->addr = card->ov.addr + (i + 1) * PCI402_IO_LEN_CORE; + + fifo_config = acc_read32(core, ACC_CORE_OF_TXFIFO_CONFIG); + core->tx_fifo_size = (fifo_config >> 24); + if (core->tx_fifo_size <= 1) { + pci_err(pdev, "Invalid tx_fifo_size!\n"); + err = -EINVAL; + goto failure; + } + + netdev = alloc_candev(sizeof(*priv), core->tx_fifo_size); + if (!netdev) { + err = -ENOMEM; + goto failure; + } + core->netdev = netdev; + + netdev->flags |= IFF_ECHO; + netdev->dev_port = i; + netdev->netdev_ops = &pci402_acc_netdev_ops; + netdev->ethtool_ops = &pci402_acc_ethtool_ops; + SET_NETDEV_DEV(netdev, &pdev->dev); + + priv = netdev_priv(netdev); + priv->can.clock.freq = card->ov.core_frequency; + priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | + CAN_CTRLMODE_LISTENONLY | + CAN_CTRLMODE_BERR_REPORTING | + CAN_CTRLMODE_CC_LEN8_DLC; + if (card->ov.features & ACC_OV_REG_FEAT_MASK_DAR) + priv->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT; + if (card->ov.features & ACC_OV_REG_FEAT_MASK_CANFD) + priv->can.bittiming_const = &pci402_bittiming_const_canfd; + else + priv->can.bittiming_const = &pci402_bittiming_const; + priv->can.do_set_bittiming = acc_set_bittiming; + priv->can.do_set_mode = acc_set_mode; + priv->can.do_get_berr_counter = acc_get_berr_counter; + + priv->core = core; + priv->ov = &card->ov; + + err = register_candev(netdev); + if (err) { + free_candev(core->netdev); + core->netdev = NULL; + goto failure; + } + + netdev_info(netdev, "registered\n"); + } + + return 0; + +failure: + for (i--; i >= 0; i--) + pci402_unregister_core(&card->cores[i]); + + return err; +} + +static void pci402_finish_cores(struct pci_dev *pdev) +{ + struct pci402_card *card = pci_get_drvdata(pdev); + int i; + + for (i = 0; i < card->ov.active_cores; i++) + pci402_unregister_core(&card->cores[i]); +} + +static int pci402_probe(struct pci_dev *pdev, const struct 
pci_device_id *ent) +{ + struct pci402_card *card = NULL; + int err; + + err = pci_enable_device(pdev); + if (err) + return err; + + card = devm_kzalloc(&pdev->dev, sizeof(*card), GFP_KERNEL); + if (!card) { + err = -ENOMEM; + goto failure_disable_pci; + } + + pci_set_drvdata(pdev, card); + + err = pci_request_regions(pdev, pci_name(pdev)); + if (err) + goto failure_disable_pci; + + card->addr = pci_iomap(pdev, PCI402_BAR, PCI402_IO_LEN_TOTAL); + if (!card->addr) { + err = -ENOMEM; + goto failure_release_regions; + } + + err = pci402_init_card(pdev); + if (err) + goto failure_unmap; + + err = pci402_init_dma(pdev); + if (err) + goto failure_unmap; + + err = pci402_init_interrupt(pdev); + if (err) + goto failure_finish_dma; + + err = pci402_init_cores(pdev); + if (err) + goto failure_finish_interrupt; + + return 0; + +failure_finish_interrupt: + pci402_finish_interrupt(pdev); + +failure_finish_dma: + pci402_finish_dma(pdev); + +failure_unmap: + pci_iounmap(pdev, card->addr); + +failure_release_regions: + pci_release_regions(pdev); + +failure_disable_pci: + pci_disable_device(pdev); + + return err; +} + +static void pci402_remove(struct pci_dev *pdev) +{ + struct pci402_card *card = pci_get_drvdata(pdev); + + pci402_finish_interrupt(pdev); + pci402_finish_cores(pdev); + pci402_finish_dma(pdev); + pci_iounmap(pdev, card->addr); + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static const struct pci_device_id pci402_tbl[] = { + { + .vendor = PCI_VENDOR_ID_ESDGMBH, + .device = ESD_PCI_DEVICE_ID_PCIE402, + .subvendor = PCI_VENDOR_ID_ESDGMBH, + .subdevice = PCI_ANY_ID, + }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, pci402_tbl); + +static struct pci_driver pci402_driver = { + .name = KBUILD_MODNAME, + .id_table = pci402_tbl, + .probe = pci402_probe, + .remove = pci402_remove, +}; +module_pci_driver(pci402_driver); + +MODULE_DESCRIPTION("Socket-CAN driver for esd CAN 402 card family with esdACC core on PCIe"); +MODULE_AUTHOR("Thomas Körper <socketcan@esd.eu>"); +MODULE_AUTHOR("Stefan Mätje <stefan.maetje@esd.eu>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/can/esd/esdacc.c b/drivers/net/can/esd/esdacc.c new file mode 100644 index 000000000000..73e66f9a3781 --- /dev/null +++ b/drivers/net/can/esd/esdacc.c @@ -0,0 +1,769 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2015 - 2016 Thomas Körper, esd electronic system design gmbh + * Copyright (C) 2017 - 2023 Stefan Mätje, esd electronics gmbh + */ + +#include "esdacc.h" + +#include <linux/bitfield.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/ktime.h> + +/* esdACC ID register layout */ +#define ACC_ID_ID_MASK GENMASK(28, 0) +#define ACC_ID_EFF_FLAG BIT(29) + +/* esdACC DLC register layout */ +#define ACC_DLC_DLC_MASK GENMASK(3, 0) +#define ACC_DLC_RTR_FLAG BIT(4) +#define ACC_DLC_SSTX_FLAG BIT(24) /* Single Shot TX */ + +/* esdACC DLC in struct acc_bmmsg_rxtxdone::acc_dlc.len only! */ +#define ACC_DLC_TXD_FLAG BIT(5) + +/* ecc value of esdACC equals SJA1000's ECC register */ +#define ACC_ECC_SEG 0x1f +#define ACC_ECC_DIR 0x20 +#define ACC_ECC_BIT 0x00 +#define ACC_ECC_FORM 0x40 +#define ACC_ECC_STUFF 0x80 +#define ACC_ECC_MASK 0xc0 + +/* esdACC Status Register bits. Unused bits not documented. 
*/ +#define ACC_REG_STATUS_MASK_STATUS_ES BIT(17) +#define ACC_REG_STATUS_MASK_STATUS_EP BIT(18) +#define ACC_REG_STATUS_MASK_STATUS_BS BIT(19) + +/* esdACC Overview Module BM_IRQ_Mask register related defines */ +/* Two bit wide command masks to mask or unmask a single core IRQ */ +#define ACC_BM_IRQ_UNMASK BIT(0) +#define ACC_BM_IRQ_MASK (ACC_BM_IRQ_UNMASK << 1) +/* Command to unmask all IRQ sources. Created by shifting + * and oring the two bit wide ACC_BM_IRQ_UNMASK 16 times. + */ +#define ACC_BM_IRQ_UNMASK_ALL 0x55555555U + +static void acc_resetmode_enter(struct acc_core *core) +{ + acc_set_bits(core, ACC_CORE_OF_CTRL, + ACC_REG_CTRL_MASK_RESETMODE); + + /* Read back reset mode bit to flush PCI write posting */ + acc_resetmode_entered(core); +} + +static void acc_resetmode_leave(struct acc_core *core) +{ + acc_clear_bits(core, ACC_CORE_OF_CTRL, + ACC_REG_CTRL_MASK_RESETMODE); + + /* Read back reset mode bit to flush PCI write posting */ + acc_resetmode_entered(core); +} + +static void acc_txq_put(struct acc_core *core, u32 acc_id, u32 acc_dlc, + const void *data) +{ + acc_write32_noswap(core, ACC_CORE_OF_TXFIFO_DATA_1, + *((const u32 *)(data + 4))); + acc_write32_noswap(core, ACC_CORE_OF_TXFIFO_DATA_0, + *((const u32 *)data)); + acc_write32(core, ACC_CORE_OF_TXFIFO_DLC, acc_dlc); + /* CAN id must be written at last. This write starts TX. */ + acc_write32(core, ACC_CORE_OF_TXFIFO_ID, acc_id); +} + +static u8 acc_tx_fifo_next(struct acc_core *core, u8 tx_fifo_idx) +{ + ++tx_fifo_idx; + if (tx_fifo_idx >= core->tx_fifo_size) + tx_fifo_idx = 0U; + return tx_fifo_idx; +} + +/* Convert timestamp from esdACC time stamp ticks to ns + * + * The conversion factor ts2ns from time stamp counts to ns is basically + * ts2ns = NSEC_PER_SEC / timestamp_frequency + * + * We handle here only a fixed timestamp frequency of 80MHz. The + * resulting ts2ns factor would be 12.5. + * + * At the end we multiply by 12 and add the half of the HW timestamp + * to get a multiplication by 12.5. This way any overflow is + * avoided until ktime_t itself overflows. + */ +#define ACC_TS_FACTOR (NSEC_PER_SEC / ACC_TS_FREQ_80MHZ) +#define ACC_TS_80MHZ_SHIFT 1 + +static ktime_t acc_ts2ktime(struct acc_ov *ov, u64 ts) +{ + u64 ns; + + ns = (ts * ACC_TS_FACTOR) + (ts >> ACC_TS_80MHZ_SHIFT); + + return ns_to_ktime(ns); +} + +#undef ACC_TS_FACTOR +#undef ACC_TS_80MHZ_SHIFT + +void acc_init_ov(struct acc_ov *ov, struct device *dev) +{ + u32 temp; + + temp = acc_ov_read32(ov, ACC_OV_OF_VERSION); + ov->version = temp; + ov->features = (temp >> 16); + + temp = acc_ov_read32(ov, ACC_OV_OF_INFO); + ov->total_cores = temp; + ov->active_cores = (temp >> 8); + + ov->core_frequency = acc_ov_read32(ov, ACC_OV_OF_CANCORE_FREQ); + ov->timestamp_frequency = acc_ov_read32(ov, ACC_OV_OF_TS_FREQ_LO); + + /* Depending on esdACC feature NEW_PSC enable the new prescaler + * or adjust core_frequency according to the implicit division by 2. 
+ */ + if (ov->features & ACC_OV_REG_FEAT_MASK_NEW_PSC) { + acc_ov_set_bits(ov, ACC_OV_OF_MODE, + ACC_OV_REG_MODE_MASK_NEW_PSC_ENABLE); + } else { + ov->core_frequency /= 2; + } + + dev_dbg(dev, + "esdACC v%u, freq: %u/%u, feat/strap: 0x%x/0x%x, cores: %u/%u\n", + ov->version, ov->core_frequency, ov->timestamp_frequency, + ov->features, acc_ov_read32(ov, ACC_OV_OF_INFO) >> 16, + ov->active_cores, ov->total_cores); +} + +void acc_init_bm_ptr(struct acc_ov *ov, struct acc_core *cores, const void *mem) +{ + unsigned int u; + + /* DMA buffer layout as follows where N is the number of CAN cores + * implemented in the FPGA, i.e. N = ov->total_cores + * + * Section Layout Section size + * ---------------------------------------------- + * FIFO Card/Overview ACC_CORE_DMABUF_SIZE + * FIFO Core0 ACC_CORE_DMABUF_SIZE + * ... ... + * FIFO CoreN ACC_CORE_DMABUF_SIZE + * irq_cnt Card/Overview sizeof(u32) + * irq_cnt Core0 sizeof(u32) + * ... ... + * irq_cnt CoreN sizeof(u32) + */ + ov->bmfifo.messages = mem; + ov->bmfifo.irq_cnt = mem + (ov->total_cores + 1U) * ACC_CORE_DMABUF_SIZE; + + for (u = 0U; u < ov->active_cores; u++) { + struct acc_core *core = &cores[u]; + + core->bmfifo.messages = mem + (u + 1U) * ACC_CORE_DMABUF_SIZE; + core->bmfifo.irq_cnt = ov->bmfifo.irq_cnt + (u + 1U); + } +} + +int acc_open(struct net_device *netdev) +{ + struct acc_net_priv *priv = netdev_priv(netdev); + struct acc_core *core = priv->core; + u32 tx_fifo_status; + u32 ctrl; + int err; + + /* Retry to enter RESET mode if out of sync. */ + if (priv->can.state != CAN_STATE_STOPPED) { + netdev_warn(netdev, "Entered %s() with bad can.state: %s\n", + __func__, can_get_state_str(priv->can.state)); + acc_resetmode_enter(core); + priv->can.state = CAN_STATE_STOPPED; + } + + err = open_candev(netdev); + if (err) + return err; + + ctrl = ACC_REG_CTRL_MASK_IE_RXTX | + ACC_REG_CTRL_MASK_IE_TXERROR | + ACC_REG_CTRL_MASK_IE_ERRWARN | + ACC_REG_CTRL_MASK_IE_OVERRUN | + ACC_REG_CTRL_MASK_IE_ERRPASS; + + if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) + ctrl |= ACC_REG_CTRL_MASK_IE_BUSERR; + + if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) + ctrl |= ACC_REG_CTRL_MASK_LOM; + + acc_set_bits(core, ACC_CORE_OF_CTRL, ctrl); + + acc_resetmode_leave(core); + priv->can.state = CAN_STATE_ERROR_ACTIVE; + + /* Resync TX FIFO indices to HW state after (re-)start. */ + tx_fifo_status = acc_read32(core, ACC_CORE_OF_TXFIFO_STATUS); + core->tx_fifo_head = tx_fifo_status & 0xff; + core->tx_fifo_tail = (tx_fifo_status >> 8) & 0xff; + + netif_start_queue(netdev); + return 0; +} + +int acc_close(struct net_device *netdev) +{ + struct acc_net_priv *priv = netdev_priv(netdev); + struct acc_core *core = priv->core; + + acc_clear_bits(core, ACC_CORE_OF_CTRL, + ACC_REG_CTRL_MASK_IE_RXTX | + ACC_REG_CTRL_MASK_IE_TXERROR | + ACC_REG_CTRL_MASK_IE_ERRWARN | + ACC_REG_CTRL_MASK_IE_OVERRUN | + ACC_REG_CTRL_MASK_IE_ERRPASS | + ACC_REG_CTRL_MASK_IE_BUSERR); + + netif_stop_queue(netdev); + acc_resetmode_enter(core); + priv->can.state = CAN_STATE_STOPPED; + + /* Mark pending TX requests to be aborted after controller restart. 
*/ + acc_write32(core, ACC_CORE_OF_TX_ABORT_MASK, 0xffff); + + /* ACC_REG_CTRL_MASK_LOM is only accessible in RESET mode */ + acc_clear_bits(core, ACC_CORE_OF_CTRL, + ACC_REG_CTRL_MASK_LOM); + + close_candev(netdev); + return 0; +} + +netdev_tx_t acc_start_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct acc_net_priv *priv = netdev_priv(netdev); + struct acc_core *core = priv->core; + struct can_frame *cf = (struct can_frame *)skb->data; + u8 tx_fifo_head = core->tx_fifo_head; + int fifo_usage; + u32 acc_id; + u32 acc_dlc; + + if (can_dev_dropped_skb(netdev, skb)) + return NETDEV_TX_OK; + + /* Access core->tx_fifo_tail only once because it may be changed + * from the interrupt level. + */ + fifo_usage = tx_fifo_head - core->tx_fifo_tail; + if (fifo_usage < 0) + fifo_usage += core->tx_fifo_size; + + if (fifo_usage >= core->tx_fifo_size - 1) { + netdev_err(core->netdev, + "BUG: TX ring full when queue awake!\n"); + netif_stop_queue(netdev); + return NETDEV_TX_BUSY; + } + + if (fifo_usage == core->tx_fifo_size - 2) + netif_stop_queue(netdev); + + acc_dlc = can_get_cc_dlc(cf, priv->can.ctrlmode); + if (cf->can_id & CAN_RTR_FLAG) + acc_dlc |= ACC_DLC_RTR_FLAG; + if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) + acc_dlc |= ACC_DLC_SSTX_FLAG; + + if (cf->can_id & CAN_EFF_FLAG) { + acc_id = cf->can_id & CAN_EFF_MASK; + acc_id |= ACC_ID_EFF_FLAG; + } else { + acc_id = cf->can_id & CAN_SFF_MASK; + } + + can_put_echo_skb(skb, netdev, core->tx_fifo_head, 0); + + core->tx_fifo_head = acc_tx_fifo_next(core, tx_fifo_head); + + acc_txq_put(core, acc_id, acc_dlc, cf->data); + + return NETDEV_TX_OK; +} + +int acc_get_berr_counter(const struct net_device *netdev, + struct can_berr_counter *bec) +{ + struct acc_net_priv *priv = netdev_priv(netdev); + u32 core_status = acc_read32(priv->core, ACC_CORE_OF_STATUS); + + bec->txerr = (core_status >> 8) & 0xff; + bec->rxerr = core_status & 0xff; + + return 0; +} + +int acc_set_mode(struct net_device *netdev, enum can_mode mode) +{ + struct acc_net_priv *priv = netdev_priv(netdev); + + switch (mode) { + case CAN_MODE_START: + /* Paranoid FIFO index check. */ + { + const u32 tx_fifo_status = + acc_read32(priv->core, ACC_CORE_OF_TXFIFO_STATUS); + const u8 hw_fifo_head = tx_fifo_status; + + if (hw_fifo_head != priv->core->tx_fifo_head || + hw_fifo_head != priv->core->tx_fifo_tail) { + netdev_warn(netdev, + "TX FIFO mismatch: T %2u H %2u; TFHW %#08x\n", + priv->core->tx_fifo_tail, + priv->core->tx_fifo_head, + tx_fifo_status); + } + } + acc_resetmode_leave(priv->core); + /* To leave the bus-off state the esdACC controller begins + * here a grace period where it counts 128 "idle conditions" (each + * of 11 consecutive recessive bits) on the bus as required + * by the CAN spec. + * + * During this time the TX FIFO may still contain already + * aborted "zombie" frames that are only drained from the FIFO + * at the end of the grace period. + * + * To not to interfere with this drain process we don't + * call netif_wake_queue() here. When the controller reaches + * the error-active state again, it informs us about that + * with an acc_bmmsg_errstatechange message. Then + * netif_wake_queue() is called from + * handle_core_msg_errstatechange() instead. 
+ */ + break; + + default: + return -EOPNOTSUPP; + } + + return 0; +} + +int acc_set_bittiming(struct net_device *netdev) +{ + struct acc_net_priv *priv = netdev_priv(netdev); + const struct can_bittiming *bt = &priv->can.bittiming; + u32 brp; + u32 btr; + + if (priv->ov->features & ACC_OV_REG_FEAT_MASK_CANFD) { + u32 fbtr = 0; + + netdev_dbg(netdev, "bit timing: brp %u, prop %u, ph1 %u ph2 %u, sjw %u\n", + bt->brp, bt->prop_seg, + bt->phase_seg1, bt->phase_seg2, bt->sjw); + + brp = FIELD_PREP(ACC_REG_BRP_FD_MASK_BRP, bt->brp - 1); + + btr = FIELD_PREP(ACC_REG_BTR_FD_MASK_TSEG1, bt->phase_seg1 + bt->prop_seg - 1); + btr |= FIELD_PREP(ACC_REG_BTR_FD_MASK_TSEG2, bt->phase_seg2 - 1); + btr |= FIELD_PREP(ACC_REG_BTR_FD_MASK_SJW, bt->sjw - 1); + + /* Keep order of accesses to ACC_CORE_OF_BRP and ACC_CORE_OF_BTR. */ + acc_write32(priv->core, ACC_CORE_OF_BRP, brp); + acc_write32(priv->core, ACC_CORE_OF_BTR, btr); + + netdev_dbg(netdev, "esdACC: BRP %u, NBTR 0x%08x, DBTR 0x%08x", + brp, btr, fbtr); + } else { + netdev_dbg(netdev, "bit timing: brp %u, prop %u, ph1 %u ph2 %u, sjw %u\n", + bt->brp, bt->prop_seg, + bt->phase_seg1, bt->phase_seg2, bt->sjw); + + brp = FIELD_PREP(ACC_REG_BRP_CL_MASK_BRP, bt->brp - 1); + + btr = FIELD_PREP(ACC_REG_BTR_CL_MASK_TSEG1, bt->phase_seg1 + bt->prop_seg - 1); + btr |= FIELD_PREP(ACC_REG_BTR_CL_MASK_TSEG2, bt->phase_seg2 - 1); + btr |= FIELD_PREP(ACC_REG_BTR_CL_MASK_SJW, bt->sjw - 1); + + /* Keep order of accesses to ACC_CORE_OF_BRP and ACC_CORE_OF_BTR. */ + acc_write32(priv->core, ACC_CORE_OF_BRP, brp); + acc_write32(priv->core, ACC_CORE_OF_BTR, btr); + + netdev_dbg(netdev, "esdACC: BRP %u, BTR 0x%08x", brp, btr); + } + + return 0; +} + +static void handle_core_msg_rxtxdone(struct acc_core *core, + const struct acc_bmmsg_rxtxdone *msg) +{ + struct acc_net_priv *priv = netdev_priv(core->netdev); + struct net_device_stats *stats = &core->netdev->stats; + struct sk_buff *skb; + + if (msg->acc_dlc.len & ACC_DLC_TXD_FLAG) { + u8 tx_fifo_tail = core->tx_fifo_tail; + + if (core->tx_fifo_head == tx_fifo_tail) { + netdev_warn(core->netdev, + "TX interrupt, but queue is empty!?\n"); + return; + } + + /* Direct access echo skb to attach HW time stamp. */ + skb = priv->can.echo_skb[tx_fifo_tail]; + if (skb) { + skb_hwtstamps(skb)->hwtstamp = + acc_ts2ktime(priv->ov, msg->ts); + } + + stats->tx_packets++; + stats->tx_bytes += can_get_echo_skb(core->netdev, tx_fifo_tail, + NULL); + + core->tx_fifo_tail = acc_tx_fifo_next(core, tx_fifo_tail); + + netif_wake_queue(core->netdev); + + } else { + struct can_frame *cf; + + skb = alloc_can_skb(core->netdev, &cf); + if (!skb) { + stats->rx_dropped++; + return; + } + + cf->can_id = msg->id & ACC_ID_ID_MASK; + if (msg->id & ACC_ID_EFF_FLAG) + cf->can_id |= CAN_EFF_FLAG; + + can_frame_set_cc_len(cf, msg->acc_dlc.len & ACC_DLC_DLC_MASK, + priv->can.ctrlmode); + + if (msg->acc_dlc.len & ACC_DLC_RTR_FLAG) { + cf->can_id |= CAN_RTR_FLAG; + } else { + memcpy(cf->data, msg->data, cf->len); + stats->rx_bytes += cf->len; + } + stats->rx_packets++; + + skb_hwtstamps(skb)->hwtstamp = acc_ts2ktime(priv->ov, msg->ts); + + netif_rx(skb); + } +} + +static void handle_core_msg_txabort(struct acc_core *core, + const struct acc_bmmsg_txabort *msg) +{ + struct net_device_stats *stats = &core->netdev->stats; + u8 tx_fifo_tail = core->tx_fifo_tail; + u32 abort_mask = msg->abort_mask; /* u32 extend to avoid warnings later */ + + /* The abort_mask shows which frames were aborted in esdACC's FIFO. 
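acc_set_bittiming() above packs the Netlink bit-timing parameters into the BRP and BTR registers; the sketch below reproduces the classic-CAN BTR packing with plain shifts instead of FIELD_PREP() (field positions follow the ACC_REG_BTR_CL_* masks in esdacc.h further down, the timing numbers are invented and the helper is not driver code).

#include <stdio.h>

/* Plain-C equivalent of the FIELD_PREP() packing for the classic CAN BTR layout. */
static unsigned int acc_btr_classic(unsigned int prop_seg, unsigned int phase_seg1,
				    unsigned int phase_seg2, unsigned int sjw)
{
	unsigned int btr = 0;

	btr |= (phase_seg1 + prop_seg - 1) & 0xf;	/* TSEG1, bits 3:0   */
	btr |= ((phase_seg2 - 1) & 0x7) << 16;		/* TSEG2, bits 18:16 */
	btr |= ((sjw - 1) & 0x3) << 24;			/* SJW,   bits 25:24 */
	return btr;
}

int main(void)
{
	/* Example segment values only, not a recommended bit timing. */
	printf("BTR 0x%08x\n", acc_btr_classic(5, 6, 4, 1));
	return 0;
}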
*/ + while (tx_fifo_tail != core->tx_fifo_head && (abort_mask)) { + const u32 tail_mask = (1U << tx_fifo_tail); + + if (!(abort_mask & tail_mask)) + break; + abort_mask &= ~tail_mask; + + can_free_echo_skb(core->netdev, tx_fifo_tail, NULL); + stats->tx_dropped++; + stats->tx_aborted_errors++; + + tx_fifo_tail = acc_tx_fifo_next(core, tx_fifo_tail); + } + core->tx_fifo_tail = tx_fifo_tail; + if (abort_mask) + netdev_warn(core->netdev, "Unhandled aborted messages\n"); + + if (!acc_resetmode_entered(core)) + netif_wake_queue(core->netdev); +} + +static void handle_core_msg_overrun(struct acc_core *core, + const struct acc_bmmsg_overrun *msg) +{ + struct acc_net_priv *priv = netdev_priv(core->netdev); + struct net_device_stats *stats = &core->netdev->stats; + struct can_frame *cf; + struct sk_buff *skb; + + /* lost_cnt may be 0 if not supported by esdACC version */ + if (msg->lost_cnt) { + stats->rx_errors += msg->lost_cnt; + stats->rx_over_errors += msg->lost_cnt; + } else { + stats->rx_errors++; + stats->rx_over_errors++; + } + + skb = alloc_can_err_skb(core->netdev, &cf); + if (!skb) + return; + + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + + skb_hwtstamps(skb)->hwtstamp = acc_ts2ktime(priv->ov, msg->ts); + + netif_rx(skb); +} + +static void handle_core_msg_buserr(struct acc_core *core, + const struct acc_bmmsg_buserr *msg) +{ + struct acc_net_priv *priv = netdev_priv(core->netdev); + struct net_device_stats *stats = &core->netdev->stats; + struct can_frame *cf; + struct sk_buff *skb; + const u32 reg_status = msg->reg_status; + const u8 rxerr = reg_status; + const u8 txerr = (reg_status >> 8); + u8 can_err_prot_type = 0U; + + priv->can.can_stats.bus_error++; + + /* Error occurred during transmission? */ + if (msg->ecc & ACC_ECC_DIR) { + stats->rx_errors++; + } else { + can_err_prot_type |= CAN_ERR_PROT_TX; + stats->tx_errors++; + } + /* Determine error type */ + switch (msg->ecc & ACC_ECC_MASK) { + case ACC_ECC_BIT: + can_err_prot_type |= CAN_ERR_PROT_BIT; + break; + case ACC_ECC_FORM: + can_err_prot_type |= CAN_ERR_PROT_FORM; + break; + case ACC_ECC_STUFF: + can_err_prot_type |= CAN_ERR_PROT_STUFF; + break; + default: + can_err_prot_type |= CAN_ERR_PROT_UNSPEC; + break; + } + + skb = alloc_can_err_skb(core->netdev, &cf); + if (!skb) + return; + + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR | CAN_ERR_CNT; + + /* Set protocol error type */ + cf->data[2] = can_err_prot_type; + /* Set error location */ + cf->data[3] = msg->ecc & ACC_ECC_SEG; + + /* Insert CAN TX and RX error counters. 
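handle_core_msg_txabort() above walks the TX ring from tx_fifo_tail and releases every consecutive echo skb whose bit is set in abort_mask; the loop below models just that index/mask logic with example values, it is not driver code.

#include <stdio.h>

int main(void)
{
	unsigned int size = 8, head = 5, tail = 2;
	unsigned int abort_mask = (1U << 2) | (1U << 3);	/* frames 2 and 3 aborted */

	while (tail != head && abort_mask) {
		unsigned int tail_mask = 1U << tail;

		if (!(abort_mask & tail_mask))
			break;			/* only consecutive frames at the tail are freed */
		abort_mask &= ~tail_mask;
		printf("drop echo skb %u\n", tail);
		tail = (tail + 1) % size;
	}
	printf("new tail %u, leftover mask 0x%x\n", tail, abort_mask);
	return 0;
}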
*/ + cf->data[6] = txerr; + cf->data[7] = rxerr; + + skb_hwtstamps(skb)->hwtstamp = acc_ts2ktime(priv->ov, msg->ts); + + netif_rx(skb); +} + +static void +handle_core_msg_errstatechange(struct acc_core *core, + const struct acc_bmmsg_errstatechange *msg) +{ + struct acc_net_priv *priv = netdev_priv(core->netdev); + struct can_frame *cf = NULL; + struct sk_buff *skb; + const u32 reg_status = msg->reg_status; + const u8 rxerr = reg_status; + const u8 txerr = (reg_status >> 8); + enum can_state new_state; + + if (reg_status & ACC_REG_STATUS_MASK_STATUS_BS) { + new_state = CAN_STATE_BUS_OFF; + } else if (reg_status & ACC_REG_STATUS_MASK_STATUS_EP) { + new_state = CAN_STATE_ERROR_PASSIVE; + } else if (reg_status & ACC_REG_STATUS_MASK_STATUS_ES) { + new_state = CAN_STATE_ERROR_WARNING; + } else { + new_state = CAN_STATE_ERROR_ACTIVE; + if (priv->can.state == CAN_STATE_BUS_OFF) { + /* See comment in acc_set_mode() for CAN_MODE_START */ + netif_wake_queue(core->netdev); + } + } + + skb = alloc_can_err_skb(core->netdev, &cf); + + if (new_state != priv->can.state) { + enum can_state tx_state, rx_state; + + tx_state = (txerr >= rxerr) ? + new_state : CAN_STATE_ERROR_ACTIVE; + rx_state = (rxerr >= txerr) ? + new_state : CAN_STATE_ERROR_ACTIVE; + + /* Always call can_change_state() to update the state + * even if alloc_can_err_skb() may have failed. + * can_change_state() can cope with a NULL cf pointer. + */ + can_change_state(core->netdev, cf, tx_state, rx_state); + } + + if (skb) { + cf->can_id |= CAN_ERR_CNT; + cf->data[6] = txerr; + cf->data[7] = rxerr; + + skb_hwtstamps(skb)->hwtstamp = acc_ts2ktime(priv->ov, msg->ts); + + netif_rx(skb); + } + + if (new_state == CAN_STATE_BUS_OFF) { + acc_write32(core, ACC_CORE_OF_TX_ABORT_MASK, 0xffff); + can_bus_off(core->netdev); + } +} + +static void handle_core_interrupt(struct acc_core *core) +{ + u32 msg_fifo_head = core->bmfifo.local_irq_cnt & 0xff; + + while (core->bmfifo.msg_fifo_tail != msg_fifo_head) { + const union acc_bmmsg *msg = + &core->bmfifo.messages[core->bmfifo.msg_fifo_tail]; + + switch (msg->msg_id) { + case BM_MSG_ID_RXTXDONE: + handle_core_msg_rxtxdone(core, &msg->rxtxdone); + break; + + case BM_MSG_ID_TXABORT: + handle_core_msg_txabort(core, &msg->txabort); + break; + + case BM_MSG_ID_OVERRUN: + handle_core_msg_overrun(core, &msg->overrun); + break; + + case BM_MSG_ID_BUSERR: + handle_core_msg_buserr(core, &msg->buserr); + break; + + case BM_MSG_ID_ERRPASSIVE: + case BM_MSG_ID_ERRWARN: + handle_core_msg_errstatechange(core, + &msg->errstatechange); + break; + + default: + /* Ignore all other BM messages (like the CAN-FD messages) */ + break; + } + + core->bmfifo.msg_fifo_tail = + (core->bmfifo.msg_fifo_tail + 1) & 0xff; + } +} + +/** + * acc_card_interrupt() - handle the interrupts of an esdACC FPGA + * + * @ov: overview module structure + * @cores: array of core structures + * + * This function handles all interrupts pending for the overview module and the + * CAN cores of the esdACC FPGA. + * + * It examines for all cores (the overview module core and the CAN cores) + * the bmfifo.irq_cnt and compares it with the previously saved + * bmfifo.local_irq_cnt. An IRQ is pending if they differ. The esdACC FPGA + * updates the bmfifo.irq_cnt values by DMA. + * + * The pending interrupts are masked by writing to the IRQ mask register at + * ACC_OV_OF_BM_IRQ_MASK. 
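handle_core_interrupt() above drains the 256-entry bus-master message ring until the local tail reaches the head taken from the low byte of local_irq_cnt; the following stand-alone snippet (example indices only) illustrates the wrap-around of that ring.

#include <stdio.h>

int main(void)
{
	unsigned int local_irq_cnt = 0x0105;	/* example: only bits 7..0 carry the head */
	unsigned int head = local_irq_cnt & 0xff;
	unsigned int tail = 0xfe;		/* example tail shortly before the wrap */

	while (tail != head) {
		printf("handle BM message at index %u\n", tail);
		tail = (tail + 1) & 0xff;	/* ring of 256 messages */
	}
	return 0;
}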
This register has for each core a two bit command
+ * field evaluated as follows:
+ *
+ * Define, bit pattern: meaning
+ * 00: no action
+ * ACC_BM_IRQ_UNMASK, 01: unmask interrupt
+ * ACC_BM_IRQ_MASK, 10: mask interrupt
+ * 11: no action
+ *
+ * For each CAN core with a pending IRQ, handle_core_interrupt() handles all
+ * busmaster messages from the message FIFO. The last handled message (FIFO
+ * index) is written to the CAN core to acknowledge its handling.
+ *
+ * The last step is to unmask all interrupts in the FPGA using
+ * ACC_BM_IRQ_UNMASK_ALL.
+ *
+ * Return:
+ * IRQ_HANDLED, if the card generated an interrupt that was handled
+ * IRQ_NONE, if the interrupt is not ours
+ */
+irqreturn_t acc_card_interrupt(struct acc_ov *ov, struct acc_core *cores)
+{
+ u32 irqmask;
+ int i;
+
+ /* First we check for which units interrupts are pending: the
+ * card/overview module or any of the cores. Two bits in irqmask are
+ * used for each; each two bit field is set to ACC_BM_IRQ_MASK if an
+ * IRQ is pending.
+ */
+ irqmask = 0U;
+ if (READ_ONCE(*ov->bmfifo.irq_cnt) != ov->bmfifo.local_irq_cnt) {
+ irqmask |= ACC_BM_IRQ_MASK;
+ ov->bmfifo.local_irq_cnt = READ_ONCE(*ov->bmfifo.irq_cnt);
+ }
+
+ for (i = 0; i < ov->active_cores; i++) {
+ struct acc_core *core = &cores[i];
+
+ if (READ_ONCE(*core->bmfifo.irq_cnt) != core->bmfifo.local_irq_cnt) {
+ irqmask |= (ACC_BM_IRQ_MASK << (2 * (i + 1)));
+ core->bmfifo.local_irq_cnt = READ_ONCE(*core->bmfifo.irq_cnt);
+ }
+ }
+
+ if (!irqmask)
+ return IRQ_NONE;
+
+ /* Second, we tell the card that we are working on these interrupts by
+ * writing irqmask, call handle_{ov|core}_interrupt() and then
+ * acknowledge the interrupts by writing irq_cnt:
+ */
+ acc_ov_write32(ov, ACC_OV_OF_BM_IRQ_MASK, irqmask);
+
+ if (irqmask & ACC_BM_IRQ_MASK) {
+ /* handle_ov_interrupt(); - no use yet.
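The two-bit command fields described in the kernel-doc above can be composed exactly as acc_card_interrupt() does it; the sketch below builds an example mask for a card with the overview module and two of three cores pending (the 01/10 bit patterns are taken from the table above, everything else is hypothetical).

#include <stdio.h>

#define ACC_BM_IRQ_UNMASK	0x1U	/* 01: unmask interrupt */
#define ACC_BM_IRQ_MASK		0x2U	/* 10: mask interrupt   */

int main(void)
{
	unsigned int irqmask = 0;
	int pending_ov = 1;			/* card/overview uses bits 1:0 */
	int pending_core[3] = { 1, 0, 1 };	/* example pending state of 3 cores */
	int i;

	if (pending_ov)
		irqmask |= ACC_BM_IRQ_MASK;
	for (i = 0; i < 3; i++)
		if (pending_core[i])
			irqmask |= ACC_BM_IRQ_MASK << (2 * (i + 1));

	printf("BM_IRQ_MASK write: 0x%08x\n", irqmask);	/* 0x0000008a here */
	return 0;
}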
*/ + acc_ov_write32(ov, ACC_OV_OF_BM_IRQ_COUNTER, + ov->bmfifo.local_irq_cnt); + } + + for (i = 0; i < ov->active_cores; i++) { + struct acc_core *core = &cores[i]; + + if (irqmask & (ACC_BM_IRQ_MASK << (2 * (i + 1)))) { + handle_core_interrupt(core); + acc_write32(core, ACC_OV_OF_BM_IRQ_COUNTER, + core->bmfifo.local_irq_cnt); + } + } + + acc_ov_write32(ov, ACC_OV_OF_BM_IRQ_MASK, ACC_BM_IRQ_UNMASK_ALL); + + return IRQ_HANDLED; +} diff --git a/drivers/net/can/esd/esdacc.h b/drivers/net/can/esd/esdacc.h new file mode 100644 index 000000000000..6b7ebd8c91b2 --- /dev/null +++ b/drivers/net/can/esd/esdacc.h @@ -0,0 +1,358 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2015 - 2016 Thomas Körper, esd electronic system design gmbh + * Copyright (C) 2017 - 2023 Stefan Mätje, esd electronics gmbh + */ + +#include <linux/bits.h> +#include <linux/can/dev.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/units.h> + +#define ACC_TS_FREQ_80MHZ (80 * HZ_PER_MHZ) +#define ACC_I2C_ADDON_DETECT_DELAY_MS 10 + +/* esdACC Overview Module */ +#define ACC_OV_OF_PROBE 0x0000 +#define ACC_OV_OF_VERSION 0x0004 +#define ACC_OV_OF_INFO 0x0008 +#define ACC_OV_OF_CANCORE_FREQ 0x000c +#define ACC_OV_OF_TS_FREQ_LO 0x0010 +#define ACC_OV_OF_TS_FREQ_HI 0x0014 +#define ACC_OV_OF_IRQ_STATUS_CORES 0x0018 +#define ACC_OV_OF_TS_CURR_LO 0x001c +#define ACC_OV_OF_TS_CURR_HI 0x0020 +#define ACC_OV_OF_IRQ_STATUS 0x0028 +#define ACC_OV_OF_MODE 0x002c +#define ACC_OV_OF_BM_IRQ_COUNTER 0x0070 +#define ACC_OV_OF_BM_IRQ_MASK 0x0074 +#define ACC_OV_OF_MSI_DATA 0x0080 +#define ACC_OV_OF_MSI_ADDRESSOFFSET 0x0084 + +/* Feature flags are contained in the upper 16 bit of the version + * register at ACC_OV_OF_VERSION but only used with these masks after + * extraction into an extra variable => (xx - 16). 
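As the comment above explains, the feature flags occupy the upper 16 bits of the version register, which is why the ACC_OV_REG_FEAT_MASK_* masks are written as BIT(x - 16); a small illustration of that extraction (the register value is an arbitrary example, not real hardware output):

#include <stdio.h>

#define BIT(n)	(1U << (n))

int main(void)
{
	unsigned int reg_version = 0x18000123;		/* example ACC_OV_OF_VERSION readout */
	unsigned int version  = reg_version & 0xffff;
	unsigned int features = reg_version >> 16;	/* => masks use BIT(x - 16) */

	printf("version %u, CAN-FD %u, new PSC %u\n", version,
	       !!(features & BIT(27 - 16)), !!(features & BIT(28 - 16)));
	return 0;
}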
+ */ +#define ACC_OV_REG_FEAT_MASK_CANFD BIT(27 - 16) +#define ACC_OV_REG_FEAT_MASK_NEW_PSC BIT(28 - 16) +#define ACC_OV_REG_FEAT_MASK_DAR BIT(30 - 16) + +#define ACC_OV_REG_MODE_MASK_ENDIAN_LITTLE BIT(0) +#define ACC_OV_REG_MODE_MASK_BM_ENABLE BIT(1) +#define ACC_OV_REG_MODE_MASK_MODE_LED BIT(2) +#define ACC_OV_REG_MODE_MASK_TIMER_ENABLE BIT(4) +#define ACC_OV_REG_MODE_MASK_TIMER_ONE_SHOT BIT(5) +#define ACC_OV_REG_MODE_MASK_TIMER_ABSOLUTE BIT(6) +#define ACC_OV_REG_MODE_MASK_TIMER GENMASK(6, 4) +#define ACC_OV_REG_MODE_MASK_TS_SRC GENMASK(8, 7) +#define ACC_OV_REG_MODE_MASK_I2C_ENABLE BIT(11) +#define ACC_OV_REG_MODE_MASK_MSI_ENABLE BIT(14) +#define ACC_OV_REG_MODE_MASK_NEW_PSC_ENABLE BIT(15) +#define ACC_OV_REG_MODE_MASK_FPGA_RESET BIT(31) + +/* esdACC CAN Core Module */ +#define ACC_CORE_OF_CTRL 0x0000 +#define ACC_CORE_OF_STATUS_IRQ 0x0008 +#define ACC_CORE_OF_BRP 0x000c +#define ACC_CORE_OF_BTR 0x0010 +#define ACC_CORE_OF_FBTR 0x0014 +#define ACC_CORE_OF_STATUS 0x0030 +#define ACC_CORE_OF_TXFIFO_CONFIG 0x0048 +#define ACC_CORE_OF_TXFIFO_STATUS 0x004c +#define ACC_CORE_OF_TX_STATUS_IRQ 0x0050 +#define ACC_CORE_OF_TX_ABORT_MASK 0x0054 +#define ACC_CORE_OF_BM_IRQ_COUNTER 0x0070 +#define ACC_CORE_OF_TXFIFO_ID 0x00c0 +#define ACC_CORE_OF_TXFIFO_DLC 0x00c4 +#define ACC_CORE_OF_TXFIFO_DATA_0 0x00c8 +#define ACC_CORE_OF_TXFIFO_DATA_1 0x00cc + +/* CTRL register layout */ +#define ACC_REG_CTRL_MASK_RESETMODE BIT(0) +#define ACC_REG_CTRL_MASK_LOM BIT(1) +#define ACC_REG_CTRL_MASK_STM BIT(2) +#define ACC_REG_CTRL_MASK_TRANSEN BIT(5) +#define ACC_REG_CTRL_MASK_TS BIT(6) +#define ACC_REG_CTRL_MASK_SCHEDULE BIT(7) + +#define ACC_REG_CTRL_MASK_IE_RXTX BIT(8) +#define ACC_REG_CTRL_MASK_IE_TXERROR BIT(9) +#define ACC_REG_CTRL_MASK_IE_ERRWARN BIT(10) +#define ACC_REG_CTRL_MASK_IE_OVERRUN BIT(11) +#define ACC_REG_CTRL_MASK_IE_TSI BIT(12) +#define ACC_REG_CTRL_MASK_IE_ERRPASS BIT(13) +#define ACC_REG_CTRL_MASK_IE_ALI BIT(14) +#define ACC_REG_CTRL_MASK_IE_BUSERR BIT(15) + +/* BRP and BTR register layout for CAN-Classic version */ +#define ACC_REG_BRP_CL_MASK_BRP GENMASK(8, 0) +#define ACC_REG_BTR_CL_MASK_TSEG1 GENMASK(3, 0) +#define ACC_REG_BTR_CL_MASK_TSEG2 GENMASK(18, 16) +#define ACC_REG_BTR_CL_MASK_SJW GENMASK(25, 24) + +/* BRP and BTR register layout for CAN-FD version */ +#define ACC_REG_BRP_FD_MASK_BRP GENMASK(7, 0) +#define ACC_REG_BTR_FD_MASK_TSEG1 GENMASK(7, 0) +#define ACC_REG_BTR_FD_MASK_TSEG2 GENMASK(22, 16) +#define ACC_REG_BTR_FD_MASK_SJW GENMASK(30, 24) + +/* 256 BM_MSGs of 32 byte size */ +#define ACC_CORE_DMAMSG_SIZE 32U +#define ACC_CORE_DMABUF_SIZE (256U * ACC_CORE_DMAMSG_SIZE) + +enum acc_bmmsg_id { + BM_MSG_ID_RXTXDONE = 0x01, + BM_MSG_ID_TXABORT = 0x02, + BM_MSG_ID_OVERRUN = 0x03, + BM_MSG_ID_BUSERR = 0x04, + BM_MSG_ID_ERRPASSIVE = 0x05, + BM_MSG_ID_ERRWARN = 0x06, + BM_MSG_ID_TIMESLICE = 0x07, + BM_MSG_ID_HWTIMER = 0x08, + BM_MSG_ID_HOTPLUG = 0x09, +}; + +/* The struct acc_bmmsg_* structure declarations that follow here provide + * access to the ring buffer of bus master messages maintained by the FPGA + * bus master engine. All bus master messages have the same size of + * ACC_CORE_DMAMSG_SIZE and a minimum alignment of ACC_CORE_DMAMSG_SIZE in + * memory. + * + * All structure members are natural aligned. Therefore we should not need + * a __packed attribute. All struct acc_bmmsg_* declarations have at least + * reserved* members to fill the structure to the full ACC_CORE_DMAMSG_SIZE. 
+ * + * A failure of this property due padding will be detected at compile time + * by static_assert(sizeof(union acc_bmmsg) == ACC_CORE_DMAMSG_SIZE). + */ + +struct acc_bmmsg_rxtxdone { + u8 msg_id; + u8 txfifo_level; + u8 reserved1[2]; + u8 txtsfifo_level; + u8 reserved2[3]; + u32 id; + struct { + u8 len; + u8 txdfifo_idx; + u8 zeroes8; + u8 reserved; + } acc_dlc; + u8 data[CAN_MAX_DLEN]; + /* Time stamps in struct acc_ov::timestamp_frequency ticks. */ + u64 ts; +}; + +struct acc_bmmsg_txabort { + u8 msg_id; + u8 txfifo_level; + u16 abort_mask; + u8 txtsfifo_level; + u8 reserved2[1]; + u16 abort_mask_txts; + u64 ts; + u32 reserved3[4]; +}; + +struct acc_bmmsg_overrun { + u8 msg_id; + u8 txfifo_level; + u8 lost_cnt; + u8 reserved1; + u8 txtsfifo_level; + u8 reserved2[3]; + u64 ts; + u32 reserved3[4]; +}; + +struct acc_bmmsg_buserr { + u8 msg_id; + u8 txfifo_level; + u8 ecc; + u8 reserved1; + u8 txtsfifo_level; + u8 reserved2[3]; + u64 ts; + u32 reg_status; + u32 reg_btr; + u32 reserved3[2]; +}; + +struct acc_bmmsg_errstatechange { + u8 msg_id; + u8 txfifo_level; + u8 reserved1[2]; + u8 txtsfifo_level; + u8 reserved2[3]; + u64 ts; + u32 reg_status; + u32 reserved3[3]; +}; + +struct acc_bmmsg_timeslice { + u8 msg_id; + u8 txfifo_level; + u8 reserved1[2]; + u8 txtsfifo_level; + u8 reserved2[3]; + u64 ts; + u32 reserved3[4]; +}; + +struct acc_bmmsg_hwtimer { + u8 msg_id; + u8 reserved1[3]; + u32 reserved2[1]; + u64 timer; + u32 reserved3[4]; +}; + +struct acc_bmmsg_hotplug { + u8 msg_id; + u8 reserved1[3]; + u32 reserved2[7]; +}; + +union acc_bmmsg { + u8 msg_id; + struct acc_bmmsg_rxtxdone rxtxdone; + struct acc_bmmsg_txabort txabort; + struct acc_bmmsg_overrun overrun; + struct acc_bmmsg_buserr buserr; + struct acc_bmmsg_errstatechange errstatechange; + struct acc_bmmsg_timeslice timeslice; + struct acc_bmmsg_hwtimer hwtimer; +}; + +/* Check size of union acc_bmmsg to be of expected size. */ +static_assert(sizeof(union acc_bmmsg) == ACC_CORE_DMAMSG_SIZE); + +struct acc_bmfifo { + const union acc_bmmsg *messages; + /* irq_cnt points to an u32 value where the esdACC FPGA deposits + * the bm_fifo head index in coherent DMA memory. Only bits 7..0 + * are valid. Use READ_ONCE() to access this memory location. + */ + const u32 *irq_cnt; + u32 local_irq_cnt; + u32 msg_fifo_tail; +}; + +struct acc_core { + void __iomem *addr; + struct net_device *netdev; + struct acc_bmfifo bmfifo; + u8 tx_fifo_size; + u8 tx_fifo_head; + u8 tx_fifo_tail; +}; + +struct acc_ov { + void __iomem *addr; + struct acc_bmfifo bmfifo; + u32 timestamp_frequency; + u32 core_frequency; + u16 version; + u16 features; + u8 total_cores; + u8 active_cores; +}; + +struct acc_net_priv { + struct can_priv can; /* must be the first member! 
*/ + struct acc_core *core; + struct acc_ov *ov; +}; + +static inline u32 acc_read32(struct acc_core *core, unsigned short offs) +{ + return ioread32be(core->addr + offs); +} + +static inline void acc_write32(struct acc_core *core, + unsigned short offs, u32 v) +{ + iowrite32be(v, core->addr + offs); +} + +static inline void acc_write32_noswap(struct acc_core *core, + unsigned short offs, u32 v) +{ + iowrite32(v, core->addr + offs); +} + +static inline void acc_set_bits(struct acc_core *core, + unsigned short offs, u32 mask) +{ + u32 v = acc_read32(core, offs); + + v |= mask; + acc_write32(core, offs, v); +} + +static inline void acc_clear_bits(struct acc_core *core, + unsigned short offs, u32 mask) +{ + u32 v = acc_read32(core, offs); + + v &= ~mask; + acc_write32(core, offs, v); +} + +static inline int acc_resetmode_entered(struct acc_core *core) +{ + u32 ctrl = acc_read32(core, ACC_CORE_OF_CTRL); + + return (ctrl & ACC_REG_CTRL_MASK_RESETMODE) != 0; +} + +static inline u32 acc_ov_read32(struct acc_ov *ov, unsigned short offs) +{ + return ioread32be(ov->addr + offs); +} + +static inline void acc_ov_write32(struct acc_ov *ov, + unsigned short offs, u32 v) +{ + iowrite32be(v, ov->addr + offs); +} + +static inline void acc_ov_set_bits(struct acc_ov *ov, + unsigned short offs, u32 b) +{ + u32 v = acc_ov_read32(ov, offs); + + v |= b; + acc_ov_write32(ov, offs, v); +} + +static inline void acc_ov_clear_bits(struct acc_ov *ov, + unsigned short offs, u32 b) +{ + u32 v = acc_ov_read32(ov, offs); + + v &= ~b; + acc_ov_write32(ov, offs, v); +} + +static inline void acc_reset_fpga(struct acc_ov *ov) +{ + acc_ov_write32(ov, ACC_OV_OF_MODE, ACC_OV_REG_MODE_MASK_FPGA_RESET); + + /* (Re-)start and wait for completion of addon detection on the I^2C bus */ + acc_ov_set_bits(ov, ACC_OV_OF_MODE, ACC_OV_REG_MODE_MASK_I2C_ENABLE); + mdelay(ACC_I2C_ADDON_DETECT_DELAY_MS); +} + +void acc_init_ov(struct acc_ov *ov, struct device *dev); +void acc_init_bm_ptr(struct acc_ov *ov, struct acc_core *cores, + const void *mem); +int acc_open(struct net_device *netdev); +int acc_close(struct net_device *netdev); +netdev_tx_t acc_start_xmit(struct sk_buff *skb, struct net_device *netdev); +int acc_get_berr_counter(const struct net_device *netdev, + struct can_berr_counter *bec); +int acc_set_mode(struct net_device *netdev, enum can_mode mode); +int acc_set_bittiming(struct net_device *netdev); +irqreturn_t acc_card_interrupt(struct acc_ov *ov, struct acc_core *cores); diff --git a/drivers/net/can/flexcan/Makefile b/drivers/net/can/flexcan/Makefile new file mode 100644 index 000000000000..89d5695c902e --- /dev/null +++ b/drivers/net/can/flexcan/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o + +flexcan-objs := +flexcan-objs += flexcan-core.o +flexcan-objs += flexcan-ethtool.o diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan/flexcan-core.c index 0f36eafe3ac1..f5d22c61503f 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan/flexcan-core.c @@ -9,23 +9,30 @@ // // Based on code originally by Andrey Volkov <avolkov@varma-el.com> -#include <linux/netdevice.h> +#include <dt-bindings/firmware/imx/rsrc.h> +#include <linux/bitfield.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> -#include <linux/can/led.h> -#include <linux/can/rx-offload.h> #include <linux/clk.h> #include <linux/delay.h> +#include <linux/firmware/imx/sci.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/mfd/syscon.h> #include 
<linux/module.h> +#include <linux/netdevice.h> #include <linux/of.h> -#include <linux/of_device.h> +#include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> -#include <linux/regulator/consumer.h> +#include <linux/can/platform/flexcan.h> +#include <linux/phy/phy.h> +#include <linux/pm_runtime.h> +#include <linux/property.h> #include <linux/regmap.h> +#include <linux/regulator/consumer.h> + +#include "flexcan.h" #define DRV_NAME "flexcan" @@ -51,6 +58,7 @@ #define FLEXCAN_MCR_IRMQ BIT(16) #define FLEXCAN_MCR_LPRIO_EN BIT(13) #define FLEXCAN_MCR_AEN BIT(12) +#define FLEXCAN_MCR_FDEN BIT(11) /* MCR_MAXMB: maximum used MBs is MAXMB + 1 */ #define FLEXCAN_MCR_MAXMB(x) ((x) & 0x7f) #define FLEXCAN_MCR_IDAM_A (0x0 << 8) @@ -90,6 +98,7 @@ #define FLEXCAN_CTRL2_MRP BIT(18) #define FLEXCAN_CTRL2_RRS BIT(17) #define FLEXCAN_CTRL2_EACEN BIT(16) +#define FLEXCAN_CTRL2_ISOCANFDEN BIT(12) /* FLEXCAN memory error control register (MECR) bits */ #define FLEXCAN_MECR_ECRWRDIS BIT(31) @@ -133,15 +142,42 @@ (FLEXCAN_ESR_ERR_BUS | FLEXCAN_ESR_ERR_STATE) #define FLEXCAN_ESR_ALL_INT \ (FLEXCAN_ESR_TWRN_INT | FLEXCAN_ESR_RWRN_INT | \ - FLEXCAN_ESR_BOFF_INT | FLEXCAN_ESR_ERR_INT | \ - FLEXCAN_ESR_WAK_INT) + FLEXCAN_ESR_BOFF_INT | FLEXCAN_ESR_ERR_INT) + +/* FLEXCAN Bit Timing register (CBT) bits */ +#define FLEXCAN_CBT_BTF BIT(31) +#define FLEXCAN_CBT_EPRESDIV_MASK GENMASK(30, 21) +#define FLEXCAN_CBT_ERJW_MASK GENMASK(20, 16) +#define FLEXCAN_CBT_EPROPSEG_MASK GENMASK(15, 10) +#define FLEXCAN_CBT_EPSEG1_MASK GENMASK(9, 5) +#define FLEXCAN_CBT_EPSEG2_MASK GENMASK(4, 0) + +/* FLEXCAN FD control register (FDCTRL) bits */ +#define FLEXCAN_FDCTRL_FDRATE BIT(31) +#define FLEXCAN_FDCTRL_MBDSR1 GENMASK(20, 19) +#define FLEXCAN_FDCTRL_MBDSR0 GENMASK(17, 16) +#define FLEXCAN_FDCTRL_MBDSR_8 0x0 +#define FLEXCAN_FDCTRL_MBDSR_12 0x1 +#define FLEXCAN_FDCTRL_MBDSR_32 0x2 +#define FLEXCAN_FDCTRL_MBDSR_64 0x3 +#define FLEXCAN_FDCTRL_TDCEN BIT(15) +#define FLEXCAN_FDCTRL_TDCFAIL BIT(14) +#define FLEXCAN_FDCTRL_TDCOFF GENMASK(12, 8) +#define FLEXCAN_FDCTRL_TDCVAL GENMASK(5, 0) + +/* FLEXCAN FD Bit Timing register (FDCBT) bits */ +#define FLEXCAN_FDCBT_FPRESDIV_MASK GENMASK(29, 20) +#define FLEXCAN_FDCBT_FRJW_MASK GENMASK(18, 16) +#define FLEXCAN_FDCBT_FPROPSEG_MASK GENMASK(14, 10) +#define FLEXCAN_FDCBT_FPSEG1_MASK GENMASK(7, 5) +#define FLEXCAN_FDCBT_FPSEG2_MASK GENMASK(2, 0) /* FLEXCAN interrupt flag register (IFLAG) bits */ /* Errata ERR005829 step7: Reserve first valid MB */ -#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO 8 -#define FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP 0 -#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST (FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP + 1) -#define FLEXCAN_IFLAG_MB(x) BIT((x) & 0x1f) +#define FLEXCAN_TX_MB_RESERVED_RX_FIFO 8 +#define FLEXCAN_TX_MB_RESERVED_RX_MAILBOX 0 +#define FLEXCAN_RX_MB_RX_MAILBOX_FIRST (FLEXCAN_TX_MB_RESERVED_RX_MAILBOX + 1) +#define FLEXCAN_IFLAG_MB(x) BIT_ULL(x) #define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7) #define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6) #define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE BIT(5) @@ -160,37 +196,16 @@ #define FLEXCAN_MB_CODE_TX_DATA (0xc << 24) #define FLEXCAN_MB_CODE_TX_TANSWER (0xe << 24) +#define FLEXCAN_MB_CNT_EDL BIT(31) +#define FLEXCAN_MB_CNT_BRS BIT(30) +#define FLEXCAN_MB_CNT_ESI BIT(29) #define FLEXCAN_MB_CNT_SRR BIT(22) #define FLEXCAN_MB_CNT_IDE BIT(21) #define FLEXCAN_MB_CNT_RTR BIT(20) #define FLEXCAN_MB_CNT_LENGTH(x) (((x) & 0xf) << 16) #define FLEXCAN_MB_CNT_TIMESTAMP(x) ((x) & 0xffff) -#define FLEXCAN_TIMEOUT_US (50) - -/* FLEXCAN hardware feature 
flags - * - * Below is some version info we got: - * SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR re- - * Filter? connected? Passive detection ception in MB - * MX25 FlexCAN2 03.00.00.00 no no no no no - * MX28 FlexCAN2 03.00.04.00 yes yes no no no - * MX35 FlexCAN2 03.00.00.00 no no no no no - * MX53 FlexCAN2 03.00.00.00 yes no no no no - * MX6s FlexCAN3 10.00.12.00 yes yes no no yes - * VF610 FlexCAN3 ? no yes no yes yes? - * LS1021A FlexCAN2 03.00.04.00 no yes no no yes - * - * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected. - */ -#define FLEXCAN_QUIRK_BROKEN_WERR_STATE BIT(1) /* [TR]WRN_INT not connected */ -#define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2) /* Disable RX FIFO Global mask */ -#define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3) /* Enable EACEN and RRS bit in ctrl2 */ -#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disable Memory error detection */ -#define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5) /* Use timestamp based offloading */ -#define FLEXCAN_QUIRK_BROKEN_PERR_STATE BIT(6) /* No interrupt for error passive */ -#define FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN BIT(7) /* default to BE register access */ -#define FLEXCAN_QUIRK_SETUP_STOP_MODE BIT(8) /* Setup stop mode to support wakeup */ +#define FLEXCAN_TIMEOUT_US (250) /* Structure of the message buffer */ struct flexcan_mb { @@ -202,12 +217,12 @@ struct flexcan_mb { /* Structure of the hardware registers */ struct flexcan_regs { u32 mcr; /* 0x00 */ - u32 ctrl; /* 0x04 */ + u32 ctrl; /* 0x04 - Not affected by Soft Reset */ u32 timer; /* 0x08 */ - u32 _reserved1; /* 0x0c */ - u32 rxgmask; /* 0x10 */ - u32 rx14mask; /* 0x14 */ - u32 rx15mask; /* 0x18 */ + u32 tcr; /* 0x0c */ + u32 rxgmask; /* 0x10 - Not affected by Soft Reset */ + u32 rx14mask; /* 0x14 - Not affected by Soft Reset */ + u32 rx15mask; /* 0x18 - Not affected by Soft Reset */ u32 ecr; /* 0x1c */ u32 esr; /* 0x20 */ u32 imask2; /* 0x24 */ @@ -216,31 +231,46 @@ struct flexcan_regs { u32 iflag1; /* 0x30 */ union { /* 0x34 */ u32 gfwr_mx28; /* MX28, MX53 */ - u32 ctrl2; /* MX6, VF610 */ + u32 ctrl2; /* MX6, VF610 - Not affected by Soft Reset */ }; u32 esr2; /* 0x38 */ u32 imeur; /* 0x3c */ u32 lrfr; /* 0x40 */ u32 crcr; /* 0x44 */ u32 rxfgmask; /* 0x48 */ - u32 rxfir; /* 0x4c */ - u32 _reserved3[12]; /* 0x50 */ - u8 mb[2][512]; /* 0x80 */ - /* FIFO-mode: - * MB - * 0x080...0x08f 0 RX message buffer - * 0x090...0x0df 1-5 reserverd - * 0x0e0...0x0ff 6-7 8 entry ID table - * (mx25, mx28, mx35, mx53) - * 0x0e0...0x2df 6-7..37 8..128 entry ID table - * size conf'ed via ctrl2::RFFN - * (mx6, vf610) - */ - u32 _reserved4[256]; /* 0x480 */ - u32 rximr[64]; /* 0x880 */ - u32 _reserved5[24]; /* 0x980 */ - u32 gfwr_mx6; /* 0x9e0 - MX6 */ - u32 _reserved6[63]; /* 0x9e4 */ + u32 rxfir; /* 0x4c - Not affected by Soft Reset */ + u32 cbt; /* 0x50 - Not affected by Soft Reset */ + u32 _reserved2; /* 0x54 */ + u32 dbg1; /* 0x58 */ + u32 dbg2; /* 0x5c */ + u32 _reserved3[8]; /* 0x60 */ + struct_group(init, + u8 mb[2][512]; /* 0x80 - Not affected by Soft Reset */ + /* FIFO-mode: + * MB + * 0x080...0x08f 0 RX message buffer + * 0x090...0x0df 1-5 reserved + * 0x0e0...0x0ff 6-7 8 entry ID table + * (mx25, mx28, mx35, mx53) + * 0x0e0...0x2df 6-7..37 8..128 entry ID table + * size conf'ed via ctrl2::RFFN + * (mx6, vf610) + */ + u32 _reserved4[256]; /* 0x480 */ + u32 rximr[64]; /* 0x880 - Not affected by Soft Reset */ + u32 _reserved5[24]; /* 0x980 */ + u32 gfwr_mx6; /* 0x9e0 - MX6 */ + u32 _reserved6[39]; /* 0x9e4 */ + u32 _rxfir[6]; /* 0xa80 */ + u32 _reserved8[2]; 
/* 0xa98 */ + u32 _rxmgmask; /* 0xaa0 */ + u32 _rxfgmask; /* 0xaa4 */ + u32 _rx14mask; /* 0xaa8 */ + u32 _rx15mask; /* 0xaac */ + u32 tx_smb[4]; /* 0xab0 */ + u32 rx_smb0[4]; /* 0xac0 */ + u32 rx_smb1[4]; /* 0xad0 */ + ); u32 mecr; /* 0xae0 */ u32 erriar; /* 0xae4 */ u32 erridpr; /* 0xae8 */ @@ -249,76 +279,122 @@ struct flexcan_regs { u32 rerrdr; /* 0xaf4 */ u32 rerrsynr; /* 0xaf8 */ u32 errsr; /* 0xafc */ + u32 _reserved7[64]; /* 0xb00 */ + u32 fdctrl; /* 0xc00 - Not affected by Soft Reset */ + u32 fdcbt; /* 0xc04 - Not affected by Soft Reset */ + u32 fdcrc; /* 0xc08 */ + u32 _reserved9[199]; /* 0xc0c */ + struct_group(init_fd, + u32 tx_smb_fd[18]; /* 0xf28 */ + u32 rx_smb0_fd[18]; /* 0xf70 */ + u32 rx_smb1_fd[18]; /* 0xfb8 */ + ); }; -struct flexcan_devtype_data { - u32 quirks; /* quirks needed for different IP cores */ -}; - -struct flexcan_stop_mode { - struct regmap *gpr; - u8 req_gpr; - u8 req_bit; - u8 ack_gpr; - u8 ack_bit; -}; - -struct flexcan_priv { - struct can_priv can; - struct can_rx_offload offload; - - struct flexcan_regs __iomem *regs; - struct flexcan_mb __iomem *tx_mb; - struct flexcan_mb __iomem *tx_mb_reserved; - u8 tx_mb_idx; - u8 mb_count; - u8 mb_size; - u32 reg_ctrl_default; - u32 reg_imask1_default; - u32 reg_imask2_default; - - struct clk *clk_ipg; - struct clk *clk_per; - const struct flexcan_devtype_data *devtype_data; - struct regulator *reg_xceiver; - struct flexcan_stop_mode stm; +static_assert(sizeof(struct flexcan_regs) == 0x4 * 18 + 0xfb8); - /* Read and Write APIs */ - u32 (*read)(void __iomem *addr); - void (*write)(u32 val, void __iomem *addr); +static const struct flexcan_devtype_data fsl_mcf5441x_devtype_data = { + .quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE | + FLEXCAN_QUIRK_NR_IRQ_3 | FLEXCAN_QUIRK_NR_MB_16 | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_FIFO, }; static const struct flexcan_devtype_data fsl_p1010_devtype_data = { .quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE | FLEXCAN_QUIRK_BROKEN_PERR_STATE | - FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN, + FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_FIFO, }; static const struct flexcan_devtype_data fsl_imx25_devtype_data = { .quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE | - FLEXCAN_QUIRK_BROKEN_PERR_STATE, + FLEXCAN_QUIRK_BROKEN_PERR_STATE | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_FIFO, }; static const struct flexcan_devtype_data fsl_imx28_devtype_data = { - .quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE, + .quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_FIFO, }; static const struct flexcan_devtype_data fsl_imx6q_devtype_data = { .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | - FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE | - FLEXCAN_QUIRK_SETUP_STOP_MODE, + FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_BROKEN_PERR_STATE | + FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR, +}; + +static const struct flexcan_devtype_data fsl_imx8qm_devtype_data = { + .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | + FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_BROKEN_PERR_STATE | + FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR, +}; + +static struct flexcan_devtype_data fsl_imx8mp_devtype_data = { + .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | 
FLEXCAN_QUIRK_ENABLE_EACEN_RRS | + FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX | + FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR | + FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SUPPORT_ECC | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR, +}; + +static struct flexcan_devtype_data fsl_imx93_devtype_data = { + .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | + FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX | + FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR | + FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SUPPORT_ECC | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR, +}; + +static const struct flexcan_devtype_data fsl_imx95_devtype_data = { + .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | + FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX | + FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SUPPORT_FD | + FLEXCAN_QUIRK_SUPPORT_ECC | FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR | FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI, }; static const struct flexcan_devtype_data fsl_vf610_devtype_data = { .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | - FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | - FLEXCAN_QUIRK_BROKEN_PERR_STATE, + FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX | + FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SUPPORT_ECC | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR, }; static const struct flexcan_devtype_data fsl_ls1021a_r2_devtype_data = { .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | + FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_USE_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR, +}; + +static const struct flexcan_devtype_data fsl_lx2160a_r1_devtype_data = { + .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | + FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE | + FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_FD | + FLEXCAN_QUIRK_SUPPORT_ECC | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR, +}; + +static const struct flexcan_devtype_data nxp_s32g2_devtype_data = { + .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE | - FLEXCAN_QUIRK_USE_OFF_TIMESTAMP, + FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_FD | + FLEXCAN_QUIRK_SUPPORT_ECC | FLEXCAN_QUIRK_NR_IRQ_3 | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR | + FLEXCAN_QUIRK_SECONDARY_MB_IRQ, }; static const struct can_bittiming_const flexcan_bittiming_const = { @@ -333,6 +409,30 @@ static const struct can_bittiming_const flexcan_bittiming_const = { .brp_inc = 1, }; +static const struct can_bittiming_const flexcan_fd_bittiming_const = { + .name = DRV_NAME, + .tseg1_min = 2, + .tseg1_max = 96, + .tseg2_min = 2, + .tseg2_max = 32, + .sjw_max = 16, + .brp_min = 1, + .brp_max = 1024, + .brp_inc = 1, +}; + +static const struct can_bittiming_const flexcan_fd_data_bittiming_const = { + .name = DRV_NAME, + .tseg1_min = 2, + .tseg1_max = 39, + .tseg2_min = 2, + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 1024, + .brp_inc = 1, +}; + /* FlexCAN module is essentially modelled as a little-endian IP in most * SoCs, i.e the registers as well as the message buffer areas are * implemented in a 
little-endian fashion. @@ -385,6 +485,34 @@ static struct flexcan_mb __iomem *flexcan_get_mb(const struct flexcan_priv *priv (&priv->regs->mb[bank][priv->mb_size * mb_index]); } +static int flexcan_low_power_enter_ack(struct flexcan_priv *priv) +{ + struct flexcan_regs __iomem *regs = priv->regs; + unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; + + while (timeout-- && !(priv->read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)) + udelay(10); + + if (!(priv->read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)) + return -ETIMEDOUT; + + return 0; +} + +static int flexcan_low_power_exit_ack(struct flexcan_priv *priv) +{ + struct flexcan_regs __iomem *regs = priv->regs; + unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; + + while (timeout-- && (priv->read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)) + udelay(10); + + if (priv->read(®s->mcr) & FLEXCAN_MCR_LPM_ACK) + return -ETIMEDOUT; + + return 0; +} + static void flexcan_enable_wakeup_irq(struct flexcan_priv *priv, bool enable) { struct flexcan_regs __iomem *regs = priv->regs; @@ -400,32 +528,78 @@ static void flexcan_enable_wakeup_irq(struct flexcan_priv *priv, bool enable) priv->write(reg_mcr, ®s->mcr); } -static inline void flexcan_enter_stop_mode(struct flexcan_priv *priv) +static int flexcan_stop_mode_enable_scfw(struct flexcan_priv *priv, bool enabled) +{ + u8 idx = priv->scu_idx; + u32 rsrc_id, val; + + rsrc_id = IMX_SC_R_CAN(idx); + + if (enabled) + val = 1; + else + val = 0; + + /* stop mode request via scu firmware */ + return imx_sc_misc_set_control(priv->sc_ipc_handle, rsrc_id, + IMX_SC_C_IPG_STOP, val); +} + +static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; u32 reg_mcr; + int ret; reg_mcr = priv->read(®s->mcr); reg_mcr |= FLEXCAN_MCR_SLF_WAK; priv->write(reg_mcr, ®s->mcr); /* enable stop request */ - regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr, - 1 << priv->stm.req_bit, 1 << priv->stm.req_bit); + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) { + ret = flexcan_stop_mode_enable_scfw(priv, true); + if (ret < 0) + return ret; + } else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR) { + regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr, + 1 << priv->stm.req_bit, 1 << priv->stm.req_bit); + } else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI) { + /* For the SCMI mode, driver do nothing, ATF will send request to + * SM(system manager, M33 core) through SCMI protocol after linux + * suspend. Once SM get this request, it will send IPG_STOP signal + * to Flex_CAN, let CAN in STOP mode. + */ + return 0; + } + + return flexcan_low_power_enter_ack(priv); } -static inline void flexcan_exit_stop_mode(struct flexcan_priv *priv) +static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; u32 reg_mcr; + int ret; - /* remove stop request */ - regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr, - 1 << priv->stm.req_bit, 0); + /* Remove stop request, for FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI, + * do nothing here, because ATF already send request to SM before + * linux resume. Once SM get this request, it will deassert the + * IPG_STOP signal to Flex_CAN. 
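flexcan_low_power_enter_ack() and flexcan_low_power_exit_ack() above poll MCR[LPM_ACK] until FLEXCAN_TIMEOUT_US is exhausted; stripped of the register access, the polling pattern looks like the sketch below (the hardware readback is replaced by a stub, so this is a model, not the driver code).

#include <errno.h>
#include <stdio.h>

#define FLEXCAN_TIMEOUT_US	250

/* Stub standing in for "priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK". */
static int lpm_ack_set(void)
{
	static int countdown = 3;	/* pretend the ACK appears on the 3rd poll */
	return --countdown <= 0;
}

static int low_power_enter_ack(void)
{
	unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;

	while (timeout-- && !lpm_ack_set())
		;	/* the driver udelay(10)s between polls */

	return lpm_ack_set() ? 0 : -ETIMEDOUT;
}

int main(void)
{
	printf("enter ack: %d\n", low_power_enter_ack());
	return 0;
}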
+ */ + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) { + ret = flexcan_stop_mode_enable_scfw(priv, false); + if (ret < 0) + return ret; + } else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR) { + regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr, + 1 << priv->stm.req_bit, 0); + } reg_mcr = priv->read(®s->mcr); reg_mcr &= ~FLEXCAN_MCR_SLF_WAK; priv->write(reg_mcr, ®s->mcr); + + return flexcan_low_power_exit_ack(priv); } static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv) @@ -444,68 +618,89 @@ static inline void flexcan_error_irq_disable(const struct flexcan_priv *priv) priv->write(reg_ctrl, ®s->ctrl); } +static int flexcan_clks_enable(const struct flexcan_priv *priv) +{ + int err = 0; + + if (priv->clk_ipg) { + err = clk_prepare_enable(priv->clk_ipg); + if (err) + return err; + } + + if (priv->clk_per) { + err = clk_prepare_enable(priv->clk_per); + if (err) + clk_disable_unprepare(priv->clk_ipg); + } + + return err; +} + +static void flexcan_clks_disable(const struct flexcan_priv *priv) +{ + clk_disable_unprepare(priv->clk_per); + clk_disable_unprepare(priv->clk_ipg); +} + static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv) { - if (!priv->reg_xceiver) - return 0; + if (priv->reg_xceiver) + return regulator_enable(priv->reg_xceiver); + else if (priv->transceiver) + return phy_power_on(priv->transceiver); - return regulator_enable(priv->reg_xceiver); + return 0; } static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv) { - if (!priv->reg_xceiver) - return 0; + if (priv->reg_xceiver) + return regulator_disable(priv->reg_xceiver); + else if (priv->transceiver) + return phy_power_off(priv->transceiver); - return regulator_disable(priv->reg_xceiver); + return 0; } static int flexcan_chip_enable(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; - unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; u32 reg; reg = priv->read(®s->mcr); reg &= ~FLEXCAN_MCR_MDIS; priv->write(reg, ®s->mcr); - while (timeout-- && (priv->read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)) - udelay(10); - - if (priv->read(®s->mcr) & FLEXCAN_MCR_LPM_ACK) - return -ETIMEDOUT; - - return 0; + return flexcan_low_power_exit_ack(priv); } static int flexcan_chip_disable(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; - unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; u32 reg; reg = priv->read(®s->mcr); reg |= FLEXCAN_MCR_MDIS; priv->write(reg, ®s->mcr); - while (timeout-- && !(priv->read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)) - udelay(10); - - if (!(priv->read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)) - return -ETIMEDOUT; - - return 0; + return flexcan_low_power_enter_ack(priv); } static int flexcan_chip_freeze(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; - unsigned int timeout = 1000 * 1000 * 10 / priv->can.bittiming.bitrate; + unsigned int timeout; + u32 bitrate = priv->can.bittiming.bitrate; u32 reg; + if (bitrate) + timeout = 1000 * 1000 * 10 / bitrate; + else + timeout = FLEXCAN_TIMEOUT_US / 10; + reg = priv->read(®s->mcr); - reg |= FLEXCAN_MCR_HALT; + reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT; priv->write(reg, ®s->mcr); while (timeout-- && !(priv->read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK)) @@ -570,19 +765,13 @@ static int flexcan_get_berr_counter(const struct net_device *dev, const struct flexcan_priv *priv = netdev_priv(dev); int err; - err = clk_prepare_enable(priv->clk_ipg); - if (err) + err = pm_runtime_resume_and_get(priv->dev); + if (err < 0) return err; 
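flexcan_chip_freeze() above now derives its poll count from the configured bitrate (roughly ten bit times) and falls back to FLEXCAN_TIMEOUT_US / 10 when the bitrate is still zero, e.g. during probe; the arithmetic on its own, with example bitrates:

#include <stdio.h>

#define FLEXCAN_TIMEOUT_US	250

static unsigned int freeze_timeout(unsigned int bitrate)
{
	/* 10 bit times, or the generic fallback if no bitrate is set yet. */
	if (bitrate)
		return 1000 * 1000 * 10 / bitrate;
	return FLEXCAN_TIMEOUT_US / 10;
}

int main(void)
{
	printf("500 kbit/s: %u\n", freeze_timeout(500000));	/* 20 */
	printf("unset:      %u\n", freeze_timeout(0));		/* 25 */
	return 0;
}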
- err = clk_prepare_enable(priv->clk_per); - if (err) - goto out_disable_ipg; - err = __flexcan_get_berr_counter(dev, bec); - clk_disable_unprepare(priv->clk_per); - out_disable_ipg: - clk_disable_unprepare(priv->clk_ipg); + pm_runtime_put(priv->dev); return err; } @@ -590,33 +779,40 @@ static int flexcan_get_berr_counter(const struct net_device *dev, static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev) { const struct flexcan_priv *priv = netdev_priv(dev); - struct can_frame *cf = (struct can_frame *)skb->data; + struct canfd_frame *cfd = (struct canfd_frame *)skb->data; u32 can_id; u32 data; - u32 ctrl = FLEXCAN_MB_CODE_TX_DATA | (cf->can_dlc << 16); + u32 ctrl = FLEXCAN_MB_CODE_TX_DATA | ((can_fd_len2dlc(cfd->len)) << 16); int i; - if (can_dropped_invalid_skb(dev, skb)) + if (can_dev_dropped_skb(dev, skb)) return NETDEV_TX_OK; netif_stop_queue(dev); - if (cf->can_id & CAN_EFF_FLAG) { - can_id = cf->can_id & CAN_EFF_MASK; + if (cfd->can_id & CAN_EFF_FLAG) { + can_id = cfd->can_id & CAN_EFF_MASK; ctrl |= FLEXCAN_MB_CNT_IDE | FLEXCAN_MB_CNT_SRR; } else { - can_id = (cf->can_id & CAN_SFF_MASK) << 18; + can_id = (cfd->can_id & CAN_SFF_MASK) << 18; } - if (cf->can_id & CAN_RTR_FLAG) + if (cfd->can_id & CAN_RTR_FLAG) ctrl |= FLEXCAN_MB_CNT_RTR; - for (i = 0; i < cf->can_dlc; i += sizeof(u32)) { - data = be32_to_cpup((__be32 *)&cf->data[i]); + if (can_is_canfd_skb(skb)) { + ctrl |= FLEXCAN_MB_CNT_EDL; + + if (cfd->flags & CANFD_BRS) + ctrl |= FLEXCAN_MB_CNT_BRS; + } + + for (i = 0; i < cfd->len; i += sizeof(u32)) { + data = be32_to_cpup((__be32 *)&cfd->data[i]); priv->write(data, &priv->tx_mb->data[i / sizeof(u32)]); } - can_put_echo_skb(skb, dev, 0); + can_put_echo_skb(skb, dev, 0, 0); priv->write(can_id, &priv->tx_mb->can_id); priv->write(ctrl, &priv->tx_mb->can_ctrl); @@ -640,6 +836,7 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr) struct can_frame *cf; bool rx_errors = false, tx_errors = false; u32 timestamp; + int err; timestamp = priv->read(®s->timer) << 16; @@ -688,7 +885,9 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr) if (tx_errors) dev->stats.tx_errors++; - can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); + err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); + if (err) + dev->stats.rx_fifo_errors++; } static void flexcan_irq_state(struct net_device *dev, u32 reg_esr) @@ -701,8 +900,7 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr) int flt; struct can_berr_counter bec; u32 timestamp; - - timestamp = priv->read(®s->timer) << 16; + int err; flt = reg_esr & FLEXCAN_ESR_FLT_CONF_MASK; if (likely(flt == FLEXCAN_ESR_FLT_CONF_ACTIVE)) { @@ -723,6 +921,8 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr) if (likely(new_state == priv->can.state)) return; + timestamp = priv->read(®s->timer) << 16; + skb = alloc_can_err_skb(dev, &cf); if (unlikely(!skb)) return; @@ -732,7 +932,39 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr) if (unlikely(new_state == CAN_STATE_BUS_OFF)) can_bus_off(dev); - can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); + err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); + if (err) + dev->stats.rx_fifo_errors++; +} + +static inline u64 flexcan_read64_mask(struct flexcan_priv *priv, void __iomem *addr, u64 mask) +{ + u64 reg = 0; + + if (upper_32_bits(mask)) + reg = (u64)priv->read(addr - 4) << 32; + if (lower_32_bits(mask)) + reg |= priv->read(addr); + + return reg & mask; +} + 
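flexcan_read64_mask() and flexcan_write64() above treat iflag2/iflag1 (and imask2/imask1) as one 64-bit register pair and only touch the upper word, which lives at addr - 4, when the mask or value actually has upper bits set; modeled on two plain variables the logic is:

#include <stdint.h>
#include <stdio.h>

/* Two adjacent 32-bit "registers": hi sits at the lower address (addr - 4). */
static uint32_t reg_hi, reg_lo;

static uint64_t read64_mask(uint64_t mask)
{
	uint64_t v = 0;

	if (mask >> 32)			/* upper_32_bits(mask) */
		v = (uint64_t)reg_hi << 32;
	if ((uint32_t)mask)		/* lower_32_bits(mask) */
		v |= reg_lo;
	return v & mask;
}

static void write64(uint64_t val)
{
	if (val >> 32)
		reg_hi = val >> 32;
	if ((uint32_t)val)
		reg_lo = (uint32_t)val;
}

int main(void)
{
	write64(0x0000000300000001ULL);
	printf("0x%016llx\n", (unsigned long long)read64_mask(~0ULL));
	return 0;
}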
+static inline void flexcan_write64(struct flexcan_priv *priv, u64 val, void __iomem *addr) +{ + if (upper_32_bits(val)) + priv->write(upper_32_bits(val), addr - 4); + if (lower_32_bits(val)) + priv->write(lower_32_bits(val), addr); +} + +static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv) +{ + return flexcan_read64_mask(priv, &priv->regs->iflag1, priv->rx_mask); +} + +static inline u64 flexcan_read_reg_iflag_tx(struct flexcan_priv *priv) +{ + return flexcan_read64_mask(priv, &priv->regs->iflag1, priv->tx_mask); } static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload) @@ -740,19 +972,21 @@ static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *off return container_of(offload, struct flexcan_priv, offload); } -static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload, - struct can_frame *cf, - u32 *timestamp, unsigned int n) +static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload, + unsigned int n, u32 *timestamp, + bool drop) { struct flexcan_priv *priv = rx_offload_to_priv(offload); struct flexcan_regs __iomem *regs = priv->regs; struct flexcan_mb __iomem *mb; + struct sk_buff *skb; + struct canfd_frame *cfd; u32 reg_ctrl, reg_id, reg_iflag1; int i; mb = flexcan_get_mb(priv, n); - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) { u32 code; do { @@ -763,7 +997,7 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload, code = reg_ctrl & FLEXCAN_MB_CODE_MASK; if ((code != FLEXCAN_MB_CODE_RX_FULL) && (code != FLEXCAN_MB_CODE_RX_OVERRUN)) - return 0; + return NULL; if (code == FLEXCAN_MB_CODE_RX_OVERRUN) { /* This MB was overrun, we lost data */ @@ -773,39 +1007,59 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload, } else { reg_iflag1 = priv->read(®s->iflag1); if (!(reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE)) - return 0; + return NULL; reg_ctrl = priv->read(&mb->can_ctrl); } + if (unlikely(drop)) { + skb = ERR_PTR(-ENOBUFS); + goto mark_as_read; + } + + if (reg_ctrl & FLEXCAN_MB_CNT_EDL) + skb = alloc_canfd_skb(offload->dev, &cfd); + else + skb = alloc_can_skb(offload->dev, (struct can_frame **)&cfd); + if (unlikely(!skb)) { + skb = ERR_PTR(-ENOMEM); + goto mark_as_read; + } + /* increase timstamp to full 32 bit */ *timestamp = reg_ctrl << 16; reg_id = priv->read(&mb->can_id); if (reg_ctrl & FLEXCAN_MB_CNT_IDE) - cf->can_id = ((reg_id >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG; + cfd->can_id = ((reg_id >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG; else - cf->can_id = (reg_id >> 18) & CAN_SFF_MASK; + cfd->can_id = (reg_id >> 18) & CAN_SFF_MASK; + + if (reg_ctrl & FLEXCAN_MB_CNT_EDL) { + cfd->len = can_fd_dlc2len((reg_ctrl >> 16) & 0xf); - if (reg_ctrl & FLEXCAN_MB_CNT_RTR) - cf->can_id |= CAN_RTR_FLAG; - cf->can_dlc = get_can_dlc((reg_ctrl >> 16) & 0xf); + if (reg_ctrl & FLEXCAN_MB_CNT_BRS) + cfd->flags |= CANFD_BRS; + } else { + cfd->len = can_cc_dlc2len((reg_ctrl >> 16) & 0xf); + + if (reg_ctrl & FLEXCAN_MB_CNT_RTR) + cfd->can_id |= CAN_RTR_FLAG; + } - for (i = 0; i < cf->can_dlc; i += sizeof(u32)) { + if (reg_ctrl & FLEXCAN_MB_CNT_ESI) + cfd->flags |= CANFD_ESI; + + for (i = 0; i < cfd->len; i += sizeof(u32)) { __be32 data = cpu_to_be32(priv->read(&mb->data[i / sizeof(u32)])); - *(__be32 *)(cf->data + i) = data; + *(__be32 *)(cfd->data + i) = data; } - /* mark as read */ - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { - /* Clear IRQ */ - if (n < 32) - 
priv->write(BIT(n), ®s->iflag1); - else - priv->write(BIT(n - 32), ®s->iflag2); - } else { + mark_as_read: + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) + flexcan_write64(priv, FLEXCAN_IFLAG_MB(n), ®s->iflag1); + else priv->write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, ®s->iflag1); - } /* Read the Free Running Timer. It is optional but recommended * to unlock Mailbox as soon as possible and make it available @@ -813,20 +1067,7 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload, */ priv->read(®s->timer); - return 1; -} - - -static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv) -{ - struct flexcan_regs __iomem *regs = priv->regs; - u32 iflag1, iflag2; - - iflag2 = priv->read(®s->iflag2) & priv->reg_imask2_default & - ~FLEXCAN_IFLAG_MB(priv->tx_mb_idx); - iflag1 = priv->read(®s->iflag1) & priv->reg_imask1_default; - - return (u64)iflag2 << 32 | iflag1; + return skb; } static irqreturn_t flexcan_irq(int irq, void *dev_id) @@ -836,18 +1077,19 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) struct flexcan_priv *priv = netdev_priv(dev); struct flexcan_regs __iomem *regs = priv->regs; irqreturn_t handled = IRQ_NONE; - u32 reg_iflag2, reg_esr; + u64 reg_iflag_tx; + u32 reg_esr; enum can_state last_state = priv->can.state; /* reception interrupt */ - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { - u64 reg_iflag; + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) { + u64 reg_iflag_rx; int ret; - while ((reg_iflag = flexcan_read_reg_iflag_rx(priv))) { + while ((reg_iflag_rx = flexcan_read_reg_iflag_rx(priv))) { handled = IRQ_HANDLED; ret = can_rx_offload_irq_offload_timestamp(&priv->offload, - reg_iflag); + reg_iflag_rx); if (!ret) break; } @@ -870,36 +1112,36 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) } } - reg_iflag2 = priv->read(®s->iflag2); + reg_iflag_tx = flexcan_read_reg_iflag_tx(priv); /* transmission complete interrupt */ - if (reg_iflag2 & FLEXCAN_IFLAG_MB(priv->tx_mb_idx)) { + if (reg_iflag_tx & priv->tx_mask) { u32 reg_ctrl = priv->read(&priv->tx_mb->can_ctrl); handled = IRQ_HANDLED; - stats->tx_bytes += can_rx_offload_get_echo_skb(&priv->offload, - 0, reg_ctrl << 16); + stats->tx_bytes += + can_rx_offload_get_echo_skb_queue_timestamp(&priv->offload, 0, + reg_ctrl << 16, NULL); stats->tx_packets++; - can_led_event(dev, CAN_LED_EVENT_TX); /* after sending a RTR frame MB is in RX mode */ priv->write(FLEXCAN_MB_CODE_TX_INACTIVE, &priv->tx_mb->can_ctrl); - priv->write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), ®s->iflag2); + flexcan_write64(priv, priv->tx_mask, ®s->iflag1); netif_wake_queue(dev); } reg_esr = priv->read(®s->esr); - /* ACK all bus error and state change IRQ sources */ - if (reg_esr & FLEXCAN_ESR_ALL_INT) { + /* ACK all bus error, state change and wake IRQ sources */ + if (reg_esr & (FLEXCAN_ESR_ALL_INT | FLEXCAN_ESR_WAK_INT)) { handled = IRQ_HANDLED; - priv->write(reg_esr & FLEXCAN_ESR_ALL_INT, ®s->esr); + priv->write(reg_esr & (FLEXCAN_ESR_ALL_INT | FLEXCAN_ESR_WAK_INT), ®s->esr); } /* state change interrupt or broken error state quirk fix is enabled */ if ((reg_esr & FLEXCAN_ESR_ERR_STATE) || - (priv->devtype_data->quirks & (FLEXCAN_QUIRK_BROKEN_WERR_STATE | + (priv->devtype_data.quirks & (FLEXCAN_QUIRK_BROKEN_WERR_STATE | FLEXCAN_QUIRK_BROKEN_PERR_STATE))) flexcan_irq_state(dev, reg_esr); @@ -921,11 +1163,11 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) * (1): enabled if FLEXCAN_QUIRK_BROKEN_WERR_STATE is enabled */ if ((last_state != priv->can.state) && - 
(priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_PERR_STATE) && + (priv->devtype_data.quirks & FLEXCAN_QUIRK_BROKEN_PERR_STATE) && !(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) { switch (priv->can.state) { case CAN_STATE_ERROR_ACTIVE: - if (priv->devtype_data->quirks & + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_BROKEN_WERR_STATE) flexcan_error_irq_enable(priv); else @@ -946,10 +1188,13 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) } } + if (handled) + can_rx_offload_irq_finish(&priv->offload); + return handled; } -static void flexcan_set_bittiming(struct net_device *dev) +static void flexcan_set_bittiming_ctrl(const struct net_device *dev) { const struct flexcan_priv *priv = netdev_priv(dev); const struct can_bittiming *bt = &priv->can.bittiming; @@ -961,10 +1206,7 @@ static void flexcan_set_bittiming(struct net_device *dev) FLEXCAN_CTRL_RJW(0x3) | FLEXCAN_CTRL_PSEG1(0x7) | FLEXCAN_CTRL_PSEG2(0x7) | - FLEXCAN_CTRL_PROPSEG(0x7) | - FLEXCAN_CTRL_LPB | - FLEXCAN_CTRL_SMP | - FLEXCAN_CTRL_LOM); + FLEXCAN_CTRL_PROPSEG(0x7)); reg |= FLEXCAN_CTRL_PRESDIV(bt->brp - 1) | FLEXCAN_CTRL_PSEG1(bt->phase_seg1 - 1) | @@ -972,6 +1214,130 @@ static void flexcan_set_bittiming(struct net_device *dev) FLEXCAN_CTRL_RJW(bt->sjw - 1) | FLEXCAN_CTRL_PROPSEG(bt->prop_seg - 1); + netdev_dbg(dev, "writing ctrl=0x%08x\n", reg); + priv->write(reg, ®s->ctrl); + + /* print chip status */ + netdev_dbg(dev, "%s: mcr=0x%08x ctrl=0x%08x\n", __func__, + priv->read(®s->mcr), priv->read(®s->ctrl)); +} + +static void flexcan_set_bittiming_cbt(const struct net_device *dev) +{ + struct flexcan_priv *priv = netdev_priv(dev); + struct can_bittiming *bt = &priv->can.bittiming; + struct can_bittiming *dbt = &priv->can.fd.data_bittiming; + struct flexcan_regs __iomem *regs = priv->regs; + u32 reg_cbt, reg_fdctrl; + + /* CBT */ + /* CBT[EPSEG1] is 5 bit long and CBT[EPROPSEG] is 6 bit + * long. The can_calc_bittiming() tries to divide the tseg1 + * equally between phase_seg1 and prop_seg, which may not fit + * in CBT register. Therefore, if phase_seg1 is more than + * possible value, increase prop_seg and decrease phase_seg1. + */ + if (bt->phase_seg1 > 0x20) { + bt->prop_seg += (bt->phase_seg1 - 0x20); + bt->phase_seg1 = 0x20; + } + + reg_cbt = FLEXCAN_CBT_BTF | + FIELD_PREP(FLEXCAN_CBT_EPRESDIV_MASK, bt->brp - 1) | + FIELD_PREP(FLEXCAN_CBT_ERJW_MASK, bt->sjw - 1) | + FIELD_PREP(FLEXCAN_CBT_EPROPSEG_MASK, bt->prop_seg - 1) | + FIELD_PREP(FLEXCAN_CBT_EPSEG1_MASK, bt->phase_seg1 - 1) | + FIELD_PREP(FLEXCAN_CBT_EPSEG2_MASK, bt->phase_seg2 - 1); + + netdev_dbg(dev, "writing cbt=0x%08x\n", reg_cbt); + priv->write(reg_cbt, ®s->cbt); + + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { + u32 reg_fdcbt, reg_ctrl2; + + if (bt->brp != dbt->brp) + netdev_warn(dev, "Data brp=%d and brp=%d don't match, this may result in a phase error. Consider using different bitrate and/or data bitrate.\n", + dbt->brp, bt->brp); + + /* FDCBT */ + /* FDCBT[FPSEG1] is 3 bit long and FDCBT[FPROPSEG] is + * 5 bit long. The can_calc_bittiming tries to divide + * the tseg1 equally between phase_seg1 and prop_seg, + * which may not fit in FDCBT register. 
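The CBT comment above explains why an oversized phase_seg1 is folded into prop_seg: CBT[EPSEG1] is only 5 bits wide while CBT[EPROPSEG] has 6. The redistribution itself is tiny, shown here with an invented timing standing in for a can_calc_bittiming() result:

#include <stdio.h>

int main(void)
{
	unsigned int prop_seg = 10, phase_seg1 = 0x28;	/* example values only */

	if (phase_seg1 > 0x20) {
		prop_seg += phase_seg1 - 0x20;
		phase_seg1 = 0x20;
	}
	printf("prop_seg %u, phase_seg1 %u\n", prop_seg, phase_seg1);	/* 18, 32 */
	return 0;
}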
Therefore, if + * phase_seg1 is more than possible value, increase + * prop_seg and decrease phase_seg1 + */ + if (dbt->phase_seg1 > 0x8) { + dbt->prop_seg += (dbt->phase_seg1 - 0x8); + dbt->phase_seg1 = 0x8; + } + + reg_fdcbt = priv->read(®s->fdcbt); + reg_fdcbt &= ~(FIELD_PREP(FLEXCAN_FDCBT_FPRESDIV_MASK, 0x3ff) | + FIELD_PREP(FLEXCAN_FDCBT_FRJW_MASK, 0x7) | + FIELD_PREP(FLEXCAN_FDCBT_FPROPSEG_MASK, 0x1f) | + FIELD_PREP(FLEXCAN_FDCBT_FPSEG1_MASK, 0x7) | + FIELD_PREP(FLEXCAN_FDCBT_FPSEG2_MASK, 0x7)); + + reg_fdcbt |= FIELD_PREP(FLEXCAN_FDCBT_FPRESDIV_MASK, dbt->brp - 1) | + FIELD_PREP(FLEXCAN_FDCBT_FRJW_MASK, dbt->sjw - 1) | + FIELD_PREP(FLEXCAN_FDCBT_FPROPSEG_MASK, dbt->prop_seg) | + FIELD_PREP(FLEXCAN_FDCBT_FPSEG1_MASK, dbt->phase_seg1 - 1) | + FIELD_PREP(FLEXCAN_FDCBT_FPSEG2_MASK, dbt->phase_seg2 - 1); + + netdev_dbg(dev, "writing fdcbt=0x%08x\n", reg_fdcbt); + priv->write(reg_fdcbt, ®s->fdcbt); + + /* CTRL2 */ + reg_ctrl2 = priv->read(®s->ctrl2); + reg_ctrl2 &= ~FLEXCAN_CTRL2_ISOCANFDEN; + if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)) + reg_ctrl2 |= FLEXCAN_CTRL2_ISOCANFDEN; + + netdev_dbg(dev, "writing ctrl2=0x%08x\n", reg_ctrl2); + priv->write(reg_ctrl2, ®s->ctrl2); + } + + /* FDCTRL */ + reg_fdctrl = priv->read(®s->fdctrl); + reg_fdctrl &= ~(FLEXCAN_FDCTRL_FDRATE | + FIELD_PREP(FLEXCAN_FDCTRL_TDCOFF, 0x1f)); + + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { + reg_fdctrl |= FLEXCAN_FDCTRL_FDRATE; + + if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { + /* TDC must be disabled for Loop Back mode */ + reg_fdctrl &= ~FLEXCAN_FDCTRL_TDCEN; + } else { + reg_fdctrl |= FLEXCAN_FDCTRL_TDCEN | + FIELD_PREP(FLEXCAN_FDCTRL_TDCOFF, + ((dbt->phase_seg1 - 1) + + dbt->prop_seg + 2) * + ((dbt->brp - 1 ) + 1)); + } + } + + netdev_dbg(dev, "writing fdctrl=0x%08x\n", reg_fdctrl); + priv->write(reg_fdctrl, ®s->fdctrl); + + netdev_dbg(dev, "%s: mcr=0x%08x ctrl=0x%08x ctrl2=0x%08x fdctrl=0x%08x cbt=0x%08x fdcbt=0x%08x\n", + __func__, + priv->read(®s->mcr), priv->read(®s->ctrl), + priv->read(®s->ctrl2), priv->read(®s->fdctrl), + priv->read(®s->cbt), priv->read(®s->fdcbt)); +} + +static void flexcan_set_bittiming(struct net_device *dev) +{ + const struct flexcan_priv *priv = netdev_priv(dev); + struct flexcan_regs __iomem *regs = priv->regs; + u32 reg; + + reg = priv->read(®s->ctrl); + reg &= ~(FLEXCAN_CTRL_LPB | FLEXCAN_CTRL_SMP | + FLEXCAN_CTRL_LOM); + if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) reg |= FLEXCAN_CTRL_LPB; if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) @@ -982,9 +1348,107 @@ static void flexcan_set_bittiming(struct net_device *dev) netdev_dbg(dev, "writing ctrl=0x%08x\n", reg); priv->write(reg, ®s->ctrl); - /* print chip status */ - netdev_dbg(dev, "%s: mcr=0x%08x ctrl=0x%08x\n", __func__, - priv->read(®s->mcr), priv->read(®s->ctrl)); + if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) + return flexcan_set_bittiming_cbt(dev); + else + return flexcan_set_bittiming_ctrl(dev); +} + +static void flexcan_ram_init(struct net_device *dev) +{ + struct flexcan_priv *priv = netdev_priv(dev); + struct flexcan_regs __iomem *regs = priv->regs; + u32 reg_ctrl2; + + /* 11.8.3.13 Detection and correction of memory errors: + * CTRL2[WRMFRZ] grants write access to all memory positions + * that require initialization, ranging from 0x080 to 0xADF + * and from 0xF28 to 0xFFF when the CAN FD feature is enabled. + * The RXMGMASK, RX14MASK, RX15MASK, and RXFGMASK registers + * need to be initialized as well. MCR[RFEN] must not be set + * during memory initialization. 
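Editorial aside: the two bit-timing hunks above share the same fixup. can_calc_bittiming() tries to divide tseg1 roughly equally between prop_seg and phase_seg1, which can overflow the narrow CBT[EPSEG1] (phase_seg1 capped at 0x20) and FDCBT[FPSEG1] (capped at 0x8) fields, so any excess is shifted into the wider propagation-segment field instead. A minimal standalone sketch of that redistribution; the helper name is invented for illustration and does not exist in the driver, which open-codes the adjustment on bt and dbt directly:

	/* Move whatever does not fit into phase_seg1 over to prop_seg,
	 * so both values fit their register fields.
	 */
	static void redistribute_tseg1(unsigned int *prop_seg,
				       unsigned int *phase_seg1,
				       unsigned int phase_seg1_max)
	{
		if (*phase_seg1 > phase_seg1_max) {
			*prop_seg += *phase_seg1 - phase_seg1_max;
			*phase_seg1 = phase_seg1_max;
		}
	}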
+ */ + reg_ctrl2 = priv->read(®s->ctrl2); + reg_ctrl2 |= FLEXCAN_CTRL2_WRMFRZ; + priv->write(reg_ctrl2, ®s->ctrl2); + + memset_io(®s->init, 0, sizeof(regs->init)); + + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) + memset_io(®s->init_fd, 0, sizeof(regs->init_fd)); + + reg_ctrl2 &= ~FLEXCAN_CTRL2_WRMFRZ; + priv->write(reg_ctrl2, ®s->ctrl2); +} + +static int flexcan_rx_offload_setup(struct net_device *dev) +{ + struct flexcan_priv *priv = netdev_priv(dev); + int err; + + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) + priv->mb_size = sizeof(struct flexcan_mb) + CANFD_MAX_DLEN; + else + priv->mb_size = sizeof(struct flexcan_mb) + CAN_MAX_DLEN; + + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_MB_16) + priv->mb_count = 16; + else + priv->mb_count = (sizeof(priv->regs->mb[0]) / priv->mb_size) + + (sizeof(priv->regs->mb[1]) / priv->mb_size); + + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) + priv->tx_mb_reserved = + flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_RX_MAILBOX); + else + priv->tx_mb_reserved = + flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_RX_FIFO); + priv->tx_mb_idx = priv->mb_count - 1; + priv->tx_mb = flexcan_get_mb(priv, priv->tx_mb_idx); + priv->tx_mask = FLEXCAN_IFLAG_MB(priv->tx_mb_idx); + + priv->offload.mailbox_read = flexcan_mailbox_read; + + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) { + priv->offload.mb_first = FLEXCAN_RX_MB_RX_MAILBOX_FIRST; + priv->offload.mb_last = priv->mb_count - 2; + + priv->rx_mask = GENMASK_ULL(priv->offload.mb_last, + priv->offload.mb_first); + err = can_rx_offload_add_timestamp(dev, &priv->offload); + } else { + priv->rx_mask = FLEXCAN_IFLAG_RX_FIFO_OVERFLOW | + FLEXCAN_IFLAG_RX_FIFO_AVAILABLE; + err = can_rx_offload_add_fifo(dev, &priv->offload, + FLEXCAN_NAPI_WEIGHT); + } + + return err; +} + +static void flexcan_chip_interrupts_enable(const struct net_device *dev) +{ + const struct flexcan_priv *priv = netdev_priv(dev); + struct flexcan_regs __iomem *regs = priv->regs; + u64 reg_imask; + + disable_irq(dev->irq); + priv->write(priv->reg_ctrl_default, ®s->ctrl); + reg_imask = priv->rx_mask | priv->tx_mask; + priv->write(upper_32_bits(reg_imask), ®s->imask2); + priv->write(lower_32_bits(reg_imask), ®s->imask1); + enable_irq(dev->irq); +} + +static void flexcan_chip_interrupts_disable(const struct net_device *dev) +{ + const struct flexcan_priv *priv = netdev_priv(dev); + struct flexcan_regs __iomem *regs = priv->regs; + + priv->write(0, ®s->imask2); + priv->write(0, ®s->imask1); + priv->write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL, + ®s->ctrl); } /* flexcan_chip_start @@ -1010,12 +1474,18 @@ static int flexcan_chip_start(struct net_device *dev) if (err) goto out_chip_disable; + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SUPPORT_ECC) + flexcan_ram_init(dev); + flexcan_set_bittiming(dev); + /* set freeze, halt */ + err = flexcan_chip_freeze(priv); + if (err) + goto out_chip_disable; + /* MCR * - * enable freeze - * halt now * only supervisor access * enable warning int * enable individual RX masking @@ -1024,17 +1494,16 @@ static int flexcan_chip_start(struct net_device *dev) */ reg_mcr = priv->read(®s->mcr); reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff); - reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV | - FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_IRMQ | FLEXCAN_MCR_IDAM_C | - FLEXCAN_MCR_MAXMB(priv->tx_mb_idx); + reg_mcr |= FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_IRMQ | + FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_MAXMB(priv->tx_mb_idx); /* MCR * * FIFO: - * - disable for timestamp mode + * - disable for 
mailbox mode * - enable for FIFO mode */ - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) reg_mcr &= ~FLEXCAN_MCR_FEN; else reg_mcr |= FLEXCAN_MCR_FEN; @@ -1056,6 +1525,12 @@ static int flexcan_chip_start(struct net_device *dev) else reg_mcr |= FLEXCAN_MCR_SRX_DIS; + /* MCR - CAN-FD */ + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) + reg_mcr |= FLEXCAN_MCR_FDEN; + else + reg_mcr &= ~FLEXCAN_MCR_FDEN; + netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr); priv->write(reg_mcr, ®s->mcr); @@ -1079,7 +1554,7 @@ static int flexcan_chip_start(struct net_device *dev) * on most Flexcan cores, too. Otherwise we don't get * any error warning or passive interrupts. */ - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_WERR_STATE || + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_BROKEN_WERR_STATE || priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) reg_ctrl |= FLEXCAN_CTRL_ERR_MSK; else @@ -1092,13 +1567,39 @@ static int flexcan_chip_start(struct net_device *dev) netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl); priv->write(reg_ctrl, ®s->ctrl); - if ((priv->devtype_data->quirks & FLEXCAN_QUIRK_ENABLE_EACEN_RRS)) { + if ((priv->devtype_data.quirks & FLEXCAN_QUIRK_ENABLE_EACEN_RRS)) { reg_ctrl2 = priv->read(®s->ctrl2); reg_ctrl2 |= FLEXCAN_CTRL2_EACEN | FLEXCAN_CTRL2_RRS; priv->write(reg_ctrl2, ®s->ctrl2); } - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { + if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) { + u32 reg_fdctrl; + + reg_fdctrl = priv->read(®s->fdctrl); + reg_fdctrl &= ~(FIELD_PREP(FLEXCAN_FDCTRL_MBDSR1, 0x3) | + FIELD_PREP(FLEXCAN_FDCTRL_MBDSR0, 0x3)); + + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { + reg_fdctrl |= + FIELD_PREP(FLEXCAN_FDCTRL_MBDSR1, + FLEXCAN_FDCTRL_MBDSR_64) | + FIELD_PREP(FLEXCAN_FDCTRL_MBDSR0, + FLEXCAN_FDCTRL_MBDSR_64); + } else { + reg_fdctrl |= + FIELD_PREP(FLEXCAN_FDCTRL_MBDSR1, + FLEXCAN_FDCTRL_MBDSR_8) | + FIELD_PREP(FLEXCAN_FDCTRL_MBDSR0, + FLEXCAN_FDCTRL_MBDSR_8); + } + + netdev_dbg(dev, "%s: writing fdctrl=0x%08x", + __func__, reg_fdctrl); + priv->write(reg_fdctrl, ®s->fdctrl); + } + + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) { for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++) { mb = flexcan_get_mb(priv, i); priv->write(FLEXCAN_MB_CODE_RX_EMPTY, @@ -1106,7 +1607,7 @@ static int flexcan_chip_start(struct net_device *dev) } } else { /* clear and invalidate unused mailboxes first */ - for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i <= priv->mb_count; i++) { + for (i = FLEXCAN_TX_MB_RESERVED_RX_FIFO; i < priv->mb_count; i++) { mb = flexcan_get_mb(priv, i); priv->write(FLEXCAN_MB_CODE_RX_INACTIVE, &mb->can_ctrl); @@ -1126,88 +1627,105 @@ static int flexcan_chip_start(struct net_device *dev) priv->write(0x0, ®s->rx14mask); priv->write(0x0, ®s->rx15mask); - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_DISABLE_RXFG) + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_DISABLE_RXFG) priv->write(0x0, ®s->rxfgmask); /* clear acceptance filters */ for (i = 0; i < priv->mb_count; i++) priv->write(0, ®s->rximr[i]); - /* On Vybrid, disable memory error detection interrupts - * and freeze mode. - * This also works around errata e5295 which generates - * false positive memory errors and put the device in - * freeze mode. + /* On Vybrid, disable non-correctable errors interrupt and + * freeze mode. It still can correct the correctable errors + * when HW supports ECC. 
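Editorial aside: throughout this rework the per-mailbox interrupt flags are carried as single 64-bit values (priv->rx_mask, priv->tx_mask) and only split into the two 32-bit IFLAG/IMASK registers at the register boundary, as in flexcan_write64() and flexcan_chip_interrupts_enable() above. A standalone illustration of that split, assuming FLEXCAN_IFLAG_MB(n) maps mailbox n to bit n of the 64-bit mask (which is what the GENMASK_ULL() usage above implies); the mailbox indices below are made up for the demo:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int mb_first = 2, mb_last = 62, tx_mb_idx = 63; /* demo values */
		uint64_t rx_mask, tx_mask, imask;

		/* GENMASK_ULL(mb_last, mb_first) */
		rx_mask = (~0ULL >> (63 - mb_last)) & (~0ULL << mb_first);
		/* FLEXCAN_IFLAG_MB(tx_mb_idx) */
		tx_mask = 1ULL << tx_mb_idx;

		imask = rx_mask | tx_mask;
		printf("imask2=0x%08x imask1=0x%08x\n",
		       (unsigned int)(imask >> 32), (unsigned int)imask);
		return 0;
	}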
+ * + * This also works around errata e5295 which generates false + * positive memory errors and put the device in freeze mode. */ - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_DISABLE_MECR) { + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_DISABLE_MECR) { /* Follow the protocol as described in "Detection * and Correction of Memory Errors" to write to - * MECR register + * MECR register (step 1 - 5) + * + * 1. By default, CTRL2[ECRWRE] = 0, MECR[ECRWRDIS] = 1 + * 2. set CTRL2[ECRWRE] */ reg_ctrl2 = priv->read(®s->ctrl2); reg_ctrl2 |= FLEXCAN_CTRL2_ECRWRE; priv->write(reg_ctrl2, ®s->ctrl2); + /* 3. clear MECR[ECRWRDIS] */ reg_mecr = priv->read(®s->mecr); reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS; priv->write(reg_mecr, ®s->mecr); + + /* 4. all writes to MECR must keep MECR[ECRWRDIS] cleared */ reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK | FLEXCAN_MECR_FANCEI_MSK); priv->write(reg_mecr, ®s->mecr); - } - err = flexcan_transceiver_enable(priv); - if (err) - goto out_chip_disable; + /* 5. after configuration done, lock MECR by either + * setting MECR[ECRWRDIS] or clearing CTRL2[ECRWRE] + */ + reg_mecr |= FLEXCAN_MECR_ECRWRDIS; + priv->write(reg_mecr, ®s->mecr); + + reg_ctrl2 &= ~FLEXCAN_CTRL2_ECRWRE; + priv->write(reg_ctrl2, ®s->ctrl2); + } /* synchronize with the can bus */ err = flexcan_chip_unfreeze(priv); if (err) - goto out_transceiver_disable; + goto out_chip_disable; priv->can.state = CAN_STATE_ERROR_ACTIVE; - /* enable interrupts atomically */ - disable_irq(dev->irq); - priv->write(priv->reg_ctrl_default, ®s->ctrl); - priv->write(priv->reg_imask1_default, ®s->imask1); - priv->write(priv->reg_imask2_default, ®s->imask2); - enable_irq(dev->irq); - /* print chip status */ netdev_dbg(dev, "%s: reading mcr=0x%08x ctrl=0x%08x\n", __func__, priv->read(®s->mcr), priv->read(®s->ctrl)); return 0; - out_transceiver_disable: - flexcan_transceiver_disable(priv); out_chip_disable: flexcan_chip_disable(priv); return err; } -/* flexcan_chip_stop +/* __flexcan_chip_stop * - * this functions is entered with clocks enabled + * this function is entered with clocks enabled */ -static void flexcan_chip_stop(struct net_device *dev) +static int __flexcan_chip_stop(struct net_device *dev, bool disable_on_error) { struct flexcan_priv *priv = netdev_priv(dev); - struct flexcan_regs __iomem *regs = priv->regs; + int err; /* freeze + disable module */ - flexcan_chip_freeze(priv); - flexcan_chip_disable(priv); - - /* Disable all interrupts */ - priv->write(0, ®s->imask2); - priv->write(0, ®s->imask1); - priv->write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL, - ®s->ctrl); + err = flexcan_chip_freeze(priv); + if (err && !disable_on_error) + return err; + err = flexcan_chip_disable(priv); + if (err && !disable_on_error) + goto out_chip_unfreeze; - flexcan_transceiver_disable(priv); priv->can.state = CAN_STATE_STOPPED; + + return 0; + + out_chip_unfreeze: + flexcan_chip_unfreeze(priv); + + return err; +} + +static inline int flexcan_chip_stop_disable_on_error(struct net_device *dev) +{ + return __flexcan_chip_stop(dev, true); +} + +static inline int flexcan_chip_stop(struct net_device *dev) +{ + return __flexcan_chip_stop(dev, false); } static int flexcan_open(struct net_device *dev) @@ -1215,83 +1733,82 @@ static int flexcan_open(struct net_device *dev) struct flexcan_priv *priv = netdev_priv(dev); int err; - err = clk_prepare_enable(priv->clk_ipg); - if (err) - return err; + if ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) && + (priv->can.ctrlmode & CAN_CTRLMODE_FD)) { + netdev_err(dev, "Three 
Samples mode and CAN-FD mode can't be used together\n"); + return -EINVAL; + } - err = clk_prepare_enable(priv->clk_per); - if (err) - goto out_disable_ipg; + err = pm_runtime_resume_and_get(priv->dev); + if (err < 0) + return err; err = open_candev(dev); if (err) - goto out_disable_per; + goto out_runtime_put; - err = request_irq(dev->irq, flexcan_irq, IRQF_SHARED, dev->name, dev); + err = flexcan_transceiver_enable(priv); if (err) goto out_close; - priv->mb_size = sizeof(struct flexcan_mb) + CAN_MAX_DLEN; - priv->mb_count = (sizeof(priv->regs->mb[0]) / priv->mb_size) + - (sizeof(priv->regs->mb[1]) / priv->mb_size); - - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) - priv->tx_mb_reserved = - flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP); - else - priv->tx_mb_reserved = - flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_OFF_FIFO); - priv->tx_mb_idx = priv->mb_count - 1; - priv->tx_mb = flexcan_get_mb(priv, priv->tx_mb_idx); - - priv->reg_imask1_default = 0; - priv->reg_imask2_default = FLEXCAN_IFLAG_MB(priv->tx_mb_idx); + err = flexcan_rx_offload_setup(dev); + if (err) + goto out_transceiver_disable; - priv->offload.mailbox_read = flexcan_mailbox_read; + err = flexcan_chip_start(dev); + if (err) + goto out_can_rx_offload_del; - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { - u64 imask; + can_rx_offload_enable(&priv->offload); - priv->offload.mb_first = FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST; - priv->offload.mb_last = priv->mb_count - 2; + err = request_irq(dev->irq, flexcan_irq, IRQF_SHARED, dev->name, dev); + if (err) + goto out_can_rx_offload_disable; - imask = GENMASK_ULL(priv->offload.mb_last, - priv->offload.mb_first); - priv->reg_imask1_default |= imask; - priv->reg_imask2_default |= imask >> 32; + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) { + err = request_irq(priv->irq_boff, + flexcan_irq, IRQF_SHARED, dev->name, dev); + if (err) + goto out_free_irq; - err = can_rx_offload_add_timestamp(dev, &priv->offload); - } else { - priv->reg_imask1_default |= FLEXCAN_IFLAG_RX_FIFO_OVERFLOW | - FLEXCAN_IFLAG_RX_FIFO_AVAILABLE; - err = can_rx_offload_add_fifo(dev, &priv->offload, - FLEXCAN_NAPI_WEIGHT); + err = request_irq(priv->irq_err, + flexcan_irq, IRQF_SHARED, dev->name, dev); + if (err) + goto out_free_irq_boff; } - if (err) - goto out_free_irq; - /* start chip and queuing */ - err = flexcan_chip_start(dev); - if (err) - goto out_offload_del; + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SECONDARY_MB_IRQ) { + err = request_irq(priv->irq_secondary_mb, + flexcan_irq, IRQF_SHARED, dev->name, dev); + if (err) + goto out_free_irq_err; + } - can_led_event(dev, CAN_LED_EVENT_OPEN); + flexcan_chip_interrupts_enable(dev); - can_rx_offload_enable(&priv->offload); netif_start_queue(dev); return 0; - out_offload_del: - can_rx_offload_del(&priv->offload); + out_free_irq_err: + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) + free_irq(priv->irq_err, dev); + out_free_irq_boff: + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) + free_irq(priv->irq_boff, dev); out_free_irq: free_irq(dev->irq, dev); + out_can_rx_offload_disable: + can_rx_offload_disable(&priv->offload); + flexcan_chip_stop(dev); + out_can_rx_offload_del: + can_rx_offload_del(&priv->offload); + out_transceiver_disable: + flexcan_transceiver_disable(priv); out_close: close_candev(dev); - out_disable_per: - clk_disable_unprepare(priv->clk_per); - out_disable_ipg: - clk_disable_unprepare(priv->clk_ipg); + out_runtime_put: + pm_runtime_put(priv->dev); return err; } @@ 
-1301,17 +1818,25 @@ static int flexcan_close(struct net_device *dev) struct flexcan_priv *priv = netdev_priv(dev); netif_stop_queue(dev); - can_rx_offload_disable(&priv->offload); - flexcan_chip_stop(dev); + flexcan_chip_interrupts_disable(dev); + + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SECONDARY_MB_IRQ) + free_irq(priv->irq_secondary_mb, dev); + + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) { + free_irq(priv->irq_err, dev); + free_irq(priv->irq_boff, dev); + } - can_rx_offload_del(&priv->offload); free_irq(dev->irq, dev); - clk_disable_unprepare(priv->clk_per); - clk_disable_unprepare(priv->clk_ipg); + can_rx_offload_disable(&priv->offload); + flexcan_chip_stop_disable_on_error(dev); + can_rx_offload_del(&priv->offload); + flexcan_transceiver_disable(priv); close_candev(dev); - can_led_event(dev, CAN_LED_EVENT_STOP); + pm_runtime_put(priv->dev); return 0; } @@ -1326,6 +1851,8 @@ static int flexcan_set_mode(struct net_device *dev, enum can_mode mode) if (err) return err; + flexcan_chip_interrupts_enable(dev); + netif_wake_queue(dev); break; @@ -1340,7 +1867,6 @@ static const struct net_device_ops flexcan_netdev_ops = { .ndo_open = flexcan_open, .ndo_stop = flexcan_close, .ndo_start_xmit = flexcan_start_xmit, - .ndo_change_mtu = can_change_mtu, }; static int register_flexcandev(struct net_device *dev) @@ -1349,30 +1875,34 @@ static int register_flexcandev(struct net_device *dev) struct flexcan_regs __iomem *regs = priv->regs; u32 reg, err; - err = clk_prepare_enable(priv->clk_ipg); + err = flexcan_clks_enable(priv); if (err) return err; - err = clk_prepare_enable(priv->clk_per); - if (err) - goto out_disable_ipg; - /* select "bus clock", chip must be disabled */ err = flexcan_chip_disable(priv); if (err) - goto out_disable_per; + goto out_clks_disable; + reg = priv->read(®s->ctrl); - reg |= FLEXCAN_CTRL_CLK_SRC; + if (priv->clk_src) + reg |= FLEXCAN_CTRL_CLK_SRC; + else + reg &= ~FLEXCAN_CTRL_CLK_SRC; priv->write(reg, ®s->ctrl); err = flexcan_chip_enable(priv); if (err) goto out_chip_disable; - /* set freeze, halt and activate FIFO, restrict register access */ + /* set freeze, halt */ + err = flexcan_chip_freeze(priv); + if (err) + goto out_chip_disable; + + /* activate FIFO, restrict register access */ reg = priv->read(®s->mcr); - reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | - FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV; + reg |= FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV; priv->write(reg, ®s->mcr); /* Currently we only support newer versions of this core @@ -1388,15 +1918,21 @@ static int register_flexcandev(struct net_device *dev) } err = register_candev(dev); + if (err) + goto out_chip_disable; - /* disable core and turn off clocks */ - out_chip_disable: + /* Disable core and let pm_runtime_put() disable the clocks. + * If CONFIG_PM is not enabled, the clocks will stay powered. 
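Editorial aside: register_flexcandev() above and the runtime-PM callbacks added later in this patch go through flexcan_clks_enable()/flexcan_clks_disable() instead of open-coding the clock handling. Those helpers are defined earlier in flexcan.c and are not part of this excerpt; the sketch below is only a guess at their shape, based on the clk_ipg-before-clk_per ordering visible in the removed clk_prepare_enable() calls:

	static int flexcan_clks_enable(const struct flexcan_priv *priv)
	{
		int err;

		err = clk_prepare_enable(priv->clk_ipg);
		if (err)
			return err;

		err = clk_prepare_enable(priv->clk_per);
		if (err)
			clk_disable_unprepare(priv->clk_ipg);

		return err;
	}

	static void flexcan_clks_disable(const struct flexcan_priv *priv)
	{
		clk_disable_unprepare(priv->clk_per);
		clk_disable_unprepare(priv->clk_ipg);
	}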
+ */ flexcan_chip_disable(priv); - out_disable_per: - clk_disable_unprepare(priv->clk_per); - out_disable_ipg: - clk_disable_unprepare(priv->clk_ipg); + pm_runtime_put(priv->dev); + + return 0; + out_chip_disable: + flexcan_chip_disable(priv); + out_clks_disable: + flexcan_clks_disable(priv); return err; } @@ -1405,21 +1941,21 @@ static void unregister_flexcandev(struct net_device *dev) unregister_candev(dev); } -static int flexcan_setup_stop_mode(struct platform_device *pdev) +static int flexcan_setup_stop_mode_gpr(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct device_node *np = pdev->dev.of_node; struct device_node *gpr_np; struct flexcan_priv *priv; phandle phandle; - u32 out_val[5]; + u32 out_val[3]; int ret; if (!np) return -EINVAL; /* stop mode property format is: - * <&gpr req_gpr req_bit ack_gpr ack_bit>. + * <&gpr req_gpr req_bit>. */ ret = of_property_read_u32_array(np, "fsl,stop-mode", out_val, ARRAY_SIZE(out_val)); @@ -1432,33 +1968,98 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev) gpr_np = of_find_node_by_phandle(phandle); if (!gpr_np) { dev_dbg(&pdev->dev, "could not find gpr node by phandle\n"); - return PTR_ERR(gpr_np); + return -ENODEV; } priv = netdev_priv(dev); priv->stm.gpr = syscon_node_to_regmap(gpr_np); - of_node_put(gpr_np); if (IS_ERR(priv->stm.gpr)) { dev_dbg(&pdev->dev, "could not find gpr regmap\n"); - return PTR_ERR(priv->stm.gpr); + ret = PTR_ERR(priv->stm.gpr); + goto out_put_node; } priv->stm.req_gpr = out_val[1]; priv->stm.req_bit = out_val[2]; - priv->stm.ack_gpr = out_val[3]; - priv->stm.ack_bit = out_val[4]; dev_dbg(&pdev->dev, - "gpr %s req_gpr=0x02%x req_bit=%u ack_gpr=0x02%x ack_bit=%u\n", - gpr_np->full_name, priv->stm.req_gpr, priv->stm.req_bit, - priv->stm.ack_gpr, priv->stm.ack_bit); + "gpr %s req_gpr=0x02%x req_bit=%u\n", + gpr_np->full_name, priv->stm.req_gpr, priv->stm.req_bit); + + return 0; + +out_put_node: + of_node_put(gpr_np); + return ret; +} + +static int flexcan_setup_stop_mode_scfw(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct flexcan_priv *priv; + u8 scu_idx; + int ret; + + ret = of_property_read_u8(pdev->dev.of_node, "fsl,scu-index", &scu_idx); + if (ret < 0) { + dev_dbg(&pdev->dev, "failed to get scu index\n"); + return ret; + } + + priv = netdev_priv(dev); + priv->scu_idx = scu_idx; + + /* this function could be deferred probe, return -EPROBE_DEFER */ + return imx_scu_get_handle(&priv->sc_ipc_handle); +} + +/* flexcan_setup_stop_mode - Setup stop mode for wakeup + * + * Return: = 0 setup stop mode successfully or doesn't support this feature + * < 0 fail to setup stop mode (could be deferred probe) + */ +static int flexcan_setup_stop_mode(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct flexcan_priv *priv; + int ret; + + priv = netdev_priv(dev); + + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) + ret = flexcan_setup_stop_mode_scfw(pdev); + else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR) + ret = flexcan_setup_stop_mode_gpr(pdev); + else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI) + /* ATF will handle all STOP_IPG related work */ + ret = 0; + else + /* return 0 directly if doesn't support stop mode feature */ + return 0; + + /* If ret is -EINVAL, this means SoC claim to support stop mode, but + * dts file lack the stop mode property definition. 
For this case, + * directly return 0, this will skip the wakeup capable setting and + * will not block the driver probe. + */ + if (ret == -EINVAL) + return 0; + else if (ret) + return ret; device_set_wakeup_capable(&pdev->dev, true); + if (of_property_read_bool(pdev->dev.of_node, "wakeup-source")) + device_set_wakeup_enable(&pdev->dev, true); + return 0; } static const struct of_device_id flexcan_of_match[] = { + { .compatible = "fsl,imx8qm-flexcan", .data = &fsl_imx8qm_devtype_data, }, + { .compatible = "fsl,imx8mp-flexcan", .data = &fsl_imx8mp_devtype_data, }, + { .compatible = "fsl,imx93-flexcan", .data = &fsl_imx93_devtype_data, }, + { .compatible = "fsl,imx95-flexcan", .data = &fsl_imx95_devtype_data, }, { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, }, { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, }, { .compatible = "fsl,imx53-flexcan", .data = &fsl_imx25_devtype_data, }, @@ -1467,38 +2068,61 @@ static const struct of_device_id flexcan_of_match[] = { { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, }, { .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, }, { .compatible = "fsl,ls1021ar2-flexcan", .data = &fsl_ls1021a_r2_devtype_data, }, + { .compatible = "fsl,lx2160ar1-flexcan", .data = &fsl_lx2160a_r1_devtype_data, }, + { .compatible = "nxp,s32g2-flexcan", .data = &nxp_s32g2_devtype_data, }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, flexcan_of_match); static const struct platform_device_id flexcan_id_table[] = { - { .name = "flexcan", .driver_data = (kernel_ulong_t)&fsl_p1010_devtype_data, }, - { /* sentinel */ }, + { + .name = "flexcan-mcf5441x", + .driver_data = (kernel_ulong_t)&fsl_mcf5441x_devtype_data, + }, { + /* sentinel */ + }, }; MODULE_DEVICE_TABLE(platform, flexcan_id_table); static int flexcan_probe(struct platform_device *pdev) { - const struct of_device_id *of_id; const struct flexcan_devtype_data *devtype_data; struct net_device *dev; struct flexcan_priv *priv; struct regulator *reg_xceiver; - struct resource *mem; + struct phy *transceiver; struct clk *clk_ipg = NULL, *clk_per = NULL; struct flexcan_regs __iomem *regs; + struct flexcan_platform_data *pdata; int err, irq; + u8 clk_src = 1; u32 clock_freq = 0; - reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver"); + reg_xceiver = devm_regulator_get_optional(&pdev->dev, "xceiver"); if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER) return -EPROBE_DEFER; - else if (IS_ERR(reg_xceiver)) + else if (PTR_ERR(reg_xceiver) == -ENODEV) reg_xceiver = NULL; + else if (IS_ERR(reg_xceiver)) + return PTR_ERR(reg_xceiver); - if (pdev->dev.of_node) + transceiver = devm_phy_optional_get(&pdev->dev, NULL); + if (IS_ERR(transceiver)) + return dev_err_probe(&pdev->dev, PTR_ERR(transceiver), + "failed to get phy\n"); + + if (pdev->dev.of_node) { of_property_read_u32(pdev->dev.of_node, "clock-frequency", &clock_freq); + of_property_read_u8(pdev->dev.of_node, + "fsl,clk-source", &clk_src); + } else { + pdata = dev_get_platdata(&pdev->dev); + if (pdata) { + clock_freq = pdata->clock_frequency; + clk_src = pdata->clk_src; + } + } if (!clock_freq) { clk_ipg = devm_clk_get(&pdev->dev, "ipg"); @@ -1515,23 +2139,37 @@ static int flexcan_probe(struct platform_device *pdev) clock_freq = clk_get_rate(clk_per); } - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); - if (irq <= 0) - return -ENODEV; + if (irq < 0) + return irq; - regs = devm_ioremap_resource(&pdev->dev, mem); + regs = devm_platform_ioremap_resource(pdev, 0); if 
(IS_ERR(regs)) return PTR_ERR(regs); - of_id = of_match_device(flexcan_of_match, &pdev->dev); - if (of_id) { - devtype_data = of_id->data; - } else if (platform_get_device_id(pdev)->driver_data) { - devtype_data = (struct flexcan_devtype_data *) - platform_get_device_id(pdev)->driver_data; - } else { - return -ENODEV; + devtype_data = device_get_match_data(&pdev->dev); + + if ((devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_FD) && + !((devtype_data->quirks & + (FLEXCAN_QUIRK_USE_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR | + FLEXCAN_QUIRK_SUPPORT_RX_FIFO)) == + (FLEXCAN_QUIRK_USE_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR))) { + dev_err(&pdev->dev, "CAN-FD mode doesn't work in RX-FIFO mode!\n"); + return -EINVAL; + } + + if ((devtype_data->quirks & + (FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR)) == + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR) { + dev_err(&pdev->dev, + "Quirks (0x%08x) inconsistent: RX_MAILBOX_RX supported but not RX_MAILBOX\n", + devtype_data->quirks); + return -EINVAL; } dev = alloc_candev(sizeof(struct flexcan_priv), 1); @@ -1542,13 +2180,15 @@ static int flexcan_probe(struct platform_device *pdev) SET_NETDEV_DEV(dev, &pdev->dev); dev->netdev_ops = &flexcan_netdev_ops; + dev->ethtool_ops = &flexcan_ethtool_ops; dev->irq = irq; dev->flags |= IFF_ECHO; priv = netdev_priv(dev); + priv->devtype_data = *devtype_data; if (of_property_read_bool(pdev->dev.of_node, "big-endian") || - devtype_data->quirks & FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN) { + priv->devtype_data.quirks & FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN) { priv->read = flexcan_read_be; priv->write = flexcan_write_be; } else { @@ -1556,8 +2196,8 @@ static int flexcan_probe(struct platform_device *pdev) priv->write = flexcan_write_le; } + priv->dev = &pdev->dev; priv->can.clock.freq = clock_freq; - priv->can.bittiming_const = &flexcan_bittiming_const; priv->can.do_set_mode = flexcan_set_mode; priv->can.do_get_berr_counter = flexcan_get_berr_counter; priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | @@ -1566,8 +2206,47 @@ static int flexcan_probe(struct platform_device *pdev) priv->regs = regs; priv->clk_ipg = clk_ipg; priv->clk_per = clk_per; - priv->devtype_data = devtype_data; + priv->clk_src = clk_src; priv->reg_xceiver = reg_xceiver; + priv->transceiver = transceiver; + + if (transceiver) + priv->can.bitrate_max = transceiver->attrs.max_link_rate; + + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) { + priv->irq_boff = platform_get_irq(pdev, 1); + if (priv->irq_boff < 0) { + err = priv->irq_boff; + goto failed_platform_get_irq; + } + priv->irq_err = platform_get_irq(pdev, 2); + if (priv->irq_err < 0) { + err = priv->irq_err; + goto failed_platform_get_irq; + } + } + + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SECONDARY_MB_IRQ) { + priv->irq_secondary_mb = platform_get_irq_byname(pdev, "mb-1"); + if (priv->irq_secondary_mb < 0) { + err = priv->irq_secondary_mb; + goto failed_platform_get_irq; + } + } + + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SUPPORT_FD) { + priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD | + CAN_CTRLMODE_FD_NON_ISO; + priv->can.bittiming_const = &flexcan_fd_bittiming_const; + priv->can.fd.data_bittiming_const = + &flexcan_fd_data_bittiming_const; + } else { + priv->can.bittiming_const = &flexcan_bittiming_const; + } + + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); err = register_flexcandev(dev); if (err) { @@ 
-1575,32 +2254,35 @@ static int flexcan_probe(struct platform_device *pdev) goto failed_register; } - devm_can_led_init(dev); - - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE) { - err = flexcan_setup_stop_mode(pdev); - if (err) - dev_dbg(&pdev->dev, "failed to setup stop-mode\n"); + err = flexcan_setup_stop_mode(pdev); + if (err < 0) { + dev_err_probe(&pdev->dev, err, "setup stop mode failed\n"); + goto failed_setup_stop_mode; } - dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n", - priv->regs, dev->irq); + of_can_transceiver(dev); return 0; + failed_setup_stop_mode: + unregister_flexcandev(dev); failed_register: + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_disable(&pdev->dev); + failed_platform_get_irq: free_candev(dev); return err; } -static int flexcan_remove(struct platform_device *pdev) +static void flexcan_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); + device_set_wakeup_enable(&pdev->dev, false); + device_set_wakeup_capable(&pdev->dev, false); unregister_flexcandev(dev); + pm_runtime_disable(&pdev->dev); free_candev(dev); - - return 0; } static int __maybe_unused flexcan_suspend(struct device *device) @@ -1615,16 +2297,29 @@ static int __maybe_unused flexcan_suspend(struct device *device) */ if (device_may_wakeup(device)) { enable_irq_wake(dev->irq); - flexcan_enter_stop_mode(priv); + err = flexcan_enter_stop_mode(priv); + if (err) + return err; } else { - err = flexcan_chip_disable(priv); + err = flexcan_chip_stop(dev); + if (err) + return err; + + flexcan_chip_interrupts_disable(dev); + + err = flexcan_transceiver_disable(priv); + if (err) + return err; + + err = pinctrl_pm_select_sleep_state(device); if (err) return err; } netif_stop_queue(dev); netif_device_detach(dev); + + priv->can.state = CAN_STATE_SLEEPING; } - priv->can.state = CAN_STATE_SLEEPING; return 0; } @@ -1635,28 +2330,81 @@ static int __maybe_unused flexcan_resume(struct device *device) struct flexcan_priv *priv = netdev_priv(dev); int err; - priv->can.state = CAN_STATE_ERROR_ACTIVE; if (netif_running(dev)) { netif_device_attach(dev); netif_start_queue(dev); if (device_may_wakeup(device)) { disable_irq_wake(dev->irq); + err = flexcan_exit_stop_mode(priv); + if (err) + return err; } else { - err = flexcan_chip_enable(priv); + err = pinctrl_pm_select_default_state(device); + if (err) + return err; + + err = flexcan_transceiver_enable(priv); if (err) return err; + + err = flexcan_chip_start(dev); + if (err) { + flexcan_transceiver_disable(priv); + return err; + } + + flexcan_chip_interrupts_enable(dev); } + + priv->can.state = CAN_STATE_ERROR_ACTIVE; } + return 0; } +static int __maybe_unused flexcan_runtime_suspend(struct device *device) +{ + struct net_device *dev = dev_get_drvdata(device); + struct flexcan_priv *priv = netdev_priv(dev); + + flexcan_clks_disable(priv); + + return 0; +} + +static int __maybe_unused flexcan_runtime_resume(struct device *device) +{ + struct net_device *dev = dev_get_drvdata(device); + struct flexcan_priv *priv = netdev_priv(dev); + + return flexcan_clks_enable(priv); +} + static int __maybe_unused flexcan_noirq_suspend(struct device *device) { struct net_device *dev = dev_get_drvdata(device); struct flexcan_priv *priv = netdev_priv(dev); - if (netif_running(dev) && device_may_wakeup(device)) - flexcan_enable_wakeup_irq(priv, true); + if (netif_running(dev)) { + int err; + + if (device_may_wakeup(device)) + flexcan_enable_wakeup_irq(priv, true); + + /* For FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI, it need ATF 
to send + * to SM through SCMI protocol, SM will assert the IPG_STOP + * signal. But all this works need the CAN clocks keep on. + * After the CAN module get the IPG_STOP mode, and switch to + * STOP mode, whether still keep the CAN clocks on or gate them + * off depend on the Hardware design. + */ + if (!(device_may_wakeup(device) && + priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI)) { + err = pm_runtime_force_suspend(device); + if (err) + return err; + } + } return 0; } @@ -1666,9 +2414,18 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device) struct net_device *dev = dev_get_drvdata(device); struct flexcan_priv *priv = netdev_priv(dev); - if (netif_running(dev) && device_may_wakeup(device)) { - flexcan_enable_wakeup_irq(priv, false); - flexcan_exit_stop_mode(priv); + if (netif_running(dev)) { + int err; + + if (!(device_may_wakeup(device) && + priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI)) { + err = pm_runtime_force_resume(device); + if (err) + return err; + } + + if (device_may_wakeup(device)) + flexcan_enable_wakeup_irq(priv, false); } return 0; @@ -1676,6 +2433,7 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device) static const struct dev_pm_ops flexcan_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(flexcan_suspend, flexcan_resume) + SET_RUNTIME_PM_OPS(flexcan_runtime_suspend, flexcan_runtime_resume, NULL) SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(flexcan_noirq_suspend, flexcan_noirq_resume) }; diff --git a/drivers/net/can/flexcan/flexcan-ethtool.c b/drivers/net/can/flexcan/flexcan-ethtool.c new file mode 100644 index 000000000000..50e86b2da532 --- /dev/null +++ b/drivers/net/can/flexcan/flexcan-ethtool.c @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2022 Amarula Solutions, Dario Binacchi <dario.binacchi@amarulasolutions.com> + * Copyright (c) 2022 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de> + * + */ + +#include <linux/can/dev.h> +#include <linux/ethtool.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/platform_device.h> + +#include "flexcan.h" + +static const char flexcan_priv_flags_strings[][ETH_GSTRING_LEN] = { +#define FLEXCAN_PRIV_FLAGS_RX_RTR BIT(0) + "rx-rtr", +}; + +static void +flexcan_get_ringparam(struct net_device *ndev, struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *ext_ack) +{ + const struct flexcan_priv *priv = netdev_priv(ndev); + + ring->rx_max_pending = priv->mb_count; + ring->tx_max_pending = priv->mb_count; + + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) + ring->rx_pending = priv->offload.mb_last - + priv->offload.mb_first + 1; + else + ring->rx_pending = 6; /* RX-FIFO depth is fixed */ + + /* the drive currently supports only on TX buffer */ + ring->tx_pending = 1; +} + +static void +flexcan_get_strings(struct net_device *ndev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_PRIV_FLAGS: + memcpy(data, flexcan_priv_flags_strings, + sizeof(flexcan_priv_flags_strings)); + } +} + +static u32 flexcan_get_priv_flags(struct net_device *ndev) +{ + const struct flexcan_priv *priv = netdev_priv(ndev); + u32 priv_flags = 0; + + if (flexcan_active_rx_rtr(priv)) + priv_flags |= FLEXCAN_PRIV_FLAGS_RX_RTR; + + return priv_flags; +} + +static int flexcan_set_priv_flags(struct net_device *ndev, u32 priv_flags) +{ + struct flexcan_priv *priv = netdev_priv(ndev); + u32 quirks = priv->devtype_data.quirks; + + if (priv_flags & FLEXCAN_PRIV_FLAGS_RX_RTR) { + if 
(flexcan_supports_rx_mailbox_rtr(priv)) + quirks |= FLEXCAN_QUIRK_USE_RX_MAILBOX; + else if (flexcan_supports_rx_fifo(priv)) + quirks &= ~FLEXCAN_QUIRK_USE_RX_MAILBOX; + else + quirks |= FLEXCAN_QUIRK_USE_RX_MAILBOX; + } else { + if (flexcan_supports_rx_mailbox(priv)) + quirks |= FLEXCAN_QUIRK_USE_RX_MAILBOX; + else + quirks &= ~FLEXCAN_QUIRK_USE_RX_MAILBOX; + } + + if (quirks != priv->devtype_data.quirks && netif_running(ndev)) + return -EBUSY; + + priv->devtype_data.quirks = quirks; + + if (!(priv_flags & FLEXCAN_PRIV_FLAGS_RX_RTR) && + !flexcan_active_rx_rtr(priv)) + netdev_info(ndev, + "Activating RX mailbox mode, cannot receive RTR frames.\n"); + + return 0; +} + +static int flexcan_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_PRIV_FLAGS: + return ARRAY_SIZE(flexcan_priv_flags_strings); + default: + return -EOPNOTSUPP; + } +} + +const struct ethtool_ops flexcan_ethtool_ops = { + .get_ringparam = flexcan_get_ringparam, + .get_strings = flexcan_get_strings, + .get_priv_flags = flexcan_get_priv_flags, + .set_priv_flags = flexcan_set_priv_flags, + .get_sset_count = flexcan_get_sset_count, + .get_ts_info = ethtool_op_get_ts_info, +}; diff --git a/drivers/net/can/flexcan/flexcan.h b/drivers/net/can/flexcan/flexcan.h new file mode 100644 index 000000000000..16692a2502eb --- /dev/null +++ b/drivers/net/can/flexcan/flexcan.h @@ -0,0 +1,171 @@ +/* SPDX-License-Identifier: GPL-2.0 + * flexcan.c - FLEXCAN CAN controller driver + * + * Copyright (c) 2005-2006 Varma Electronics Oy + * Copyright (c) 2009 Sascha Hauer, Pengutronix + * Copyright (c) 2010-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de> + * Copyright (c) 2014 David Jander, Protonic Holland + * Copyright (C) 2022 Amarula Solutions, Dario Binacchi <dario.binacchi@amarulasolutions.com> + * + * Based on code originally by Andrey Volkov <avolkov@varma-el.com> + * + */ + +#ifndef _FLEXCAN_H +#define _FLEXCAN_H + +#include <linux/can/rx-offload.h> + +/* FLEXCAN hardware feature flags + * + * Below is some version info we got: + * SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR rece- FD Mode MB + * Filter? connected? Passive detection ption in MB Supported? + * MCF5441X FlexCAN2 ? no yes no no no no 16 + * MX25 FlexCAN2 03.00.00.00 no no no no no no 64 + * MX28 FlexCAN2 03.00.04.00 yes yes no no no no 64 + * MX35 FlexCAN2 03.00.00.00 no no no no no no 64 + * MX53 FlexCAN2 03.00.00.00 yes no no no no no 64 + * MX6s FlexCAN3 10.00.12.00 yes yes no no yes no 64 + * MX8QM FlexCAN3 03.00.23.00 yes yes no no yes yes 64 + * MX8MP FlexCAN3 03.00.17.01 yes yes no yes yes yes 64 + * VF610 FlexCAN3 ? no yes no yes yes? no 64 + * LS1021A FlexCAN2 03.00.04.00 no yes no no yes no 64 + * LX2160A FlexCAN3 03.00.23.00 no yes no yes yes yes 64 + * + * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected. 
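Editorial aside: the new flexcan-ethtool.c above exposes a single private flag, "rx-rtr", which selects between RTR-capable reception (RX FIFO, or mailbox mode on cores with RX_MAILBOX_RTR support) and plain mailbox mode. From userspace it is normally driven with ethtool --show-priv-flags / --set-priv-flags; the sketch below does the same through the raw ETHTOOL_GPFLAGS/ETHTOOL_SPFLAGS ioctls. Bit 0 corresponds to "rx-rtr" because it is the first entry in flexcan_priv_flags_strings; "can0" is only an example interface name, and changing the mode while the interface is running is rejected with -EBUSY by flexcan_set_priv_flags():

	#include <linux/ethtool.h>
	#include <linux/sockios.h>
	#include <net/if.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <unistd.h>

	int main(void)
	{
		struct ethtool_value eval = { .cmd = ETHTOOL_GPFLAGS };
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0)
			return 1;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "can0", IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&eval;

		if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
			printf("rx-rtr is %s\n", (eval.data & 1) ? "on" : "off");

		eval.cmd = ETHTOOL_SPFLAGS;	/* request RTR-capable reception */
		eval.data |= 1;
		if (ioctl(fd, SIOCETHTOOL, &ifr) != 0)
			perror("ETHTOOL_SPFLAGS");

		close(fd);
		return 0;
	}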
+ */ + +/* [TR]WRN_INT not connected */ +#define FLEXCAN_QUIRK_BROKEN_WERR_STATE BIT(1) + /* Disable RX FIFO Global mask */ +#define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2) +/* Enable EACEN and RRS bit in ctrl2 */ +#define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3) +/* Disable non-correctable errors interrupt and freeze mode */ +#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) +/* Use mailboxes (not FIFO) for RX path */ +#define FLEXCAN_QUIRK_USE_RX_MAILBOX BIT(5) +/* No interrupt for error passive */ +#define FLEXCAN_QUIRK_BROKEN_PERR_STATE BIT(6) +/* default to BE register access */ +#define FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN BIT(7) +/* Setup stop mode with GPR to support wakeup */ +#define FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR BIT(8) +/* Support CAN-FD mode */ +#define FLEXCAN_QUIRK_SUPPORT_FD BIT(9) +/* support memory detection and correction */ +#define FLEXCAN_QUIRK_SUPPORT_ECC BIT(10) +/* Setup stop mode with SCU firmware to support wakeup */ +#define FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW BIT(11) +/* Setup 3 separate interrupts, main, boff and err */ +#define FLEXCAN_QUIRK_NR_IRQ_3 BIT(12) +/* Setup 16 mailboxes */ +#define FLEXCAN_QUIRK_NR_MB_16 BIT(13) +/* Device supports RX via mailboxes */ +#define FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX BIT(14) +/* Device supports RTR reception via mailboxes */ +#define FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR BIT(15) +/* Device supports RX via FIFO */ +#define FLEXCAN_QUIRK_SUPPORT_RX_FIFO BIT(16) +/* Setup stop mode with ATF SCMI protocol to support wakeup */ +#define FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI BIT(17) +/* Device has two separate interrupt lines for two mailbox ranges, which + * both need to have an interrupt handler registered. + */ +#define FLEXCAN_QUIRK_SECONDARY_MB_IRQ BIT(18) + +struct flexcan_devtype_data { + u32 quirks; /* quirks needed for different IP cores */ +}; + +struct flexcan_stop_mode { + struct regmap *gpr; + u8 req_gpr; + u8 req_bit; +}; + +struct flexcan_priv { + struct can_priv can; + struct can_rx_offload offload; + struct device *dev; + + struct flexcan_regs __iomem *regs; + struct flexcan_mb __iomem *tx_mb; + struct flexcan_mb __iomem *tx_mb_reserved; + u8 tx_mb_idx; + u8 mb_count; + u8 mb_size; + u8 clk_src; /* clock source of CAN Protocol Engine */ + u8 scu_idx; + + u64 rx_mask; + u64 tx_mask; + u32 reg_ctrl_default; + + struct clk *clk_ipg; + struct clk *clk_per; + struct flexcan_devtype_data devtype_data; + struct regulator *reg_xceiver; + struct phy *transceiver; + struct flexcan_stop_mode stm; + + int irq_boff; + int irq_err; + int irq_secondary_mb; + + /* IPC handle when setup stop mode by System Controller firmware(scfw) */ + struct imx_sc_ipc *sc_ipc_handle; + + /* Read and Write APIs */ + u32 (*read)(void __iomem *addr); + void (*write)(u32 val, void __iomem *addr); +}; + +extern const struct ethtool_ops flexcan_ethtool_ops; + +static inline bool +flexcan_supports_rx_mailbox(const struct flexcan_priv *priv) +{ + const u32 quirks = priv->devtype_data.quirks; + + return quirks & FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX; +} + +static inline bool +flexcan_supports_rx_mailbox_rtr(const struct flexcan_priv *priv) +{ + const u32 quirks = priv->devtype_data.quirks; + + return (quirks & (FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR)) == + (FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR); +} + +static inline bool +flexcan_supports_rx_fifo(const struct flexcan_priv *priv) +{ + const u32 quirks = priv->devtype_data.quirks; + + return quirks & FLEXCAN_QUIRK_SUPPORT_RX_FIFO; +} + +static inline bool 
+flexcan_active_rx_rtr(const struct flexcan_priv *priv) +{ + const u32 quirks = priv->devtype_data.quirks; + + if (quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) { + if (quirks & FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR) + return true; + } else { + /* RX-FIFO is always RTR capable */ + return true; + } + + return false; +} + + +#endif /* _FLEXCAN_H */ diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c index 7eec1d9f86a0..3b1b09943436 100644 --- a/drivers/net/can/grcan.c +++ b/drivers/net/can/grcan.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Socket CAN driver for Aeroflex Gaisler GRCAN and GRHCAN. * @@ -18,11 +19,6 @@ * See "Documentation/admin-guide/kernel-parameters.rst" for information on the module * parameters. * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * * Contributors: Andreas Larsson <andreas@gaisler.com> */ @@ -31,10 +27,12 @@ #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/delay.h> +#include <linux/ethtool.h> #include <linux/io.h> #include <linux/can/dev.h> +#include <linux/platform_device.h> #include <linux/spinlock.h> -#include <linux/of_platform.h> +#include <linux/of.h> #include <linux/of_irq.h> #include <linux/dma-mapping.h> @@ -245,13 +243,14 @@ struct grcan_device_config { .rxsize = GRCAN_DEFAULT_BUFFER_SIZE, \ } -#define GRCAN_TXBUG_SAFE_GRLIB_VERSION 0x4100 +#define GRCAN_TXBUG_SAFE_GRLIB_VERSION 4100 #define GRLIB_VERSION_MASK 0xffff /* GRCAN private data structure */ struct grcan_priv { struct can_priv can; /* must be the first member */ struct net_device *dev; + struct device *ofdev_dev; struct napi_struct napi; struct grcan_registers __iomem *regs; /* ioremap'ed registers */ @@ -259,7 +258,6 @@ struct grcan_priv { struct grcan_dma dma; struct sk_buff **echo_skb; /* We allocate this on our own */ - u8 *txdlc; /* Length of queued frames */ /* The echo skb pointer, pointing into echo_skb and indicating which * frames can be echoed back. 
See the "Notes on the tx cyclic buffer @@ -519,12 +517,10 @@ static int catch_up_echo_skb(struct net_device *dev, int budget, bool echo) if (echo) { /* Normal echo of messages */ stats->tx_packets++; - stats->tx_bytes += priv->txdlc[i]; - priv->txdlc[i] = 0; - can_get_echo_skb(dev, i); + stats->tx_bytes += can_get_echo_skb(dev, i, NULL); } else { /* For cleanup of untransmitted messages */ - can_free_echo_skb(dev, i); + can_free_echo_skb(dev, i, NULL); } priv->eskbp = grcan_ring_add(priv->eskbp, GRCAN_MSG_SIZE, @@ -677,6 +673,7 @@ static void grcan_err(struct net_device *dev, u32 sources, u32 status) /* There are no others at this point */ break; } + cf.can_id |= CAN_ERR_CNT; cf.data[6] = txerr; cf.data[7] = rxerr; priv->can.state = state; @@ -730,7 +727,7 @@ static void grcan_err(struct net_device *dev, u32 sources, u32 status) txrx = "on rx "; stats->rx_errors++; } - netdev_err(dev, "Fatal AHB buss error %s- halting device\n", + netdev_err(dev, "Fatal AHB bus error %s- halting device\n", txrx); spin_lock_irqsave(&priv->lock, flags); @@ -781,7 +778,7 @@ static irqreturn_t grcan_interrupt(int irq, void *dev_id) */ if (priv->need_txbug_workaround && (sources & (GRCAN_IRQ_TX | GRCAN_IRQ_TXLOSS))) { - del_timer(&priv->hang_timer); + timer_delete(&priv->hang_timer); } /* Frame(s) received or transmitted */ @@ -809,7 +806,7 @@ static irqreturn_t grcan_interrupt(int irq, void *dev_id) */ static void grcan_running_reset(struct timer_list *t) { - struct grcan_priv *priv = from_timer(priv, t, rr_timer); + struct grcan_priv *priv = timer_container_of(priv, t, rr_timer); struct net_device *dev = priv->dev; struct grcan_registers __iomem *regs = priv->regs; unsigned long flags; @@ -820,8 +817,8 @@ static void grcan_running_reset(struct timer_list *t) spin_lock_irqsave(&priv->lock, flags); priv->resetting = false; - del_timer(&priv->hang_timer); - del_timer(&priv->rr_timer); + timer_delete(&priv->hang_timer); + timer_delete(&priv->rr_timer); if (!priv->closing) { /* Save and reset - config register preserved by grcan_reset */ @@ -900,7 +897,7 @@ static inline void grcan_reset_timer(struct timer_list *timer, __u32 bitrate) /* Disable channels and schedule a running reset */ static void grcan_initiate_running_reset(struct timer_list *t) { - struct grcan_priv *priv = from_timer(priv, t, hang_timer); + struct grcan_priv *priv = timer_container_of(priv, t, hang_timer); struct net_device *dev = priv->dev; struct grcan_registers __iomem *regs = priv->regs; unsigned long flags; @@ -928,7 +925,7 @@ static void grcan_free_dma_buffers(struct net_device *dev) struct grcan_priv *priv = netdev_priv(dev); struct grcan_dma *dma = &priv->dma; - dma_free_coherent(&dev->dev, dma->base_size, dma->base_buf, + dma_free_coherent(priv->ofdev_dev, dma->base_size, dma->base_buf, dma->base_handle); memset(dma, 0, sizeof(*dma)); } @@ -953,7 +950,7 @@ static int grcan_allocate_dma_buffers(struct net_device *dev, /* Extra GRCAN_BUFFER_ALIGNMENT to allow for alignment */ dma->base_size = lsize + ssize + GRCAN_BUFFER_ALIGNMENT; - dma->base_buf = dma_alloc_coherent(&dev->dev, + dma->base_buf = dma_alloc_coherent(priv->ofdev_dev, dma->base_size, &dma->base_handle, GFP_KERNEL); @@ -1066,25 +1063,20 @@ static int grcan_open(struct net_device *dev) priv->can.echo_skb_max = dma->tx.size; priv->can.echo_skb = priv->echo_skb; - priv->txdlc = kcalloc(dma->tx.size, sizeof(*priv->txdlc), GFP_KERNEL); - if (!priv->txdlc) { - err = -ENOMEM; - goto exit_free_echo_skb; - } - /* Get can device up */ err = open_candev(dev); if (err) - goto 
exit_free_txdlc; + goto exit_free_echo_skb; err = request_irq(dev->irq, grcan_interrupt, IRQF_SHARED, dev->name, dev); if (err) goto exit_close_candev; + napi_enable(&priv->napi); + spin_lock_irqsave(&priv->lock, flags); - napi_enable(&priv->napi); grcan_start(dev); if (!(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)) netif_start_queue(dev); @@ -1097,8 +1089,6 @@ static int grcan_open(struct net_device *dev) exit_close_candev: close_candev(dev); -exit_free_txdlc: - kfree(priv->txdlc); exit_free_echo_skb: kfree(priv->echo_skb); exit_free_dma_buffers: @@ -1117,8 +1107,10 @@ static int grcan_close(struct net_device *dev) priv->closing = true; if (priv->need_txbug_workaround) { - del_timer_sync(&priv->hang_timer); - del_timer_sync(&priv->rr_timer); + spin_unlock_irqrestore(&priv->lock, flags); + timer_delete_sync(&priv->hang_timer); + timer_delete_sync(&priv->rr_timer); + spin_lock_irqsave(&priv->lock, flags); } netif_stop_queue(dev); grcan_stop_hardware(dev); @@ -1133,12 +1125,11 @@ static int grcan_close(struct net_device *dev) priv->can.echo_skb_max = 0; priv->can.echo_skb = NULL; kfree(priv->echo_skb); - kfree(priv->txdlc); return 0; } -static int grcan_transmit_catch_up(struct net_device *dev, int budget) +static void grcan_transmit_catch_up(struct net_device *dev) { struct grcan_priv *priv = netdev_priv(dev); unsigned long flags; @@ -1146,7 +1137,7 @@ static int grcan_transmit_catch_up(struct net_device *dev, int budget) spin_lock_irqsave(&priv->lock, flags); - work_done = catch_up_echo_skb(dev, budget, true); + work_done = catch_up_echo_skb(dev, -1, true); if (work_done) { if (!priv->resetting && !priv->closing && !(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)) @@ -1156,12 +1147,10 @@ static int grcan_transmit_catch_up(struct net_device *dev, int budget) * so prevent a running reset while catching up */ if (priv->need_txbug_workaround) - del_timer(&priv->hang_timer); + timer_delete(&priv->hang_timer); } spin_unlock_irqrestore(&priv->lock, flags); - - return work_done; } static int grcan_receive(struct net_device *dev, int budget) @@ -1205,21 +1194,21 @@ static int grcan_receive(struct net_device *dev, int budget) cf->can_id = ((slot[0] & GRCAN_MSG_BID) >> GRCAN_MSG_BID_BIT); } - cf->can_dlc = get_can_dlc((slot[1] & GRCAN_MSG_DLC) + cf->len = can_cc_dlc2len((slot[1] & GRCAN_MSG_DLC) >> GRCAN_MSG_DLC_BIT); if (rtr) { cf->can_id |= CAN_RTR_FLAG; } else { - for (i = 0; i < cf->can_dlc; i++) { + for (i = 0; i < cf->len; i++) { j = GRCAN_MSG_DATA_SLOT_INDEX(i); shift = GRCAN_MSG_DATA_SHIFT(i); cf->data[i] = (u8)(slot[j] >> shift); } - } - /* Update statistics and read pointer */ + stats->rx_bytes += cf->len; + } stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + netif_receive_skb(skb); rd = grcan_ring_add(rd, GRCAN_MSG_SIZE, dma->rx.size); @@ -1243,19 +1232,13 @@ static int grcan_poll(struct napi_struct *napi, int budget) struct net_device *dev = priv->dev; struct grcan_registers __iomem *regs = priv->regs; unsigned long flags; - int tx_work_done, rx_work_done; - int rx_budget = budget / 2; - int tx_budget = budget - rx_budget; + int work_done; - /* Half of the budget for receiveing messages */ - rx_work_done = grcan_receive(dev, rx_budget); + work_done = grcan_receive(dev, budget); - /* Half of the budget for transmitting messages as that can trigger echo - * frames being received - */ - tx_work_done = grcan_transmit_catch_up(dev, tx_budget); + grcan_transmit_catch_up(dev); - if (rx_work_done < rx_budget && tx_work_done < tx_budget) { + if (work_done < budget) { napi_complete(napi); 
/* Guarantee no interference with a running reset that otherwise @@ -1272,7 +1255,7 @@ static int grcan_poll(struct napi_struct *napi, int budget) spin_unlock_irqrestore(&priv->lock, flags); } - return rx_work_done + tx_work_done; + return work_done; } /* Work tx bug by waiting while for the risky situation to clear. If that fails, @@ -1364,7 +1347,7 @@ static netdev_tx_t grcan_start_xmit(struct sk_buff *skb, unsigned long flags; u32 oneshotmode = priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT; - if (can_dropped_invalid_skb(dev, skb)) + if (can_dev_dropped_skb(dev, skb)) return NETDEV_TX_OK; /* Trying to transmit in silent mode will generate error interrupts, but @@ -1403,7 +1386,7 @@ static netdev_tx_t grcan_start_xmit(struct sk_buff *skb, eff = cf->can_id & CAN_EFF_FLAG; rtr = cf->can_id & CAN_RTR_FLAG; id = cf->can_id & (eff ? CAN_EFF_MASK : CAN_SFF_MASK); - dlc = cf->can_dlc; + dlc = cf->len; if (eff) tmp = (id << GRCAN_MSG_EID_BIT) & GRCAN_MSG_EID; else @@ -1451,8 +1434,7 @@ static netdev_tx_t grcan_start_xmit(struct sk_buff *skb, * can_put_echo_skb would be an error unless other measures are * taken. */ - priv->txdlc[slotindex] = cf->can_dlc; /* Store dlc for statistics */ - can_put_echo_skb(skb, dev, slotindex); + can_put_echo_skb(skb, dev, slotindex, 0); /* Make sure everything is written before allowing hardware to * read from the memory @@ -1579,7 +1561,10 @@ static const struct net_device_ops grcan_netdev_ops = { .ndo_open = grcan_open, .ndo_stop = grcan_close, .ndo_start_xmit = grcan_start_xmit, - .ndo_change_mtu = can_change_mtu, +}; + +static const struct ethtool_ops grcan_ethtool_ops = { + .get_ts_info = ethtool_op_get_ts_info, }; static int grcan_setup_netdev(struct platform_device *ofdev, @@ -1598,12 +1583,14 @@ static int grcan_setup_netdev(struct platform_device *ofdev, dev->irq = irq; dev->flags |= IFF_ECHO; dev->netdev_ops = &grcan_netdev_ops; + dev->ethtool_ops = &grcan_ethtool_ops; dev->sysfs_groups[0] = &sysfs_grcan_group; priv = netdev_priv(dev); memcpy(&priv->config, &grcan_module_config, sizeof(struct grcan_device_config)); priv->dev = dev; + priv->ofdev_dev = &ofdev->dev; priv->regs = base; priv->can.bittiming_const = &grcan_bittiming_const; priv->can.do_set_bittiming = grcan_set_bittiming; @@ -1630,7 +1617,7 @@ static int grcan_setup_netdev(struct platform_device *ofdev, timer_setup(&priv->hang_timer, grcan_initiate_running_reset, 0); } - netif_napi_add(dev, &priv->napi, grcan_poll, GRCAN_NAPI_WEIGHT); + netif_napi_add_weight(dev, &priv->napi, grcan_poll, GRCAN_NAPI_WEIGHT); SET_NETDEV_DEV(dev, &ofdev->dev); dev_info(&ofdev->dev, "regs=0x%p, irq=%d, clock=%d\n", @@ -1656,7 +1643,7 @@ exit_free_candev: static int grcan_probe(struct platform_device *ofdev) { struct device_node *np = ofdev->dev.of_node; - struct resource *res; + struct device_node *sysid_parent; u32 sysid, ambafreq; int irq, err; void __iomem *base; @@ -1665,10 +1652,14 @@ static int grcan_probe(struct platform_device *ofdev) /* Compare GRLIB version number with the first that does not * have the tx bug (see start_xmit) */ - err = of_property_read_u32(np, "systemid", &sysid); - if (!err && ((sysid & GRLIB_VERSION_MASK) - >= GRCAN_TXBUG_SAFE_GRLIB_VERSION)) - txbug = false; + sysid_parent = of_find_node_by_path("/ambapp0"); + if (sysid_parent) { + err = of_property_read_u32(sysid_parent, "systemid", &sysid); + if (!err && ((sysid & GRLIB_VERSION_MASK) >= + GRCAN_TXBUG_SAFE_GRLIB_VERSION)) + txbug = false; + of_node_put(sysid_parent); + } err = of_property_read_u32(np, "freq", &ambafreq); if (err) { @@ 
-1676,8 +1667,7 @@ static int grcan_probe(struct platform_device *ofdev) goto exit_error; } - res = platform_get_resource(ofdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(&ofdev->dev, res); + base = devm_platform_ioremap_resource(ofdev, 0); if (IS_ERR(base)) { err = PTR_ERR(base); goto exit_error; @@ -1707,7 +1697,7 @@ exit_error: return err; } -static int grcan_remove(struct platform_device *ofdev) +static void grcan_remove(struct platform_device *ofdev) { struct net_device *dev = platform_get_drvdata(ofdev); struct grcan_priv *priv = netdev_priv(dev); @@ -1717,8 +1707,6 @@ static int grcan_remove(struct platform_device *ofdev) irq_dispose_mapping(dev->irq); netif_napi_del(&priv->napi); free_candev(dev); - - return 0; } static const struct of_device_id grcan_match[] = { diff --git a/drivers/net/can/ifi_canfd/Kconfig b/drivers/net/can/ifi_canfd/Kconfig index 9e8934ff63a7..b5dd9c13d529 100644 --- a/drivers/net/can/ifi_canfd/Kconfig +++ b/drivers/net/can/ifi_canfd/Kconfig @@ -1,7 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0-only config CAN_IFI_CANFD depends on HAS_IOMEM tristate "IFI CAN_FD IP" - ---help--- + help This driver adds support for the I/F/I CAN_FD soft IP block connected to the "platform bus" (Linux abstraction for directly to the processor attached devices). The CAN_FD is most often diff --git a/drivers/net/can/ifi_canfd/Makefile b/drivers/net/can/ifi_canfd/Makefile index b229960cdf39..0cd724f10d1e 100644 --- a/drivers/net/can/ifi_canfd/Makefile +++ b/drivers/net/can/ifi_canfd/Makefile @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only # # Makefile for the IFI CANFD controller driver. # diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c index fedd927ba6ed..0f83335e4d07 100644 --- a/drivers/net/can/ifi_canfd/ifi_canfd.c +++ b/drivers/net/can/ifi_canfd/ifi_canfd.c @@ -13,13 +13,13 @@ #include <linux/clk.h> #include <linux/delay.h> +#include <linux/ethtool.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/can/dev.h> @@ -271,9 +271,9 @@ static void ifi_canfd_read_fifo(struct net_device *ndev) dlc = (rxdlc >> IFI_CANFD_RXFIFO_DLC_DLC_OFFSET) & IFI_CANFD_RXFIFO_DLC_DLC_MASK; if (rxdlc & IFI_CANFD_RXFIFO_DLC_EDL) - cf->len = can_dlc2len(dlc); + cf->len = can_fd_dlc2len(dlc); else - cf->len = get_can_dlc(dlc); + cf->len = can_cc_dlc2len(dlc); rxid = readl(priv->base + IFI_CANFD_RXFIFO_ID); id = (rxid >> IFI_CANFD_RXFIFO_ID_ID_OFFSET); @@ -309,15 +309,15 @@ static void ifi_canfd_read_fifo(struct net_device *ndev) *(u32 *)(cf->data + i) = readl(priv->base + IFI_CANFD_RXFIFO_DATA + i); } + + stats->rx_bytes += cf->len; } + stats->rx_packets++; /* Remove the packet from FIFO */ writel(IFI_CANFD_RXSTCMD_REMOVE_MSG, priv->base + IFI_CANFD_RXSTCMD); writel(rx_irq_mask, priv->base + IFI_CANFD_INTERRUPT); - stats->rx_packets++; - stats->rx_bytes += cf->len; - netif_receive_skb(skb); } @@ -345,9 +345,6 @@ static int ifi_canfd_do_rx_poll(struct net_device *ndev, int quota) rxst = readl(priv->base + IFI_CANFD_RXSTCMD); } - if (pkts) - can_led_event(ndev, CAN_LED_EVENT_RX); - return pkts; } @@ -393,36 +390,55 @@ static int ifi_canfd_handle_lec_err(struct net_device *ndev) return 0; priv->can.can_stats.bus_error++; - stats->rx_errors++; /* Propagate the error condition to the CAN stack. 
*/ skb = alloc_can_err_skb(ndev, &cf); - if (unlikely(!skb)) - return 0; /* Read the error counter register and check for new errors. */ - cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + if (likely(skb)) + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; - if (errctr & IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST) - cf->data[2] |= CAN_ERR_PROT_OVERLOAD; + if (errctr & IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST) { + stats->rx_errors++; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_OVERLOAD; + } - if (errctr & IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST) - cf->data[3] = CAN_ERR_PROT_LOC_ACK; + if (errctr & IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST) { + stats->tx_errors++; + if (likely(skb)) + cf->data[3] = CAN_ERR_PROT_LOC_ACK; + } - if (errctr & IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST) - cf->data[2] |= CAN_ERR_PROT_BIT0; + if (errctr & IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST) { + stats->tx_errors++; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_BIT0; + } - if (errctr & IFI_CANFD_ERROR_CTR_BIT1_ERROR_FIRST) - cf->data[2] |= CAN_ERR_PROT_BIT1; + if (errctr & IFI_CANFD_ERROR_CTR_BIT1_ERROR_FIRST) { + stats->tx_errors++; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_BIT1; + } - if (errctr & IFI_CANFD_ERROR_CTR_STUFF_ERROR_FIRST) - cf->data[2] |= CAN_ERR_PROT_STUFF; + if (errctr & IFI_CANFD_ERROR_CTR_STUFF_ERROR_FIRST) { + stats->rx_errors++; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_STUFF; + } - if (errctr & IFI_CANFD_ERROR_CTR_CRC_ERROR_FIRST) - cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; + if (errctr & IFI_CANFD_ERROR_CTR_CRC_ERROR_FIRST) { + stats->rx_errors++; + if (likely(skb)) + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; + } - if (errctr & IFI_CANFD_ERROR_CTR_FORM_ERROR_FIRST) - cf->data[2] |= CAN_ERR_PROT_FORM; + if (errctr & IFI_CANFD_ERROR_CTR_FORM_ERROR_FIRST) { + stats->rx_errors++; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_FORM; + } /* Reset the error counter, ack the IRQ and re-enable the counter. */ writel(IFI_CANFD_ERROR_CTR_ER_RESET, priv->base + IFI_CANFD_ERROR_CTR); @@ -430,8 +446,9 @@ static int ifi_canfd_handle_lec_err(struct net_device *ndev) priv->base + IFI_CANFD_INTERRUPT); writel(IFI_CANFD_ERROR_CTR_ER_ENABLE, priv->base + IFI_CANFD_ERROR_CTR); - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + if (unlikely(!skb)) + return 0; + netif_receive_skb(skb); return 1; @@ -456,7 +473,6 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev, enum can_state new_state) { struct ifi_canfd_priv *priv = netdev_priv(ndev); - struct net_device_stats *stats = &ndev->stats; struct can_frame *cf; struct sk_buff *skb; struct can_berr_counter bec; @@ -498,7 +514,7 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev, switch (new_state) { case CAN_STATE_ERROR_WARNING: /* error warning state */ - cf->can_id |= CAN_ERR_CRTL; + cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT; cf->data[1] = (bec.txerr > bec.rxerr) ? 
CAN_ERR_CRTL_TX_WARNING : CAN_ERR_CRTL_RX_WARNING; @@ -507,7 +523,7 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev, break; case CAN_STATE_ERROR_PASSIVE: /* error passive state */ - cf->can_id |= CAN_ERR_CRTL; + cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT; cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; if (bec.txerr > 127) cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE; @@ -522,8 +538,6 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev, break; } - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; netif_receive_skb(skb); return 1; @@ -629,9 +643,8 @@ static irqreturn_t ifi_canfd_isr(int irq, void *dev_id) /* TX IRQ */ if (isr & IFI_CANFD_INTERRUPT_TXFIFO_REMOVE) { - stats->tx_bytes += can_get_echo_skb(ndev, 0); + stats->tx_bytes += can_get_echo_skb(ndev, 0, NULL); stats->tx_packets++; - can_led_event(ndev, CAN_LED_EVENT_TX); } if (isr & tx_irq_mask) @@ -656,7 +669,7 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev) { struct ifi_canfd_priv *priv = netdev_priv(ndev); const struct can_bittiming *bt = &priv->can.bittiming; - const struct can_bittiming *dbt = &priv->can.data_bittiming; + const struct can_bittiming *dbt = &priv->can.fd.data_bittiming; u16 brp, sjw, tseg1, tseg2, tdc; /* Configure bit timing */ @@ -835,7 +848,6 @@ static int ifi_canfd_open(struct net_device *ndev) ifi_canfd_start(ndev); - can_led_event(ndev, CAN_LED_EVENT_OPEN); napi_enable(&priv->napi); netif_start_queue(ndev); @@ -858,8 +870,6 @@ static int ifi_canfd_close(struct net_device *ndev) close_candev(ndev); - can_led_event(ndev, CAN_LED_EVENT_STOP); - return 0; } @@ -871,7 +881,7 @@ static netdev_tx_t ifi_canfd_start_xmit(struct sk_buff *skb, u32 txst, txid, txdlc; int i; - if (can_dropped_invalid_skb(ndev, skb)) + if (can_dev_dropped_skb(ndev, skb)) return NETDEV_TX_OK; /* Check if the TX buffer is full */ @@ -900,7 +910,7 @@ static netdev_tx_t ifi_canfd_start_xmit(struct sk_buff *skb, txid = cf->can_id & CAN_SFF_MASK; } - txdlc = can_len2dlc(cf->len); + txdlc = can_fd_len2dlc(cf->len); if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) && can_is_canfd_skb(skb)) { txdlc |= IFI_CANFD_TXFIFO_DLC_EDL; if (cf->flags & CANFD_BRS) @@ -922,7 +932,7 @@ static netdev_tx_t ifi_canfd_start_xmit(struct sk_buff *skb, writel(0, priv->base + IFI_CANFD_TXFIFO_REPEATCOUNT); writel(0, priv->base + IFI_CANFD_TXFIFO_SUSPEND_US); - can_put_echo_skb(skb, ndev, 0); + can_put_echo_skb(skb, ndev, 0, 0); /* Start the transmission */ writel(IFI_CANFD_TXSTCMD_ADD_MSG, priv->base + IFI_CANFD_TXSTCMD); @@ -934,7 +944,10 @@ static const struct net_device_ops ifi_canfd_netdev_ops = { .ndo_open = ifi_canfd_open, .ndo_stop = ifi_canfd_close, .ndo_start_xmit = ifi_canfd_start_xmit, - .ndo_change_mtu = can_change_mtu, +}; + +static const struct ethtool_ops ifi_canfd_ethtool_ops = { + .get_ts_info = ethtool_op_get_ts_info, }; static int ifi_canfd_plat_probe(struct platform_device *pdev) @@ -942,15 +955,16 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct net_device *ndev; struct ifi_canfd_priv *priv; - struct resource *res; void __iomem *addr; int irq, ret; u32 id, rev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - addr = devm_ioremap_resource(dev, res); + addr = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(addr)) + return PTR_ERR(addr); + irq = platform_get_irq(pdev, 0); - if (IS_ERR(addr) || irq < 0) + if (irq < 0) return -EINVAL; id = readl(addr + IFI_CANFD_IP_ID); @@ -973,21 +987,22 @@ static int ifi_canfd_plat_probe(struct platform_device 
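ifi_canfd_read_fifo() and ifi_canfd_start_xmit() now go through can_fd_dlc2len(), can_cc_dlc2len() and can_fd_len2dlc() to translate between the 4-bit DLC code and the real payload length (for Classical CAN the length is simply the DLC clamped to 8). A minimal standalone sketch of the standard CAN FD mapping these helpers implement (the table and function names here are illustrative, not the kernel's):

#include <stdio.h>

/* Standard CAN FD DLC (0..15) to payload length in bytes */
static const unsigned int fd_dlc2len[16] = {
	0, 1, 2, 3, 4, 5, 6, 7, 8, 12, 16, 20, 24, 32, 48, 64
};

/* Smallest DLC whose payload size can hold a given length */
static unsigned int fd_len2dlc(unsigned int len)
{
	unsigned int dlc;

	for (dlc = 0; dlc < 15; dlc++)
		if (fd_dlc2len[dlc] >= len)
			return dlc;
	return 15;	/* anything above 48 bytes needs the 64-byte slot */
}

int main(void)
{
	/* a 17-byte payload is padded up to the next valid size, 20 bytes */
	unsigned int dlc = fd_len2dlc(17);

	printf("len 17 -> dlc %u -> len %u\n", dlc, fd_dlc2len[dlc]);
	return 0;
}

Lengths that fall between two valid CAN FD sizes are rounded up to the next DLC, which is why a 17-byte frame occupies a 20-byte slot on the wire.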
*pdev) ndev->irq = irq; ndev->flags |= IFF_ECHO; /* we support local echo */ ndev->netdev_ops = &ifi_canfd_netdev_ops; + ndev->ethtool_ops = &ifi_canfd_ethtool_ops; priv = netdev_priv(ndev); priv->ndev = ndev; priv->base = addr; - netif_napi_add(ndev, &priv->napi, ifi_canfd_poll, 64); + netif_napi_add(ndev, &priv->napi, ifi_canfd_poll); priv->can.state = CAN_STATE_STOPPED; priv->can.clock.freq = readl(addr + IFI_CANFD_CANCLOCK); - priv->can.bittiming_const = &ifi_canfd_bittiming_const; - priv->can.data_bittiming_const = &ifi_canfd_bittiming_const; - priv->can.do_set_mode = ifi_canfd_set_mode; - priv->can.do_get_berr_counter = ifi_canfd_get_berr_counter; + priv->can.bittiming_const = &ifi_canfd_bittiming_const; + priv->can.fd.data_bittiming_const = &ifi_canfd_bittiming_const; + priv->can.do_set_mode = ifi_canfd_set_mode; + priv->can.do_get_berr_counter = ifi_canfd_get_berr_counter; /* IFI CANFD can do both Bosch FD and ISO FD */ priv->can.ctrlmode = CAN_CTRLMODE_FD; @@ -1008,8 +1023,6 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev) goto err_reg; } - devm_can_led_init(ndev); - dev_info(dev, "Driver registered: regs=%p, irq=%d, clock=%d\n", priv->base, ndev->irq, priv->can.clock.freq); @@ -1020,15 +1033,13 @@ err_reg: return ret; } -static int ifi_canfd_plat_remove(struct platform_device *pdev) +static void ifi_canfd_plat_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); unregister_candev(ndev); platform_set_drvdata(pdev, NULL); free_candev(ndev); - - return 0; } static const struct of_device_id ifi_canfd_of_table[] = { @@ -1043,7 +1054,7 @@ static struct platform_driver ifi_canfd_plat_driver = { .of_match_table = ifi_canfd_of_table, }, .probe = ifi_canfd_plat_probe, - .remove = ifi_canfd_plat_remove, + .remove = ifi_canfd_plat_remove, }; module_platform_driver(ifi_canfd_plat_driver); diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c index 02042cb09bd2..1efdd1fd8caa 100644 --- a/drivers/net/can/janz-ican3.c +++ b/drivers/net/can/janz-ican3.c @@ -1,18 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Janz MODULbus VMOD-ICAN3 CAN Interface Driver * * Copyright (c) 2010 Ira W. Snyder <iws@ovro.caltech.edu> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
*/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/delay.h> +#include <linux/ethtool.h> #include <linux/platform_device.h> #include <linux/netdevice.h> @@ -920,10 +917,10 @@ static void ican3_to_can_frame(struct ican3_dev *mod, cf->can_id |= desc->data[0] << 3; cf->can_id |= (desc->data[1] & 0xe0) >> 5; - cf->can_dlc = get_can_dlc(desc->data[1] & ICAN3_CAN_DLC_MASK); - memcpy(cf->data, &desc->data[2], cf->can_dlc); + cf->len = can_cc_dlc2len(desc->data[1] & ICAN3_CAN_DLC_MASK); + memcpy(cf->data, &desc->data[2], cf->len); } else { - cf->can_dlc = get_can_dlc(desc->data[0] & ICAN3_CAN_DLC_MASK); + cf->len = can_cc_dlc2len(desc->data[0] & ICAN3_CAN_DLC_MASK); if (desc->data[0] & ICAN3_EFF_RTR) cf->can_id |= CAN_RTR_FLAG; @@ -938,7 +935,7 @@ static void ican3_to_can_frame(struct ican3_dev *mod, cf->can_id |= desc->data[3] >> 5; /* 2-0 */ } - memcpy(cf->data, &desc->data[6], cf->can_dlc); + memcpy(cf->data, &desc->data[6], cf->len); } } @@ -951,7 +948,7 @@ static void can_frame_to_ican3(struct ican3_dev *mod, /* we always use the extended format, with the ECHO flag set */ desc->command = ICAN3_CAN_TYPE_EFF; - desc->data[0] |= cf->can_dlc; + desc->data[0] |= cf->len; desc->data[1] |= ICAN3_ECHO; /* support single transmission (no retries) mode */ @@ -974,7 +971,7 @@ static void can_frame_to_ican3(struct ican3_dev *mod, } /* copy the data bits into the descriptor */ - memcpy(&desc->data[6], cf->data, cf->can_dlc); + memcpy(&desc->data[6], cf->data, cf->len); } /* @@ -1131,7 +1128,7 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg) /* bus error interrupt */ if (isrc == CEVTIND_BEI) { mod->can.can_stats.bus_error++; - cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR | CAN_ERR_CNT; switch (ecc & ECC_MASK) { case ECC_BIT: @@ -1157,7 +1154,7 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg) if (state != mod->can.state && (state == CAN_STATE_ERROR_WARNING || state == CAN_STATE_ERROR_PASSIVE)) { - cf->can_id |= CAN_ERR_CRTL; + cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT; if (state == CAN_STATE_ERROR_WARNING) { mod->can.can_stats.error_warning++; cf->data[1] = (txerr > rxerr) ? 
@@ -1281,6 +1278,8 @@ static void ican3_put_echo_skb(struct ican3_dev *mod, struct sk_buff *skb) if (!skb) return; + skb_tx_timestamp(skb); + /* save this skb for tx interrupt echo handling */ skb_queue_tail(&mod->echoq, skb); } @@ -1289,7 +1288,7 @@ static unsigned int ican3_get_echo_skb(struct ican3_dev *mod) { struct sk_buff *skb = skb_dequeue(&mod->echoq); struct can_frame *cf; - u8 dlc; + u8 dlc = 0; /* this should never trigger unless there is a driver bug */ if (!skb) { @@ -1298,7 +1297,8 @@ static unsigned int ican3_get_echo_skb(struct ican3_dev *mod) } cf = (struct can_frame *)skb->data; - dlc = cf->can_dlc; + if (!(cf->can_id & CAN_RTR_FLAG)) + dlc = cf->len; /* check flag whether this packet has to be looped back */ if (skb->pkt_type != PACKET_LOOPBACK) { @@ -1336,10 +1336,10 @@ static bool ican3_echo_skb_matches(struct ican3_dev *mod, struct sk_buff *skb) if (cf->can_id != echo_cf->can_id) return false; - if (cf->can_dlc != echo_cf->can_dlc) + if (cf->len != echo_cf->len) return false; - return memcmp(cf->data, echo_cf->data, cf->can_dlc) == 0; + return memcmp(cf->data, echo_cf->data, cf->len) == 0; } /* @@ -1425,7 +1425,8 @@ static int ican3_recv_skb(struct ican3_dev *mod) /* update statistics, receive the skb */ stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + if (!(cf->can_id & CAN_RTR_FLAG)) + stats->rx_bytes += cf->len; netif_receive_skb(skb); err_noalloc: @@ -1455,7 +1456,7 @@ static int ican3_napi(struct napi_struct *napi, int budget) /* process all communication messages */ while (true) { - struct ican3_msg uninitialized_var(msg); + struct ican3_msg msg; ret = ican3_recv_msg(mod, &msg); if (ret) break; @@ -1692,7 +1693,7 @@ static netdev_tx_t ican3_xmit(struct sk_buff *skb, struct net_device *ndev) void __iomem *desc_addr; unsigned long flags; - if (can_dropped_invalid_skb(ndev, skb)) + if (can_dev_dropped_skb(ndev, skb)) return NETDEV_TX_OK; spin_lock_irqsave(&mod->lock, flags); @@ -1751,7 +1752,10 @@ static const struct net_device_ops ican3_netdev_ops = { .ndo_open = ican3_open, .ndo_stop = ican3_stop, .ndo_start_xmit = ican3_xmit, - .ndo_change_mtu = can_change_mtu, +}; + +static const struct ethtool_ops ican3_ethtool_ops = { + .get_ts_info = ethtool_op_get_ts_info, }; /* @@ -1819,9 +1823,9 @@ static int ican3_get_berr_counter(const struct net_device *ndev, * Sysfs Attributes */ -static ssize_t ican3_sysfs_show_term(struct device *dev, - struct device_attribute *attr, - char *buf) +static ssize_t termination_show(struct device *dev, + struct device_attribute *attr, + char *buf) { struct ican3_dev *mod = netdev_priv(to_net_dev(dev)); int ret; @@ -1835,12 +1839,12 @@ static ssize_t ican3_sysfs_show_term(struct device *dev, return -ETIMEDOUT; } - return snprintf(buf, PAGE_SIZE, "%u\n", mod->termination_enabled); + return sysfs_emit(buf, "%u\n", mod->termination_enabled); } -static ssize_t ican3_sysfs_set_term(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t termination_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { struct ican3_dev *mod = netdev_priv(to_net_dev(dev)); unsigned long enable; @@ -1856,18 +1860,17 @@ static ssize_t ican3_sysfs_set_term(struct device *dev, return count; } -static ssize_t ican3_sysfs_show_fwinfo(struct device *dev, - struct device_attribute *attr, - char *buf) +static ssize_t fwinfo_show(struct device *dev, + struct device_attribute *attr, + char *buf) { struct ican3_dev *mod = netdev_priv(to_net_dev(dev)); - return scnprintf(buf, 
PAGE_SIZE, "%s\n", mod->fwinfo); + return sysfs_emit(buf, "%s\n", mod->fwinfo); } -static DEVICE_ATTR(termination, 0644, ican3_sysfs_show_term, - ican3_sysfs_set_term); -static DEVICE_ATTR(fwinfo, 0444, ican3_sysfs_show_fwinfo, NULL); +static DEVICE_ATTR_RW(termination); +static DEVICE_ATTR_RO(fwinfo); static struct attribute *ican3_sysfs_attrs[] = { &dev_attr_termination.attr, @@ -1913,7 +1916,7 @@ static int ican3_probe(struct platform_device *pdev) mod = netdev_priv(ndev); mod->ndev = ndev; mod->num = pdata->modno; - netif_napi_add(ndev, &mod->napi, ican3_napi, ICAN3_RX_BUFFERS); + netif_napi_add_weight(ndev, &mod->napi, ican3_napi, ICAN3_RX_BUFFERS); skb_queue_head_init(&mod->echoq); spin_lock_init(&mod->lock); init_completion(&mod->termination_comp); @@ -1926,6 +1929,7 @@ static int ican3_probe(struct platform_device *pdev) mod->free_page = DPM_FREE_START; ndev->netdev_ops = &ican3_netdev_ops; + ndev->ethtool_ops = &ican3_ethtool_ops; ndev->flags |= IFF_ECHO; SET_NETDEV_DEV(ndev, &pdev->dev); @@ -1940,7 +1944,6 @@ static int ican3_probe(struct platform_device *pdev) /* find our IRQ number */ mod->irq = platform_get_irq(pdev, 0); if (mod->irq < 0) { - dev_err(dev, "IRQ line not found\n"); ret = -ENODEV; goto out_free_ndev; } @@ -2019,7 +2022,7 @@ out_return: return ret; } -static int ican3_remove(struct platform_device *pdev) +static void ican3_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct ican3_dev *mod = netdev_priv(ndev); @@ -2038,8 +2041,6 @@ static int ican3_remove(struct platform_device *pdev) iounmap(mod->dpm); free_candev(ndev); - - return 0; } static struct platform_driver ican3_driver = { diff --git a/drivers/net/can/kvaser_pciefd/Makefile b/drivers/net/can/kvaser_pciefd/Makefile new file mode 100644 index 000000000000..8c5b8cdc6b5f --- /dev/null +++ b/drivers/net/can/kvaser_pciefd/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_CAN_KVASER_PCIEFD) += kvaser_pciefd.o +kvaser_pciefd-y = kvaser_pciefd_core.o kvaser_pciefd_devlink.o diff --git a/drivers/net/can/kvaser_pciefd/kvaser_pciefd.h b/drivers/net/can/kvaser_pciefd/kvaser_pciefd.h new file mode 100644 index 000000000000..08c9ddc1ee85 --- /dev/null +++ b/drivers/net/can/kvaser_pciefd/kvaser_pciefd.h @@ -0,0 +1,96 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ +/* kvaser_pciefd common definitions and declarations + * + * Copyright (C) 2025 KVASER AB, Sweden. All rights reserved. 
+ */ + +#ifndef _KVASER_PCIEFD_H +#define _KVASER_PCIEFD_H + +#include <linux/can/dev.h> +#include <linux/completion.h> +#include <linux/pci.h> +#include <linux/spinlock.h> +#include <linux/timer.h> +#include <linux/types.h> +#include <net/devlink.h> + +#define KVASER_PCIEFD_MAX_CAN_CHANNELS 8UL +#define KVASER_PCIEFD_DMA_COUNT 2U +#define KVASER_PCIEFD_DMA_SIZE (4U * 1024U) +#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17U + +struct kvaser_pciefd; + +struct kvaser_pciefd_address_offset { + u32 serdes; + u32 pci_ien; + u32 pci_irq; + u32 sysid; + u32 loopback; + u32 kcan_srb_fifo; + u32 kcan_srb; + u32 kcan_ch0; + u32 kcan_ch1; +}; + +struct kvaser_pciefd_irq_mask { + u32 kcan_rx0; + u32 kcan_tx[KVASER_PCIEFD_MAX_CAN_CHANNELS]; + u32 all; +}; + +struct kvaser_pciefd_dev_ops { + void (*kvaser_pciefd_write_dma_map)(struct kvaser_pciefd *pcie, + dma_addr_t addr, int index); +}; + +struct kvaser_pciefd_driver_data { + const struct kvaser_pciefd_address_offset *address_offset; + const struct kvaser_pciefd_irq_mask *irq_mask; + const struct kvaser_pciefd_dev_ops *ops; +}; + +struct kvaser_pciefd_fw_version { + u8 major; + u8 minor; + u16 build; +}; + +struct kvaser_pciefd_can { + struct can_priv can; + struct devlink_port devlink_port; + struct kvaser_pciefd *kv_pcie; + void __iomem *reg_base; + struct can_berr_counter bec; + u32 ioc; + u8 cmd_seq; + u8 tx_max_count; + u8 tx_idx; + u8 ack_idx; + int err_rep_cnt; + unsigned int completed_tx_pkts; + unsigned int completed_tx_bytes; + spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */ + struct timer_list bec_poll_timer; + struct completion start_comp, flush_comp; +}; + +struct kvaser_pciefd { + struct pci_dev *pci; + void __iomem *reg_base; + struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS]; + const struct kvaser_pciefd_driver_data *driver_data; + void *dma_data[KVASER_PCIEFD_DMA_COUNT]; + u8 nr_channels; + u32 bus_freq; + u32 freq; + u32 freq_to_ticks_div; + struct kvaser_pciefd_fw_version fw_version; +}; + +extern const struct devlink_ops kvaser_pciefd_devlink_ops; + +int kvaser_pciefd_devlink_port_register(struct kvaser_pciefd_can *can); +void kvaser_pciefd_devlink_port_unregister(struct kvaser_pciefd_can *can); +#endif /* _KVASER_PCIEFD_H */ diff --git a/drivers/net/can/kvaser_pciefd/kvaser_pciefd_core.c b/drivers/net/can/kvaser_pciefd/kvaser_pciefd_core.c new file mode 100644 index 000000000000..d8c9bfb20230 --- /dev/null +++ b/drivers/net/can/kvaser_pciefd/kvaser_pciefd_core.c @@ -0,0 +1,1908 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause +/* Copyright (C) 2018 KVASER AB, Sweden. All rights reserved. 
+ * Parts of this driver are based on the following: + * - Kvaser linux pciefd driver (version 5.42) + * - PEAK linux canfd driver + */ + +#include "kvaser_pciefd.h" + +#include <linux/bitfield.h> +#include <linux/can/dev.h> +#include <linux/device.h> +#include <linux/ethtool.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/minmax.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/pci.h> +#include <linux/timer.h> +#include <net/netdev_queues.h> + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Kvaser AB <support@kvaser.com>"); +MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices"); + +#define KVASER_PCIEFD_DRV_NAME "kvaser_pciefd" + +#define KVASER_PCIEFD_WAIT_TIMEOUT msecs_to_jiffies(1000) +#define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200)) +#define KVASER_PCIEFD_MAX_ERR_REP 256U + +#define KVASER_PCIEFD_VENDOR 0x1a07 + +/* Altera based devices */ +#define KVASER_PCIEFD_4HS_DEVICE_ID 0x000d +#define KVASER_PCIEFD_2HS_V2_DEVICE_ID 0x000e +#define KVASER_PCIEFD_HS_V2_DEVICE_ID 0x000f +#define KVASER_PCIEFD_MINIPCIE_HS_V2_DEVICE_ID 0x0010 +#define KVASER_PCIEFD_MINIPCIE_2HS_V2_DEVICE_ID 0x0011 + +/* SmartFusion2 based devices */ +#define KVASER_PCIEFD_2CAN_V3_DEVICE_ID 0x0012 +#define KVASER_PCIEFD_1CAN_V3_DEVICE_ID 0x0013 +#define KVASER_PCIEFD_4CAN_V2_DEVICE_ID 0x0014 +#define KVASER_PCIEFD_MINIPCIE_2CAN_V3_DEVICE_ID 0x0015 +#define KVASER_PCIEFD_MINIPCIE_1CAN_V3_DEVICE_ID 0x0016 + +/* Xilinx based devices */ +#define KVASER_PCIEFD_M2_4CAN_DEVICE_ID 0x0017 +#define KVASER_PCIEFD_8CAN_DEVICE_ID 0x0019 + +/* Altera SerDes Enable 64-bit DMA address translation */ +#define KVASER_PCIEFD_ALTERA_DMA_64BIT BIT(0) + +/* SmartFusion2 SerDes LSB address translation mask */ +#define KVASER_PCIEFD_SF2_DMA_LSB_MASK GENMASK(31, 12) + +/* Xilinx SerDes LSB address translation mask */ +#define KVASER_PCIEFD_XILINX_DMA_LSB_MASK GENMASK(31, 12) + +/* Kvaser KCAN CAN controller registers */ +#define KVASER_PCIEFD_KCAN_FIFO_REG 0x100 +#define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180 +#define KVASER_PCIEFD_KCAN_CTRL_REG 0x2c0 +#define KVASER_PCIEFD_KCAN_CMD_REG 0x400 +#define KVASER_PCIEFD_KCAN_IOC_REG 0x404 +#define KVASER_PCIEFD_KCAN_IEN_REG 0x408 +#define KVASER_PCIEFD_KCAN_IRQ_REG 0x410 +#define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG 0x414 +#define KVASER_PCIEFD_KCAN_STAT_REG 0x418 +#define KVASER_PCIEFD_KCAN_MODE_REG 0x41c +#define KVASER_PCIEFD_KCAN_BTRN_REG 0x420 +#define KVASER_PCIEFD_KCAN_BUS_LOAD_REG 0x424 +#define KVASER_PCIEFD_KCAN_BTRD_REG 0x428 +#define KVASER_PCIEFD_KCAN_PWM_REG 0x430 +/* System identification and information registers */ +#define KVASER_PCIEFD_SYSID_VERSION_REG 0x8 +#define KVASER_PCIEFD_SYSID_CANFREQ_REG 0xc +#define KVASER_PCIEFD_SYSID_BUSFREQ_REG 0x10 +#define KVASER_PCIEFD_SYSID_BUILD_REG 0x14 +/* Shared receive buffer FIFO registers */ +#define KVASER_PCIEFD_SRB_FIFO_LAST_REG 0x1f4 +/* Shared receive buffer registers */ +#define KVASER_PCIEFD_SRB_CMD_REG 0x0 +#define KVASER_PCIEFD_SRB_IEN_REG 0x04 +#define KVASER_PCIEFD_SRB_IRQ_REG 0x0c +#define KVASER_PCIEFD_SRB_STAT_REG 0x10 +#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG 0x14 +#define KVASER_PCIEFD_SRB_CTRL_REG 0x18 + +/* System build information fields */ +#define KVASER_PCIEFD_SYSID_VERSION_NR_CHAN_MASK GENMASK(31, 24) +#define KVASER_PCIEFD_SYSID_VERSION_MAJOR_MASK GENMASK(23, 16) +#define KVASER_PCIEFD_SYSID_VERSION_MINOR_MASK GENMASK(7, 0) +#define KVASER_PCIEFD_SYSID_BUILD_SEQ_MASK GENMASK(15, 1) + +/* Reset DMA buffer 0, 1 and FIFO offset */ 
+#define KVASER_PCIEFD_SRB_CMD_RDB1 BIT(5) +#define KVASER_PCIEFD_SRB_CMD_RDB0 BIT(4) +#define KVASER_PCIEFD_SRB_CMD_FOR BIT(0) + +/* DMA underflow, buffer 0 and 1 */ +#define KVASER_PCIEFD_SRB_IRQ_DUF1 BIT(13) +#define KVASER_PCIEFD_SRB_IRQ_DUF0 BIT(12) +/* DMA overflow, buffer 0 and 1 */ +#define KVASER_PCIEFD_SRB_IRQ_DOF1 BIT(11) +#define KVASER_PCIEFD_SRB_IRQ_DOF0 BIT(10) +/* DMA packet done, buffer 0 and 1 */ +#define KVASER_PCIEFD_SRB_IRQ_DPD1 BIT(9) +#define KVASER_PCIEFD_SRB_IRQ_DPD0 BIT(8) + +/* Got DMA support */ +#define KVASER_PCIEFD_SRB_STAT_DMA BIT(24) +/* DMA idle */ +#define KVASER_PCIEFD_SRB_STAT_DI BIT(15) + +/* SRB current packet level */ +#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK GENMASK(7, 0) + +/* DMA Enable */ +#define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0) + +/* KCAN CTRL packet types */ +#define KVASER_PCIEFD_KCAN_CTRL_TYPE_MASK GENMASK(31, 29) +#define KVASER_PCIEFD_KCAN_CTRL_TYPE_EFLUSH 0x4 +#define KVASER_PCIEFD_KCAN_CTRL_TYPE_EFRAME 0x5 + +/* Command sequence number */ +#define KVASER_PCIEFD_KCAN_CMD_SEQ_MASK GENMASK(23, 16) +/* Command bits */ +#define KVASER_PCIEFD_KCAN_CMD_MASK GENMASK(5, 0) +/* Abort, flush and reset */ +#define KVASER_PCIEFD_KCAN_CMD_AT BIT(1) +/* Request status packet */ +#define KVASER_PCIEFD_KCAN_CMD_SRQ BIT(0) + +/* Control CAN LED, active low */ +#define KVASER_PCIEFD_KCAN_IOC_LED BIT(0) + +/* Transmitter unaligned */ +#define KVASER_PCIEFD_KCAN_IRQ_TAL BIT(17) +/* Tx FIFO empty */ +#define KVASER_PCIEFD_KCAN_IRQ_TE BIT(16) +/* Tx FIFO overflow */ +#define KVASER_PCIEFD_KCAN_IRQ_TOF BIT(15) +/* Tx buffer flush done */ +#define KVASER_PCIEFD_KCAN_IRQ_TFD BIT(14) +/* Abort done */ +#define KVASER_PCIEFD_KCAN_IRQ_ABD BIT(13) +/* Rx FIFO overflow */ +#define KVASER_PCIEFD_KCAN_IRQ_ROF BIT(5) +/* FDF bit when controller is in classic CAN mode */ +#define KVASER_PCIEFD_KCAN_IRQ_FDIC BIT(3) +/* Bus parameter protection error */ +#define KVASER_PCIEFD_KCAN_IRQ_BPP BIT(2) +/* Tx FIFO unaligned end */ +#define KVASER_PCIEFD_KCAN_IRQ_TAE BIT(1) +/* Tx FIFO unaligned read */ +#define KVASER_PCIEFD_KCAN_IRQ_TAR BIT(0) + +/* Tx FIFO size */ +#define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK GENMASK(23, 16) +/* Tx FIFO current packet level */ +#define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK GENMASK(7, 0) + +/* Current status packet sequence number */ +#define KVASER_PCIEFD_KCAN_STAT_SEQNO_MASK GENMASK(31, 24) +/* Controller got CAN FD capability */ +#define KVASER_PCIEFD_KCAN_STAT_FD BIT(19) +/* Controller got one-shot capability */ +#define KVASER_PCIEFD_KCAN_STAT_CAP BIT(16) +/* Controller in reset mode */ +#define KVASER_PCIEFD_KCAN_STAT_IRM BIT(15) +/* Reset mode request */ +#define KVASER_PCIEFD_KCAN_STAT_RMR BIT(14) +/* Bus off */ +#define KVASER_PCIEFD_KCAN_STAT_BOFF BIT(11) +/* Idle state. Controller in reset mode and no abort or flush pending */ +#define KVASER_PCIEFD_KCAN_STAT_IDLE BIT(10) +/* Abort request */ +#define KVASER_PCIEFD_KCAN_STAT_AR BIT(7) +/* Controller is bus off */ +#define KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MASK \ + (KVASER_PCIEFD_KCAN_STAT_AR | KVASER_PCIEFD_KCAN_STAT_BOFF | \ + KVASER_PCIEFD_KCAN_STAT_RMR | KVASER_PCIEFD_KCAN_STAT_IRM) + +/* Classic CAN mode */ +#define KVASER_PCIEFD_KCAN_MODE_CCM BIT(31) +/* Active error flag enable. 
Clear to force error passive */ +#define KVASER_PCIEFD_KCAN_MODE_EEN BIT(23) +/* Acknowledgment packet type */ +#define KVASER_PCIEFD_KCAN_MODE_APT BIT(20) +/* CAN FD non-ISO */ +#define KVASER_PCIEFD_KCAN_MODE_NIFDEN BIT(15) +/* Error packet enable */ +#define KVASER_PCIEFD_KCAN_MODE_EPEN BIT(12) +/* Listen only mode */ +#define KVASER_PCIEFD_KCAN_MODE_LOM BIT(9) +/* Reset mode */ +#define KVASER_PCIEFD_KCAN_MODE_RM BIT(8) + +/* BTRN and BTRD fields */ +#define KVASER_PCIEFD_KCAN_BTRN_TSEG2_MASK GENMASK(30, 26) +#define KVASER_PCIEFD_KCAN_BTRN_TSEG1_MASK GENMASK(25, 17) +#define KVASER_PCIEFD_KCAN_BTRN_SJW_MASK GENMASK(16, 13) +#define KVASER_PCIEFD_KCAN_BTRN_BRP_MASK GENMASK(12, 0) + +/* PWM Control fields */ +#define KVASER_PCIEFD_KCAN_PWM_TOP_MASK GENMASK(23, 16) +#define KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK GENMASK(7, 0) + +/* KCAN packet type IDs */ +#define KVASER_PCIEFD_PACK_TYPE_DATA 0x0 +#define KVASER_PCIEFD_PACK_TYPE_ACK 0x1 +#define KVASER_PCIEFD_PACK_TYPE_TXRQ 0x2 +#define KVASER_PCIEFD_PACK_TYPE_ERROR 0x3 +#define KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK 0x4 +#define KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK 0x5 +#define KVASER_PCIEFD_PACK_TYPE_ACK_DATA 0x6 +#define KVASER_PCIEFD_PACK_TYPE_STATUS 0x8 +#define KVASER_PCIEFD_PACK_TYPE_BUS_LOAD 0x9 + +/* Common KCAN packet definitions, second word */ +#define KVASER_PCIEFD_PACKET_TYPE_MASK GENMASK(31, 28) +#define KVASER_PCIEFD_PACKET_CHID_MASK GENMASK(27, 25) +#define KVASER_PCIEFD_PACKET_SEQ_MASK GENMASK(7, 0) + +/* KCAN Transmit/Receive data packet, first word */ +#define KVASER_PCIEFD_RPACKET_IDE BIT(30) +#define KVASER_PCIEFD_RPACKET_RTR BIT(29) +#define KVASER_PCIEFD_RPACKET_ID_MASK GENMASK(28, 0) +/* KCAN Transmit data packet, second word */ +#define KVASER_PCIEFD_TPACKET_AREQ BIT(31) +#define KVASER_PCIEFD_TPACKET_SMS BIT(16) +/* KCAN Transmit/Receive data packet, second word */ +#define KVASER_PCIEFD_RPACKET_FDF BIT(15) +#define KVASER_PCIEFD_RPACKET_BRS BIT(14) +#define KVASER_PCIEFD_RPACKET_ESI BIT(13) +#define KVASER_PCIEFD_RPACKET_DLC_MASK GENMASK(11, 8) + +/* KCAN Transmit acknowledge packet, first word */ +#define KVASER_PCIEFD_APACKET_NACK BIT(11) +#define KVASER_PCIEFD_APACKET_ABL BIT(10) +#define KVASER_PCIEFD_APACKET_CT BIT(9) +#define KVASER_PCIEFD_APACKET_FLU BIT(8) + +/* KCAN Status packet, first word */ +#define KVASER_PCIEFD_SPACK_RMCD BIT(22) +#define KVASER_PCIEFD_SPACK_IRM BIT(21) +#define KVASER_PCIEFD_SPACK_IDET BIT(20) +#define KVASER_PCIEFD_SPACK_BOFF BIT(16) +#define KVASER_PCIEFD_SPACK_RXERR_MASK GENMASK(15, 8) +#define KVASER_PCIEFD_SPACK_TXERR_MASK GENMASK(7, 0) +/* KCAN Status packet, second word */ +#define KVASER_PCIEFD_SPACK_EPLR BIT(24) +#define KVASER_PCIEFD_SPACK_EWLR BIT(23) +#define KVASER_PCIEFD_SPACK_AUTO BIT(21) + +/* KCAN Error detected packet, second word */ +#define KVASER_PCIEFD_EPACK_DIR_TX BIT(0) + +/* Macros for calculating addresses of registers */ +#define KVASER_PCIEFD_GET_BLOCK_ADDR(pcie, block) \ + ((pcie)->reg_base + (pcie)->driver_data->address_offset->block) +#define KVASER_PCIEFD_PCI_IEN_ADDR(pcie) \ + (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), pci_ien)) +#define KVASER_PCIEFD_PCI_IRQ_ADDR(pcie) \ + (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), pci_irq)) +#define KVASER_PCIEFD_SERDES_ADDR(pcie) \ + (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), serdes)) +#define KVASER_PCIEFD_SYSID_ADDR(pcie) \ + (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), sysid)) +#define KVASER_PCIEFD_LOOPBACK_ADDR(pcie) \ + (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), loopback)) +#define KVASER_PCIEFD_SRB_FIFO_ADDR(pcie) \ + 
(KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_srb_fifo)) +#define KVASER_PCIEFD_SRB_ADDR(pcie) \ + (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_srb)) +#define KVASER_PCIEFD_KCAN_CH0_ADDR(pcie) \ + (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_ch0)) +#define KVASER_PCIEFD_KCAN_CH1_ADDR(pcie) \ + (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_ch1)) +#define KVASER_PCIEFD_KCAN_CHANNEL_SPAN(pcie) \ + (KVASER_PCIEFD_KCAN_CH1_ADDR((pcie)) - KVASER_PCIEFD_KCAN_CH0_ADDR((pcie))) +#define KVASER_PCIEFD_KCAN_CHX_ADDR(pcie, i) \ + (KVASER_PCIEFD_KCAN_CH0_ADDR((pcie)) + (i) * KVASER_PCIEFD_KCAN_CHANNEL_SPAN((pcie))) + +struct kvaser_pciefd; +static void kvaser_pciefd_write_dma_map_altera(struct kvaser_pciefd *pcie, + dma_addr_t addr, int index); +static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie, + dma_addr_t addr, int index); +static void kvaser_pciefd_write_dma_map_xilinx(struct kvaser_pciefd *pcie, + dma_addr_t addr, int index); + +static const struct kvaser_pciefd_address_offset kvaser_pciefd_altera_address_offset = { + .serdes = 0x1000, + .pci_ien = 0x50, + .pci_irq = 0x40, + .sysid = 0x1f020, + .loopback = 0x1f000, + .kcan_srb_fifo = 0x1f200, + .kcan_srb = 0x1f400, + .kcan_ch0 = 0x10000, + .kcan_ch1 = 0x11000, +}; + +static const struct kvaser_pciefd_address_offset kvaser_pciefd_sf2_address_offset = { + .serdes = 0x280c8, + .pci_ien = 0x102004, + .pci_irq = 0x102008, + .sysid = 0x100000, + .loopback = 0x103000, + .kcan_srb_fifo = 0x120000, + .kcan_srb = 0x121000, + .kcan_ch0 = 0x140000, + .kcan_ch1 = 0x142000, +}; + +static const struct kvaser_pciefd_address_offset kvaser_pciefd_xilinx_address_offset = { + .serdes = 0x00208, + .pci_ien = 0x102004, + .pci_irq = 0x102008, + .sysid = 0x100000, + .loopback = 0x103000, + .kcan_srb_fifo = 0x120000, + .kcan_srb = 0x121000, + .kcan_ch0 = 0x140000, + .kcan_ch1 = 0x142000, +}; + +static const struct kvaser_pciefd_irq_mask kvaser_pciefd_altera_irq_mask = { + .kcan_rx0 = BIT(4), + .kcan_tx = { BIT(0), BIT(1), BIT(2), BIT(3) }, + .all = GENMASK(4, 0), +}; + +static const struct kvaser_pciefd_irq_mask kvaser_pciefd_sf2_irq_mask = { + .kcan_rx0 = BIT(4), + .kcan_tx = { BIT(16), BIT(17), BIT(18), BIT(19) }, + .all = GENMASK(19, 16) | BIT(4), +}; + +static const struct kvaser_pciefd_irq_mask kvaser_pciefd_xilinx_irq_mask = { + .kcan_rx0 = BIT(4), + .kcan_tx = { BIT(16), BIT(17), BIT(18), BIT(19), BIT(20), BIT(21), BIT(22), BIT(23) }, + .all = GENMASK(23, 16) | BIT(4), +}; + +static const struct kvaser_pciefd_dev_ops kvaser_pciefd_altera_dev_ops = { + .kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_altera, +}; + +static const struct kvaser_pciefd_dev_ops kvaser_pciefd_sf2_dev_ops = { + .kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_sf2, +}; + +static const struct kvaser_pciefd_dev_ops kvaser_pciefd_xilinx_dev_ops = { + .kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_xilinx, +}; + +static const struct kvaser_pciefd_driver_data kvaser_pciefd_altera_driver_data = { + .address_offset = &kvaser_pciefd_altera_address_offset, + .irq_mask = &kvaser_pciefd_altera_irq_mask, + .ops = &kvaser_pciefd_altera_dev_ops, +}; + +static const struct kvaser_pciefd_driver_data kvaser_pciefd_sf2_driver_data = { + .address_offset = &kvaser_pciefd_sf2_address_offset, + .irq_mask = &kvaser_pciefd_sf2_irq_mask, + .ops = &kvaser_pciefd_sf2_dev_ops, +}; + +static const struct kvaser_pciefd_driver_data kvaser_pciefd_xilinx_driver_data = { + .address_offset = &kvaser_pciefd_xilinx_address_offset, + .irq_mask = &kvaser_pciefd_xilinx_irq_mask, + .ops = 
&kvaser_pciefd_xilinx_dev_ops, +}; + +struct kvaser_pciefd_rx_packet { + u32 header[2]; + u64 timestamp; +}; + +struct kvaser_pciefd_tx_packet { + u32 header[2]; + u8 data[64]; +}; + +static const struct can_bittiming_const kvaser_pciefd_bittiming_const = { + .name = KVASER_PCIEFD_DRV_NAME, + .tseg1_min = 1, + .tseg1_max = 512, + .tseg2_min = 1, + .tseg2_max = 32, + .sjw_max = 16, + .brp_min = 1, + .brp_max = 8192, + .brp_inc = 1, +}; + +static struct pci_device_id kvaser_pciefd_id_table[] = { + { + PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4HS_DEVICE_ID), + .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data, + }, + { + PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2HS_V2_DEVICE_ID), + .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data, + }, + { + PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_HS_V2_DEVICE_ID), + .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data, + }, + { + PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_HS_V2_DEVICE_ID), + .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data, + }, + { + PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2HS_V2_DEVICE_ID), + .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data, + }, + { + PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2CAN_V3_DEVICE_ID), + .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data, + }, + { + PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_1CAN_V3_DEVICE_ID), + .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data, + }, + { + PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4CAN_V2_DEVICE_ID), + .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data, + }, + { + PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2CAN_V3_DEVICE_ID), + .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data, + }, + { + PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_1CAN_V3_DEVICE_ID), + .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data, + }, + { + PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_M2_4CAN_DEVICE_ID), + .driver_data = (kernel_ulong_t)&kvaser_pciefd_xilinx_driver_data, + }, + { + PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_8CAN_DEVICE_ID), + .driver_data = (kernel_ulong_t)&kvaser_pciefd_xilinx_driver_data, + }, + { + 0, + }, +}; +MODULE_DEVICE_TABLE(pci, kvaser_pciefd_id_table); + +static inline void kvaser_pciefd_send_kcan_cmd(struct kvaser_pciefd_can *can, u32 cmd) +{ + iowrite32(FIELD_PREP(KVASER_PCIEFD_KCAN_CMD_MASK, cmd) | + FIELD_PREP(KVASER_PCIEFD_KCAN_CMD_SEQ_MASK, ++can->cmd_seq), + can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG); +} + +static inline void kvaser_pciefd_request_status(struct kvaser_pciefd_can *can) +{ + kvaser_pciefd_send_kcan_cmd(can, KVASER_PCIEFD_KCAN_CMD_SRQ); +} + +static inline void kvaser_pciefd_abort_flush_reset(struct kvaser_pciefd_can *can) +{ + kvaser_pciefd_send_kcan_cmd(can, KVASER_PCIEFD_KCAN_CMD_AT); +} + +static inline void kvaser_pciefd_set_led(struct kvaser_pciefd_can *can, bool on) +{ + if (on) + can->ioc &= ~KVASER_PCIEFD_KCAN_IOC_LED; + else + can->ioc |= KVASER_PCIEFD_KCAN_IOC_LED; + + iowrite32(can->ioc, can->reg_base + KVASER_PCIEFD_KCAN_IOC_REG); +} + +static void kvaser_pciefd_enable_err_gen(struct kvaser_pciefd_can *can) +{ + u32 mode; + unsigned long irq; + + spin_lock_irqsave(&can->lock, irq); + mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + if (!(mode & KVASER_PCIEFD_KCAN_MODE_EPEN)) { + mode |= KVASER_PCIEFD_KCAN_MODE_EPEN; + iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + } + 
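kvaser_pciefd_set_skb_timestamp() converts the raw hardware timestamp, counted in CAN-core clock ticks, into nanoseconds: freq_to_ticks_div is freq / 1000000 (set up later in kvaser_pciefd_setup_board()), so ticks * 1000 / freq_to_ticks_div equals ticks * 10^9 / freq. A small userspace sketch of the same arithmetic, assuming an illustrative 80 MHz CAN clock (the real value is read from the SYSID block):

#include <stdio.h>

int main(void)
{
	unsigned long long freq = 80000000ULL;	 /* assumed CAN core clock, Hz */
	unsigned long long div = freq / 1000000; /* freq_to_ticks_div == 80 */
	unsigned long long ticks = 123456;	 /* example hardware timestamp */

	/* ticks * 1000 / (freq / 1e6) is the same as ticks * 1e9 / freq */
	unsigned long long ns = ticks * 1000 / div;

	printf("%llu ticks @ %llu Hz = %llu ns\n", ticks, freq, ns);
	return 0;
}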
spin_unlock_irqrestore(&can->lock, irq); +} + +static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can) +{ + u32 mode; + unsigned long irq; + + spin_lock_irqsave(&can->lock, irq); + mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + mode &= ~KVASER_PCIEFD_KCAN_MODE_EPEN; + iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + spin_unlock_irqrestore(&can->lock, irq); +} + +static inline void kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can) +{ + u32 msk; + + msk = KVASER_PCIEFD_KCAN_IRQ_TE | KVASER_PCIEFD_KCAN_IRQ_ROF | + KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD | + KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL | + KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP | + KVASER_PCIEFD_KCAN_IRQ_TAR; + + iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); +} + +static inline void kvaser_pciefd_set_skb_timestamp(const struct kvaser_pciefd *pcie, + struct sk_buff *skb, u64 timestamp) +{ + skb_hwtstamps(skb)->hwtstamp = + ns_to_ktime(div_u64(timestamp * 1000, pcie->freq_to_ticks_div)); +} + +static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can) +{ + u32 mode; + unsigned long irq; + + spin_lock_irqsave(&can->lock, irq); + mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + if (can->can.ctrlmode & CAN_CTRLMODE_FD) { + mode &= ~KVASER_PCIEFD_KCAN_MODE_CCM; + if (can->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) + mode |= KVASER_PCIEFD_KCAN_MODE_NIFDEN; + else + mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN; + } else { + mode |= KVASER_PCIEFD_KCAN_MODE_CCM; + mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN; + } + + if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) + mode |= KVASER_PCIEFD_KCAN_MODE_LOM; + else + mode &= ~KVASER_PCIEFD_KCAN_MODE_LOM; + mode |= KVASER_PCIEFD_KCAN_MODE_EEN; + mode |= KVASER_PCIEFD_KCAN_MODE_EPEN; + /* Use ACK packet type */ + mode &= ~KVASER_PCIEFD_KCAN_MODE_APT; + mode &= ~KVASER_PCIEFD_KCAN_MODE_RM; + iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + + spin_unlock_irqrestore(&can->lock, irq); +} + +static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can) +{ + u32 status; + unsigned long irq; + + spin_lock_irqsave(&can->lock, irq); + iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); + iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD, + can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); + status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG); + if (status & KVASER_PCIEFD_KCAN_STAT_IDLE) { + /* If controller is already idle, run abort, flush and reset */ + kvaser_pciefd_abort_flush_reset(can); + } else if (!(status & KVASER_PCIEFD_KCAN_STAT_RMR)) { + u32 mode; + + /* Put controller in reset mode */ + mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + mode |= KVASER_PCIEFD_KCAN_MODE_RM; + iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + } + spin_unlock_irqrestore(&can->lock, irq); +} + +static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can) +{ + u32 mode; + unsigned long irq; + + timer_delete(&can->bec_poll_timer); + if (!completion_done(&can->flush_comp)) + kvaser_pciefd_start_controller_flush(can); + + if (!wait_for_completion_timeout(&can->flush_comp, + KVASER_PCIEFD_WAIT_TIMEOUT)) { + netdev_err(can->can.dev, "Timeout during bus on flush\n"); + return -ETIMEDOUT; + } + + spin_lock_irqsave(&can->lock, irq); + iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); + iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); + iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD, + can->reg_base 
+ KVASER_PCIEFD_KCAN_IEN_REG); + mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + mode &= ~KVASER_PCIEFD_KCAN_MODE_RM; + iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + spin_unlock_irqrestore(&can->lock, irq); + + if (!wait_for_completion_timeout(&can->start_comp, + KVASER_PCIEFD_WAIT_TIMEOUT)) { + netdev_err(can->can.dev, "Timeout during bus on reset\n"); + return -ETIMEDOUT; + } + /* Reset interrupt handling */ + iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); + iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); + + kvaser_pciefd_set_tx_irq(can); + kvaser_pciefd_setup_controller(can); + can->can.state = CAN_STATE_ERROR_ACTIVE; + netif_wake_queue(can->can.dev); + can->bec.txerr = 0; + can->bec.rxerr = 0; + can->err_rep_cnt = 0; + + return 0; +} + +static void kvaser_pciefd_pwm_stop(struct kvaser_pciefd_can *can) +{ + u8 top; + u32 pwm_ctrl; + unsigned long irq; + + spin_lock_irqsave(&can->lock, irq); + pwm_ctrl = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); + top = FIELD_GET(KVASER_PCIEFD_KCAN_PWM_TOP_MASK, pwm_ctrl); + /* Set duty cycle to zero */ + pwm_ctrl |= FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK, top); + iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); + spin_unlock_irqrestore(&can->lock, irq); +} + +static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can) +{ + int top, trigger; + u32 pwm_ctrl; + unsigned long irq; + + kvaser_pciefd_pwm_stop(can); + spin_lock_irqsave(&can->lock, irq); + /* Set frequency to 500 KHz */ + top = can->kv_pcie->bus_freq / (2 * 500000) - 1; + + pwm_ctrl = FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK, top); + pwm_ctrl |= FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TOP_MASK, top); + iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); + + /* Set duty cycle to 95 */ + trigger = (100 * top - 95 * (top + 1) + 50) / 100; + pwm_ctrl = FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK, trigger); + pwm_ctrl |= FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TOP_MASK, top); + iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); + spin_unlock_irqrestore(&can->lock, irq); +} + +static int kvaser_pciefd_open(struct net_device *netdev) +{ + int ret; + struct kvaser_pciefd_can *can = netdev_priv(netdev); + + can->tx_idx = 0; + can->ack_idx = 0; + + ret = open_candev(netdev); + if (ret) + return ret; + + ret = kvaser_pciefd_bus_on(can); + if (ret) { + close_candev(netdev); + return ret; + } + + return 0; +} + +static int kvaser_pciefd_stop(struct net_device *netdev) +{ + struct kvaser_pciefd_can *can = netdev_priv(netdev); + int ret = 0; + + /* Don't interrupt ongoing flush */ + if (!completion_done(&can->flush_comp)) + kvaser_pciefd_start_controller_flush(can); + + if (!wait_for_completion_timeout(&can->flush_comp, + KVASER_PCIEFD_WAIT_TIMEOUT)) { + netdev_err(can->can.dev, "Timeout during stop\n"); + ret = -ETIMEDOUT; + } else { + iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); + timer_delete(&can->bec_poll_timer); + } + can->can.state = CAN_STATE_STOPPED; + netdev_reset_queue(netdev); + close_candev(netdev); + + return ret; +} + +static unsigned int kvaser_pciefd_tx_avail(const struct kvaser_pciefd_can *can) +{ + return can->tx_max_count - (READ_ONCE(can->tx_idx) - READ_ONCE(can->ack_idx)); +} + +static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p, + struct can_priv *can, u8 seq, + struct sk_buff *skb) +{ + struct canfd_frame *cf = (struct canfd_frame *)skb->data; + int packet_size; + + memset(p, 0, sizeof(*p)); + if (can->ctrlmode & 
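kvaser_pciefd_pwm_start() derives both PWM parameters from the bus clock: top sets a 500 kHz period and trigger approximates the 95 % duty cycle named in the driver comment. A worked sketch of that integer arithmetic, assuming an illustrative 125 MHz bus_freq (the real value is read from the SYSID block):

#include <stdio.h>

int main(void)
{
	unsigned int bus_freq = 125000000;	/* assumed PCIe bus clock, Hz */

	/* top chosen so that 2 * (top + 1) bus clocks give a 500 kHz period */
	int top = bus_freq / (2 * 500000) - 1;			/* 124 */

	/* rounded trigger point for the 95 % duty cycle target */
	int trigger = (100 * top - 95 * (top + 1) + 50) / 100;	/* 5 */

	printf("top=%d trigger=%d (trigger is %d/%d of the period)\n",
	       top, trigger, trigger, top + 1);
	return 0;
}

With these numbers the trigger ends up at 5 of 125 counts, the rounded complement of the requested 95 %.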
CAN_CTRLMODE_ONE_SHOT) + p->header[1] |= KVASER_PCIEFD_TPACKET_SMS; + + if (cf->can_id & CAN_RTR_FLAG) + p->header[0] |= KVASER_PCIEFD_RPACKET_RTR; + + if (cf->can_id & CAN_EFF_FLAG) + p->header[0] |= KVASER_PCIEFD_RPACKET_IDE; + + p->header[0] |= FIELD_PREP(KVASER_PCIEFD_RPACKET_ID_MASK, cf->can_id); + p->header[1] |= KVASER_PCIEFD_TPACKET_AREQ; + + if (can_is_canfd_skb(skb)) { + p->header[1] |= FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK, + can_fd_len2dlc(cf->len)); + p->header[1] |= KVASER_PCIEFD_RPACKET_FDF; + if (cf->flags & CANFD_BRS) + p->header[1] |= KVASER_PCIEFD_RPACKET_BRS; + if (cf->flags & CANFD_ESI) + p->header[1] |= KVASER_PCIEFD_RPACKET_ESI; + } else { + p->header[1] |= + FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK, + can_get_cc_dlc((struct can_frame *)cf, can->ctrlmode)); + } + + p->header[1] |= FIELD_PREP(KVASER_PCIEFD_PACKET_SEQ_MASK, seq); + + packet_size = cf->len; + memcpy(p->data, cf->data, packet_size); + + return DIV_ROUND_UP(packet_size, 4); +} + +static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct kvaser_pciefd_can *can = netdev_priv(netdev); + struct kvaser_pciefd_tx_packet packet; + unsigned int seq = can->tx_idx & (can->can.echo_skb_max - 1); + unsigned int frame_len; + int nr_words; + + if (can_dev_dropped_skb(netdev, skb)) + return NETDEV_TX_OK; + if (!netif_subqueue_maybe_stop(netdev, 0, kvaser_pciefd_tx_avail(can), 1, 1)) + return NETDEV_TX_BUSY; + + nr_words = kvaser_pciefd_prepare_tx_packet(&packet, &can->can, seq, skb); + + /* Prepare and save echo skb in internal slot */ + WRITE_ONCE(can->can.echo_skb[seq], NULL); + frame_len = can_skb_get_frame_len(skb); + can_put_echo_skb(skb, netdev, seq, frame_len); + netdev_sent_queue(netdev, frame_len); + WRITE_ONCE(can->tx_idx, can->tx_idx + 1); + + /* Write header to fifo */ + iowrite32(packet.header[0], + can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG); + iowrite32(packet.header[1], + can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG); + + if (nr_words) { + u32 data_last = ((u32 *)packet.data)[nr_words - 1]; + + /* Write data to fifo, except last word */ + iowrite32_rep(can->reg_base + + KVASER_PCIEFD_KCAN_FIFO_REG, packet.data, + nr_words - 1); + /* Write last word to end of fifo */ + __raw_writel(data_last, can->reg_base + + KVASER_PCIEFD_KCAN_FIFO_LAST_REG); + } else { + /* Complete write to fifo */ + __raw_writel(0, can->reg_base + + KVASER_PCIEFD_KCAN_FIFO_LAST_REG); + } + + netif_subqueue_maybe_stop(netdev, 0, kvaser_pciefd_tx_avail(can), 1, 1); + + return NETDEV_TX_OK; +} + +static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data) +{ + u32 mode, test, btrn; + unsigned long irq_flags; + int ret; + struct can_bittiming *bt; + + if (data) + bt = &can->can.fd.data_bittiming; + else + bt = &can->can.bittiming; + + btrn = FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_TSEG2_MASK, bt->phase_seg2 - 1) | + FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_TSEG1_MASK, bt->prop_seg + bt->phase_seg1 - 1) | + FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_SJW_MASK, bt->sjw - 1) | + FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_BRP_MASK, bt->brp - 1); + + spin_lock_irqsave(&can->lock, irq_flags); + mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + /* Put the circuit in reset mode */ + iowrite32(mode | KVASER_PCIEFD_KCAN_MODE_RM, + can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + + /* Can only set bittiming if in reset mode */ + ret = readl_poll_timeout(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG, + test, test & KVASER_PCIEFD_KCAN_MODE_RM, 0, 10); + if (ret) { + 
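kvaser_pciefd_tx_avail() and kvaser_pciefd_start_xmit() use two monotonically increasing counters: tx_idx advances when a frame is queued, ack_idx when the hardware acknowledges it, their difference is the number of in-flight frames, and the echo slot is tx_idx masked with the power-of-two echo_skb_max. A minimal standalone sketch of that bookkeeping, using a single illustrative power-of-two queue size instead of the driver's separate tx_max_count/echo_skb_max pair:

#include <assert.h>
#include <stdio.h>

#define TX_MAX		16U	/* must be a power of two for the mask trick */

static unsigned int tx_idx;	/* incremented when a frame is queued */
static unsigned int ack_idx;	/* incremented when the hardware acks it */

static unsigned int tx_avail(void)
{
	/* wrap-safe with unsigned arithmetic while the difference fits */
	return TX_MAX - (tx_idx - ack_idx);
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < 20; i++) {
		if (!tx_avail()) {
			/* queue full: a real driver would stop the netdev
			 * queue here; this sketch just completes one frame
			 */
			ack_idx++;
		}
		printf("frame %2u -> echo slot %2u, %2u slots left\n",
		       i, tx_idx & (TX_MAX - 1), tx_avail() - 1);
		tx_idx++;
	}
	assert(tx_idx - ack_idx <= TX_MAX);
	return 0;
}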
spin_unlock_irqrestore(&can->lock, irq_flags); + return -EBUSY; + } + + if (data) + iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRD_REG); + else + iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRN_REG); + /* Restore previous reset mode status */ + iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + spin_unlock_irqrestore(&can->lock, irq_flags); + + return 0; +} + +static int kvaser_pciefd_set_nominal_bittiming(struct net_device *ndev) +{ + return kvaser_pciefd_set_bittiming(netdev_priv(ndev), false); +} + +static int kvaser_pciefd_set_data_bittiming(struct net_device *ndev) +{ + return kvaser_pciefd_set_bittiming(netdev_priv(ndev), true); +} + +static int kvaser_pciefd_set_mode(struct net_device *ndev, enum can_mode mode) +{ + struct kvaser_pciefd_can *can = netdev_priv(ndev); + int ret = 0; + + switch (mode) { + case CAN_MODE_START: + if (!can->can.restart_ms) + ret = kvaser_pciefd_bus_on(can); + break; + default: + return -EOPNOTSUPP; + } + + return ret; +} + +static int kvaser_pciefd_get_berr_counter(const struct net_device *ndev, + struct can_berr_counter *bec) +{ + struct kvaser_pciefd_can *can = netdev_priv(ndev); + + bec->rxerr = can->bec.rxerr; + bec->txerr = can->bec.txerr; + + return 0; +} + +static void kvaser_pciefd_bec_poll_timer(struct timer_list *data) +{ + struct kvaser_pciefd_can *can = timer_container_of(can, data, + bec_poll_timer); + + kvaser_pciefd_enable_err_gen(can); + kvaser_pciefd_request_status(can); + can->err_rep_cnt = 0; +} + +static const struct net_device_ops kvaser_pciefd_netdev_ops = { + .ndo_open = kvaser_pciefd_open, + .ndo_stop = kvaser_pciefd_stop, + .ndo_start_xmit = kvaser_pciefd_start_xmit, + .ndo_hwtstamp_get = can_hwtstamp_get, + .ndo_hwtstamp_set = can_hwtstamp_set, +}; + +static int kvaser_pciefd_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct kvaser_pciefd_can *can = netdev_priv(netdev); + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 3; /* 3 On/Off cycles per second */ + + case ETHTOOL_ID_ON: + kvaser_pciefd_set_led(can, true); + return 0; + + case ETHTOOL_ID_OFF: + case ETHTOOL_ID_INACTIVE: + kvaser_pciefd_set_led(can, false); + return 0; + + default: + return -EINVAL; + } +} + +static const struct ethtool_ops kvaser_pciefd_ethtool_ops = { + .get_ts_info = can_ethtool_op_get_ts_info_hwts, + .set_phys_id = kvaser_pciefd_set_phys_id, +}; + +static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie) +{ + int i; + + for (i = 0; i < pcie->nr_channels; i++) { + struct net_device *netdev; + struct kvaser_pciefd_can *can; + u32 status, tx_nr_packets_max; + int ret; + + netdev = alloc_candev(sizeof(struct kvaser_pciefd_can), + roundup_pow_of_two(KVASER_PCIEFD_CAN_TX_MAX_COUNT)); + if (!netdev) + return -ENOMEM; + + can = netdev_priv(netdev); + netdev->netdev_ops = &kvaser_pciefd_netdev_ops; + netdev->ethtool_ops = &kvaser_pciefd_ethtool_ops; + can->reg_base = KVASER_PCIEFD_KCAN_CHX_ADDR(pcie, i); + can->kv_pcie = pcie; + can->cmd_seq = 0; + can->err_rep_cnt = 0; + can->completed_tx_pkts = 0; + can->completed_tx_bytes = 0; + can->bec.txerr = 0; + can->bec.rxerr = 0; + can->can.dev->dev_port = i; + + init_completion(&can->start_comp); + init_completion(&can->flush_comp); + timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer, 0); + + /* Disable Bus load reporting */ + iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_BUS_LOAD_REG); + + can->ioc = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IOC_REG); + kvaser_pciefd_set_led(can, false); + + tx_nr_packets_max = + 
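kvaser_pciefd_set_bittiming() packs phase_seg2, prop_seg + phase_seg1, sjw and brp (each minus one) into the BTRN/BTRD register using the GENMASK field layout defined earlier. A standalone sketch of the same packing with plain shifts (the bit-timing values are made up for illustration):

#include <stdio.h>

/* Field positions of the KCAN BTRN/BTRD register, from the defines above */
#define BTRN_TSEG2_SHIFT	26	/* bits 30..26 */
#define BTRN_TSEG1_SHIFT	17	/* bits 25..17 */
#define BTRN_SJW_SHIFT		13	/* bits 16..13 */
					/* BRP occupies bits 12..0 */

int main(void)
{
	/* illustrative bit-timing values, not taken from real hardware */
	unsigned int prop_seg = 7, phase_seg1 = 8, phase_seg2 = 4;
	unsigned int sjw = 1, brp = 2;

	unsigned int btrn = ((phase_seg2 - 1) << BTRN_TSEG2_SHIFT) |
			    ((prop_seg + phase_seg1 - 1) << BTRN_TSEG1_SHIFT) |
			    ((sjw - 1) << BTRN_SJW_SHIFT) |
			    (brp - 1);

	printf("btrn = 0x%08x\n", btrn);	/* 0x0c1c0001 for these values */
	return 0;
}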
FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK, + ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG)); + can->tx_max_count = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1); + + can->can.clock.freq = pcie->freq; + spin_lock_init(&can->lock); + + can->can.bittiming_const = &kvaser_pciefd_bittiming_const; + can->can.fd.data_bittiming_const = &kvaser_pciefd_bittiming_const; + can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming; + can->can.fd.do_set_data_bittiming = kvaser_pciefd_set_data_bittiming; + can->can.do_set_mode = kvaser_pciefd_set_mode; + can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter; + can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY | + CAN_CTRLMODE_FD | + CAN_CTRLMODE_FD_NON_ISO | + CAN_CTRLMODE_CC_LEN8_DLC | + CAN_CTRLMODE_BERR_REPORTING; + + status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG); + if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) { + dev_err(&pcie->pci->dev, + "CAN FD not supported as expected %d\n", i); + + free_candev(netdev); + return -ENODEV; + } + + if (status & KVASER_PCIEFD_KCAN_STAT_CAP) + can->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT; + + netdev->flags |= IFF_ECHO; + SET_NETDEV_DEV(netdev, &pcie->pci->dev); + + iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); + iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD, + can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); + + pcie->can[i] = can; + kvaser_pciefd_pwm_start(can); + ret = kvaser_pciefd_devlink_port_register(can); + if (ret) { + dev_err(&pcie->pci->dev, "Failed to register devlink port\n"); + return ret; + } + } + + return 0; +} + +static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie) +{ + int i; + + for (i = 0; i < pcie->nr_channels; i++) { + int ret = register_candev(pcie->can[i]->can.dev); + + if (ret) { + int j; + + /* Unregister all successfully registered devices. 
*/ + for (j = 0; j < i; j++) + unregister_candev(pcie->can[j]->can.dev); + return ret; + } + } + + return 0; +} + +static void kvaser_pciefd_write_dma_map_altera(struct kvaser_pciefd *pcie, + dma_addr_t addr, int index) +{ + void __iomem *serdes_base; + u32 word1, word2; + + if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) { + word1 = lower_32_bits(addr) | KVASER_PCIEFD_ALTERA_DMA_64BIT; + word2 = upper_32_bits(addr); + } else { + word1 = addr; + word2 = 0; + } + serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x8 * index; + iowrite32(word1, serdes_base); + iowrite32(word2, serdes_base + 0x4); +} + +static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie, + dma_addr_t addr, int index) +{ + void __iomem *serdes_base; + u32 lsb = addr & KVASER_PCIEFD_SF2_DMA_LSB_MASK; + u32 msb = 0x0; + + if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) + msb = upper_32_bits(addr); + + serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x10 * index; + iowrite32(lsb, serdes_base); + iowrite32(msb, serdes_base + 0x4); +} + +static void kvaser_pciefd_write_dma_map_xilinx(struct kvaser_pciefd *pcie, + dma_addr_t addr, int index) +{ + void __iomem *serdes_base; + u32 lsb = addr & KVASER_PCIEFD_XILINX_DMA_LSB_MASK; + u32 msb = 0x0; + + if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) + msb = upper_32_bits(addr); + + serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x8 * index; + iowrite32(msb, serdes_base); + iowrite32(lsb, serdes_base + 0x4); +} + +static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie) +{ + int i; + u32 srb_status; + u32 srb_packet_count; + dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT]; + + /* Disable the DMA */ + iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG); + + dma_set_mask_and_coherent(&pcie->pci->dev, DMA_BIT_MASK(64)); + + for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) { + pcie->dma_data[i] = dmam_alloc_coherent(&pcie->pci->dev, + KVASER_PCIEFD_DMA_SIZE, + &dma_addr[i], + GFP_KERNEL); + + if (!pcie->dma_data[i] || !dma_addr[i]) { + dev_err(&pcie->pci->dev, "Rx dma_alloc(%u) failure\n", + KVASER_PCIEFD_DMA_SIZE); + return -ENOMEM; + } + pcie->driver_data->ops->kvaser_pciefd_write_dma_map(pcie, dma_addr[i], i); + } + + /* Reset Rx FIFO, and both DMA buffers */ + iowrite32(KVASER_PCIEFD_SRB_CMD_FOR | KVASER_PCIEFD_SRB_CMD_RDB0 | + KVASER_PCIEFD_SRB_CMD_RDB1, + KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); + /* Empty Rx FIFO */ + srb_packet_count = + FIELD_GET(KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK, + ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + + KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG)); + while (srb_packet_count) { + /* Drop current packet in FIFO */ + ioread32(KVASER_PCIEFD_SRB_FIFO_ADDR(pcie) + KVASER_PCIEFD_SRB_FIFO_LAST_REG); + srb_packet_count--; + } + + srb_status = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_STAT_REG); + if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) { + dev_err(&pcie->pci->dev, "DMA not idle before enabling\n"); + return -EIO; + } + + /* Enable the DMA */ + iowrite32(KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE, + KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG); + + return 0; +} + +static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie) +{ + u32 version, srb_status, build; + + version = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_VERSION_REG); + build = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_BUILD_REG); + pcie->nr_channels = min(KVASER_PCIEFD_MAX_CAN_CHANNELS, + FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_NR_CHAN_MASK, version)); + pcie->fw_version.major = 
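The three kvaser_pciefd_write_dma_map_*() variants all split each DMA buffer's bus address into two 32-bit SerDes words: Altera writes the LSB word first with BIT(0) as the 64-bit-enable flag, while SmartFusion2 and Xilinx mask the LSB with GENMASK(31, 12) and Xilinx writes the MSB word first. A tiny sketch of the lower/upper split for the Altera layout, using an arbitrary example address:

#include <stdio.h>

int main(void)
{
	unsigned long long dma_addr = 0x123456000ULL;	/* example bus address */

	/* Altera layout: LSB word first, with bit 0 as the 64-bit enable flag
	 * (the 4 KiB coherent buffers are aligned, so the low bits are free)
	 */
	unsigned int word1 = (unsigned int)dma_addr | 1U;
	unsigned int word2 = (unsigned int)(dma_addr >> 32);

	printf("word1=0x%08x word2=0x%08x\n", word1, word2);
	return 0;
}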
FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MAJOR_MASK, version); + pcie->fw_version.minor = FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MINOR_MASK, version); + pcie->fw_version.build = FIELD_GET(KVASER_PCIEFD_SYSID_BUILD_SEQ_MASK, build); + + srb_status = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_STAT_REG); + if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) { + dev_err(&pcie->pci->dev, "Hardware without DMA is not supported\n"); + return -ENODEV; + } + + pcie->bus_freq = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_BUSFREQ_REG); + pcie->freq = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_CANFREQ_REG); + pcie->freq_to_ticks_div = pcie->freq / 1000000; + if (pcie->freq_to_ticks_div == 0) + pcie->freq_to_ticks_div = 1; + /* Turn off all loopback functionality */ + iowrite32(0, KVASER_PCIEFD_LOOPBACK_ADDR(pcie)); + + return 0; +} + +static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie, + struct kvaser_pciefd_rx_packet *p, + __le32 *data) +{ + struct sk_buff *skb; + struct canfd_frame *cf; + struct can_priv *priv; + u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]); + u8 dlc; + + if (ch_id >= pcie->nr_channels) + return -EIO; + + priv = &pcie->can[ch_id]->can; + dlc = FIELD_GET(KVASER_PCIEFD_RPACKET_DLC_MASK, p->header[1]); + + if (p->header[1] & KVASER_PCIEFD_RPACKET_FDF) { + skb = alloc_canfd_skb(priv->dev, &cf); + if (!skb) { + priv->dev->stats.rx_dropped++; + return 0; + } + + cf->len = can_fd_dlc2len(dlc); + if (p->header[1] & KVASER_PCIEFD_RPACKET_BRS) + cf->flags |= CANFD_BRS; + if (p->header[1] & KVASER_PCIEFD_RPACKET_ESI) + cf->flags |= CANFD_ESI; + } else { + skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf); + if (!skb) { + priv->dev->stats.rx_dropped++; + return 0; + } + can_frame_set_cc_len((struct can_frame *)cf, dlc, priv->ctrlmode); + } + + cf->can_id = FIELD_GET(KVASER_PCIEFD_RPACKET_ID_MASK, p->header[0]); + if (p->header[0] & KVASER_PCIEFD_RPACKET_IDE) + cf->can_id |= CAN_EFF_FLAG; + + if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR) { + cf->can_id |= CAN_RTR_FLAG; + } else { + memcpy(cf->data, data, cf->len); + priv->dev->stats.rx_bytes += cf->len; + } + priv->dev->stats.rx_packets++; + kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp); + + netif_rx(skb); + + return 0; +} + +static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can, + const struct can_berr_counter *bec, + struct can_frame *cf, + enum can_state new_state, + enum can_state tx_state, + enum can_state rx_state) +{ + enum can_state old_state; + + old_state = can->can.state; + can_change_state(can->can.dev, cf, tx_state, rx_state); + + if (new_state == CAN_STATE_BUS_OFF) { + struct net_device *ndev = can->can.dev; + unsigned long irq_flags; + + spin_lock_irqsave(&can->lock, irq_flags); + netif_stop_queue(can->can.dev); + spin_unlock_irqrestore(&can->lock, irq_flags); + /* Prevent CAN controller from auto recover from bus off */ + if (!can->can.restart_ms) { + kvaser_pciefd_start_controller_flush(can); + can_bus_off(ndev); + } + } + if (old_state == CAN_STATE_BUS_OFF && + new_state == CAN_STATE_ERROR_ACTIVE && + can->can.restart_ms) { + can->can.can_stats.restarts++; + if (cf) + cf->can_id |= CAN_ERR_RESTARTED; + } + if (cf && new_state != CAN_STATE_BUS_OFF) { + cf->can_id |= CAN_ERR_CNT; + cf->data[6] = bec->txerr; + cf->data[7] = bec->rxerr; + } +} + +static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p, + struct can_berr_counter *bec, + enum can_state *new_state, + enum can_state *tx_state, + 
enum can_state *rx_state) +{ + if (p->header[0] & KVASER_PCIEFD_SPACK_BOFF || + p->header[0] & KVASER_PCIEFD_SPACK_IRM) + *new_state = CAN_STATE_BUS_OFF; + else if (bec->txerr >= 255 || bec->rxerr >= 255) + *new_state = CAN_STATE_BUS_OFF; + else if (p->header[1] & KVASER_PCIEFD_SPACK_EPLR) + *new_state = CAN_STATE_ERROR_PASSIVE; + else if (bec->txerr >= 128 || bec->rxerr >= 128) + *new_state = CAN_STATE_ERROR_PASSIVE; + else if (p->header[1] & KVASER_PCIEFD_SPACK_EWLR) + *new_state = CAN_STATE_ERROR_WARNING; + else if (bec->txerr >= 96 || bec->rxerr >= 96) + *new_state = CAN_STATE_ERROR_WARNING; + else + *new_state = CAN_STATE_ERROR_ACTIVE; + + *tx_state = bec->txerr >= bec->rxerr ? *new_state : 0; + *rx_state = bec->txerr <= bec->rxerr ? *new_state : 0; +} + +static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can, + struct kvaser_pciefd_rx_packet *p) +{ + struct can_berr_counter bec; + enum can_state old_state, new_state, tx_state, rx_state; + struct net_device *ndev = can->can.dev; + struct sk_buff *skb = NULL; + struct can_frame *cf = NULL; + + old_state = can->can.state; + + bec.txerr = FIELD_GET(KVASER_PCIEFD_SPACK_TXERR_MASK, p->header[0]); + bec.rxerr = FIELD_GET(KVASER_PCIEFD_SPACK_RXERR_MASK, p->header[0]); + + kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, &rx_state); + if (can->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) + skb = alloc_can_err_skb(ndev, &cf); + if (new_state != old_state) { + kvaser_pciefd_change_state(can, &bec, cf, new_state, tx_state, rx_state); + } + + can->err_rep_cnt++; + can->can.can_stats.bus_error++; + if (p->header[1] & KVASER_PCIEFD_EPACK_DIR_TX) + ndev->stats.tx_errors++; + else + ndev->stats.rx_errors++; + + can->bec.txerr = bec.txerr; + can->bec.rxerr = bec.rxerr; + + if (can->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) { + if (!skb) { + netdev_warn(ndev, "No memory left for err_skb\n"); + ndev->stats.rx_dropped++; + return -ENOMEM; + } + kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp); + cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + netif_rx(skb); + } + + return 0; +} + +static int kvaser_pciefd_handle_error_packet(struct kvaser_pciefd *pcie, + struct kvaser_pciefd_rx_packet *p) +{ + struct kvaser_pciefd_can *can; + u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]); + + if (ch_id >= pcie->nr_channels) + return -EIO; + + can = pcie->can[ch_id]; + kvaser_pciefd_rx_error_frame(can, p); + if (can->err_rep_cnt >= KVASER_PCIEFD_MAX_ERR_REP) + /* Do not report more errors, until bec_poll_timer expires */ + kvaser_pciefd_disable_err_gen(can); + /* Start polling the error counters */ + mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ); + + return 0; +} + +static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can, + struct kvaser_pciefd_rx_packet *p) +{ + struct can_berr_counter bec; + enum can_state old_state, new_state, tx_state, rx_state; + int ret = 0; + + old_state = can->can.state; + + bec.txerr = FIELD_GET(KVASER_PCIEFD_SPACK_TXERR_MASK, p->header[0]); + bec.rxerr = FIELD_GET(KVASER_PCIEFD_SPACK_RXERR_MASK, p->header[0]); + + kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, &rx_state); + if (new_state != old_state) { + struct net_device *ndev = can->can.dev; + struct sk_buff *skb; + struct can_frame *cf; + + skb = alloc_can_err_skb(ndev, &cf); + kvaser_pciefd_change_state(can, &bec, cf, new_state, tx_state, rx_state); + if (skb) { + kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, 
p->timestamp); + netif_rx(skb); + } else { + ndev->stats.rx_dropped++; + netdev_warn(ndev, "No memory left for err_skb\n"); + ret = -ENOMEM; + } + } + can->bec.txerr = bec.txerr; + can->bec.rxerr = bec.rxerr; + /* Check if we need to poll the error counters */ + if (bec.txerr || bec.rxerr) + mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ); + + return ret; +} + +static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie, + struct kvaser_pciefd_rx_packet *p) +{ + struct kvaser_pciefd_can *can; + u8 cmdseq; + u32 status; + u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]); + + if (ch_id >= pcie->nr_channels) + return -EIO; + + can = pcie->can[ch_id]; + + status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG); + cmdseq = FIELD_GET(KVASER_PCIEFD_KCAN_STAT_SEQNO_MASK, status); + + /* Reset done, start abort and flush */ + if (p->header[0] & KVASER_PCIEFD_SPACK_IRM && + p->header[0] & KVASER_PCIEFD_SPACK_RMCD && + p->header[1] & KVASER_PCIEFD_SPACK_AUTO && + cmdseq == FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[1]) && + status & KVASER_PCIEFD_KCAN_STAT_IDLE) { + iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD, + can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); + kvaser_pciefd_abort_flush_reset(can); + } else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET && + p->header[0] & KVASER_PCIEFD_SPACK_IRM && + cmdseq == FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[1]) && + status & KVASER_PCIEFD_KCAN_STAT_IDLE) { + /* Reset detected, send end of flush if no packet are in FIFO */ + u8 count; + + count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK, + ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG)); + if (!count) + iowrite32(FIELD_PREP(KVASER_PCIEFD_KCAN_CTRL_TYPE_MASK, + KVASER_PCIEFD_KCAN_CTRL_TYPE_EFLUSH), + can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG); + } else if (!(p->header[1] & KVASER_PCIEFD_SPACK_AUTO) && + cmdseq == FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[1])) { + /* Response to status request received */ + kvaser_pciefd_handle_status_resp(can, p); + if (can->can.state != CAN_STATE_BUS_OFF && + can->can.state != CAN_STATE_ERROR_ACTIVE) { + mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ); + } + } else if (p->header[0] & KVASER_PCIEFD_SPACK_RMCD && + !(status & KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MASK)) { + /* Reset to bus on detected */ + if (!completion_done(&can->start_comp)) + complete(&can->start_comp); + } + + return 0; +} + +static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can, + struct kvaser_pciefd_rx_packet *p) +{ + struct sk_buff *skb; + struct can_frame *cf; + + skb = alloc_can_err_skb(can->can.dev, &cf); + can->can.dev->stats.tx_errors++; + if (p->header[0] & KVASER_PCIEFD_APACKET_ABL) { + if (skb) + cf->can_id |= CAN_ERR_LOSTARB; + can->can.can_stats.arbitration_lost++; + } else if (skb) { + cf->can_id |= CAN_ERR_ACK; + } + + if (skb) { + cf->can_id |= CAN_ERR_BUSERROR; + kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp); + netif_rx(skb); + } else { + can->can.dev->stats.rx_dropped++; + netdev_warn(can->can.dev, "No memory left for err_skb\n"); + } +} + +static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie, + struct kvaser_pciefd_rx_packet *p) +{ + struct kvaser_pciefd_can *can; + bool one_shot_fail = false; + u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]); + + if (ch_id >= pcie->nr_channels) + return -EIO; + + can = pcie->can[ch_id]; + /* Ignore control packet ACK */ + if (p->header[0] & KVASER_PCIEFD_APACKET_CT) + 
return 0; + + if (p->header[0] & KVASER_PCIEFD_APACKET_NACK) { + kvaser_pciefd_handle_nack_packet(can, p); + one_shot_fail = true; + } + + if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) { + netdev_dbg(can->can.dev, "Packet was flushed\n"); + } else { + int echo_idx = FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[0]); + unsigned int len, frame_len = 0; + struct sk_buff *skb; + + if (echo_idx != (can->ack_idx & (can->can.echo_skb_max - 1))) + return 0; + skb = can->can.echo_skb[echo_idx]; + if (!skb) + return 0; + kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp); + len = can_get_echo_skb(can->can.dev, echo_idx, &frame_len); + + /* Pairs with barrier in kvaser_pciefd_start_xmit() */ + smp_store_release(&can->ack_idx, can->ack_idx + 1); + can->completed_tx_pkts++; + can->completed_tx_bytes += frame_len; + + if (!one_shot_fail) { + can->can.dev->stats.tx_bytes += len; + can->can.dev->stats.tx_packets++; + } + } + + return 0; +} + +static int kvaser_pciefd_handle_eflush_packet(struct kvaser_pciefd *pcie, + struct kvaser_pciefd_rx_packet *p) +{ + struct kvaser_pciefd_can *can; + u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]); + + if (ch_id >= pcie->nr_channels) + return -EIO; + + can = pcie->can[ch_id]; + + if (!completion_done(&can->flush_comp)) + complete(&can->flush_comp); + + return 0; +} + +static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos, + int dma_buf) +{ + __le32 *buffer = pcie->dma_data[dma_buf]; + __le64 timestamp; + struct kvaser_pciefd_rx_packet packet; + struct kvaser_pciefd_rx_packet *p = &packet; + u8 type; + int pos = *start_pos; + int size; + int ret = 0; + + size = le32_to_cpu(buffer[pos++]); + if (!size) { + *start_pos = 0; + return 0; + } + + p->header[0] = le32_to_cpu(buffer[pos++]); + p->header[1] = le32_to_cpu(buffer[pos++]); + + /* Read 64-bit timestamp */ + memcpy(&timestamp, &buffer[pos], sizeof(__le64)); + pos += 2; + p->timestamp = le64_to_cpu(timestamp); + + type = FIELD_GET(KVASER_PCIEFD_PACKET_TYPE_MASK, p->header[1]); + switch (type) { + case KVASER_PCIEFD_PACK_TYPE_DATA: + ret = kvaser_pciefd_handle_data_packet(pcie, p, &buffer[pos]); + if (!(p->header[0] & KVASER_PCIEFD_RPACKET_RTR)) { + u8 data_len; + + data_len = can_fd_dlc2len(FIELD_GET(KVASER_PCIEFD_RPACKET_DLC_MASK, + p->header[1])); + pos += DIV_ROUND_UP(data_len, 4); + } + break; + + case KVASER_PCIEFD_PACK_TYPE_ACK: + ret = kvaser_pciefd_handle_ack_packet(pcie, p); + break; + + case KVASER_PCIEFD_PACK_TYPE_STATUS: + ret = kvaser_pciefd_handle_status_packet(pcie, p); + break; + + case KVASER_PCIEFD_PACK_TYPE_ERROR: + ret = kvaser_pciefd_handle_error_packet(pcie, p); + break; + + case KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK: + ret = kvaser_pciefd_handle_eflush_packet(pcie, p); + break; + + case KVASER_PCIEFD_PACK_TYPE_ACK_DATA: + case KVASER_PCIEFD_PACK_TYPE_BUS_LOAD: + case KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK: + case KVASER_PCIEFD_PACK_TYPE_TXRQ: + dev_info(&pcie->pci->dev, + "Received unexpected packet type 0x%08X\n", type); + break; + + default: + dev_err(&pcie->pci->dev, "Unknown packet type 0x%08X\n", type); + ret = -EIO; + break; + } + + if (ret) + return ret; + + /* Position does not point to the end of the package, + * corrupted packet size?
+ */ + if (unlikely((*start_pos + size) != pos)) + return -EIO; + + /* Point to the next packet header, if any */ + *start_pos = pos; + + return ret; +} + +static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf) +{ + int pos = 0; + int res = 0; + unsigned int i; + + do { + res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf); + } while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE); + + /* Report ACKs in this buffer to BQL en masse for correct periods */ + for (i = 0; i < pcie->nr_channels; ++i) { + struct kvaser_pciefd_can *can = pcie->can[i]; + + if (!can->completed_tx_pkts) + continue; + netif_subqueue_completed_wake(can->can.dev, 0, + can->completed_tx_pkts, + can->completed_tx_bytes, + kvaser_pciefd_tx_avail(can), 1); + can->completed_tx_pkts = 0; + can->completed_tx_bytes = 0; + } + + return res; +} + +static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie) +{ + void __iomem *srb_cmd_reg = KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG; + u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); + + iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); + + if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) { + kvaser_pciefd_read_buffer(pcie, 0); + iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, srb_cmd_reg); /* Rearm buffer */ + } + + if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) { + kvaser_pciefd_read_buffer(pcie, 1); + iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, srb_cmd_reg); /* Rearm buffer */ + } + + if (unlikely(irq & KVASER_PCIEFD_SRB_IRQ_DOF0 || + irq & KVASER_PCIEFD_SRB_IRQ_DOF1 || + irq & KVASER_PCIEFD_SRB_IRQ_DUF0 || + irq & KVASER_PCIEFD_SRB_IRQ_DUF1)) + dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq); +} + +static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can) +{ + u32 irq = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); + + if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF) + netdev_err(can->can.dev, "Tx FIFO overflow\n"); + + if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP) + netdev_err(can->can.dev, + "Fail to change bittiming, when not in reset mode\n"); + + if (irq & KVASER_PCIEFD_KCAN_IRQ_FDIC) + netdev_err(can->can.dev, "CAN FD frame in CAN mode\n"); + + if (irq & KVASER_PCIEFD_KCAN_IRQ_ROF) + netdev_err(can->can.dev, "Rx FIFO overflow\n"); + + iowrite32(irq, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); +} + +static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev) +{ + struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev; + const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask; + u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie)); + int i; + + if (!(pci_irq & irq_mask->all)) + return IRQ_NONE; + + iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie)); + + if (pci_irq & irq_mask->kcan_rx0) + kvaser_pciefd_receive_irq(pcie); + + for (i = 0; i < pcie->nr_channels; i++) { + if (pci_irq & irq_mask->kcan_tx[i]) + kvaser_pciefd_transmit_irq(pcie->can[i]); + } + + iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie)); + + return IRQ_HANDLED; +} + +static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie) +{ + int i; + + for (i = 0; i < pcie->nr_channels; i++) { + struct kvaser_pciefd_can *can = pcie->can[i]; + + if (can) { + iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); + kvaser_pciefd_pwm_stop(can); + kvaser_pciefd_devlink_port_unregister(can); + free_candev(can->can.dev); + } + } +} + +static void kvaser_pciefd_disable_irq_srcs(struct kvaser_pciefd *pcie) +{ + unsigned int i; + + /* Masking PCI_IRQ is insufficient as running ISR will unmask it */ + 
iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG); + for (i = 0; i < pcie->nr_channels; ++i) + iowrite32(0, pcie->can[i]->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); +} + +static int kvaser_pciefd_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + int ret; + struct devlink *devlink; + struct device *dev = &pdev->dev; + struct kvaser_pciefd *pcie; + const struct kvaser_pciefd_irq_mask *irq_mask; + + devlink = devlink_alloc(&kvaser_pciefd_devlink_ops, sizeof(*pcie), dev); + if (!devlink) + return -ENOMEM; + + pcie = devlink_priv(devlink); + pci_set_drvdata(pdev, pcie); + pcie->pci = pdev; + pcie->driver_data = (const struct kvaser_pciefd_driver_data *)id->driver_data; + irq_mask = pcie->driver_data->irq_mask; + + ret = pci_enable_device(pdev); + if (ret) + goto err_free_devlink; + + ret = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME); + if (ret) + goto err_disable_pci; + + pcie->reg_base = pci_iomap(pdev, 0, 0); + if (!pcie->reg_base) { + ret = -ENOMEM; + goto err_release_regions; + } + + ret = kvaser_pciefd_setup_board(pcie); + if (ret) + goto err_pci_iounmap; + + ret = kvaser_pciefd_setup_dma(pcie); + if (ret) + goto err_pci_iounmap; + + pci_set_master(pdev); + + ret = kvaser_pciefd_setup_can_ctrls(pcie); + if (ret) + goto err_teardown_can_ctrls; + + ret = pci_alloc_irq_vectors(pcie->pci, 1, 1, PCI_IRQ_INTX | PCI_IRQ_MSI); + if (ret < 0) { + dev_err(dev, "Failed to allocate IRQ vectors.\n"); + goto err_teardown_can_ctrls; + } + + ret = pci_irq_vector(pcie->pci, 0); + if (ret < 0) + goto err_pci_free_irq_vectors; + + pcie->pci->irq = ret; + + ret = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler, + IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie); + if (ret) { + dev_err(dev, "Failed to request IRQ %d\n", pcie->pci->irq); + goto err_pci_free_irq_vectors; + } + iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1, + KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); + + iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1 | + KVASER_PCIEFD_SRB_IRQ_DOF0 | KVASER_PCIEFD_SRB_IRQ_DOF1 | + KVASER_PCIEFD_SRB_IRQ_DUF0 | KVASER_PCIEFD_SRB_IRQ_DUF1, + KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG); + + /* Enable PCI interrupts */ + iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie)); + /* Ready the DMA buffers */ + iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, + KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); + iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, + KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); + + ret = kvaser_pciefd_reg_candev(pcie); + if (ret) + goto err_free_irq; + + devlink_register(devlink); + + return 0; + +err_free_irq: + kvaser_pciefd_disable_irq_srcs(pcie); + free_irq(pcie->pci->irq, pcie); + +err_pci_free_irq_vectors: + pci_free_irq_vectors(pcie->pci); + +err_teardown_can_ctrls: + kvaser_pciefd_teardown_can_ctrls(pcie); + iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG); + pci_clear_master(pdev); + +err_pci_iounmap: + pci_iounmap(pdev, pcie->reg_base); + +err_release_regions: + pci_release_regions(pdev); + +err_disable_pci: + pci_disable_device(pdev); + +err_free_devlink: + devlink_free(devlink); + + return ret; +} + +static void kvaser_pciefd_remove(struct pci_dev *pdev) +{ + struct kvaser_pciefd *pcie = pci_get_drvdata(pdev); + unsigned int i; + + for (i = 0; i < pcie->nr_channels; ++i) { + struct kvaser_pciefd_can *can = pcie->can[i]; + + unregister_candev(can->can.dev); + timer_delete(&can->bec_poll_timer); + kvaser_pciefd_pwm_stop(can); + 
kvaser_pciefd_devlink_port_unregister(can); + } + + kvaser_pciefd_disable_irq_srcs(pcie); + free_irq(pcie->pci->irq, pcie); + pci_free_irq_vectors(pcie->pci); + + for (i = 0; i < pcie->nr_channels; ++i) + free_candev(pcie->can[i]->can.dev); + + devlink_unregister(priv_to_devlink(pcie)); + pci_iounmap(pdev, pcie->reg_base); + pci_release_regions(pdev); + pci_disable_device(pdev); + devlink_free(priv_to_devlink(pcie)); +} + +static struct pci_driver kvaser_pciefd = { + .name = KVASER_PCIEFD_DRV_NAME, + .id_table = kvaser_pciefd_id_table, + .probe = kvaser_pciefd_probe, + .remove = kvaser_pciefd_remove, +}; + +module_pci_driver(kvaser_pciefd) diff --git a/drivers/net/can/kvaser_pciefd/kvaser_pciefd_devlink.c b/drivers/net/can/kvaser_pciefd/kvaser_pciefd_devlink.c new file mode 100644 index 000000000000..1d61a8b0eeba --- /dev/null +++ b/drivers/net/can/kvaser_pciefd/kvaser_pciefd_devlink.c @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause +/* kvaser_pciefd devlink functions + * + * Copyright (C) 2025 KVASER AB, Sweden. All rights reserved. + */ +#include "kvaser_pciefd.h" + +#include <linux/netdevice.h> +#include <net/devlink.h> + +static int kvaser_pciefd_devlink_info_get(struct devlink *devlink, + struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct kvaser_pciefd *pcie = devlink_priv(devlink); + char buf[] = "xxx.xxx.xxxxx"; + int ret; + + if (pcie->fw_version.major) { + snprintf(buf, sizeof(buf), "%u.%u.%u", + pcie->fw_version.major, + pcie->fw_version.minor, + pcie->fw_version.build); + ret = devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW, + buf); + if (ret) + return ret; + } + + return 0; +} + +const struct devlink_ops kvaser_pciefd_devlink_ops = { + .info_get = kvaser_pciefd_devlink_info_get, +}; + +int kvaser_pciefd_devlink_port_register(struct kvaser_pciefd_can *can) +{ + int ret; + struct devlink_port_attrs attrs = { + .flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL, + .phys.port_number = can->can.dev->dev_port, + }; + devlink_port_attrs_set(&can->devlink_port, &attrs); + + ret = devlink_port_register(priv_to_devlink(can->kv_pcie), + &can->devlink_port, can->can.dev->dev_port); + if (ret) + return ret; + + SET_NETDEV_DEVLINK_PORT(can->can.dev, &can->devlink_port); + + return 0; +} + +void kvaser_pciefd_devlink_port_unregister(struct kvaser_pciefd_can *can) +{ + devlink_port_unregister(&can->devlink_port); +} diff --git a/drivers/net/can/led.c b/drivers/net/can/led.c deleted file mode 100644 index c1b667675fa1..000000000000 --- a/drivers/net/can/led.c +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Copyright 2012, Fabio Baltieri <fabio.baltieri@gmail.com> - * Copyright 2012, Kurt Van Dijck <kurt.van.dijck@eia.be> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include <linux/module.h> -#include <linux/device.h> -#include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/netdevice.h> -#include <linux/can/dev.h> - -#include <linux/can/led.h> - -static unsigned long led_delay = 50; -module_param(led_delay, ulong, 0644); -MODULE_PARM_DESC(led_delay, - "blink delay time for activity leds (msecs, default: 50)."); - -/* Trigger a LED event in response to a CAN device event */ -void can_led_event(struct net_device *netdev, enum can_led_event event) -{ - struct can_priv *priv = netdev_priv(netdev); - - switch (event) { - case CAN_LED_EVENT_OPEN: - led_trigger_event(priv->tx_led_trig, LED_FULL); - led_trigger_event(priv->rx_led_trig, LED_FULL); - led_trigger_event(priv->rxtx_led_trig, LED_FULL); - break; - case CAN_LED_EVENT_STOP: - led_trigger_event(priv->tx_led_trig, LED_OFF); - led_trigger_event(priv->rx_led_trig, LED_OFF); - led_trigger_event(priv->rxtx_led_trig, LED_OFF); - break; - case CAN_LED_EVENT_TX: - if (led_delay) { - led_trigger_blink_oneshot(priv->tx_led_trig, - &led_delay, &led_delay, 1); - led_trigger_blink_oneshot(priv->rxtx_led_trig, - &led_delay, &led_delay, 1); - } - break; - case CAN_LED_EVENT_RX: - if (led_delay) { - led_trigger_blink_oneshot(priv->rx_led_trig, - &led_delay, &led_delay, 1); - led_trigger_blink_oneshot(priv->rxtx_led_trig, - &led_delay, &led_delay, 1); - } - break; - } -} -EXPORT_SYMBOL_GPL(can_led_event); - -static void can_led_release(struct device *gendev, void *res) -{ - struct can_priv *priv = netdev_priv(to_net_dev(gendev)); - - led_trigger_unregister_simple(priv->tx_led_trig); - led_trigger_unregister_simple(priv->rx_led_trig); - led_trigger_unregister_simple(priv->rxtx_led_trig); -} - -/* Register CAN LED triggers for a CAN device - * - * This is normally called from a driver's probe function - */ -void devm_can_led_init(struct net_device *netdev) -{ - struct can_priv *priv = netdev_priv(netdev); - void *res; - - res = devres_alloc(can_led_release, 0, GFP_KERNEL); - if (!res) { - netdev_err(netdev, "cannot register LED triggers\n"); - return; - } - - snprintf(priv->tx_led_trig_name, sizeof(priv->tx_led_trig_name), - "%s-tx", netdev->name); - snprintf(priv->rx_led_trig_name, sizeof(priv->rx_led_trig_name), - "%s-rx", netdev->name); - snprintf(priv->rxtx_led_trig_name, sizeof(priv->rxtx_led_trig_name), - "%s-rxtx", netdev->name); - - led_trigger_register_simple(priv->tx_led_trig_name, - &priv->tx_led_trig); - led_trigger_register_simple(priv->rx_led_trig_name, - &priv->rx_led_trig); - led_trigger_register_simple(priv->rxtx_led_trig_name, - &priv->rxtx_led_trig); - - devres_add(&netdev->dev, res); -} -EXPORT_SYMBOL_GPL(devm_can_led_init); - -/* NETDEV rename notifier to rename the associated led triggers too */ -static int can_led_notifier(struct notifier_block *nb, unsigned long msg, - void *ptr) -{ - struct net_device *netdev = netdev_notifier_info_to_dev(ptr); - struct can_priv *priv = safe_candev_priv(netdev); - char name[CAN_LED_NAME_SZ]; - - if (!priv) - return NOTIFY_DONE; - - if (!priv->tx_led_trig || !priv->rx_led_trig || !priv->rxtx_led_trig) - return NOTIFY_DONE; - - if (msg == NETDEV_CHANGENAME) { - snprintf(name, sizeof(name), "%s-tx", netdev->name); - led_trigger_rename_static(name, priv->tx_led_trig); - - snprintf(name, sizeof(name), "%s-rx", netdev->name); - led_trigger_rename_static(name, priv->rx_led_trig); - - snprintf(name, sizeof(name), "%s-rxtx", netdev->name); - led_trigger_rename_static(name, priv->rxtx_led_trig); - } - - return NOTIFY_DONE; -} - -/* notifier block for 
netdevice event */ -static struct notifier_block can_netdev_notifier __read_mostly = { - .notifier_call = can_led_notifier, -}; - -int __init can_led_notifier_init(void) -{ - return register_netdevice_notifier(&can_netdev_notifier); -} - -void __exit can_led_notifier_exit(void) -{ - unregister_netdevice_notifier(&can_netdev_notifier); -} diff --git a/drivers/net/can/m_can/Kconfig b/drivers/net/can/m_can/Kconfig index 04f20dd39007..fc2afab36279 100644 --- a/drivers/net/can/m_can/Kconfig +++ b/drivers/net/can/m_can/Kconfig @@ -1,5 +1,35 @@ -config CAN_M_CAN +# SPDX-License-Identifier: GPL-2.0-only +menuconfig CAN_M_CAN + tristate "Bosch M_CAN support" + select CAN_RX_OFFLOAD + help + Say Y here if you want support for Bosch M_CAN controller framework. + This is common support for devices that embed the Bosch M_CAN IP. + +if CAN_M_CAN + +config CAN_M_CAN_PCI + tristate "Generic PCI Bus based M_CAN driver" + depends on PCI + help + Say Y here if you want to support Bosch M_CAN controller connected + to the pci bus. + +config CAN_M_CAN_PLATFORM + tristate "Bosch M_CAN support for io-mapped devices" depends on HAS_IOMEM - tristate "Bosch M_CAN devices" - ---help--- - Say Y here if you want to support for Bosch M_CAN controller. + help + Say Y here if you want support for IO Mapped Bosch M_CAN controller. + This support is for devices that have the Bosch M_CAN controller + IP embedded into the device and the IP is IO Mapped to the processor. + +config CAN_M_CAN_TCAN4X5X + depends on SPI + select REGMAP_SPI + tristate "TCAN4X5X M_CAN device" + help + Say Y here if you want support for Texas Instruments TCAN4x5x + M_CAN controller. This device is a peripheral device that uses the + SPI bus for communication. + +endif diff --git a/drivers/net/can/m_can/Makefile b/drivers/net/can/m_can/Makefile index 8bbd7f24f5be..d717bbc9e033 100644 --- a/drivers/net/can/m_can/Makefile +++ b/drivers/net/can/m_can/Makefile @@ -1,5 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0-only # # Makefile for the Bosch M_CAN controller driver. # obj-$(CONFIG_CAN_M_CAN) += m_can.o +obj-$(CONFIG_CAN_M_CAN_PCI) += m_can_pci.o +obj-$(CONFIG_CAN_M_CAN_PLATFORM) += m_can_platform.o +obj-$(CONFIG_CAN_M_CAN_TCAN4X5X) += tcan4x5x.o + +tcan4x5x-objs := +tcan4x5x-objs += tcan4x5x-core.o +tcan4x5x-objs += tcan4x5x-regmap.o diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 9b449400376b..eb856547ae7d 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -1,38 +1,31 @@ -/* - * CAN bus driver for Bosch M_CAN controller - * - * Copyright (C) 2014 Freescale Semiconductor, Inc. - * Dong Aisheng <b29396@freescale.com> - * - * Bosch M_CAN user manual can be obtained from: - * http://www.bosch-semiconductors.de/media/pdf_1/ipmodules_1/m_can/ - * mcan_users_manual_v302.pdf - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. +// SPDX-License-Identifier: GPL-2.0 +// CAN bus driver for Bosch M_CAN controller +// Copyright (C) 2014 Freescale Semiconductor, Inc. 
+// Dong Aisheng <aisheng.dong@nxp.com> +// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/ + +/* Bosch M_CAN user manual can be obtained from: + * https://github.com/linux-can/can-doc/tree/master/m_can */ -#include <linux/clk.h> -#include <linux/delay.h> +#include <linux/bitfield.h> +#include <linux/can/dev.h> +#include <linux/ethtool.h> +#include <linux/hrtimer.h> #include <linux/interrupt.h> #include <linux/io.h> +#include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/of.h> -#include <linux/of_device.h> +#include <linux/phy/phy.h> +#include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> -#include <linux/iopoll.h> -#include <linux/can/dev.h> -#include <linux/pinctrl/consumer.h> +#include <linux/reset.h> -/* napi related */ -#define M_CAN_NAPI_WEIGHT 64 - -/* message ram configuration data length */ -#define MRAM_CFG_LEN 8 +#include "m_can.h" /* registers definition */ enum m_can_reg { @@ -50,7 +43,7 @@ enum m_can_reg { M_CAN_TOCV = 0x2c, M_CAN_ECR = 0x40, M_CAN_PSR = 0x44, -/* TDCR Register only available for version >=3.1.x */ + /* TDCR Register only available for version >=3.1.x */ M_CAN_TDCR = 0x48, M_CAN_IR = 0x50, M_CAN_IE = 0x54, @@ -86,109 +79,87 @@ enum m_can_reg { M_CAN_TXEFA = 0xf8, }; -/* m_can lec values */ -enum m_can_lec_type { - LEC_NO_ERROR = 0, - LEC_STUFF_ERROR, - LEC_FORM_ERROR, - LEC_ACK_ERROR, - LEC_BIT1_ERROR, - LEC_BIT0_ERROR, - LEC_CRC_ERROR, - LEC_UNUSED, -}; - -enum m_can_mram_cfg { - MRAM_SIDF = 0, - MRAM_XIDF, - MRAM_RXF0, - MRAM_RXF1, - MRAM_RXB, - MRAM_TXE, - MRAM_TXB, - MRAM_CFG_NUM, -}; +/* message ram configuration data length */ +#define MRAM_CFG_LEN 8 /* Core Release Register (CREL) */ -#define CREL_REL_SHIFT 28 -#define CREL_REL_MASK (0xF << CREL_REL_SHIFT) -#define CREL_STEP_SHIFT 24 -#define CREL_STEP_MASK (0xF << CREL_STEP_SHIFT) -#define CREL_SUBSTEP_SHIFT 20 -#define CREL_SUBSTEP_MASK (0xF << CREL_SUBSTEP_SHIFT) +#define CREL_REL_MASK GENMASK(31, 28) +#define CREL_STEP_MASK GENMASK(27, 24) +#define CREL_SUBSTEP_MASK GENMASK(23, 20) /* Data Bit Timing & Prescaler Register (DBTP) */ #define DBTP_TDC BIT(23) -#define DBTP_DBRP_SHIFT 16 -#define DBTP_DBRP_MASK (0x1f << DBTP_DBRP_SHIFT) -#define DBTP_DTSEG1_SHIFT 8 -#define DBTP_DTSEG1_MASK (0x1f << DBTP_DTSEG1_SHIFT) -#define DBTP_DTSEG2_SHIFT 4 -#define DBTP_DTSEG2_MASK (0xf << DBTP_DTSEG2_SHIFT) -#define DBTP_DSJW_SHIFT 0 -#define DBTP_DSJW_MASK (0xf << DBTP_DSJW_SHIFT) +#define DBTP_DBRP_MASK GENMASK(20, 16) +#define DBTP_DTSEG1_MASK GENMASK(12, 8) +#define DBTP_DTSEG2_MASK GENMASK(7, 4) +#define DBTP_DSJW_MASK GENMASK(3, 0) /* Transmitter Delay Compensation Register (TDCR) */ -#define TDCR_TDCO_SHIFT 8 -#define TDCR_TDCO_MASK (0x7F << TDCR_TDCO_SHIFT) -#define TDCR_TDCF_SHIFT 0 -#define TDCR_TDCF_MASK (0x7F << TDCR_TDCF_SHIFT) +#define TDCR_TDCO_MASK GENMASK(14, 8) +#define TDCR_TDCF_MASK GENMASK(6, 0) /* Test Register (TEST) */ #define TEST_LBCK BIT(4) -/* CC Control Register(CCCR) */ -#define CCCR_CMR_MASK 0x3 -#define CCCR_CMR_SHIFT 10 -#define CCCR_CMR_CANFD 0x1 -#define CCCR_CMR_CANFD_BRS 0x2 -#define CCCR_CMR_CAN 0x3 -#define CCCR_CME_MASK 0x3 -#define CCCR_CME_SHIFT 8 -#define CCCR_CME_CAN 0 -#define CCCR_CME_CANFD 0x1 -#define CCCR_CME_CANFD_BRS 0x2 +/* CC Control Register (CCCR) */ #define CCCR_TXP BIT(14) #define CCCR_TEST BIT(7) +#define CCCR_DAR BIT(6) #define CCCR_MON BIT(5) #define CCCR_CSR BIT(4) #define CCCR_CSA BIT(3) #define CCCR_ASM BIT(2) 
#define CCCR_CCE BIT(1) #define CCCR_INIT BIT(0) -#define CCCR_CANFD 0x10 +/* for version 3.0.x */ +#define CCCR_CMR_MASK GENMASK(11, 10) +#define CCCR_CMR_CANFD 0x1 +#define CCCR_CMR_CANFD_BRS 0x2 +#define CCCR_CMR_CAN 0x3 +#define CCCR_CME_MASK GENMASK(9, 8) +#define CCCR_CME_CAN 0 +#define CCCR_CME_CANFD 0x1 +#define CCCR_CME_CANFD_BRS 0x2 /* for version >=3.1.x */ #define CCCR_EFBI BIT(13) #define CCCR_PXHD BIT(12) #define CCCR_BRSE BIT(9) #define CCCR_FDOE BIT(8) -/* only for version >=3.2.x */ +/* for version >=3.2.x */ #define CCCR_NISO BIT(15) +/* for version >=3.3.x */ +#define CCCR_WMM BIT(11) +#define CCCR_UTSU BIT(10) /* Nominal Bit Timing & Prescaler Register (NBTP) */ -#define NBTP_NSJW_SHIFT 25 -#define NBTP_NSJW_MASK (0x7f << NBTP_NSJW_SHIFT) -#define NBTP_NBRP_SHIFT 16 -#define NBTP_NBRP_MASK (0x1ff << NBTP_NBRP_SHIFT) -#define NBTP_NTSEG1_SHIFT 8 -#define NBTP_NTSEG1_MASK (0xff << NBTP_NTSEG1_SHIFT) -#define NBTP_NTSEG2_SHIFT 0 -#define NBTP_NTSEG2_MASK (0x7f << NBTP_NTSEG2_SHIFT) - -/* Error Counter Register(ECR) */ +#define NBTP_NSJW_MASK GENMASK(31, 25) +#define NBTP_NBRP_MASK GENMASK(24, 16) +#define NBTP_NTSEG1_MASK GENMASK(15, 8) +#define NBTP_NTSEG2_MASK GENMASK(6, 0) + +/* Timestamp Counter Configuration Register (TSCC) */ +#define TSCC_TCP_MASK GENMASK(19, 16) +#define TSCC_TSS_MASK GENMASK(1, 0) +#define TSCC_TSS_DISABLE 0x0 +#define TSCC_TSS_INTERNAL 0x1 +#define TSCC_TSS_EXTERNAL 0x2 + +/* Timestamp Counter Value Register (TSCV) */ +#define TSCV_TSC_MASK GENMASK(15, 0) + +/* Error Counter Register (ECR) */ #define ECR_RP BIT(15) -#define ECR_REC_SHIFT 8 -#define ECR_REC_MASK (0x7f << ECR_REC_SHIFT) -#define ECR_TEC_SHIFT 0 -#define ECR_TEC_MASK 0xff +#define ECR_REC_MASK GENMASK(14, 8) +#define ECR_TEC_MASK GENMASK(7, 0) -/* Protocol Status Register(PSR) */ +/* Protocol Status Register (PSR) */ #define PSR_BO BIT(7) #define PSR_EW BIT(6) #define PSR_EP BIT(5) -#define PSR_LEC_MASK 0x7 +#define PSR_LEC_MASK GENMASK(2, 0) +#define PSR_DLEC_MASK GENMASK(10, 8) -/* Interrupt Register(IR) */ +/* Interrupt Register (IR) */ #define IR_ALL_INT 0xffffffff /* Renamed bits for versions > 3.1.x */ @@ -233,15 +204,16 @@ enum m_can_mram_cfg { /* Interrupts for version 3.0.x */ #define IR_ERR_LEC_30X (IR_STE | IR_FOE | IR_ACKE | IR_BE | IR_CRCE) -#define IR_ERR_BUS_30X (IR_ERR_LEC_30X | IR_WDI | IR_ELO | IR_BEU | \ - IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \ - IR_RF1L | IR_RF0L) +#define IR_ERR_BUS_30X (IR_ERR_LEC_30X | IR_WDI | IR_BEU | IR_BEC | \ + IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \ + IR_RF0L) #define IR_ERR_ALL_30X (IR_ERR_STATE | IR_ERR_BUS_30X) + /* Interrupts for version >= 3.1.x */ #define IR_ERR_LEC_31X (IR_PED | IR_PEA) -#define IR_ERR_BUS_31X (IR_ERR_LEC_31X | IR_WDI | IR_ELO | IR_BEU | \ - IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \ - IR_RF1L | IR_RF0L) +#define IR_ERR_BUS_31X (IR_ERR_LEC_31X | IR_WDI | IR_BEU | IR_BEC | \ + IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \ + IR_RF0L) #define IR_ERR_ALL_31X (IR_ERR_STATE | IR_ERR_BUS_31X) /* Interrupt Line Select (ILS) */ @@ -253,58 +225,48 @@ enum m_can_mram_cfg { #define ILE_EINT0 BIT(0) /* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */ -#define RXFC_FWM_SHIFT 24 -#define RXFC_FWM_MASK (0x7f << RXFC_FWM_SHIFT) -#define RXFC_FS_SHIFT 16 -#define RXFC_FS_MASK (0x7f << RXFC_FS_SHIFT) +#define RXFC_FWM_MASK GENMASK(30, 24) +#define RXFC_FS_MASK GENMASK(22, 16) /* Rx FIFO 0/1 Status (RXF0S/RXF1S) */ #define RXFS_RFL BIT(25) #define RXFS_FF BIT(24) -#define RXFS_FPI_SHIFT 16 -#define 
RXFS_FPI_MASK 0x3f0000 -#define RXFS_FGI_SHIFT 8 -#define RXFS_FGI_MASK 0x3f00 -#define RXFS_FFL_MASK 0x7f +#define RXFS_FPI_MASK GENMASK(21, 16) +#define RXFS_FGI_MASK GENMASK(13, 8) +#define RXFS_FFL_MASK GENMASK(6, 0) /* Rx Buffer / FIFO Element Size Configuration (RXESC) */ -#define M_CAN_RXESC_8BYTES 0x0 -#define M_CAN_RXESC_64BYTES 0x777 +#define RXESC_RBDS_MASK GENMASK(10, 8) +#define RXESC_F1DS_MASK GENMASK(6, 4) +#define RXESC_F0DS_MASK GENMASK(2, 0) +#define RXESC_64B 0x7 -/* Tx Buffer Configuration(TXBC) */ -#define TXBC_NDTB_SHIFT 16 -#define TXBC_NDTB_MASK (0x3f << TXBC_NDTB_SHIFT) -#define TXBC_TFQS_SHIFT 24 -#define TXBC_TFQS_MASK (0x3f << TXBC_TFQS_SHIFT) +/* Tx Buffer Configuration (TXBC) */ +#define TXBC_TFQS_MASK GENMASK(29, 24) +#define TXBC_NDTB_MASK GENMASK(21, 16) /* Tx FIFO/Queue Status (TXFQS) */ #define TXFQS_TFQF BIT(21) -#define TXFQS_TFQPI_SHIFT 16 -#define TXFQS_TFQPI_MASK (0x1f << TXFQS_TFQPI_SHIFT) -#define TXFQS_TFGI_SHIFT 8 -#define TXFQS_TFGI_MASK (0x1f << TXFQS_TFGI_SHIFT) -#define TXFQS_TFFL_SHIFT 0 -#define TXFQS_TFFL_MASK (0x3f << TXFQS_TFFL_SHIFT) +#define TXFQS_TFQPI_MASK GENMASK(20, 16) +#define TXFQS_TFGI_MASK GENMASK(12, 8) +#define TXFQS_TFFL_MASK GENMASK(5, 0) -/* Tx Buffer Element Size Configuration(TXESC) */ -#define TXESC_TBDS_8BYTES 0x0 -#define TXESC_TBDS_64BYTES 0x7 +/* Tx Buffer Element Size Configuration (TXESC) */ +#define TXESC_TBDS_MASK GENMASK(2, 0) +#define TXESC_TBDS_64B 0x7 /* Tx Event FIFO Configuration (TXEFC) */ -#define TXEFC_EFS_SHIFT 16 -#define TXEFC_EFS_MASK (0x3f << TXEFC_EFS_SHIFT) +#define TXEFC_EFWM_MASK GENMASK(29, 24) +#define TXEFC_EFS_MASK GENMASK(21, 16) /* Tx Event FIFO Status (TXEFS) */ #define TXEFS_TEFL BIT(25) #define TXEFS_EFF BIT(24) -#define TXEFS_EFGI_SHIFT 8 -#define TXEFS_EFGI_MASK (0x1f << TXEFS_EFGI_SHIFT) -#define TXEFS_EFFL_SHIFT 0 -#define TXEFS_EFFL_MASK (0x3f << TXEFS_EFFL_SHIFT) +#define TXEFS_EFGI_MASK GENMASK(12, 8) +#define TXEFS_EFFL_MASK GENMASK(5, 0) /* Tx Event FIFO Acknowledge (TXEFA) */ -#define TXEFA_EFAI_SHIFT 0 -#define TXEFA_EFAI_MASK (0x1f << TXEFA_EFAI_SHIFT) +#define TXEFA_EFAI_MASK GENMASK(4, 0) /* Message RAM Configuration (in bytes) */ #define SIDF_ELEMENT_SIZE 4 @@ -318,7 +280,7 @@ enum m_can_mram_cfg { /* Message RAM Elements */ #define M_CAN_FIFO_ID 0x0 #define M_CAN_FIFO_DLC 0x4 -#define M_CAN_FIFO_DATA(n) (0x8 + ((n) << 2)) +#define M_CAN_FIFO_DATA 0x8 /* Rx Buffer Element */ /* R0 */ @@ -329,6 +291,7 @@ enum m_can_mram_cfg { #define RX_BUF_ANMF BIT(31) #define RX_BUF_FDF BIT(21) #define RX_BUF_BRS BIT(20) +#define RX_BUF_RXTS_MASK GENMASK(15, 0) /* Tx Buffer Element */ /* T0 */ @@ -339,210 +302,371 @@ enum m_can_mram_cfg { #define TX_BUF_EFC BIT(23) #define TX_BUF_FDF BIT(21) #define TX_BUF_BRS BIT(20) -#define TX_BUF_MM_SHIFT 24 -#define TX_BUF_MM_MASK (0xff << TX_BUF_MM_SHIFT) +#define TX_BUF_MM_MASK GENMASK(31, 24) +#define TX_BUF_DLC_MASK GENMASK(19, 16) /* Tx event FIFO Element */ /* E1 */ -#define TX_EVENT_MM_SHIFT TX_BUF_MM_SHIFT -#define TX_EVENT_MM_MASK (0xff << TX_EVENT_MM_SHIFT) +#define TX_EVENT_MM_MASK GENMASK(31, 24) +#define TX_EVENT_TXTS_MASK GENMASK(15, 0) -/* address offset and element number for each FIFO/Buffer in the Message RAM */ -struct mram_cfg { - u16 off; - u8 num; -}; +/* Hrtimer polling interval */ +#define HRTIMER_POLL_INTERVAL_MS 1 -/* m_can private data structure */ -struct m_can_priv { - struct can_priv can; /* must be the first member */ - struct napi_struct napi; - struct net_device *dev; - struct device *device; - struct clk *hclk; - 
struct clk *cclk; - void __iomem *base; - u32 irqstatus; - int version; +/* The ID and DLC registers are adjacent in M_CAN FIFO memory, + * and we can save a (potentially slow) bus round trip by combining + * reads and writes to them. + */ +struct id_and_dlc { + u32 id; + u32 dlc; +}; - /* message ram configuration */ - void __iomem *mram_base; - struct mram_cfg mcfg[MRAM_CFG_NUM]; +struct m_can_fifo_element { + u32 id; + u32 dlc; + u8 data[CANFD_MAX_DLEN]; }; -static inline u32 m_can_read(const struct m_can_priv *priv, enum m_can_reg reg) +static inline u32 m_can_read(struct m_can_classdev *cdev, enum m_can_reg reg) { - return readl(priv->base + reg); + return cdev->ops->read_reg(cdev, reg); } -static inline void m_can_write(const struct m_can_priv *priv, - enum m_can_reg reg, u32 val) +static inline void m_can_write(struct m_can_classdev *cdev, enum m_can_reg reg, + u32 val) { - writel(val, priv->base + reg); + cdev->ops->write_reg(cdev, reg, val); } -static inline u32 m_can_fifo_read(const struct m_can_priv *priv, - u32 fgi, unsigned int offset) +static int +m_can_fifo_read(struct m_can_classdev *cdev, + u32 fgi, unsigned int offset, void *val, size_t val_count) { - return readl(priv->mram_base + priv->mcfg[MRAM_RXF0].off + - fgi * RXF0_ELEMENT_SIZE + offset); + u32 addr_offset = cdev->mcfg[MRAM_RXF0].off + fgi * RXF0_ELEMENT_SIZE + + offset; + + if (val_count == 0) + return 0; + + return cdev->ops->read_fifo(cdev, addr_offset, val, val_count); } -static inline void m_can_fifo_write(const struct m_can_priv *priv, - u32 fpi, unsigned int offset, u32 val) +static int +m_can_fifo_write(struct m_can_classdev *cdev, + u32 fpi, unsigned int offset, const void *val, size_t val_count) { - writel(val, priv->mram_base + priv->mcfg[MRAM_TXB].off + - fpi * TXB_ELEMENT_SIZE + offset); -} + u32 addr_offset = cdev->mcfg[MRAM_TXB].off + fpi * TXB_ELEMENT_SIZE + + offset; -static inline u32 m_can_txe_fifo_read(const struct m_can_priv *priv, - u32 fgi, - u32 offset) { - return readl(priv->mram_base + priv->mcfg[MRAM_TXE].off + - fgi * TXE_ELEMENT_SIZE + offset); + if (val_count == 0) + return 0; + + return cdev->ops->write_fifo(cdev, addr_offset, val, val_count); } -static inline bool m_can_tx_fifo_full(const struct m_can_priv *priv) +static inline int m_can_fifo_write_no_off(struct m_can_classdev *cdev, + u32 fpi, u32 val) { - return !!(m_can_read(priv, M_CAN_TXFQS) & TXFQS_TFQF); + return cdev->ops->write_fifo(cdev, fpi, &val, 1); } -static inline void m_can_config_endisable(const struct m_can_priv *priv, - bool enable) +static int +m_can_txe_fifo_read(struct m_can_classdev *cdev, u32 fgi, u32 offset, u32 *val) { - u32 cccr = m_can_read(priv, M_CAN_CCCR); - u32 timeout = 10; - u32 val = 0; + u32 addr_offset = cdev->mcfg[MRAM_TXE].off + fgi * TXE_ELEMENT_SIZE + + offset; - if (enable) { - /* enable m_can configuration */ - m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT); - udelay(5); - /* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */ - m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE); - } else { - m_can_write(priv, M_CAN_CCCR, cccr & ~(CCCR_INIT | CCCR_CCE)); + return cdev->ops->read_fifo(cdev, addr_offset, val, 1); +} + +static int m_can_cccr_update_bits(struct m_can_classdev *cdev, u32 mask, u32 val) +{ + u32 val_before = m_can_read(cdev, M_CAN_CCCR); + u32 val_after = (val_before & ~mask) | val; + size_t tries = 10; + + if (!(mask & CCCR_INIT) && !(val_before & CCCR_INIT)) { + netdev_err(cdev->net, + "refusing to configure device when in normal mode\n"); + return -EBUSY; } - /* 
there's a delay for module initialization */ - if (enable) - val = CCCR_INIT | CCCR_CCE; + /* The chip should be in standby mode when changing the CCCR register, + * and some chips set the CSR and CSA bits when in standby. Furthermore, + * the CSR and CSA bits should be written as zeros, even when they read + * ones. + */ + val_after &= ~(CCCR_CSR | CCCR_CSA); - while ((m_can_read(priv, M_CAN_CCCR) & (CCCR_INIT | CCCR_CCE)) != val) { - if (timeout == 0) { - netdev_warn(priv->dev, "Failed to init module\n"); - return; - } - timeout--; - udelay(1); + while (tries--) { + u32 val_read; + + /* Write the desired value in each try, as setting some bits in + * the CCCR register require other bits to be set first. E.g. + * setting the NISO bit requires setting the CCE bit first. + */ + m_can_write(cdev, M_CAN_CCCR, val_after); + + val_read = m_can_read(cdev, M_CAN_CCCR) & ~(CCCR_CSR | CCCR_CSA); + + if (val_read == val_after) + return 0; + + usleep_range(1, 5); } + + return -ETIMEDOUT; +} + +static int m_can_config_enable(struct m_can_classdev *cdev) +{ + int err; + + /* CCCR_INIT must be set in order to set CCCR_CCE, but access to + * configuration registers should only be enabled when in standby mode, + * where CCCR_INIT is always set. + */ + err = m_can_cccr_update_bits(cdev, CCCR_CCE, CCCR_CCE); + if (err) + netdev_err(cdev->net, "failed to enable configuration mode\n"); + + return err; +} + +static int m_can_config_disable(struct m_can_classdev *cdev) +{ + int err; + + /* Only clear CCCR_CCE, since CCCR_INIT cannot be cleared while in + * standby mode + */ + err = m_can_cccr_update_bits(cdev, CCCR_CCE, 0); + if (err) + netdev_err(cdev->net, "failed to disable configuration registers\n"); + + return err; +} + +static void m_can_interrupt_enable(struct m_can_classdev *cdev, u32 interrupts) +{ + if (cdev->active_interrupts == interrupts) + return; + m_can_write(cdev, M_CAN_IE, interrupts); + cdev->active_interrupts = interrupts; +} + +static void m_can_coalescing_disable(struct m_can_classdev *cdev) +{ + u32 new_interrupts = cdev->active_interrupts | IR_RF0N | IR_TEFN; + + if (!cdev->net->irq) + return; + + hrtimer_cancel(&cdev->hrtimer); + m_can_interrupt_enable(cdev, new_interrupts); } -static inline void m_can_enable_all_interrupts(const struct m_can_priv *priv) +static inline void m_can_enable_all_interrupts(struct m_can_classdev *cdev) { + if (!cdev->net->irq) { + netdev_dbg(cdev->net, "Start hrtimer\n"); + hrtimer_start(&cdev->hrtimer, + ms_to_ktime(HRTIMER_POLL_INTERVAL_MS), + HRTIMER_MODE_REL_PINNED); + } + /* Only interrupt line 0 is used in this driver */ - m_can_write(priv, M_CAN_ILE, ILE_EINT0); + m_can_write(cdev, M_CAN_ILE, ILE_EINT0); +} + +static inline void m_can_disable_all_interrupts(struct m_can_classdev *cdev) +{ + m_can_coalescing_disable(cdev); + m_can_write(cdev, M_CAN_ILE, 0x0); + + if (!cdev->net->irq) { + netdev_dbg(cdev->net, "Stop hrtimer\n"); + hrtimer_try_to_cancel(&cdev->hrtimer); + } +} + +/* Retrieve internal timestamp counter from TSCV.TSC, and shift it to 32-bit + * width. 
+ */ +static u32 m_can_get_timestamp(struct m_can_classdev *cdev) +{ + u32 tscv; + u32 tsc; + + tscv = m_can_read(cdev, M_CAN_TSCV); + tsc = FIELD_GET(TSCV_TSC_MASK, tscv); + + return (tsc << 16); +} + +static void m_can_clean(struct net_device *net) +{ + struct m_can_classdev *cdev = netdev_priv(net); + unsigned long irqflags; + + if (cdev->tx_ops) { + for (int i = 0; i != cdev->tx_fifo_size; ++i) { + if (!cdev->tx_ops[i].skb) + continue; + + net->stats.tx_errors++; + cdev->tx_ops[i].skb = NULL; + } + } + + for (int i = 0; i != cdev->can.echo_skb_max; ++i) + can_free_echo_skb(cdev->net, i, NULL); + + netdev_reset_queue(cdev->net); + + spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags); + cdev->tx_fifo_in_flight = 0; + spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags); } -static inline void m_can_disable_all_interrupts(const struct m_can_priv *priv) +/* For peripherals, pass skb to rx-offload, which will push skb from + * napi. For non-peripherals, RX is done in napi already, so push + * directly. timestamp is used to ensure good skb ordering in + * rx-offload and is ignored for non-peripherals. + */ +static void m_can_receive_skb(struct m_can_classdev *cdev, + struct sk_buff *skb, + u32 timestamp) { - m_can_write(priv, M_CAN_ILE, 0x0); + if (cdev->is_peripheral) { + struct net_device_stats *stats = &cdev->net->stats; + int err; + + err = can_rx_offload_queue_timestamp(&cdev->offload, skb, + timestamp); + if (err) + stats->rx_fifo_errors++; + } else { + netif_receive_skb(skb); + } } -static void m_can_read_fifo(struct net_device *dev, u32 rxfs) +static int m_can_read_fifo(struct net_device *dev, u32 fgi) { struct net_device_stats *stats = &dev->stats; - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); struct canfd_frame *cf; struct sk_buff *skb; - u32 id, fgi, dlc; - int i; + struct id_and_dlc fifo_header; + u32 timestamp = 0; + int err; - /* calculate the fifo get index for where to read data */ - fgi = (rxfs & RXFS_FGI_MASK) >> RXFS_FGI_SHIFT; - dlc = m_can_fifo_read(priv, fgi, M_CAN_FIFO_DLC); - if (dlc & RX_BUF_FDF) + err = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_ID, &fifo_header, 2); + if (err) + goto out_fail; + + if (fifo_header.dlc & RX_BUF_FDF) skb = alloc_canfd_skb(dev, &cf); else skb = alloc_can_skb(dev, (struct can_frame **)&cf); if (!skb) { stats->rx_dropped++; - return; + return 0; } - if (dlc & RX_BUF_FDF) - cf->len = can_dlc2len((dlc >> 16) & 0x0F); + if (fifo_header.dlc & RX_BUF_FDF) + cf->len = can_fd_dlc2len((fifo_header.dlc >> 16) & 0x0F); else - cf->len = get_can_dlc((dlc >> 16) & 0x0F); + cf->len = can_cc_dlc2len((fifo_header.dlc >> 16) & 0x0F); - id = m_can_fifo_read(priv, fgi, M_CAN_FIFO_ID); - if (id & RX_BUF_XTD) - cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG; + if (fifo_header.id & RX_BUF_XTD) + cf->can_id = (fifo_header.id & CAN_EFF_MASK) | CAN_EFF_FLAG; else - cf->can_id = (id >> 18) & CAN_SFF_MASK; + cf->can_id = (fifo_header.id >> 18) & CAN_SFF_MASK; - if (id & RX_BUF_ESI) { + if (fifo_header.id & RX_BUF_ESI) { cf->flags |= CANFD_ESI; netdev_dbg(dev, "ESI Error\n"); } - if (!(dlc & RX_BUF_FDF) && (id & RX_BUF_RTR)) { + if (!(fifo_header.dlc & RX_BUF_FDF) && (fifo_header.id & RX_BUF_RTR)) { cf->can_id |= CAN_RTR_FLAG; } else { - if (dlc & RX_BUF_BRS) + if (fifo_header.dlc & RX_BUF_BRS) cf->flags |= CANFD_BRS; - for (i = 0; i < cf->len; i += 4) - *(u32 *)(cf->data + i) = - m_can_fifo_read(priv, fgi, - M_CAN_FIFO_DATA(i / 4)); + err = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_DATA, + cf->data, 
DIV_ROUND_UP(cf->len, 4)); + if (err) + goto out_free_skb; + + stats->rx_bytes += cf->len; } + stats->rx_packets++; - /* acknowledge rx fifo 0 */ - m_can_write(priv, M_CAN_RXF0A, fgi); + timestamp = FIELD_GET(RX_BUF_RXTS_MASK, fifo_header.dlc) << 16; - stats->rx_packets++; - stats->rx_bytes += cf->len; + m_can_receive_skb(cdev, skb, timestamp); - netif_receive_skb(skb); + return 0; + +out_free_skb: + kfree_skb(skb); +out_fail: + netdev_err(dev, "FIFO read returned %d\n", err); + return err; } static int m_can_do_rx_poll(struct net_device *dev, int quota) { - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); u32 pkts = 0; u32 rxfs; + u32 rx_count; + u32 fgi; + int ack_fgi = -1; + int i; + int err = 0; - rxfs = m_can_read(priv, M_CAN_RXF0S); + rxfs = m_can_read(cdev, M_CAN_RXF0S); if (!(rxfs & RXFS_FFL_MASK)) { netdev_dbg(dev, "no messages in fifo0\n"); return 0; } - while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) { - if (rxfs & RXFS_RFL) - netdev_warn(dev, "Rx FIFO 0 Message Lost\n"); + rx_count = FIELD_GET(RXFS_FFL_MASK, rxfs); + fgi = FIELD_GET(RXFS_FGI_MASK, rxfs); - m_can_read_fifo(dev, rxfs); + for (i = 0; i < rx_count && quota > 0; ++i) { + err = m_can_read_fifo(dev, fgi); + if (err) + break; quota--; pkts++; - rxfs = m_can_read(priv, M_CAN_RXF0S); + ack_fgi = fgi; + fgi = (++fgi >= cdev->mcfg[MRAM_RXF0].num ? 0 : fgi); } - if (pkts) - can_led_event(dev, CAN_LED_EVENT_RX); + if (ack_fgi != -1) + m_can_write(cdev, M_CAN_RXF0A, ack_fgi); + + if (err) + return err; return pkts; } static int m_can_handle_lost_msg(struct net_device *dev) { + struct m_can_classdev *cdev = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; struct sk_buff *skb; struct can_frame *frame; + u32 timestamp = 0; - netdev_err(dev, "msg lost in rxf0\n"); + netdev_dbg(dev, "msg lost in rxf0\n"); stats->rx_errors++; stats->rx_over_errors++; @@ -554,7 +678,10 @@ static int m_can_handle_lost_msg(struct net_device *dev) frame->can_id |= CAN_ERR_CRTL; frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; - netif_receive_skb(skb); + if (cdev->is_peripheral) + timestamp = m_can_get_timestamp(cdev); + + m_can_receive_skb(cdev, skb, timestamp); return 1; } @@ -562,56 +689,71 @@ static int m_can_handle_lost_msg(struct net_device *dev) static int m_can_handle_lec_err(struct net_device *dev, enum m_can_lec_type lec_type) { - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; struct can_frame *cf; struct sk_buff *skb; + u32 timestamp = 0; - priv->can.can_stats.bus_error++; - stats->rx_errors++; + cdev->can.can_stats.bus_error++; /* propagate the error condition to the CAN stack */ skb = alloc_can_err_skb(dev, &cf); - if (unlikely(!skb)) - return 0; /* check for 'last error code' which tells us the * type of the last error to occur on the CAN bus */ - cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + if (likely(skb)) + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; switch (lec_type) { case LEC_STUFF_ERROR: netdev_dbg(dev, "stuff error\n"); - cf->data[2] |= CAN_ERR_PROT_STUFF; + stats->rx_errors++; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_STUFF; break; case LEC_FORM_ERROR: netdev_dbg(dev, "form error\n"); - cf->data[2] |= CAN_ERR_PROT_FORM; + stats->rx_errors++; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_FORM; break; case LEC_ACK_ERROR: netdev_dbg(dev, "ack error\n"); - cf->data[3] = CAN_ERR_PROT_LOC_ACK; + stats->tx_errors++; + if (likely(skb)) + cf->data[3] = CAN_ERR_PROT_LOC_ACK; break; 
case LEC_BIT1_ERROR: netdev_dbg(dev, "bit1 error\n"); - cf->data[2] |= CAN_ERR_PROT_BIT1; + stats->tx_errors++; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_BIT1; break; case LEC_BIT0_ERROR: netdev_dbg(dev, "bit0 error\n"); - cf->data[2] |= CAN_ERR_PROT_BIT0; + stats->tx_errors++; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_BIT0; break; case LEC_CRC_ERROR: netdev_dbg(dev, "CRC error\n"); - cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; + stats->rx_errors++; + if (likely(skb)) + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; break; default: break; } - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; - netif_receive_skb(skb); + if (unlikely(!skb)) + return 0; + + if (cdev->is_peripheral) + timestamp = m_can_get_timestamp(cdev); + + m_can_receive_skb(cdev, skb, timestamp); return 1; } @@ -619,47 +761,47 @@ static int m_can_handle_lec_err(struct net_device *dev, static int __m_can_get_berr_counter(const struct net_device *dev, struct can_berr_counter *bec) { - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); unsigned int ecr; - ecr = m_can_read(priv, M_CAN_ECR); - bec->rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT; - bec->txerr = (ecr & ECR_TEC_MASK) >> ECR_TEC_SHIFT; + ecr = m_can_read(cdev, M_CAN_ECR); + bec->rxerr = FIELD_GET(ECR_REC_MASK, ecr); + bec->txerr = FIELD_GET(ECR_TEC_MASK, ecr); return 0; } -static int m_can_clk_start(struct m_can_priv *priv) +static int m_can_clk_start(struct m_can_classdev *cdev) { - int err; - - err = pm_runtime_get_sync(priv->device); - if (err < 0) { - pm_runtime_put_noidle(priv->device); - return err; - } + if (cdev->pm_clock_support == 0) + return 0; - return 0; + return pm_runtime_resume_and_get(cdev->dev); } -static void m_can_clk_stop(struct m_can_priv *priv) +static void m_can_clk_stop(struct m_can_classdev *cdev) { - pm_runtime_put_sync(priv->device); + if (cdev->pm_clock_support) + pm_runtime_put_sync(cdev->dev); } static int m_can_get_berr_counter(const struct net_device *dev, struct can_berr_counter *bec) { - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); int err; - err = m_can_clk_start(priv); + /* Avoid waking up the controller if the interface is down */ + if (!(dev->flags & IFF_UP)) + return 0; + + err = m_can_clk_start(cdev); if (err) return err; __m_can_get_berr_counter(dev, bec); - m_can_clk_stop(priv); + m_can_clk_stop(cdev); return 0; } @@ -667,29 +809,32 @@ static int m_can_get_berr_counter(const struct net_device *dev, static int m_can_handle_state_change(struct net_device *dev, enum can_state new_state) { - struct m_can_priv *priv = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; + struct m_can_classdev *cdev = netdev_priv(dev); struct can_frame *cf; struct sk_buff *skb; struct can_berr_counter bec; unsigned int ecr; + u32 timestamp = 0; switch (new_state) { case CAN_STATE_ERROR_ACTIVE: + cdev->can.state = CAN_STATE_ERROR_ACTIVE; + break; + case CAN_STATE_ERROR_WARNING: /* error warning state */ - priv->can.can_stats.error_warning++; - priv->can.state = CAN_STATE_ERROR_WARNING; + cdev->can.can_stats.error_warning++; + cdev->can.state = CAN_STATE_ERROR_WARNING; break; case CAN_STATE_ERROR_PASSIVE: /* error passive state */ - priv->can.can_stats.error_passive++; - priv->can.state = CAN_STATE_ERROR_PASSIVE; + cdev->can.can_stats.error_passive++; + cdev->can.state = CAN_STATE_ERROR_PASSIVE; break; case CAN_STATE_BUS_OFF: /* bus-off state */ - priv->can.state = CAN_STATE_BUS_OFF; - m_can_disable_all_interrupts(priv); - 
priv->can.can_stats.bus_off++; + cdev->can.state = CAN_STATE_BUS_OFF; + m_can_disable_all_interrupts(cdev); + cdev->can.can_stats.bus_off++; can_bus_off(dev); break; default: @@ -705,8 +850,14 @@ static int m_can_handle_state_change(struct net_device *dev, switch (new_state) { case CAN_STATE_ERROR_ACTIVE: + cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT; + cf->data[1] = CAN_ERR_CRTL_ACTIVE; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + break; + case CAN_STATE_ERROR_WARNING: /* error warning state */ - cf->can_id |= CAN_ERR_CRTL; + cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT; cf->data[1] = (bec.txerr > bec.rxerr) ? CAN_ERR_CRTL_TX_WARNING : CAN_ERR_CRTL_RX_WARNING; @@ -715,8 +866,8 @@ static int m_can_handle_state_change(struct net_device *dev, break; case CAN_STATE_ERROR_PASSIVE: /* error passive state */ - cf->can_id |= CAN_ERR_CRTL; - ecr = m_can_read(priv, M_CAN_ECR); + cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT; + ecr = m_can_read(cdev, M_CAN_ECR); if (ecr & ECR_RP) cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; if (bec.txerr > 127) @@ -732,48 +883,47 @@ static int m_can_handle_state_change(struct net_device *dev, break; } - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; - netif_receive_skb(skb); + if (cdev->is_peripheral) + timestamp = m_can_get_timestamp(cdev); + + m_can_receive_skb(cdev, skb, timestamp); return 1; } -static int m_can_handle_state_errors(struct net_device *dev, u32 psr) +static enum can_state +m_can_state_get_by_psr(struct m_can_classdev *cdev) { - struct m_can_priv *priv = netdev_priv(dev); - int work_done = 0; + u32 reg_psr; - if ((psr & PSR_EW) && - (priv->can.state != CAN_STATE_ERROR_WARNING)) { - netdev_dbg(dev, "entered error warning state\n"); - work_done += m_can_handle_state_change(dev, - CAN_STATE_ERROR_WARNING); - } + reg_psr = m_can_read(cdev, M_CAN_PSR); - if ((psr & PSR_EP) && - (priv->can.state != CAN_STATE_ERROR_PASSIVE)) { - netdev_dbg(dev, "entered error passive state\n"); - work_done += m_can_handle_state_change(dev, - CAN_STATE_ERROR_PASSIVE); - } + if (reg_psr & PSR_BO) + return CAN_STATE_BUS_OFF; + if (reg_psr & PSR_EP) + return CAN_STATE_ERROR_PASSIVE; + if (reg_psr & PSR_EW) + return CAN_STATE_ERROR_WARNING; - if ((psr & PSR_BO) && - (priv->can.state != CAN_STATE_BUS_OFF)) { - netdev_dbg(dev, "entered error bus off state\n"); - work_done += m_can_handle_state_change(dev, - CAN_STATE_BUS_OFF); - } + return CAN_STATE_ERROR_ACTIVE; +} - return work_done; +static int m_can_handle_state_errors(struct net_device *dev) +{ + struct m_can_classdev *cdev = netdev_priv(dev); + enum can_state new_state; + + new_state = m_can_state_get_by_psr(cdev); + if (new_state == cdev->can.state) + return 0; + + return m_can_handle_state_change(dev, new_state); } static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus) { if (irqstatus & IR_WDI) netdev_err(dev, "Message RAM Watchdog event due to missing READY\n"); - if (irqstatus & IR_ELO) - netdev_err(dev, "Error Logging Overflow\n"); if (irqstatus & IR_BEU) netdev_err(dev, "Bit Error Uncorrected\n"); if (irqstatus & IR_BEC) @@ -784,26 +934,82 @@ static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus) netdev_err(dev, "Message RAM access failure occurred\n"); } -static inline bool is_lec_err(u32 psr) +static inline bool is_lec_err(u8 lec) +{ + return lec != LEC_NO_ERROR && lec != LEC_NO_CHANGE; +} + +static inline bool m_can_is_protocol_err(u32 irqstatus) +{ + return irqstatus & IR_ERR_LEC_31X; +} + +static int m_can_handle_protocol_error(struct net_device *dev, u32 irqstatus) { - psr 
&= LEC_UNUSED; + struct net_device_stats *stats = &dev->stats; + struct m_can_classdev *cdev = netdev_priv(dev); + struct can_frame *cf; + struct sk_buff *skb; + u32 timestamp = 0; + + /* propagate the error condition to the CAN stack */ + skb = alloc_can_err_skb(dev, &cf); + + /* update tx error stats since there is protocol error */ + stats->tx_errors++; + + /* update arbitration lost status */ + if (cdev->version >= 31 && (irqstatus & IR_PEA)) { + netdev_dbg(dev, "Protocol error in Arbitration fail\n"); + cdev->can.can_stats.arbitration_lost++; + if (skb) { + cf->can_id |= CAN_ERR_LOSTARB; + cf->data[0] |= CAN_ERR_LOSTARB_UNSPEC; + } + } - return psr && (psr != LEC_UNUSED); + if (unlikely(!skb)) { + netdev_dbg(dev, "allocation of skb failed\n"); + return 0; + } + + if (cdev->is_peripheral) + timestamp = m_can_get_timestamp(cdev); + + m_can_receive_skb(cdev, skb, timestamp); + + return 1; } static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus, u32 psr) { - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); int work_done = 0; if (irqstatus & IR_RF0L) work_done += m_can_handle_lost_msg(dev); /* handle lec errors on the bus */ - if ((priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) && - is_lec_err(psr)) - work_done += m_can_handle_lec_err(dev, psr & LEC_UNUSED); + if (cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) { + u8 lec = FIELD_GET(PSR_LEC_MASK, psr); + u8 dlec = FIELD_GET(PSR_DLEC_MASK, psr); + + if (is_lec_err(lec)) { + netdev_dbg(dev, "Arbitration phase error detected\n"); + work_done += m_can_handle_lec_err(dev, lec); + } + + if (is_lec_err(dlec)) { + netdev_dbg(dev, "Data phase error detected\n"); + work_done += m_can_handle_lec_err(dev, dlec); + } + } + + /* handle protocol errors in arbitration phase */ + if ((cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) && + m_can_is_protocol_err(irqstatus)) + work_done += m_can_handle_protocol_error(dev, irqstatus); /* other unproccessed error interrupts */ m_can_handle_other_err(dev, irqstatus); @@ -811,122 +1017,325 @@ static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus, return work_done; } -static int m_can_poll(struct napi_struct *napi, int quota) +static int m_can_rx_handler(struct net_device *dev, int quota, u32 irqstatus) { - struct net_device *dev = napi->dev; - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); + int rx_work_or_err; int work_done = 0; - u32 irqstatus, psr; - irqstatus = priv->irqstatus | m_can_read(priv, M_CAN_IR); if (!irqstatus) goto end; - psr = m_can_read(priv, M_CAN_PSR); + /* Errata workaround for issue "Needless activation of MRAF irq" + * During frame reception while the MCAN is in Error Passive state + * and the Receive Error Counter has the value MCAN_ECR.REC = 127, + * it may happen that MCAN_IR.MRAF is set although there was no + * Message RAM access failure. + * If MCAN_IR.MRAF is enabled, an interrupt to the Host CPU is generated + * The Message RAM Access Failure interrupt routine needs to check + * whether MCAN_ECR.RP = ’1’ and MCAN_ECR.REC = 127. + * In this case, reset MCAN_IR.MRAF. No further action is required. 
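On cores up to v3.1.x the check described in the comment above is open-coded right below in m_can_rx_handler(); reduced to a predicate it amounts to the following sketch (the helper name is invented here for illustration):

    /* Sketch: IR_MRAF is spurious when the controller is receive error
     * passive (ECR.RP set) and the receive error counter reads 127.
     */
    static bool m_can_mraf_is_spurious(struct m_can_classdev *cdev)
    {
            struct can_berr_counter bec;

            if (!(m_can_read(cdev, M_CAN_ECR) & ECR_RP))
                    return false;

            __m_can_get_berr_counter(cdev->net, &bec);
            return bec.rxerr == 127;
    }

When the predicate holds, the flag is acknowledged by writing IR_MRAF back to M_CAN_IR and dropped from the status word, exactly as the hunk below does.
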
+ */ + if (cdev->version <= 31 && irqstatus & IR_MRAF && + m_can_read(cdev, M_CAN_ECR) & ECR_RP) { + struct can_berr_counter bec; + + __m_can_get_berr_counter(dev, &bec); + if (bec.rxerr == 127) { + m_can_write(cdev, M_CAN_IR, IR_MRAF); + irqstatus &= ~IR_MRAF; + } + } + if (irqstatus & IR_ERR_STATE) - work_done += m_can_handle_state_errors(dev, psr); + work_done += m_can_handle_state_errors(dev); if (irqstatus & IR_ERR_BUS_30X) - work_done += m_can_handle_bus_errors(dev, irqstatus, psr); + work_done += m_can_handle_bus_errors(dev, irqstatus, + m_can_read(cdev, M_CAN_PSR)); + + if (irqstatus & IR_RF0N) { + rx_work_or_err = m_can_do_rx_poll(dev, (quota - work_done)); + if (rx_work_or_err < 0) + return rx_work_or_err; + + work_done += rx_work_or_err; + } +end: + return work_done; +} + +static int m_can_poll(struct napi_struct *napi, int quota) +{ + struct net_device *dev = napi->dev; + struct m_can_classdev *cdev = netdev_priv(dev); + int work_done; + u32 irqstatus; + + irqstatus = cdev->irqstatus | m_can_read(cdev, M_CAN_IR); - if (irqstatus & IR_RF0N) - work_done += m_can_do_rx_poll(dev, (quota - work_done)); + work_done = m_can_rx_handler(dev, quota, irqstatus); - if (work_done < quota) { + /* Don't re-enable interrupts if the driver had a fatal error + * (e.g., FIFO read failure). + */ + if (work_done >= 0 && work_done < quota) { napi_complete_done(napi, work_done); - m_can_enable_all_interrupts(priv); + m_can_enable_all_interrupts(cdev); } -end: return work_done; } -static void m_can_echo_tx_event(struct net_device *dev) +/* Echo tx skb and update net stats. Peripherals use rx-offload for + * echo. timestamp is used for peripherals to ensure correct ordering + * by rx-offload, and is ignored for non-peripherals. + */ +static unsigned int m_can_tx_update_stats(struct m_can_classdev *cdev, + unsigned int msg_mark, u32 timestamp) +{ + struct net_device *dev = cdev->net; + struct net_device_stats *stats = &dev->stats; + unsigned int frame_len; + + if (cdev->is_peripheral) + stats->tx_bytes += + can_rx_offload_get_echo_skb_queue_timestamp(&cdev->offload, + msg_mark, + timestamp, + &frame_len); + else + stats->tx_bytes += can_get_echo_skb(dev, msg_mark, &frame_len); + + stats->tx_packets++; + + return frame_len; +} + +static void m_can_finish_tx(struct m_can_classdev *cdev, int transmitted, + unsigned int transmitted_frame_len) +{ + unsigned long irqflags; + + netdev_completed_queue(cdev->net, transmitted, transmitted_frame_len); + + spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags); + if (cdev->tx_fifo_in_flight >= cdev->tx_fifo_size && transmitted > 0) + netif_wake_queue(cdev->net); + cdev->tx_fifo_in_flight -= transmitted; + spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags); +} + +static netdev_tx_t m_can_start_tx(struct m_can_classdev *cdev) +{ + unsigned long irqflags; + int tx_fifo_in_flight; + + spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags); + tx_fifo_in_flight = cdev->tx_fifo_in_flight + 1; + if (tx_fifo_in_flight >= cdev->tx_fifo_size) { + netif_stop_queue(cdev->net); + if (tx_fifo_in_flight > cdev->tx_fifo_size) { + netdev_err_once(cdev->net, "hard_xmit called while TX FIFO full\n"); + spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags); + return NETDEV_TX_BUSY; + } + } + cdev->tx_fifo_in_flight = tx_fifo_in_flight; + spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags); + + return NETDEV_TX_OK; +} + +static int m_can_echo_tx_event(struct net_device *dev) { u32 txe_count = 0; u32 m_can_txefs; u32 fgi = 0; + int ack_fgi = -1; int i = 
0; + int err = 0; unsigned int msg_mark; + int processed = 0; + unsigned int processed_frame_len = 0; - struct m_can_priv *priv = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; + struct m_can_classdev *cdev = netdev_priv(dev); /* read tx event fifo status */ - m_can_txefs = m_can_read(priv, M_CAN_TXEFS); + m_can_txefs = m_can_read(cdev, M_CAN_TXEFS); /* Get Tx Event fifo element count */ - txe_count = (m_can_txefs & TXEFS_EFFL_MASK) - >> TXEFS_EFFL_SHIFT; + txe_count = FIELD_GET(TXEFS_EFFL_MASK, m_can_txefs); + fgi = FIELD_GET(TXEFS_EFGI_MASK, m_can_txefs); /* Get and process all sent elements */ for (i = 0; i < txe_count; i++) { - /* retrieve get index */ - fgi = (m_can_read(priv, M_CAN_TXEFS) & TXEFS_EFGI_MASK) - >> TXEFS_EFGI_SHIFT; + u32 txe, timestamp = 0; + + /* get message marker, timestamp */ + err = m_can_txe_fifo_read(cdev, fgi, 4, &txe); + if (err) { + netdev_err(dev, "TXE FIFO read returned %d\n", err); + break; + } - /* get message marker */ - msg_mark = (m_can_txe_fifo_read(priv, fgi, 4) & - TX_EVENT_MM_MASK) >> TX_EVENT_MM_SHIFT; + msg_mark = FIELD_GET(TX_EVENT_MM_MASK, txe); + timestamp = FIELD_GET(TX_EVENT_TXTS_MASK, txe) << 16; - /* ack txe element */ - m_can_write(priv, M_CAN_TXEFA, (TXEFA_EFAI_MASK & - (fgi << TXEFA_EFAI_SHIFT))); + ack_fgi = fgi; + fgi = (++fgi >= cdev->mcfg[MRAM_TXE].num ? 0 : fgi); /* update stats */ - stats->tx_bytes += can_get_echo_skb(dev, msg_mark); - stats->tx_packets++; + processed_frame_len += m_can_tx_update_stats(cdev, msg_mark, + timestamp); + + ++processed; } + + if (ack_fgi != -1) + m_can_write(cdev, M_CAN_TXEFA, FIELD_PREP(TXEFA_EFAI_MASK, + ack_fgi)); + + m_can_finish_tx(cdev, processed, processed_frame_len); + + return err; } -static irqreturn_t m_can_isr(int irq, void *dev_id) +static void m_can_coalescing_update(struct m_can_classdev *cdev, u32 ir) { - struct net_device *dev = (struct net_device *)dev_id; - struct m_can_priv *priv = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; - u32 ir; + u32 new_interrupts = cdev->active_interrupts; + bool enable_rx_timer = false; + bool enable_tx_timer = false; + + if (!cdev->net->irq) + return; + + if (cdev->rx_coalesce_usecs_irq > 0 && (ir & (IR_RF0N | IR_RF0W))) { + enable_rx_timer = true; + new_interrupts &= ~IR_RF0N; + } + if (cdev->tx_coalesce_usecs_irq > 0 && (ir & (IR_TEFN | IR_TEFW))) { + enable_tx_timer = true; + new_interrupts &= ~IR_TEFN; + } + if (!enable_rx_timer && !hrtimer_active(&cdev->hrtimer)) + new_interrupts |= IR_RF0N; + if (!enable_tx_timer && !hrtimer_active(&cdev->hrtimer)) + new_interrupts |= IR_TEFN; + + m_can_interrupt_enable(cdev, new_interrupts); + if (enable_rx_timer | enable_tx_timer) + hrtimer_start(&cdev->hrtimer, cdev->irq_timer_wait, + HRTIMER_MODE_REL); +} - ir = m_can_read(priv, M_CAN_IR); +/* This interrupt handler is called either from the interrupt thread or a + * hrtimer. This has implications like cancelling a timer won't be possible + * blocking. + */ +static int m_can_interrupt_handler(struct m_can_classdev *cdev) +{ + struct net_device *dev = cdev->net; + u32 ir = 0, ir_read; + int ret; + + if (pm_runtime_suspended(cdev->dev)) + return IRQ_NONE; + + /* The m_can controller signals its interrupt status as a level, but + * depending in the integration the CPU may interpret the signal as + * edge-triggered (for example with m_can_pci). For these + * edge-triggered integrations, we must observe that IR is 0 at least + * once to be sure that the next interrupt will generate an edge. 
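For integrations where the line is latched as an edge (cdev->irq_edge_triggered, as with m_can_pci), the handler therefore keeps reading and acknowledging M_CAN_IR in the loop below until the register reads back zero; level-triggered integrations leave the loop after a single pass. The interrupt coalescing managed by m_can_coalescing_update() above is controlled through the standard ethtool parameters, but only peripheral devices with a wired interrupt get the coalescing ethtool_ops installed (see register_m_can_dev() further down). With the interface down, a configuration along the lines of

    ethtool -C can0 rx-frames-irq 16 rx-usecs-irq 250 tx-frames-irq 16 tx-usecs-irq 250

(interface name and values are placeholders and must pass the FIFO-size and rx-usecs-irq == tx-usecs-irq checks in m_can_set_coalesce()) lets the FIFO waterlevel interrupts and the hrtimer batch up events instead of taking one interrupt per frame.
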
+ */ + while ((ir_read = m_can_read(cdev, M_CAN_IR)) != 0) { + ir |= ir_read; + + /* ACK all irqs */ + m_can_write(cdev, M_CAN_IR, ir); + + if (!cdev->irq_edge_triggered) + break; + } + + m_can_coalescing_update(cdev, ir); if (!ir) return IRQ_NONE; - /* ACK all irqs */ - if (ir & IR_ALL_INT) - m_can_write(priv, M_CAN_IR, ir); + if (cdev->ops->clear_interrupts) + cdev->ops->clear_interrupts(cdev); /* schedule NAPI in case of * - rx IRQ * - state change IRQ * - bus error IRQ and bus error reporting */ - if ((ir & IR_RF0N) || (ir & IR_ERR_ALL_30X)) { - priv->irqstatus = ir; - m_can_disable_all_interrupts(priv); - napi_schedule(&priv->napi); + if (ir & (IR_RF0N | IR_RF0W | IR_ERR_ALL_30X)) { + cdev->irqstatus = ir; + if (!cdev->is_peripheral) { + m_can_disable_all_interrupts(cdev); + napi_schedule(&cdev->napi); + } else { + ret = m_can_rx_handler(dev, NAPI_POLL_WEIGHT, ir); + if (ret < 0) + return ret; + } } - if (priv->version == 30) { + if (cdev->version == 30) { if (ir & IR_TC) { /* Transmission Complete Interrupt*/ - stats->tx_bytes += can_get_echo_skb(dev, 0); - stats->tx_packets++; - can_led_event(dev, CAN_LED_EVENT_TX); - netif_wake_queue(dev); + u32 timestamp = 0; + unsigned int frame_len; + + if (cdev->is_peripheral) + timestamp = m_can_get_timestamp(cdev); + frame_len = m_can_tx_update_stats(cdev, 0, timestamp); + m_can_finish_tx(cdev, 1, frame_len); } } else { - if (ir & IR_TEFN) { + if (ir & (IR_TEFN | IR_TEFW)) { /* New TX FIFO Element arrived */ - m_can_echo_tx_event(dev); - can_led_event(dev, CAN_LED_EVENT_TX); - if (netif_queue_stopped(dev) && - !m_can_tx_fifo_full(priv)) - netif_wake_queue(dev); + ret = m_can_echo_tx_event(dev); + if (ret != 0) + return ret; } } + if (cdev->is_peripheral) + can_rx_offload_threaded_irq_finish(&cdev->offload); + return IRQ_HANDLED; } +static irqreturn_t m_can_isr(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct m_can_classdev *cdev = netdev_priv(dev); + int ret; + + ret = m_can_interrupt_handler(cdev); + if (ret < 0) { + m_can_disable_all_interrupts(cdev); + return IRQ_HANDLED; + } + + return ret; +} + +static enum hrtimer_restart m_can_coalescing_timer(struct hrtimer *timer) +{ + struct m_can_classdev *cdev = container_of(timer, struct m_can_classdev, hrtimer); + + if (cdev->can.state == CAN_STATE_BUS_OFF || + cdev->can.state == CAN_STATE_STOPPED) + return HRTIMER_NORESTART; + + irq_wake_thread(cdev->net->irq, cdev->net); + + return HRTIMER_NORESTART; +} + static const struct can_bittiming_const m_can_bittiming_const_30X = { .name = KBUILD_MODNAME, .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */ @@ -955,7 +1364,7 @@ static const struct can_bittiming_const m_can_bittiming_const_31X = { .name = KBUILD_MODNAME, .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */ .tseg1_max = 256, - .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ + .tseg2_min = 2, /* Time segment 2 = phase_seg2 */ .tseg2_max = 128, .sjw_max = 128, .brp_min = 1, @@ -975,11 +1384,32 @@ static const struct can_bittiming_const m_can_data_bittiming_const_31X = { .brp_inc = 1, }; +static int m_can_init_ram(struct m_can_classdev *cdev) +{ + int end, i, start; + int err = 0; + + /* initialize the entire Message RAM in use to avoid possible + * ECC/parity checksum errors when reading an uninitialized buffer + */ + start = cdev->mcfg[MRAM_SIDF].off; + end = cdev->mcfg[MRAM_TXB].off + + cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE; + + for (i = start; i < end; i += 4) { + err = m_can_fifo_write_no_off(cdev, i, 0x0); + if (err) + 
break; + } + + return err; +} + static int m_can_set_bittiming(struct net_device *dev) { - struct m_can_priv *priv = netdev_priv(dev); - const struct can_bittiming *bt = &priv->can.bittiming; - const struct can_bittiming *dbt = &priv->can.data_bittiming; + struct m_can_classdev *cdev = netdev_priv(dev); + const struct can_bittiming *bt = &cdev->can.bittiming; + const struct can_bittiming *dbt = &cdev->can.fd.data_bittiming; u16 brp, sjw, tseg1, tseg2; u32 reg_btp; @@ -987,11 +1417,13 @@ static int m_can_set_bittiming(struct net_device *dev) sjw = bt->sjw - 1; tseg1 = bt->prop_seg + bt->phase_seg1 - 1; tseg2 = bt->phase_seg2 - 1; - reg_btp = (brp << NBTP_NBRP_SHIFT) | (sjw << NBTP_NSJW_SHIFT) | - (tseg1 << NBTP_NTSEG1_SHIFT) | (tseg2 << NBTP_NTSEG2_SHIFT); - m_can_write(priv, M_CAN_NBTP, reg_btp); + reg_btp = FIELD_PREP(NBTP_NBRP_MASK, brp) | + FIELD_PREP(NBTP_NSJW_MASK, sjw) | + FIELD_PREP(NBTP_NTSEG1_MASK, tseg1) | + FIELD_PREP(NBTP_NTSEG2_MASK, tseg2); + m_can_write(cdev, M_CAN_NBTP, reg_btp); - if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { + if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) { reg_btp = 0; brp = dbt->brp - 1; sjw = dbt->sjw - 1; @@ -1013,8 +1445,8 @@ static int m_can_set_bittiming(struct net_device *dev) /* Equation based on Bosch's M_CAN User Manual's * Transmitter Delay Compensation Section */ - tdco = (priv->can.clock.freq / 1000) * - ssp / dbt->bitrate; + tdco = (cdev->can.clock.freq / 1000) * + ssp / dbt->bitrate; /* Max valid TDCO value is 127 */ if (tdco > 127) { @@ -1024,16 +1456,16 @@ static int m_can_set_bittiming(struct net_device *dev) } reg_btp |= DBTP_TDC; - m_can_write(priv, M_CAN_TDCR, - tdco << TDCR_TDCO_SHIFT); + m_can_write(cdev, M_CAN_TDCR, + FIELD_PREP(TDCR_TDCO_MASK, tdco)); } - reg_btp |= (brp << DBTP_DBRP_SHIFT) | - (sjw << DBTP_DSJW_SHIFT) | - (tseg1 << DBTP_DTSEG1_SHIFT) | - (tseg2 << DBTP_DTSEG2_SHIFT); + reg_btp |= FIELD_PREP(DBTP_DBRP_MASK, brp) | + FIELD_PREP(DBTP_DSJW_MASK, sjw) | + FIELD_PREP(DBTP_DTSEG1_MASK, tseg1) | + FIELD_PREP(DBTP_DTSEG2_MASK, tseg2); - m_can_write(priv, M_CAN_DBTP, reg_btp); + m_can_write(cdev, M_CAN_DBTP, reg_btp); } return 0; @@ -1047,132 +1479,190 @@ static int m_can_set_bittiming(struct net_device *dev) * - >= v3.1.x: TX FIFO is used * - configure mode * - setup bittiming + * - configure timestamp generation */ -static void m_can_chip_config(struct net_device *dev) +static int m_can_chip_config(struct net_device *dev) { - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); + u32 interrupts = IR_ALL_INT; u32 cccr, test; + int err; + + err = m_can_init_ram(cdev); + if (err) { + netdev_err(dev, "Message RAM configuration failed\n"); + return err; + } - m_can_config_endisable(priv, true); + /* Disable unused interrupts */ + interrupts &= ~(IR_ARA | IR_ELO | IR_DRX | IR_TEFF | IR_TFE | IR_TCF | + IR_HPM | IR_RF1F | IR_RF1W | IR_RF1N | IR_RF0F | + IR_TSW); + + err = m_can_config_enable(cdev); + if (err) + return err; /* RX Buffer/FIFO Element Size 64 bytes data field */ - m_can_write(priv, M_CAN_RXESC, M_CAN_RXESC_64BYTES); + m_can_write(cdev, M_CAN_RXESC, + FIELD_PREP(RXESC_RBDS_MASK, RXESC_64B) | + FIELD_PREP(RXESC_F1DS_MASK, RXESC_64B) | + FIELD_PREP(RXESC_F0DS_MASK, RXESC_64B)); /* Accept Non-matching Frames Into FIFO 0 */ - m_can_write(priv, M_CAN_GFC, 0x0); + m_can_write(cdev, M_CAN_GFC, 0x0); - if (priv->version == 30) { + if (cdev->version == 30) { /* only support one Tx Buffer currently */ - m_can_write(priv, M_CAN_TXBC, (1 << TXBC_NDTB_SHIFT) | - priv->mcfg[MRAM_TXB].off); + 
m_can_write(cdev, M_CAN_TXBC, FIELD_PREP(TXBC_NDTB_MASK, 1) | + cdev->mcfg[MRAM_TXB].off); } else { /* TX FIFO is used for newer IP Core versions */ - m_can_write(priv, M_CAN_TXBC, - (priv->mcfg[MRAM_TXB].num << TXBC_TFQS_SHIFT) | - (priv->mcfg[MRAM_TXB].off)); + m_can_write(cdev, M_CAN_TXBC, + FIELD_PREP(TXBC_TFQS_MASK, + cdev->mcfg[MRAM_TXB].num) | + cdev->mcfg[MRAM_TXB].off); } /* support 64 bytes payload */ - m_can_write(priv, M_CAN_TXESC, TXESC_TBDS_64BYTES); + m_can_write(cdev, M_CAN_TXESC, + FIELD_PREP(TXESC_TBDS_MASK, TXESC_TBDS_64B)); /* TX Event FIFO */ - if (priv->version == 30) { - m_can_write(priv, M_CAN_TXEFC, (1 << TXEFC_EFS_SHIFT) | - priv->mcfg[MRAM_TXE].off); + if (cdev->version == 30) { + m_can_write(cdev, M_CAN_TXEFC, + FIELD_PREP(TXEFC_EFS_MASK, 1) | + cdev->mcfg[MRAM_TXE].off); } else { /* Full TX Event FIFO is used */ - m_can_write(priv, M_CAN_TXEFC, - ((priv->mcfg[MRAM_TXE].num << TXEFC_EFS_SHIFT) - & TXEFC_EFS_MASK) | - priv->mcfg[MRAM_TXE].off); + m_can_write(cdev, M_CAN_TXEFC, + FIELD_PREP(TXEFC_EFWM_MASK, + cdev->tx_max_coalesced_frames_irq) | + FIELD_PREP(TXEFC_EFS_MASK, + cdev->mcfg[MRAM_TXE].num) | + cdev->mcfg[MRAM_TXE].off); } /* rx fifo configuration, blocking mode, fifo size 1 */ - m_can_write(priv, M_CAN_RXF0C, - (priv->mcfg[MRAM_RXF0].num << RXFC_FS_SHIFT) | - priv->mcfg[MRAM_RXF0].off); + m_can_write(cdev, M_CAN_RXF0C, + FIELD_PREP(RXFC_FWM_MASK, cdev->rx_max_coalesced_frames_irq) | + FIELD_PREP(RXFC_FS_MASK, cdev->mcfg[MRAM_RXF0].num) | + cdev->mcfg[MRAM_RXF0].off); - m_can_write(priv, M_CAN_RXF1C, - (priv->mcfg[MRAM_RXF1].num << RXFC_FS_SHIFT) | - priv->mcfg[MRAM_RXF1].off); + m_can_write(cdev, M_CAN_RXF1C, + FIELD_PREP(RXFC_FS_MASK, cdev->mcfg[MRAM_RXF1].num) | + cdev->mcfg[MRAM_RXF1].off); - cccr = m_can_read(priv, M_CAN_CCCR); - test = m_can_read(priv, M_CAN_TEST); + cccr = m_can_read(cdev, M_CAN_CCCR); + test = m_can_read(cdev, M_CAN_TEST); test &= ~TEST_LBCK; - if (priv->version == 30) { - /* Version 3.0.x */ + if (cdev->version == 30) { + /* Version 3.0.x */ - cccr &= ~(CCCR_TEST | CCCR_MON | - (CCCR_CMR_MASK << CCCR_CMR_SHIFT) | - (CCCR_CME_MASK << CCCR_CME_SHIFT)); + cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_DAR | + FIELD_PREP(CCCR_CMR_MASK, FIELD_MAX(CCCR_CMR_MASK)) | + FIELD_PREP(CCCR_CME_MASK, FIELD_MAX(CCCR_CME_MASK))); - if (priv->can.ctrlmode & CAN_CTRLMODE_FD) - cccr |= CCCR_CME_CANFD_BRS << CCCR_CME_SHIFT; + if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) + cccr |= FIELD_PREP(CCCR_CME_MASK, CCCR_CME_CANFD_BRS); } else { - /* Version 3.1.x or 3.2.x */ + /* Version 3.1.x or 3.2.x */ cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE | - CCCR_NISO); + CCCR_NISO | CCCR_DAR); /* Only 3.2.x has NISO Bit implemented */ - if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) + if (cdev->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) cccr |= CCCR_NISO; - if (priv->can.ctrlmode & CAN_CTRLMODE_FD) + if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) cccr |= (CCCR_BRSE | CCCR_FDOE); } /* Loopback Mode */ - if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { + if (cdev->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { cccr |= CCCR_TEST | CCCR_MON; test |= TEST_LBCK; } /* Enable Monitoring (all versions) */ - if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) + if (cdev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) cccr |= CCCR_MON; + /* Disable Auto Retransmission (all versions) */ + if (cdev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) + cccr |= CCCR_DAR; + /* Write config */ - m_can_write(priv, M_CAN_CCCR, cccr); - m_can_write(priv, M_CAN_TEST, test); + m_can_write(cdev, M_CAN_CCCR, cccr); + 
m_can_write(cdev, M_CAN_TEST, test); /* Enable interrupts */ - m_can_write(priv, M_CAN_IR, IR_ALL_INT); - if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) - if (priv->version == 30) - m_can_write(priv, M_CAN_IE, IR_ALL_INT & - ~(IR_ERR_LEC_30X)); + if (!(cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) { + if (cdev->version == 30) + interrupts &= ~(IR_ERR_LEC_30X); else - m_can_write(priv, M_CAN_IE, IR_ALL_INT & - ~(IR_ERR_LEC_31X)); - else - m_can_write(priv, M_CAN_IE, IR_ALL_INT); + interrupts &= ~(IR_ERR_LEC_31X); + } + cdev->active_interrupts = 0; + m_can_interrupt_enable(cdev, interrupts); /* route all interrupts to INT0 */ - m_can_write(priv, M_CAN_ILS, ILS_ALL_INT0); + m_can_write(cdev, M_CAN_ILS, ILS_ALL_INT0); /* set bittiming params */ m_can_set_bittiming(dev); - m_can_config_endisable(priv, false); + /* enable internal timestamp generation, with a prescaler of 16. The + * prescaler is applied to the nominal bit timing + */ + m_can_write(cdev, M_CAN_TSCC, + FIELD_PREP(TSCC_TCP_MASK, 0xf) | + FIELD_PREP(TSCC_TSS_MASK, TSCC_TSS_INTERNAL)); + + err = m_can_config_disable(cdev); + if (err) + return err; + + if (cdev->ops->init) + cdev->ops->init(cdev); + + return 0; } -static void m_can_start(struct net_device *dev) +static int m_can_start(struct net_device *dev) { - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); + int ret; /* basic m_can configuration */ - m_can_chip_config(dev); + ret = m_can_chip_config(dev); + if (ret) + return ret; + + netdev_queue_set_dql_min_limit(netdev_get_tx_queue(cdev->net, 0), + cdev->tx_max_coalesced_frames); - priv->can.state = CAN_STATE_ERROR_ACTIVE; + cdev->can.state = m_can_state_get_by_psr(cdev); - m_can_enable_all_interrupts(priv); + m_can_enable_all_interrupts(cdev); + + if (cdev->version > 30) + cdev->tx_fifo_putidx = FIELD_GET(TXFQS_TFQPI_MASK, + m_can_read(cdev, M_CAN_TXFQS)); + + ret = m_can_cccr_update_bits(cdev, CCCR_INIT, 0); + if (ret) + netdev_err(dev, "failed to enter normal mode\n"); + + return ret; } static int m_can_set_mode(struct net_device *dev, enum can_mode mode) { switch (mode) { case CAN_MODE_START: + m_can_clean(dev); m_can_start(dev); netif_wake_queue(dev); break; @@ -1188,22 +1678,19 @@ static int m_can_set_mode(struct net_device *dev, enum can_mode mode) * else it returns the release and step coded as: * return value = 10 * <release> + 1 * <step> */ -static int m_can_check_core_release(void __iomem *m_can_base) +static int m_can_check_core_release(struct m_can_classdev *cdev) { u32 crel_reg; u8 rel; u8 step; int res; - struct m_can_priv temp_priv = { - .base = m_can_base - }; /* Read Core Release Version and split into version number * Example: Version 3.2.1 => rel = 3; step = 2; substep = 1; */ - crel_reg = m_can_read(&temp_priv, M_CAN_CREL); - rel = (u8)((crel_reg & CREL_REL_MASK) >> CREL_REL_SHIFT); - step = (u8)((crel_reg & CREL_STEP_MASK) >> CREL_STEP_SHIFT); + crel_reg = m_can_read(cdev, M_CAN_CREL); + rel = (u8)FIELD_GET(CREL_REL_MASK, crel_reg); + step = (u8)FIELD_GET(CREL_STEP_MASK, crel_reg); if (rel == 3) { /* M_CAN v3.x.y: create return value */ @@ -1217,248 +1704,232 @@ static int m_can_check_core_release(void __iomem *m_can_base) } /* Selectable Non ISO support only in version 3.2.x - * This function checks if the bit is writable. + * Return 1 if the bit is writable, 0 if it is not, or negative on error. 
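When the bit turns out to be writable, CAN_CTRLMODE_FD_NON_ISO is added to ctrlmode_supported in the v3.2.x/v3.3.0 case of m_can_dev_setup() below, and non-ISO CAN FD framing can then be selected from userspace, for example (interface name and bitrates are placeholders):

    ip link set can0 type can bitrate 500000 dbitrate 2000000 fd on fd-non-iso on

On v3.0.x and v3.1.x cores the mode is not selectable; there it is pinned with can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO) instead.
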
*/ -static bool m_can_niso_supported(const struct m_can_priv *priv) +static int m_can_niso_supported(struct m_can_classdev *cdev) { - u32 cccr_reg, cccr_poll; - int niso_timeout; + int ret, niso; - m_can_config_endisable(priv, true); - cccr_reg = m_can_read(priv, M_CAN_CCCR); - cccr_reg |= CCCR_NISO; - m_can_write(priv, M_CAN_CCCR, cccr_reg); + ret = m_can_config_enable(cdev); + if (ret) + return ret; - niso_timeout = readl_poll_timeout((priv->base + M_CAN_CCCR), cccr_poll, - (cccr_poll == cccr_reg), 0, 10); + /* First try to set the NISO bit. */ + niso = m_can_cccr_update_bits(cdev, CCCR_NISO, CCCR_NISO); - /* Clear NISO */ - cccr_reg &= ~(CCCR_NISO); - m_can_write(priv, M_CAN_CCCR, cccr_reg); + /* Then clear the it again. */ + ret = m_can_cccr_update_bits(cdev, CCCR_NISO, 0); + if (ret) { + netdev_err(cdev->net, "failed to revert the NON-ISO bit in CCCR\n"); + return ret; + } - m_can_config_endisable(priv, false); + ret = m_can_config_disable(cdev); + if (ret) + return ret; - /* return false if time out (-ETIMEDOUT), else return true */ - return !niso_timeout; + return niso == 0; } -static int m_can_dev_setup(struct platform_device *pdev, struct net_device *dev, - void __iomem *addr) +static int m_can_dev_setup(struct m_can_classdev *cdev) { - struct m_can_priv *priv; - int m_can_version; + struct net_device *dev = cdev->net; + int m_can_version, err, niso; - m_can_version = m_can_check_core_release(addr); + m_can_version = m_can_check_core_release(cdev); /* return if unsupported version */ if (!m_can_version) { - dev_err(&pdev->dev, "Unsupported version number: %2d", - m_can_version); + netdev_err(cdev->net, "Unsupported version number: %2d", + m_can_version); return -EINVAL; } - priv = netdev_priv(dev); - netif_napi_add(dev, &priv->napi, m_can_poll, M_CAN_NAPI_WEIGHT); + /* Write the INIT bit, in case no hardware reset has happened before + * the probe (for example, it was observed that the Intel Elkhart Lake + * SoCs do not properly reset the CAN controllers on reboot) + */ + err = m_can_cccr_update_bits(cdev, CCCR_INIT, CCCR_INIT); + if (err) + return err; + + if (!cdev->is_peripheral) + netif_napi_add(dev, &cdev->napi, m_can_poll); /* Shared properties of all M_CAN versions */ - priv->version = m_can_version; - priv->dev = dev; - priv->base = addr; - priv->can.do_set_mode = m_can_set_mode; - priv->can.do_get_berr_counter = m_can_get_berr_counter; + cdev->version = m_can_version; + cdev->can.do_set_mode = m_can_set_mode; + cdev->can.do_get_berr_counter = m_can_get_berr_counter; /* Set M_CAN supported operations */ - priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | - CAN_CTRLMODE_LISTENONLY | - CAN_CTRLMODE_BERR_REPORTING | - CAN_CTRLMODE_FD; + cdev->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | + CAN_CTRLMODE_LISTENONLY | + CAN_CTRLMODE_BERR_REPORTING | + CAN_CTRLMODE_FD | + CAN_CTRLMODE_ONE_SHOT; /* Set properties depending on M_CAN version */ - switch (priv->version) { + switch (cdev->version) { case 30: /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.x */ - can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO); - priv->can.bittiming_const = &m_can_bittiming_const_30X; - priv->can.data_bittiming_const = - &m_can_data_bittiming_const_30X; + err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO); + if (err) + return err; + cdev->can.bittiming_const = &m_can_bittiming_const_30X; + cdev->can.fd.data_bittiming_const = &m_can_data_bittiming_const_30X; break; case 31: /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */ - can_set_static_ctrlmode(dev, 
CAN_CTRLMODE_FD_NON_ISO); - priv->can.bittiming_const = &m_can_bittiming_const_31X; - priv->can.data_bittiming_const = - &m_can_data_bittiming_const_31X; + err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO); + if (err) + return err; + cdev->can.bittiming_const = &m_can_bittiming_const_31X; + cdev->can.fd.data_bittiming_const = &m_can_data_bittiming_const_31X; break; case 32: - priv->can.bittiming_const = &m_can_bittiming_const_31X; - priv->can.data_bittiming_const = - &m_can_data_bittiming_const_31X; - priv->can.ctrlmode_supported |= (m_can_niso_supported(priv) - ? CAN_CTRLMODE_FD_NON_ISO - : 0); + case 33: + /* Support both MCAN version v3.2.x and v3.3.0 */ + cdev->can.bittiming_const = &m_can_bittiming_const_31X; + cdev->can.fd.data_bittiming_const = &m_can_data_bittiming_const_31X; + + niso = m_can_niso_supported(cdev); + if (niso < 0) + return niso; + if (niso) + cdev->can.ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO; break; default: - dev_err(&pdev->dev, "Unsupported version number: %2d", - priv->version); + netdev_err(cdev->net, "Unsupported version number: %2d", + cdev->version); return -EINVAL; } return 0; } -static int m_can_open(struct net_device *dev) -{ - struct m_can_priv *priv = netdev_priv(dev); - int err; - - err = m_can_clk_start(priv); - if (err) - return err; - - /* open the can device */ - err = open_candev(dev); - if (err) { - netdev_err(dev, "failed to open can device\n"); - goto exit_disable_clks; - } - - /* register interrupt handler */ - err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name, - dev); - if (err < 0) { - netdev_err(dev, "failed to request interrupt\n"); - goto exit_irq_fail; - } - - /* start the m_can controller */ - m_can_start(dev); - - can_led_event(dev, CAN_LED_EVENT_OPEN); - napi_enable(&priv->napi); - netif_start_queue(dev); - - return 0; - -exit_irq_fail: - close_candev(dev); -exit_disable_clks: - m_can_clk_stop(priv); - return err; -} - static void m_can_stop(struct net_device *dev) { - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); + int ret; /* disable all interrupts */ - m_can_disable_all_interrupts(priv); + m_can_disable_all_interrupts(cdev); + + /* Set init mode to disengage from the network */ + ret = m_can_cccr_update_bits(cdev, CCCR_INIT, CCCR_INIT); + if (ret) + netdev_err(dev, "failed to enter standby mode: %pe\n", + ERR_PTR(ret)); /* set the state as STOPPED */ - priv->can.state = CAN_STATE_STOPPED; + cdev->can.state = CAN_STATE_STOPPED; + + if (cdev->ops->deinit) { + ret = cdev->ops->deinit(cdev); + if (ret) + netdev_err(dev, "failed to deinitialize: %pe\n", + ERR_PTR(ret)); + } } static int m_can_close(struct net_device *dev) { - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); netif_stop_queue(dev); - napi_disable(&priv->napi); + m_can_stop(dev); - m_can_clk_stop(priv); - free_irq(dev->irq, dev); - close_candev(dev); - can_led_event(dev, CAN_LED_EVENT_STOP); + if (dev->irq) + free_irq(dev->irq, dev); - return 0; -} + m_can_clean(dev); -static int m_can_next_echo_skb_occupied(struct net_device *dev, int putidx) -{ - struct m_can_priv *priv = netdev_priv(dev); - /*get wrap around for loopback skb index */ - unsigned int wrap = priv->can.echo_skb_max; - int next_idx; + if (cdev->is_peripheral) { + destroy_workqueue(cdev->tx_wq); + cdev->tx_wq = NULL; + can_rx_offload_disable(&cdev->offload); + } else { + napi_disable(&cdev->napi); + } - /* calculate next index */ - next_idx = (++putidx >= wrap ? 
0 : putidx); + close_candev(dev); - /* check if occupied */ - return !!priv->can.echo_skb[next_idx]; + reset_control_assert(cdev->rst); + m_can_clk_stop(cdev); + phy_power_off(cdev->transceiver); + + return 0; } -static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, - struct net_device *dev) +static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev, + struct sk_buff *skb) { - struct m_can_priv *priv = netdev_priv(dev); struct canfd_frame *cf = (struct canfd_frame *)skb->data; - u32 id, cccr, fdflags; - int i; - int putidx; - - if (can_dropped_invalid_skb(dev, skb)) - return NETDEV_TX_OK; + u8 len_padded = DIV_ROUND_UP(cf->len, 4); + struct m_can_fifo_element fifo_element; + struct net_device *dev = cdev->net; + u32 cccr, fdflags; + int err; + u32 putidx; + unsigned int frame_len = can_skb_get_frame_len(skb); /* Generate ID field for TX buffer Element */ /* Common to all supported M_CAN versions */ if (cf->can_id & CAN_EFF_FLAG) { - id = cf->can_id & CAN_EFF_MASK; - id |= TX_BUF_XTD; + fifo_element.id = cf->can_id & CAN_EFF_MASK; + fifo_element.id |= TX_BUF_XTD; } else { - id = ((cf->can_id & CAN_SFF_MASK) << 18); + fifo_element.id = ((cf->can_id & CAN_SFF_MASK) << 18); } if (cf->can_id & CAN_RTR_FLAG) - id |= TX_BUF_RTR; + fifo_element.id |= TX_BUF_RTR; - if (priv->version == 30) { + if (cdev->version == 30) { netif_stop_queue(dev); - /* message ram configuration */ - m_can_fifo_write(priv, 0, M_CAN_FIFO_ID, id); - m_can_fifo_write(priv, 0, M_CAN_FIFO_DLC, - can_len2dlc(cf->len) << 16); + fifo_element.dlc = can_fd_len2dlc(cf->len) << 16; - for (i = 0; i < cf->len; i += 4) - m_can_fifo_write(priv, 0, - M_CAN_FIFO_DATA(i / 4), - *(u32 *)(cf->data + i)); + /* Write the frame ID, DLC, and payload to the FIFO element. */ + err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_ID, &fifo_element, 2); + if (err) + goto out_fail; - can_put_echo_skb(skb, dev, 0); + err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_DATA, + cf->data, len_padded); + if (err) + goto out_fail; - if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { - cccr = m_can_read(priv, M_CAN_CCCR); - cccr &= ~(CCCR_CMR_MASK << CCCR_CMR_SHIFT); + if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) { + cccr = m_can_read(cdev, M_CAN_CCCR); + cccr &= ~CCCR_CMR_MASK; if (can_is_canfd_skb(skb)) { if (cf->flags & CANFD_BRS) - cccr |= CCCR_CMR_CANFD_BRS << - CCCR_CMR_SHIFT; + cccr |= FIELD_PREP(CCCR_CMR_MASK, + CCCR_CMR_CANFD_BRS); else - cccr |= CCCR_CMR_CANFD << - CCCR_CMR_SHIFT; + cccr |= FIELD_PREP(CCCR_CMR_MASK, + CCCR_CMR_CANFD); } else { - cccr |= CCCR_CMR_CAN << CCCR_CMR_SHIFT; + cccr |= FIELD_PREP(CCCR_CMR_MASK, CCCR_CMR_CAN); } - m_can_write(priv, M_CAN_CCCR, cccr); + m_can_write(cdev, M_CAN_CCCR, cccr); } - m_can_write(priv, M_CAN_TXBTIE, 0x1); - m_can_write(priv, M_CAN_TXBAR, 0x1); + m_can_write(cdev, M_CAN_TXBTIE, 0x1); + + can_put_echo_skb(skb, dev, 0, frame_len); + + m_can_write(cdev, M_CAN_TXBAR, 0x1); /* End of xmit function for version 3.0.x */ } else { /* Transmit routine for version >= v3.1.x */ - /* Check if FIFO full */ - if (m_can_tx_fifo_full(priv)) { - /* This shouldn't happen */ - netif_stop_queue(dev); - netdev_warn(dev, - "TX queue active although FIFO is full."); - return NETDEV_TX_BUSY; - } - /* get put index for frame */ - putidx = ((m_can_read(priv, M_CAN_TXFQS) & TXFQS_TFQPI_MASK) - >> TXFQS_TFQPI_SHIFT); - /* Write ID Field to FIFO Element */ - m_can_fifo_write(priv, putidx, M_CAN_FIFO_ID, id); + putidx = cdev->tx_fifo_putidx; + + /* Construct DLC Field, with CAN-FD configuration. 
+ * Use the put index of the fifo as the message marker, + * used in the TX interrupt for sending the correct echo frame. + */ /* get CAN FD configuration of frame */ fdflags = 0; @@ -1468,344 +1939,742 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, fdflags |= TX_BUF_BRS; } - /* Construct DLC Field. Also contains CAN-FD configuration - * use put index of fifo as message marker - * it is used in TX interrupt for - * sending the correct echo frame - */ - m_can_fifo_write(priv, putidx, M_CAN_FIFO_DLC, - ((putidx << TX_BUF_MM_SHIFT) & - TX_BUF_MM_MASK) | - (can_len2dlc(cf->len) << 16) | - fdflags | TX_BUF_EFC); + fifo_element.dlc = FIELD_PREP(TX_BUF_MM_MASK, putidx) | + FIELD_PREP(TX_BUF_DLC_MASK, can_fd_len2dlc(cf->len)) | + fdflags | TX_BUF_EFC; + + memcpy_and_pad(fifo_element.data, CANFD_MAX_DLEN, &cf->data, + cf->len, 0); - for (i = 0; i < cf->len; i += 4) - m_can_fifo_write(priv, putidx, M_CAN_FIFO_DATA(i / 4), - *(u32 *)(cf->data + i)); + err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_ID, + &fifo_element, 2 + len_padded); + if (err) + goto out_fail; /* Push loopback echo. * Will be looped back on TX interrupt based on message marker */ - can_put_echo_skb(skb, dev, putidx); + can_put_echo_skb(skb, dev, putidx, frame_len); + + if (cdev->is_peripheral) { + /* Delay enabling TX FIFO element */ + cdev->tx_peripheral_submit |= BIT(putidx); + } else { + /* Enable TX FIFO element to start transfer */ + m_can_write(cdev, M_CAN_TXBAR, BIT(putidx)); + } + cdev->tx_fifo_putidx = (++cdev->tx_fifo_putidx >= cdev->can.echo_skb_max ? + 0 : cdev->tx_fifo_putidx); + } + + return NETDEV_TX_OK; + +out_fail: + netdev_err(dev, "FIFO write returned %d\n", err); + m_can_disable_all_interrupts(cdev); + return NETDEV_TX_BUSY; +} + +static void m_can_tx_submit(struct m_can_classdev *cdev) +{ + m_can_write(cdev, M_CAN_TXBAR, cdev->tx_peripheral_submit); + cdev->tx_peripheral_submit = 0; +} + +static void m_can_tx_work_queue(struct work_struct *ws) +{ + struct m_can_tx_op *op = container_of(ws, struct m_can_tx_op, work); + struct m_can_classdev *cdev = op->cdev; + struct sk_buff *skb = op->skb; + + op->skb = NULL; + m_can_tx_handler(cdev, skb); + if (op->submit) + m_can_tx_submit(cdev); +} + +static void m_can_tx_queue_skb(struct m_can_classdev *cdev, struct sk_buff *skb, + bool submit) +{ + cdev->tx_ops[cdev->next_tx_op].skb = skb; + cdev->tx_ops[cdev->next_tx_op].submit = submit; + queue_work(cdev->tx_wq, &cdev->tx_ops[cdev->next_tx_op].work); + + ++cdev->next_tx_op; + if (cdev->next_tx_op >= cdev->tx_fifo_size) + cdev->next_tx_op = 0; +} - /* Enable TX FIFO element to start transfer */ - m_can_write(priv, M_CAN_TXBAR, (1 << putidx)); +static netdev_tx_t m_can_start_peripheral_xmit(struct m_can_classdev *cdev, + struct sk_buff *skb) +{ + bool submit; - /* stop network queue if fifo full */ - if (m_can_tx_fifo_full(priv) || - m_can_next_echo_skb_occupied(dev, putidx)) - netif_stop_queue(dev); + ++cdev->nr_txs_without_submit; + if (cdev->nr_txs_without_submit >= cdev->tx_max_coalesced_frames || + !netdev_xmit_more()) { + cdev->nr_txs_without_submit = 0; + submit = true; + } else { + submit = false; } + m_can_tx_queue_skb(cdev, skb, submit); return NETDEV_TX_OK; } +static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct m_can_classdev *cdev = netdev_priv(dev); + unsigned int frame_len; + netdev_tx_t ret; + + if (can_dev_dropped_skb(dev, skb)) + return NETDEV_TX_OK; + + frame_len = can_skb_get_frame_len(skb); + + if (cdev->can.state == CAN_STATE_BUS_OFF) { + 
m_can_clean(cdev->net); + return NETDEV_TX_OK; + } + + ret = m_can_start_tx(cdev); + if (ret != NETDEV_TX_OK) + return ret; + + netdev_sent_queue(dev, frame_len); + + if (cdev->is_peripheral) + ret = m_can_start_peripheral_xmit(cdev, skb); + else + ret = m_can_tx_handler(cdev, skb); + + if (ret != NETDEV_TX_OK) + netdev_completed_queue(dev, 1, frame_len); + + return ret; +} + +static enum hrtimer_restart m_can_polling_timer(struct hrtimer *timer) +{ + struct m_can_classdev *cdev = container_of(timer, struct + m_can_classdev, hrtimer); + int ret; + + if (cdev->can.state == CAN_STATE_BUS_OFF || + cdev->can.state == CAN_STATE_STOPPED) + return HRTIMER_NORESTART; + + ret = m_can_interrupt_handler(cdev); + + /* On error or if napi is scheduled to read, stop the timer */ + if (ret < 0 || napi_is_scheduled(&cdev->napi)) + return HRTIMER_NORESTART; + + hrtimer_forward_now(timer, ms_to_ktime(HRTIMER_POLL_INTERVAL_MS)); + + return HRTIMER_RESTART; +} + +static int m_can_open(struct net_device *dev) +{ + struct m_can_classdev *cdev = netdev_priv(dev); + int err; + + err = phy_power_on(cdev->transceiver); + if (err) + return err; + + err = m_can_clk_start(cdev); + if (err) + goto out_phy_power_off; + + err = reset_control_deassert(cdev->rst); + if (err) + goto exit_disable_clks; + + /* open the can device */ + err = open_candev(dev); + if (err) { + netdev_err(dev, "failed to open can device\n"); + goto out_reset_control_assert; + } + + if (cdev->is_peripheral) + can_rx_offload_enable(&cdev->offload); + else + napi_enable(&cdev->napi); + + /* register interrupt handler */ + if (cdev->is_peripheral) { + cdev->tx_wq = alloc_ordered_workqueue("mcan_wq", + WQ_FREEZABLE | WQ_MEM_RECLAIM); + if (!cdev->tx_wq) { + err = -ENOMEM; + goto out_wq_fail; + } + + for (int i = 0; i != cdev->tx_fifo_size; ++i) { + cdev->tx_ops[i].cdev = cdev; + INIT_WORK(&cdev->tx_ops[i].work, m_can_tx_work_queue); + } + + err = request_threaded_irq(dev->irq, NULL, m_can_isr, + IRQF_ONESHOT, + dev->name, dev); + } else if (dev->irq) { + err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name, + dev); + } + + if (err < 0) { + netdev_err(dev, "failed to request interrupt\n"); + goto exit_irq_fail; + } + + /* start the m_can controller */ + err = m_can_start(dev); + if (err) + goto exit_start_fail; + + netif_start_queue(dev); + + return 0; + +exit_start_fail: + if (cdev->is_peripheral || dev->irq) + free_irq(dev->irq, dev); +exit_irq_fail: + if (cdev->is_peripheral) + destroy_workqueue(cdev->tx_wq); +out_wq_fail: + if (cdev->is_peripheral) + can_rx_offload_disable(&cdev->offload); + else + napi_disable(&cdev->napi); + close_candev(dev); +out_reset_control_assert: + reset_control_assert(cdev->rst); +exit_disable_clks: + m_can_clk_stop(cdev); +out_phy_power_off: + phy_power_off(cdev->transceiver); + return err; +} + static const struct net_device_ops m_can_netdev_ops = { .ndo_open = m_can_open, .ndo_stop = m_can_close, .ndo_start_xmit = m_can_start_xmit, - .ndo_change_mtu = can_change_mtu, }; -static int register_m_can_dev(struct net_device *dev) +static int m_can_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kec, + struct netlink_ext_ack *ext_ack) +{ + struct m_can_classdev *cdev = netdev_priv(dev); + + ec->rx_max_coalesced_frames_irq = cdev->rx_max_coalesced_frames_irq; + ec->rx_coalesce_usecs_irq = cdev->rx_coalesce_usecs_irq; + ec->tx_max_coalesced_frames = cdev->tx_max_coalesced_frames; + ec->tx_max_coalesced_frames_irq = cdev->tx_max_coalesced_frames_irq; + 
ec->tx_coalesce_usecs_irq = cdev->tx_coalesce_usecs_irq; + + return 0; +} + +static int m_can_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kec, + struct netlink_ext_ack *ext_ack) +{ + struct m_can_classdev *cdev = netdev_priv(dev); + + if (cdev->can.state != CAN_STATE_STOPPED) { + netdev_err(dev, "Device is in use, please shut it down first\n"); + return -EBUSY; + } + + if (ec->rx_max_coalesced_frames_irq > cdev->mcfg[MRAM_RXF0].num) { + netdev_err(dev, "rx-frames-irq %u greater than the RX FIFO %u\n", + ec->rx_max_coalesced_frames_irq, + cdev->mcfg[MRAM_RXF0].num); + return -EINVAL; + } + if ((ec->rx_max_coalesced_frames_irq == 0) != (ec->rx_coalesce_usecs_irq == 0)) { + netdev_err(dev, "rx-frames-irq and rx-usecs-irq can only be set together\n"); + return -EINVAL; + } + if (ec->tx_max_coalesced_frames_irq > cdev->mcfg[MRAM_TXE].num) { + netdev_err(dev, "tx-frames-irq %u greater than the TX event FIFO %u\n", + ec->tx_max_coalesced_frames_irq, + cdev->mcfg[MRAM_TXE].num); + return -EINVAL; + } + if (ec->tx_max_coalesced_frames_irq > cdev->mcfg[MRAM_TXB].num) { + netdev_err(dev, "tx-frames-irq %u greater than the TX FIFO %u\n", + ec->tx_max_coalesced_frames_irq, + cdev->mcfg[MRAM_TXB].num); + return -EINVAL; + } + if ((ec->tx_max_coalesced_frames_irq == 0) != (ec->tx_coalesce_usecs_irq == 0)) { + netdev_err(dev, "tx-frames-irq and tx-usecs-irq can only be set together\n"); + return -EINVAL; + } + if (ec->tx_max_coalesced_frames > cdev->mcfg[MRAM_TXE].num) { + netdev_err(dev, "tx-frames %u greater than the TX event FIFO %u\n", + ec->tx_max_coalesced_frames, + cdev->mcfg[MRAM_TXE].num); + return -EINVAL; + } + if (ec->tx_max_coalesced_frames > cdev->mcfg[MRAM_TXB].num) { + netdev_err(dev, "tx-frames %u greater than the TX FIFO %u\n", + ec->tx_max_coalesced_frames, + cdev->mcfg[MRAM_TXB].num); + return -EINVAL; + } + if (ec->rx_coalesce_usecs_irq != 0 && ec->tx_coalesce_usecs_irq != 0 && + ec->rx_coalesce_usecs_irq != ec->tx_coalesce_usecs_irq) { + netdev_err(dev, "rx-usecs-irq %u needs to be equal to tx-usecs-irq %u if both are enabled\n", + ec->rx_coalesce_usecs_irq, + ec->tx_coalesce_usecs_irq); + return -EINVAL; + } + + cdev->rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq; + cdev->rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq; + cdev->tx_max_coalesced_frames = ec->tx_max_coalesced_frames; + cdev->tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq; + cdev->tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq; + + if (cdev->rx_coalesce_usecs_irq) + cdev->irq_timer_wait = us_to_ktime(cdev->rx_coalesce_usecs_irq); + else + cdev->irq_timer_wait = us_to_ktime(cdev->tx_coalesce_usecs_irq); + + return 0; +} + +static void m_can_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct m_can_classdev *cdev = netdev_priv(dev); + + wol->supported = device_can_wakeup(cdev->dev) ? WAKE_PHY : 0; + wol->wolopts = device_may_wakeup(cdev->dev) ? 
WAKE_PHY : 0; +} + +static int m_can_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct m_can_classdev *cdev = netdev_priv(dev); + bool wol_enable = !!(wol->wolopts & WAKE_PHY); + int ret; + + if (wol->wolopts & ~WAKE_PHY) + return -EINVAL; + + if (wol_enable == device_may_wakeup(cdev->dev)) + return 0; + + ret = device_set_wakeup_enable(cdev->dev, wol_enable); + if (ret) { + netdev_err(cdev->net, "Failed to set wakeup enable %pE\n", + ERR_PTR(ret)); + return ret; + } + + if (!IS_ERR_OR_NULL(cdev->pinctrl_state_wakeup)) { + if (wol_enable) + ret = pinctrl_select_state(cdev->pinctrl, cdev->pinctrl_state_wakeup); + else + ret = pinctrl_pm_select_default_state(cdev->dev); + + if (ret) { + netdev_err(cdev->net, "Failed to select pinctrl state %pE\n", + ERR_PTR(ret)); + goto err_wakeup_enable; + } + } + + return 0; + +err_wakeup_enable: + /* Revert wakeup enable */ + device_set_wakeup_enable(cdev->dev, !wol_enable); + + return ret; +} + +static const struct ethtool_ops m_can_ethtool_ops_coalescing = { + .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS_IRQ | + ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ | + ETHTOOL_COALESCE_TX_USECS_IRQ | + ETHTOOL_COALESCE_TX_MAX_FRAMES | + ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ, + .get_ts_info = ethtool_op_get_ts_info, + .get_coalesce = m_can_get_coalesce, + .set_coalesce = m_can_set_coalesce, + .get_wol = m_can_get_wol, + .set_wol = m_can_set_wol, +}; + +static const struct ethtool_ops m_can_ethtool_ops = { + .get_ts_info = ethtool_op_get_ts_info, + .get_wol = m_can_get_wol, + .set_wol = m_can_set_wol, +}; + +static int register_m_can_dev(struct m_can_classdev *cdev) { + struct net_device *dev = cdev->net; + dev->flags |= IFF_ECHO; /* we support local echo */ dev->netdev_ops = &m_can_netdev_ops; + if (dev->irq && cdev->is_peripheral) + dev->ethtool_ops = &m_can_ethtool_ops_coalescing; + else + dev->ethtool_ops = &m_can_ethtool_ops; return register_candev(dev); } -static void m_can_init_ram(struct m_can_priv *priv) +int m_can_check_mram_cfg(struct m_can_classdev *cdev, u32 mram_max_size) { - int end, i, start; + u32 total_size; - /* initialize the entire Message RAM in use to avoid possible - * ECC/parity checksum errors when reading an uninitialized buffer - */ - start = priv->mcfg[MRAM_SIDF].off; - end = priv->mcfg[MRAM_TXB].off + - priv->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE; - for (i = start; i < end; i += 4) - writel(0x0, priv->mram_base + i); + total_size = cdev->mcfg[MRAM_TXB].off - cdev->mcfg[MRAM_SIDF].off + + cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE; + if (total_size > mram_max_size) { + netdev_err(cdev->net, "Total size of mram config(%u) exceeds mram(%u)\n", + total_size, mram_max_size); + return -EINVAL; + } + + return 0; } +EXPORT_SYMBOL_GPL(m_can_check_mram_cfg); -static void m_can_of_parse_mram(struct m_can_priv *priv, +static void m_can_of_parse_mram(struct m_can_classdev *cdev, const u32 *mram_config_vals) { - priv->mcfg[MRAM_SIDF].off = mram_config_vals[0]; - priv->mcfg[MRAM_SIDF].num = mram_config_vals[1]; - priv->mcfg[MRAM_XIDF].off = priv->mcfg[MRAM_SIDF].off + - priv->mcfg[MRAM_SIDF].num * SIDF_ELEMENT_SIZE; - priv->mcfg[MRAM_XIDF].num = mram_config_vals[2]; - priv->mcfg[MRAM_RXF0].off = priv->mcfg[MRAM_XIDF].off + - priv->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE; - priv->mcfg[MRAM_RXF0].num = mram_config_vals[3] & - (RXFC_FS_MASK >> RXFC_FS_SHIFT); - priv->mcfg[MRAM_RXF1].off = priv->mcfg[MRAM_RXF0].off + - priv->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE; - priv->mcfg[MRAM_RXF1].num = mram_config_vals[4] & - (RXFC_FS_MASK 
>> RXFC_FS_SHIFT); - priv->mcfg[MRAM_RXB].off = priv->mcfg[MRAM_RXF1].off + - priv->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE; - priv->mcfg[MRAM_RXB].num = mram_config_vals[5]; - priv->mcfg[MRAM_TXE].off = priv->mcfg[MRAM_RXB].off + - priv->mcfg[MRAM_RXB].num * RXB_ELEMENT_SIZE; - priv->mcfg[MRAM_TXE].num = mram_config_vals[6]; - priv->mcfg[MRAM_TXB].off = priv->mcfg[MRAM_TXE].off + - priv->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE; - priv->mcfg[MRAM_TXB].num = mram_config_vals[7] & - (TXBC_NDTB_MASK >> TXBC_NDTB_SHIFT); - - dev_dbg(priv->device, - "mram_base %p sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n", - priv->mram_base, - priv->mcfg[MRAM_SIDF].off, priv->mcfg[MRAM_SIDF].num, - priv->mcfg[MRAM_XIDF].off, priv->mcfg[MRAM_XIDF].num, - priv->mcfg[MRAM_RXF0].off, priv->mcfg[MRAM_RXF0].num, - priv->mcfg[MRAM_RXF1].off, priv->mcfg[MRAM_RXF1].num, - priv->mcfg[MRAM_RXB].off, priv->mcfg[MRAM_RXB].num, - priv->mcfg[MRAM_TXE].off, priv->mcfg[MRAM_TXE].num, - priv->mcfg[MRAM_TXB].off, priv->mcfg[MRAM_TXB].num); - - m_can_init_ram(priv); -} - -static int m_can_plat_probe(struct platform_device *pdev) -{ - struct net_device *dev; - struct m_can_priv *priv; - struct resource *res; - void __iomem *addr; - void __iomem *mram_addr; - struct clk *hclk, *cclk; - int irq, ret; - struct device_node *np; - u32 mram_config_vals[MRAM_CFG_LEN]; - u32 tx_fifo_size; + cdev->mcfg[MRAM_SIDF].off = mram_config_vals[0]; + cdev->mcfg[MRAM_SIDF].num = mram_config_vals[1]; + cdev->mcfg[MRAM_XIDF].off = cdev->mcfg[MRAM_SIDF].off + + cdev->mcfg[MRAM_SIDF].num * SIDF_ELEMENT_SIZE; + cdev->mcfg[MRAM_XIDF].num = mram_config_vals[2]; + cdev->mcfg[MRAM_RXF0].off = cdev->mcfg[MRAM_XIDF].off + + cdev->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE; + cdev->mcfg[MRAM_RXF0].num = mram_config_vals[3] & + FIELD_MAX(RXFC_FS_MASK); + cdev->mcfg[MRAM_RXF1].off = cdev->mcfg[MRAM_RXF0].off + + cdev->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE; + cdev->mcfg[MRAM_RXF1].num = mram_config_vals[4] & + FIELD_MAX(RXFC_FS_MASK); + cdev->mcfg[MRAM_RXB].off = cdev->mcfg[MRAM_RXF1].off + + cdev->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE; + cdev->mcfg[MRAM_RXB].num = mram_config_vals[5]; + cdev->mcfg[MRAM_TXE].off = cdev->mcfg[MRAM_RXB].off + + cdev->mcfg[MRAM_RXB].num * RXB_ELEMENT_SIZE; + cdev->mcfg[MRAM_TXE].num = mram_config_vals[6]; + cdev->mcfg[MRAM_TXB].off = cdev->mcfg[MRAM_TXE].off + + cdev->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE; + cdev->mcfg[MRAM_TXB].num = mram_config_vals[7] & + FIELD_MAX(TXBC_NDTB_MASK); + + netdev_dbg(cdev->net, + "sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n", + cdev->mcfg[MRAM_SIDF].off, cdev->mcfg[MRAM_SIDF].num, + cdev->mcfg[MRAM_XIDF].off, cdev->mcfg[MRAM_XIDF].num, + cdev->mcfg[MRAM_RXF0].off, cdev->mcfg[MRAM_RXF0].num, + cdev->mcfg[MRAM_RXF1].off, cdev->mcfg[MRAM_RXF1].num, + cdev->mcfg[MRAM_RXB].off, cdev->mcfg[MRAM_RXB].num, + cdev->mcfg[MRAM_TXE].off, cdev->mcfg[MRAM_TXE].num, + cdev->mcfg[MRAM_TXB].off, cdev->mcfg[MRAM_TXB].num); +} - np = pdev->dev.of_node; +int m_can_class_get_clocks(struct m_can_classdev *cdev) +{ + int ret = 0; - hclk = devm_clk_get(&pdev->dev, "hclk"); - cclk = devm_clk_get(&pdev->dev, "cclk"); + cdev->hclk = devm_clk_get(cdev->dev, "hclk"); + cdev->cclk = devm_clk_get(cdev->dev, "cclk"); - if (IS_ERR(hclk) || IS_ERR(cclk)) { - dev_err(&pdev->dev, "no clock found\n"); + if (IS_ERR(cdev->hclk) || IS_ERR(cdev->cclk)) { + netdev_err(cdev->net, "no clock found\n"); ret = -ENODEV; - goto failed_ret; } - res = 
platform_get_resource_byname(pdev, IORESOURCE_MEM, "m_can"); - addr = devm_ioremap_resource(&pdev->dev, res); - irq = platform_get_irq_byname(pdev, "int0"); + return ret; +} +EXPORT_SYMBOL_GPL(m_can_class_get_clocks); - if (IS_ERR(addr) || irq < 0) { - ret = -EINVAL; - goto failed_ret; - } +static bool m_can_class_wakeup_pinctrl_enabled(struct m_can_classdev *class_dev) +{ + return device_may_wakeup(class_dev->dev) && class_dev->pinctrl_state_wakeup; +} - /* message ram could be shared */ - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram"); - if (!res) { - ret = -ENODEV; - goto failed_ret; +static int m_can_class_parse_pinctrl(struct m_can_classdev *class_dev) +{ + struct device *dev = class_dev->dev; + int ret; + + class_dev->pinctrl = devm_pinctrl_get(dev); + if (IS_ERR(class_dev->pinctrl)) { + ret = PTR_ERR(class_dev->pinctrl); + class_dev->pinctrl = NULL; + + if (ret == -ENODEV) + return 0; + + return dev_err_probe(dev, ret, "Failed to get pinctrl\n"); } - mram_addr = devm_ioremap(&pdev->dev, res->start, resource_size(res)); - if (!mram_addr) { - ret = -ENOMEM; - goto failed_ret; + class_dev->pinctrl_state_wakeup = + pinctrl_lookup_state(class_dev->pinctrl, "wakeup"); + if (IS_ERR(class_dev->pinctrl_state_wakeup)) { + ret = PTR_ERR(class_dev->pinctrl_state_wakeup); + class_dev->pinctrl_state_wakeup = NULL; + + if (ret == -ENODEV) + return 0; + + return dev_err_probe(dev, ret, "Failed to lookup pinctrl wakeup state\n"); } - /* get message ram configuration */ - ret = of_property_read_u32_array(np, "bosch,mram-cfg", - mram_config_vals, - sizeof(mram_config_vals) / 4); + return 0; +} + +struct m_can_classdev *m_can_class_allocate_dev(struct device *dev, + int sizeof_priv) +{ + struct m_can_classdev *class_dev = NULL; + u32 mram_config_vals[MRAM_CFG_LEN]; + struct net_device *net_dev; + u32 tx_fifo_size; + int ret; + + ret = fwnode_property_read_u32_array(dev_fwnode(dev), + "bosch,mram-cfg", + mram_config_vals, + sizeof(mram_config_vals) / 4); if (ret) { - dev_err(&pdev->dev, "Could not get Message RAM configuration."); - goto failed_ret; + dev_err(dev, "Could not get Message RAM configuration."); + return ERR_PTR(ret); } + if (dev->of_node && of_property_read_bool(dev->of_node, "wakeup-source")) + device_set_wakeup_capable(dev, true); + /* Get TX FIFO size * Defines the total amount of echo buffers for loopback */ tx_fifo_size = mram_config_vals[7]; /* allocate the m_can device */ - dev = alloc_candev(sizeof(*priv), tx_fifo_size); - if (!dev) { - ret = -ENOMEM; - goto failed_ret; + net_dev = alloc_candev(sizeof_priv, tx_fifo_size); + if (!net_dev) { + dev_err(dev, "Failed to allocate CAN device"); + return ERR_PTR(-ENOMEM); } - priv = netdev_priv(dev); - dev->irq = irq; - priv->device = &pdev->dev; - priv->hclk = hclk; - priv->cclk = cclk; - priv->can.clock.freq = clk_get_rate(cclk); - priv->mram_base = mram_addr; + class_dev = netdev_priv(net_dev); + class_dev->net = net_dev; + class_dev->dev = dev; + SET_NETDEV_DEV(net_dev, dev); - platform_set_drvdata(pdev, dev); - SET_NETDEV_DEV(dev, &pdev->dev); + m_can_of_parse_mram(class_dev, mram_config_vals); + spin_lock_init(&class_dev->tx_handling_spinlock); - /* Enable clocks. 
Necessary to read Core Release in order to determine - * M_CAN version - */ - pm_runtime_enable(&pdev->dev); - ret = m_can_clk_start(priv); - if (ret) - goto pm_runtime_fail; - - ret = m_can_dev_setup(pdev, dev, addr); + ret = m_can_class_parse_pinctrl(class_dev); if (ret) - goto clk_disable; - - ret = register_m_can_dev(dev); - if (ret) { - dev_err(&pdev->dev, "registering %s failed (err=%d)\n", - KBUILD_MODNAME, ret); - goto clk_disable; - } - - m_can_of_parse_mram(priv, mram_config_vals); - - devm_can_led_init(dev); + goto err_free_candev; - of_can_transceiver(dev); + return class_dev; - dev_info(&pdev->dev, "%s device registered (irq=%d, version=%d)\n", - KBUILD_MODNAME, dev->irq, priv->version); - - /* Probe finished - * Stop clocks. They will be reactivated once the M_CAN device is opened - */ -clk_disable: - m_can_clk_stop(priv); -pm_runtime_fail: - if (ret) { - pm_runtime_disable(&pdev->dev); - free_candev(dev); - } -failed_ret: - return ret; +err_free_candev: + free_candev(net_dev); + return ERR_PTR(ret); } +EXPORT_SYMBOL_GPL(m_can_class_allocate_dev); -static __maybe_unused int m_can_suspend(struct device *dev) +void m_can_class_free_dev(struct net_device *net) { - struct net_device *ndev = dev_get_drvdata(dev); - struct m_can_priv *priv = netdev_priv(ndev); + free_candev(net); +} +EXPORT_SYMBOL_GPL(m_can_class_free_dev); - if (netif_running(ndev)) { - netif_stop_queue(ndev); - netif_device_detach(ndev); - m_can_stop(ndev); - m_can_clk_stop(priv); +int m_can_class_register(struct m_can_classdev *cdev) +{ + int ret; + + cdev->tx_fifo_size = max(1, min(cdev->mcfg[MRAM_TXB].num, + cdev->mcfg[MRAM_TXE].num)); + if (cdev->is_peripheral) { + cdev->tx_ops = + devm_kzalloc(cdev->dev, + cdev->tx_fifo_size * sizeof(*cdev->tx_ops), + GFP_KERNEL); + if (!cdev->tx_ops) + return -ENOMEM; } - pinctrl_pm_select_sleep_state(dev); + cdev->rst = devm_reset_control_get_optional_shared(cdev->dev, NULL); + if (IS_ERR(cdev->rst)) + return dev_err_probe(cdev->dev, PTR_ERR(cdev->rst), + "Failed to get reset line\n"); - priv->can.state = CAN_STATE_SLEEPING; + ret = m_can_clk_start(cdev); + if (ret) + return ret; - return 0; -} + ret = reset_control_deassert(cdev->rst); + if (ret) + goto clk_disable; -static __maybe_unused int m_can_resume(struct device *dev) -{ - struct net_device *ndev = dev_get_drvdata(dev); - struct m_can_priv *priv = netdev_priv(ndev); + if (cdev->is_peripheral) { + ret = can_rx_offload_add_manual(cdev->net, &cdev->offload, + NAPI_POLL_WEIGHT); + if (ret) + goto out_reset_control_assert; + } - pinctrl_pm_select_default_state(dev); + if (!cdev->net->irq) { + netdev_dbg(cdev->net, "Polling enabled, initialize hrtimer"); + hrtimer_setup(&cdev->hrtimer, m_can_polling_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL_PINNED); + } else { + hrtimer_setup(&cdev->hrtimer, m_can_coalescing_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL); + } - priv->can.state = CAN_STATE_ERROR_ACTIVE; + ret = m_can_dev_setup(cdev); + if (ret) + goto rx_offload_del; - if (netif_running(ndev)) { - int ret; + ret = register_m_can_dev(cdev); + if (ret) { + netdev_err(cdev->net, "registering %s failed (err=%d)\n", + cdev->net->name, ret); + goto rx_offload_del; + } - ret = m_can_clk_start(priv); - if (ret) - return ret; + of_can_transceiver(cdev->net); - m_can_init_ram(priv); - m_can_start(ndev); - netif_device_attach(ndev); - netif_start_queue(ndev); - } + netdev_info(cdev->net, "device registered (irq=%d, version=%d)\n", + cdev->net->irq, cdev->version); + + /* Probe finished + * Assert reset and stop clocks. 
+ * They will be reactivated once the M_CAN device is opened + */ + reset_control_assert(cdev->rst); + m_can_clk_stop(cdev); return 0; + +rx_offload_del: + if (cdev->is_peripheral) + can_rx_offload_del(&cdev->offload); +out_reset_control_assert: + reset_control_assert(cdev->rst); +clk_disable: + m_can_clk_stop(cdev); + + return ret; } +EXPORT_SYMBOL_GPL(m_can_class_register); -static void unregister_m_can_dev(struct net_device *dev) +void m_can_class_unregister(struct m_can_classdev *cdev) { - unregister_candev(dev); + unregister_candev(cdev->net); + if (cdev->is_peripheral) + can_rx_offload_del(&cdev->offload); } +EXPORT_SYMBOL_GPL(m_can_class_unregister); -static int m_can_plat_remove(struct platform_device *pdev) +int m_can_class_suspend(struct device *dev) { - struct net_device *dev = platform_get_drvdata(pdev); + struct m_can_classdev *cdev = dev_get_drvdata(dev); + struct net_device *ndev = cdev->net; + int ret = 0; - unregister_m_can_dev(dev); + if (netif_running(ndev)) { + netif_stop_queue(ndev); + netif_device_detach(ndev); - pm_runtime_disable(&pdev->dev); + /* leave the chip running with rx interrupt enabled if it is + * used as a wake-up source. Coalescing needs to be reset then, + * the timer is cancelled here, interrupts are done in resume. + */ + if (cdev->pm_wake_source) { + hrtimer_cancel(&cdev->hrtimer); + m_can_write(cdev, M_CAN_IE, IR_RF0N); + + if (cdev->ops->deinit) + ret = cdev->ops->deinit(cdev); + } else { + m_can_stop(ndev); + } - platform_set_drvdata(pdev, NULL); + m_can_clk_stop(cdev); + cdev->can.state = CAN_STATE_SLEEPING; + } - free_candev(dev); + if (!m_can_class_wakeup_pinctrl_enabled(cdev)) + pinctrl_pm_select_sleep_state(dev); - return 0; + return ret; } +EXPORT_SYMBOL_GPL(m_can_class_suspend); -static int __maybe_unused m_can_runtime_suspend(struct device *dev) +int m_can_class_resume(struct device *dev) { - struct net_device *ndev = dev_get_drvdata(dev); - struct m_can_priv *priv = netdev_priv(ndev); + struct m_can_classdev *cdev = dev_get_drvdata(dev); + struct net_device *ndev = cdev->net; + int ret = 0; - clk_disable_unprepare(priv->cclk); - clk_disable_unprepare(priv->hclk); + if (!m_can_class_wakeup_pinctrl_enabled(cdev)) + pinctrl_pm_select_default_state(dev); - return 0; -} - -static int __maybe_unused m_can_runtime_resume(struct device *dev) -{ - struct net_device *ndev = dev_get_drvdata(dev); - struct m_can_priv *priv = netdev_priv(ndev); - int err; + if (netif_running(ndev)) { + ret = m_can_clk_start(cdev); + if (ret) + return ret; - err = clk_prepare_enable(priv->hclk); - if (err) - return err; + if (cdev->pm_wake_source) { + /* Restore active interrupts but disable coalescing as + * we may have missed important waterlevel interrupts + * between suspend and resume. Timers are already + * stopped in suspend. Here we enable all interrupts + * again. 
+ */ + cdev->active_interrupts |= IR_RF0N | IR_TEFN; - err = clk_prepare_enable(priv->cclk); - if (err) - clk_disable_unprepare(priv->hclk); + if (cdev->ops->init) + ret = cdev->ops->init(cdev); - return err; -} + cdev->can.state = m_can_state_get_by_psr(cdev); -static const struct dev_pm_ops m_can_pmops = { - SET_RUNTIME_PM_OPS(m_can_runtime_suspend, - m_can_runtime_resume, NULL) - SET_SYSTEM_SLEEP_PM_OPS(m_can_suspend, m_can_resume) -}; + m_can_write(cdev, M_CAN_IE, cdev->active_interrupts); + } else { + ret = m_can_start(ndev); + if (ret) { + m_can_clk_stop(cdev); + return ret; + } + } -static const struct of_device_id m_can_of_table[] = { - { .compatible = "bosch,m_can", .data = NULL }, - { /* sentinel */ }, -}; -MODULE_DEVICE_TABLE(of, m_can_of_table); - -static struct platform_driver m_can_plat_driver = { - .driver = { - .name = KBUILD_MODNAME, - .of_match_table = m_can_of_table, - .pm = &m_can_pmops, - }, - .probe = m_can_plat_probe, - .remove = m_can_plat_remove, -}; + netif_device_attach(ndev); + netif_start_queue(ndev); + } -module_platform_driver(m_can_plat_driver); + return ret; +} +EXPORT_SYMBOL_GPL(m_can_class_resume); -MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>"); +MODULE_AUTHOR("Dong Aisheng <aisheng.dong@nxp.com>"); +MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>"); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("CAN bus driver for Bosch M_CAN controller"); diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h new file mode 100644 index 000000000000..4743342b2fba --- /dev/null +++ b/drivers/net/can/m_can/m_can.h @@ -0,0 +1,146 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* CAN bus driver for Bosch M_CAN controller + * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/ + */ + +#ifndef _CAN_M_CAN_H_ +#define _CAN_M_CAN_H_ + +#include <linux/can/core.h> +#include <linux/can/dev.h> +#include <linux/can/rx-offload.h> +#include <linux/clk.h> +#include <linux/completion.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/freezer.h> +#include <linux/hrtimer.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/of.h> +#include <linux/phy/phy.h> +#include <linux/pinctrl/consumer.h> +#include <linux/pm_runtime.h> +#include <linux/slab.h> +#include <linux/uaccess.h> + +/* m_can lec values */ +enum m_can_lec_type { + LEC_NO_ERROR = 0, + LEC_STUFF_ERROR, + LEC_FORM_ERROR, + LEC_ACK_ERROR, + LEC_BIT1_ERROR, + LEC_BIT0_ERROR, + LEC_CRC_ERROR, + LEC_NO_CHANGE, +}; + +enum m_can_mram_cfg { + MRAM_SIDF = 0, + MRAM_XIDF, + MRAM_RXF0, + MRAM_RXF1, + MRAM_RXB, + MRAM_TXE, + MRAM_TXB, + MRAM_CFG_NUM, +}; + +/* address offset and element number for each FIFO/Buffer in the Message RAM */ +struct mram_cfg { + u16 off; + u8 num; +}; + +struct m_can_classdev; +struct m_can_ops { + /* Device specific call backs */ + int (*clear_interrupts)(struct m_can_classdev *cdev); + u32 (*read_reg)(struct m_can_classdev *cdev, int reg); + int (*write_reg)(struct m_can_classdev *cdev, int reg, int val); + int (*read_fifo)(struct m_can_classdev *cdev, int addr_offset, void *val, size_t val_count); + int (*write_fifo)(struct m_can_classdev *cdev, int addr_offset, + const void *val, size_t val_count); + int (*init)(struct m_can_classdev *cdev); + int (*deinit)(struct m_can_classdev *cdev); +}; + +struct m_can_tx_op { + struct m_can_classdev *cdev; + struct work_struct work; + struct sk_buff 
*skb; + bool submit; +}; + +struct m_can_classdev { + struct can_priv can; + struct can_rx_offload offload; + struct napi_struct napi; + struct net_device *net; + struct device *dev; + struct clk *hclk; + struct clk *cclk; + struct reset_control *rst; + + struct workqueue_struct *tx_wq; + struct phy *transceiver; + + ktime_t irq_timer_wait; + + const struct m_can_ops *ops; + + int version; + u32 irqstatus; + + int pm_clock_support; + int pm_wake_source; + int is_peripheral; + bool irq_edge_triggered; + + // Cached M_CAN_IE register content + u32 active_interrupts; + u32 rx_max_coalesced_frames_irq; + u32 rx_coalesce_usecs_irq; + u32 tx_max_coalesced_frames; + u32 tx_max_coalesced_frames_irq; + u32 tx_coalesce_usecs_irq; + + // Store this internally to avoid fetch delays on peripheral chips + u32 tx_fifo_putidx; + + /* Protects shared state between start_xmit and m_can_isr */ + spinlock_t tx_handling_spinlock; + int tx_fifo_in_flight; + + struct m_can_tx_op *tx_ops; + int tx_fifo_size; + int next_tx_op; + + int nr_txs_without_submit; + /* bitfield of fifo elements that will be submitted together */ + u32 tx_peripheral_submit; + + struct mram_cfg mcfg[MRAM_CFG_NUM]; + + struct hrtimer hrtimer; + + struct pinctrl *pinctrl; + struct pinctrl_state *pinctrl_state_wakeup; +}; + +struct m_can_classdev *m_can_class_allocate_dev(struct device *dev, int sizeof_priv); +void m_can_class_free_dev(struct net_device *net); +int m_can_class_register(struct m_can_classdev *cdev); +void m_can_class_unregister(struct m_can_classdev *cdev); +int m_can_class_get_clocks(struct m_can_classdev *cdev); +int m_can_check_mram_cfg(struct m_can_classdev *cdev, u32 mram_max_size); + +int m_can_class_suspend(struct device *dev); +int m_can_class_resume(struct device *dev); +#endif /* _CAN_M_H_ */ diff --git a/drivers/net/can/m_can/m_can_pci.c b/drivers/net/can/m_can/m_can_pci.c new file mode 100644 index 000000000000..eb31ed1f9644 --- /dev/null +++ b/drivers/net/can/m_can/m_can_pci.c @@ -0,0 +1,208 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCI Specific M_CAN Glue + * + * Copyright (C) 2018-2020 Intel Corporation + * Author: Felipe Balbi (Intel) + * Author: Jarkko Nikula <jarkko.nikula@linux.intel.com> + * Author: Raymond Tan <raymond.tan@intel.com> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/pci.h> +#include <linux/pm_runtime.h> + +#include "m_can.h" + +#define M_CAN_PCI_MMIO_BAR 0 + +#define M_CAN_CLOCK_FREQ_EHL 200000000 +#define CTL_CSR_INT_CTL_OFFSET 0x508 + +struct m_can_pci_priv { + struct m_can_classdev cdev; + + void __iomem *base; +}; + +static inline struct m_can_pci_priv *cdev_to_priv(struct m_can_classdev *cdev) +{ + return container_of(cdev, struct m_can_pci_priv, cdev); +} + +static u32 iomap_read_reg(struct m_can_classdev *cdev, int reg) +{ + struct m_can_pci_priv *priv = cdev_to_priv(cdev); + + return readl(priv->base + reg); +} + +static int iomap_read_fifo(struct m_can_classdev *cdev, int offset, void *val, size_t val_count) +{ + struct m_can_pci_priv *priv = cdev_to_priv(cdev); + void __iomem *src = priv->base + offset; + + while (val_count--) { + *(unsigned int *)val = ioread32(src); + val += 4; + src += 4; + } + + return 0; +} + +static int iomap_write_reg(struct m_can_classdev *cdev, int reg, int val) +{ + struct m_can_pci_priv *priv = cdev_to_priv(cdev); + + writel(val, priv->base + reg); + + return 0; +} + +static int iomap_write_fifo(struct m_can_classdev *cdev, int offset, + const void *val, size_t val_count) +{ + struct 
m_can_pci_priv *priv = cdev_to_priv(cdev); + void __iomem *dst = priv->base + offset; + + while (val_count--) { + iowrite32(*(unsigned int *)val, dst); + val += 4; + dst += 4; + } + + return 0; +} + +static const struct m_can_ops m_can_pci_ops = { + .read_reg = iomap_read_reg, + .write_reg = iomap_write_reg, + .write_fifo = iomap_write_fifo, + .read_fifo = iomap_read_fifo, +}; + +static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) +{ + struct device *dev = &pci->dev; + struct m_can_classdev *mcan_class; + struct m_can_pci_priv *priv; + void __iomem *base; + int ret; + + ret = pcim_enable_device(pci); + if (ret) + return ret; + + pci_set_master(pci); + + ret = pcim_iomap_regions(pci, BIT(M_CAN_PCI_MMIO_BAR), pci_name(pci)); + if (ret) + return ret; + + base = pcim_iomap_table(pci)[M_CAN_PCI_MMIO_BAR]; + + if (!base) { + dev_err(dev, "failed to map BARs\n"); + return -ENOMEM; + } + + mcan_class = m_can_class_allocate_dev(&pci->dev, + sizeof(struct m_can_pci_priv)); + if (IS_ERR(mcan_class)) + return PTR_ERR(mcan_class); + + priv = cdev_to_priv(mcan_class); + + priv->base = base; + + ret = pci_alloc_irq_vectors(pci, 1, 1, PCI_IRQ_ALL_TYPES); + if (ret < 0) + goto err_free_dev; + + mcan_class->dev = &pci->dev; + mcan_class->net->irq = pci_irq_vector(pci, 0); + mcan_class->pm_clock_support = 1; + mcan_class->pm_wake_source = 0; + mcan_class->can.clock.freq = id->driver_data; + mcan_class->irq_edge_triggered = true; + mcan_class->ops = &m_can_pci_ops; + + pci_set_drvdata(pci, mcan_class); + + ret = m_can_class_register(mcan_class); + if (ret) + goto err_free_irq; + + /* Enable interrupt control at CAN wrapper IP */ + writel(0x1, base + CTL_CSR_INT_CTL_OFFSET); + + pm_runtime_set_autosuspend_delay(dev, 1000); + pm_runtime_use_autosuspend(dev); + pm_runtime_put_noidle(dev); + pm_runtime_allow(dev); + + return 0; + +err_free_irq: + pci_free_irq_vectors(pci); +err_free_dev: + m_can_class_free_dev(mcan_class->net); + return ret; +} + +static void m_can_pci_remove(struct pci_dev *pci) +{ + struct m_can_classdev *mcan_class = pci_get_drvdata(pci); + struct m_can_pci_priv *priv = cdev_to_priv(mcan_class); + + pm_runtime_forbid(&pci->dev); + pm_runtime_get_noresume(&pci->dev); + + /* Disable interrupt control at CAN wrapper IP */ + writel(0x0, priv->base + CTL_CSR_INT_CTL_OFFSET); + + m_can_class_unregister(mcan_class); + m_can_class_free_dev(mcan_class->net); + pci_free_irq_vectors(pci); +} + +static __maybe_unused int m_can_pci_suspend(struct device *dev) +{ + return m_can_class_suspend(dev); +} + +static __maybe_unused int m_can_pci_resume(struct device *dev) +{ + return m_can_class_resume(dev); +} + +static SIMPLE_DEV_PM_OPS(m_can_pci_pm_ops, + m_can_pci_suspend, m_can_pci_resume); + +static const struct pci_device_id m_can_pci_id_table[] = { + { PCI_VDEVICE(INTEL, 0x4bc1), M_CAN_CLOCK_FREQ_EHL, }, + { PCI_VDEVICE(INTEL, 0x4bc2), M_CAN_CLOCK_FREQ_EHL, }, + { } /* Terminating Entry */ +}; +MODULE_DEVICE_TABLE(pci, m_can_pci_id_table); + +static struct pci_driver m_can_pci_driver = { + .name = "m_can_pci", + .probe = m_can_pci_probe, + .remove = m_can_pci_remove, + .id_table = m_can_pci_id_table, + .driver = { + .pm = &m_can_pci_pm_ops, + }, +}; + +module_pci_driver(m_can_pci_driver); + +MODULE_AUTHOR("Felipe Balbi (Intel)"); +MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@linux.intel.com>"); +MODULE_AUTHOR("Raymond Tan <raymond.tan@intel.com>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("CAN bus driver for Bosch M_CAN controller on PCI bus"); diff --git 
a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c new file mode 100644 index 000000000000..56da411878af --- /dev/null +++ b/drivers/net/can/m_can/m_can_platform.c @@ -0,0 +1,242 @@ +// SPDX-License-Identifier: GPL-2.0 +// IOMapped CAN bus driver for Bosch M_CAN controller +// Copyright (C) 2014 Freescale Semiconductor, Inc. +// Dong Aisheng <aisheng.dong@nxp.com> +// +// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/ + +#include <linux/hrtimer.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> + +#include "m_can.h" + +struct m_can_plat_priv { + struct m_can_classdev cdev; + + void __iomem *base; + void __iomem *mram_base; +}; + +static inline struct m_can_plat_priv *cdev_to_priv(struct m_can_classdev *cdev) +{ + return container_of(cdev, struct m_can_plat_priv, cdev); +} + +static u32 iomap_read_reg(struct m_can_classdev *cdev, int reg) +{ + struct m_can_plat_priv *priv = cdev_to_priv(cdev); + + return readl(priv->base + reg); +} + +static int iomap_read_fifo(struct m_can_classdev *cdev, int offset, void *val, size_t val_count) +{ + struct m_can_plat_priv *priv = cdev_to_priv(cdev); + void __iomem *src = priv->mram_base + offset; + + while (val_count--) { + *(unsigned int *)val = ioread32(src); + val += 4; + src += 4; + } + + return 0; +} + +static int iomap_write_reg(struct m_can_classdev *cdev, int reg, int val) +{ + struct m_can_plat_priv *priv = cdev_to_priv(cdev); + + writel(val, priv->base + reg); + + return 0; +} + +static int iomap_write_fifo(struct m_can_classdev *cdev, int offset, + const void *val, size_t val_count) +{ + struct m_can_plat_priv *priv = cdev_to_priv(cdev); + void __iomem *dst = priv->mram_base + offset; + + while (val_count--) { + iowrite32(*(unsigned int *)val, dst); + val += 4; + dst += 4; + } + + return 0; +} + +static const struct m_can_ops m_can_plat_ops = { + .read_reg = iomap_read_reg, + .write_reg = iomap_write_reg, + .write_fifo = iomap_write_fifo, + .read_fifo = iomap_read_fifo, +}; + +static int m_can_plat_probe(struct platform_device *pdev) +{ + struct m_can_classdev *mcan_class; + struct m_can_plat_priv *priv; + struct resource *res; + void __iomem *addr; + void __iomem *mram_addr; + struct phy *transceiver; + int irq = 0, ret = 0; + + mcan_class = m_can_class_allocate_dev(&pdev->dev, + sizeof(struct m_can_plat_priv)); + if (IS_ERR(mcan_class)) + return PTR_ERR(mcan_class); + + priv = cdev_to_priv(mcan_class); + + ret = m_can_class_get_clocks(mcan_class); + if (ret) + goto probe_fail; + + addr = devm_platform_ioremap_resource_byname(pdev, "m_can"); + if (IS_ERR(addr)) { + ret = PTR_ERR(addr); + goto probe_fail; + } + + if (device_property_present(mcan_class->dev, "interrupts") || + device_property_present(mcan_class->dev, "interrupt-names")) { + irq = platform_get_irq_byname(pdev, "int0"); + if (irq < 0) { + ret = irq; + goto probe_fail; + } + } + + /* message ram could be shared */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram"); + if (!res) { + ret = -ENODEV; + goto probe_fail; + } + + mram_addr = devm_ioremap(&pdev->dev, res->start, resource_size(res)); + if (!mram_addr) { + ret = -ENOMEM; + goto probe_fail; + } + + transceiver = devm_phy_optional_get(&pdev->dev, NULL); + if (IS_ERR(transceiver)) { + ret = PTR_ERR(transceiver); + dev_err_probe(&pdev->dev, ret, "failed to get phy\n"); + goto probe_fail; + } + + if (transceiver) + mcan_class->can.bitrate_max = transceiver->attrs.max_link_rate; + + priv->base = addr; + priv->mram_base = mram_addr; + + 
mcan_class->net->irq = irq; + mcan_class->pm_clock_support = 1; + mcan_class->pm_wake_source = 0; + mcan_class->can.clock.freq = clk_get_rate(mcan_class->cclk); + mcan_class->dev = &pdev->dev; + mcan_class->transceiver = transceiver; + + mcan_class->ops = &m_can_plat_ops; + + mcan_class->is_peripheral = false; + + platform_set_drvdata(pdev, mcan_class); + + pm_runtime_enable(mcan_class->dev); + ret = m_can_class_register(mcan_class); + if (ret) + goto out_runtime_disable; + + return ret; + +out_runtime_disable: + pm_runtime_disable(mcan_class->dev); +probe_fail: + m_can_class_free_dev(mcan_class->net); + return ret; +} + +static __maybe_unused int m_can_suspend(struct device *dev) +{ + return m_can_class_suspend(dev); +} + +static __maybe_unused int m_can_resume(struct device *dev) +{ + return m_can_class_resume(dev); +} + +static void m_can_plat_remove(struct platform_device *pdev) +{ + struct m_can_plat_priv *priv = platform_get_drvdata(pdev); + struct m_can_classdev *mcan_class = &priv->cdev; + + m_can_class_unregister(mcan_class); + pm_runtime_disable(mcan_class->dev); + m_can_class_free_dev(mcan_class->net); +} + +static int __maybe_unused m_can_runtime_suspend(struct device *dev) +{ + struct m_can_plat_priv *priv = dev_get_drvdata(dev); + struct m_can_classdev *mcan_class = &priv->cdev; + + clk_disable_unprepare(mcan_class->cclk); + clk_disable_unprepare(mcan_class->hclk); + + return 0; +} + +static int __maybe_unused m_can_runtime_resume(struct device *dev) +{ + struct m_can_plat_priv *priv = dev_get_drvdata(dev); + struct m_can_classdev *mcan_class = &priv->cdev; + int err; + + err = clk_prepare_enable(mcan_class->hclk); + if (err) + return err; + + err = clk_prepare_enable(mcan_class->cclk); + if (err) + clk_disable_unprepare(mcan_class->hclk); + + return err; +} + +static const struct dev_pm_ops m_can_pmops = { + SET_RUNTIME_PM_OPS(m_can_runtime_suspend, + m_can_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(m_can_suspend, m_can_resume) +}; + +static const struct of_device_id m_can_of_table[] = { + { .compatible = "bosch,m_can", .data = NULL }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, m_can_of_table); + +static struct platform_driver m_can_plat_driver = { + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = m_can_of_table, + .pm = &m_can_pmops, + }, + .probe = m_can_plat_probe, + .remove = m_can_plat_remove, +}; + +module_platform_driver(m_can_plat_driver); + +MODULE_AUTHOR("Dong Aisheng <aisheng.dong@nxp.com>"); +MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("M_CAN driver for IO Mapped Bosch controllers"); diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c new file mode 100644 index 000000000000..31cc9d0abd45 --- /dev/null +++ b/drivers/net/can/m_can/tcan4x5x-core.c @@ -0,0 +1,610 @@ +// SPDX-License-Identifier: GPL-2.0 +// SPI to CAN driver for the Texas Instruments TCAN4x5x +// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/ + +#include "tcan4x5x.h" + +#define TCAN4X5X_EXT_CLK_DEF 40000000 + +#define TCAN4X5X_DEV_ID1 0x00 +#define TCAN4X5X_DEV_ID1_TCAN 0x4e414354 /* ASCII TCAN */ +#define TCAN4X5X_DEV_ID2 0x04 +#define TCAN4X5X_REV 0x08 +#define TCAN4X5X_STATUS 0x0C +#define TCAN4X5X_ERROR_STATUS_MASK 0x10 +#define TCAN4X5X_CONTROL 0x14 + +#define TCAN4X5X_CONFIG 0x800 +#define TCAN4X5X_TS_PRESCALE 0x804 +#define TCAN4X5X_TEST_REG 0x808 +#define TCAN4X5X_INT_FLAGS 0x820 +#define TCAN4X5X_MCAN_INT_REG 0x824 +#define TCAN4X5X_INT_EN 0x830 + +/* 
Interrupt bits */ +#define TCAN4X5X_CANBUSTERMOPEN_INT_EN BIT(30) +#define TCAN4X5X_CANHCANL_INT_EN BIT(29) +#define TCAN4X5X_CANHBAT_INT_EN BIT(28) +#define TCAN4X5X_CANLGND_INT_EN BIT(27) +#define TCAN4X5X_CANBUSOPEN_INT_EN BIT(26) +#define TCAN4X5X_CANBUSGND_INT_EN BIT(25) +#define TCAN4X5X_CANBUSBAT_INT_EN BIT(24) +#define TCAN4X5X_UVSUP_INT_EN BIT(22) +#define TCAN4X5X_UVIO_INT_EN BIT(21) +#define TCAN4X5X_TSD_INT_EN BIT(19) +#define TCAN4X5X_ECCERR_INT_EN BIT(16) +#define TCAN4X5X_CANINT_INT_EN BIT(15) +#define TCAN4X5X_LWU_INT_EN BIT(14) +#define TCAN4X5X_CANSLNT_INT_EN BIT(10) +#define TCAN4X5X_CANDOM_INT_EN BIT(8) +#define TCAN4X5X_CANBUS_ERR_INT_EN BIT(5) +#define TCAN4X5X_BUS_FAULT BIT(4) +#define TCAN4X5X_MCAN_INT BIT(1) +#define TCAN4X5X_ENABLE_TCAN_INT \ + (TCAN4X5X_MCAN_INT | TCAN4X5X_BUS_FAULT | \ + TCAN4X5X_CANBUS_ERR_INT_EN | TCAN4X5X_CANINT_INT_EN) + +/* MCAN Interrupt bits */ +#define TCAN4X5X_MCAN_IR_ARA BIT(29) +#define TCAN4X5X_MCAN_IR_PED BIT(28) +#define TCAN4X5X_MCAN_IR_PEA BIT(27) +#define TCAN4X5X_MCAN_IR_WD BIT(26) +#define TCAN4X5X_MCAN_IR_BO BIT(25) +#define TCAN4X5X_MCAN_IR_EW BIT(24) +#define TCAN4X5X_MCAN_IR_EP BIT(23) +#define TCAN4X5X_MCAN_IR_ELO BIT(22) +#define TCAN4X5X_MCAN_IR_BEU BIT(21) +#define TCAN4X5X_MCAN_IR_BEC BIT(20) +#define TCAN4X5X_MCAN_IR_DRX BIT(19) +#define TCAN4X5X_MCAN_IR_TOO BIT(18) +#define TCAN4X5X_MCAN_IR_MRAF BIT(17) +#define TCAN4X5X_MCAN_IR_TSW BIT(16) +#define TCAN4X5X_MCAN_IR_TEFL BIT(15) +#define TCAN4X5X_MCAN_IR_TEFF BIT(14) +#define TCAN4X5X_MCAN_IR_TEFW BIT(13) +#define TCAN4X5X_MCAN_IR_TEFN BIT(12) +#define TCAN4X5X_MCAN_IR_TFE BIT(11) +#define TCAN4X5X_MCAN_IR_TCF BIT(10) +#define TCAN4X5X_MCAN_IR_TC BIT(9) +#define TCAN4X5X_MCAN_IR_HPM BIT(8) +#define TCAN4X5X_MCAN_IR_RF1L BIT(7) +#define TCAN4X5X_MCAN_IR_RF1F BIT(6) +#define TCAN4X5X_MCAN_IR_RF1W BIT(5) +#define TCAN4X5X_MCAN_IR_RF1N BIT(4) +#define TCAN4X5X_MCAN_IR_RF0L BIT(3) +#define TCAN4X5X_MCAN_IR_RF0F BIT(2) +#define TCAN4X5X_MCAN_IR_RF0W BIT(1) +#define TCAN4X5X_MCAN_IR_RF0N BIT(0) +#define TCAN4X5X_ENABLE_MCAN_INT \ + (TCAN4X5X_MCAN_IR_TC | TCAN4X5X_MCAN_IR_RF0N | \ + TCAN4X5X_MCAN_IR_RF1N | TCAN4X5X_MCAN_IR_RF0F | \ + TCAN4X5X_MCAN_IR_RF1F) + +#define TCAN4X5X_MRAM_START 0x8000 +#define TCAN4X5X_MRAM_SIZE 0x800 +#define TCAN4X5X_MCAN_OFFSET 0x1000 + +#define TCAN4X5X_CLEAR_ALL_INT 0xffffffff +#define TCAN4X5X_SET_ALL_INT 0xffffffff + +#define TCAN4X5X_MODE_SEL_MASK (BIT(7) | BIT(6)) +#define TCAN4X5X_MODE_SLEEP 0x00 +#define TCAN4X5X_MODE_STANDBY BIT(6) +#define TCAN4X5X_MODE_NORMAL BIT(7) + +#define TCAN4X5X_NWKRQ_VOLTAGE_VIO BIT(19) + +#define TCAN4X5X_DISABLE_WAKE_MSK (BIT(31) | BIT(30)) +#define TCAN4X5X_DISABLE_INH_MSK BIT(9) + +#define TCAN4X5X_SW_RESET BIT(2) + +#define TCAN4X5X_MCAN_CONFIGURED BIT(5) +#define TCAN4X5X_WATCHDOG_EN BIT(3) +#define TCAN4X5X_WD_60_MS_TIMER 0 +#define TCAN4X5X_WD_600_MS_TIMER BIT(28) +#define TCAN4X5X_WD_3_S_TIMER BIT(29) +#define TCAN4X5X_WD_6_S_TIMER (BIT(28) | BIT(29)) + +struct tcan4x5x_version_info { + const char *name; + u32 id2_register; + + bool has_wake_pin; + bool has_state_pin; +}; + +enum { + TCAN4552 = 0, + TCAN4553, + TCAN4X5X, +}; + +static const struct tcan4x5x_version_info tcan4x5x_versions[] = { + [TCAN4552] = { + .name = "4552", + .id2_register = 0x32353534, + }, + [TCAN4553] = { + .name = "4553", + .id2_register = 0x33353534, + }, + /* generic version with no id2_register at the end */ + [TCAN4X5X] = { + .name = "generic", + .has_wake_pin = true, + .has_state_pin = true, + }, +}; + +static inline 
struct tcan4x5x_priv *cdev_to_priv(struct m_can_classdev *cdev) +{ + return container_of(cdev, struct tcan4x5x_priv, cdev); +} + +static void tcan4x5x_check_wake(struct tcan4x5x_priv *priv) +{ + int wake_state = 0; + + if (priv->device_state_gpio) + wake_state = gpiod_get_value(priv->device_state_gpio); + + if (priv->device_wake_gpio && wake_state) { + gpiod_set_value(priv->device_wake_gpio, 0); + usleep_range(5, 50); + gpiod_set_value(priv->device_wake_gpio, 1); + } +} + +static int tcan4x5x_reset(struct tcan4x5x_priv *priv) +{ + int ret = 0; + + if (priv->reset_gpio) { + gpiod_set_value(priv->reset_gpio, 1); + + /* tpulse_width minimum 30us */ + usleep_range(30, 100); + gpiod_set_value(priv->reset_gpio, 0); + } else { + ret = regmap_write(priv->regmap, TCAN4X5X_CONFIG, + TCAN4X5X_SW_RESET); + if (ret) + return ret; + } + + usleep_range(700, 1000); + + return ret; +} + +static u32 tcan4x5x_read_reg(struct m_can_classdev *cdev, int reg) +{ + struct tcan4x5x_priv *priv = cdev_to_priv(cdev); + u32 val; + + regmap_read(priv->regmap, TCAN4X5X_MCAN_OFFSET + reg, &val); + + return val; +} + +static int tcan4x5x_read_fifo(struct m_can_classdev *cdev, int addr_offset, + void *val, size_t val_count) +{ + struct tcan4x5x_priv *priv = cdev_to_priv(cdev); + + return regmap_bulk_read(priv->regmap, TCAN4X5X_MRAM_START + addr_offset, val, val_count); +} + +static int tcan4x5x_write_reg(struct m_can_classdev *cdev, int reg, int val) +{ + struct tcan4x5x_priv *priv = cdev_to_priv(cdev); + + return regmap_write(priv->regmap, TCAN4X5X_MCAN_OFFSET + reg, val); +} + +static int tcan4x5x_write_fifo(struct m_can_classdev *cdev, + int addr_offset, const void *val, size_t val_count) +{ + struct tcan4x5x_priv *priv = cdev_to_priv(cdev); + + return regmap_bulk_write(priv->regmap, TCAN4X5X_MRAM_START + addr_offset, val, val_count); +} + +static int tcan4x5x_power_enable(struct regulator *reg, int enable) +{ + if (IS_ERR_OR_NULL(reg)) + return 0; + + if (enable) + return regulator_enable(reg); + else + return regulator_disable(reg); +} + +static int tcan4x5x_write_tcan_reg(struct m_can_classdev *cdev, + int reg, int val) +{ + struct tcan4x5x_priv *priv = cdev_to_priv(cdev); + + return regmap_write(priv->regmap, reg, val); +} + +static int tcan4x5x_clear_interrupts(struct m_can_classdev *cdev) +{ + int ret; + + ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_STATUS, + TCAN4X5X_CLEAR_ALL_INT); + if (ret) + return ret; + + return tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_INT_FLAGS, + TCAN4X5X_CLEAR_ALL_INT); +} + +static int tcan4x5x_init(struct m_can_classdev *cdev) +{ + struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev); + int ret; + + tcan4x5x_check_wake(tcan4x5x); + + ret = tcan4x5x_clear_interrupts(cdev); + if (ret) + return ret; + + ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_INT_EN, + TCAN4X5X_ENABLE_TCAN_INT); + if (ret) + return ret; + + ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_ERROR_STATUS_MASK, + TCAN4X5X_CLEAR_ALL_INT); + if (ret) + return ret; + + ret = regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG, + TCAN4X5X_MODE_SEL_MASK, TCAN4X5X_MODE_NORMAL); + if (ret) + return ret; + + if (tcan4x5x->nwkrq_voltage_vio) { + ret = regmap_set_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG, + TCAN4X5X_NWKRQ_VOLTAGE_VIO); + if (ret) + return ret; + } + + return ret; +} + +static int tcan4x5x_deinit(struct m_can_classdev *cdev) +{ + struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev); + + return regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG, + TCAN4X5X_MODE_SEL_MASK, TCAN4X5X_MODE_STANDBY); +}; + +static int 
tcan4x5x_disable_wake(struct m_can_classdev *cdev) +{ + struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev); + + return regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG, + TCAN4X5X_DISABLE_WAKE_MSK, 0x00); +} + +static int tcan4x5x_disable_state(struct m_can_classdev *cdev) +{ + struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev); + + return regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG, + TCAN4X5X_DISABLE_INH_MSK, 0x01); +} + +static const struct tcan4x5x_version_info +*tcan4x5x_find_version(struct tcan4x5x_priv *priv) +{ + u32 val; + int ret; + + ret = regmap_read(priv->regmap, TCAN4X5X_DEV_ID1, &val); + if (ret) + return ERR_PTR(ret); + + if (val != TCAN4X5X_DEV_ID1_TCAN) { + dev_err(&priv->spi->dev, "Not a tcan device %x\n", val); + return ERR_PTR(-ENODEV); + } + + ret = regmap_read(priv->regmap, TCAN4X5X_DEV_ID2, &val); + if (ret) + return ERR_PTR(ret); + + for (int i = 0; i != ARRAY_SIZE(tcan4x5x_versions); ++i) { + const struct tcan4x5x_version_info *vinfo = &tcan4x5x_versions[i]; + + if (!vinfo->id2_register || val == vinfo->id2_register) { + dev_info(&priv->spi->dev, "Detected TCAN device version %s\n", + vinfo->name); + return vinfo; + } + } + + return &tcan4x5x_versions[TCAN4X5X]; +} + +static void tcan4x5x_get_dt_data(struct m_can_classdev *cdev) +{ + struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev); + + tcan4x5x->nwkrq_voltage_vio = + of_property_read_bool(cdev->dev->of_node, "ti,nwkrq-voltage-vio"); +} + +static int tcan4x5x_get_gpios(struct m_can_classdev *cdev) +{ + struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev); + int ret; + + tcan4x5x->device_wake_gpio = devm_gpiod_get_optional(cdev->dev, + "device-wake", + GPIOD_OUT_HIGH); + if (IS_ERR(tcan4x5x->device_wake_gpio)) { + if (PTR_ERR(tcan4x5x->device_wake_gpio) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + tcan4x5x->device_wake_gpio = NULL; + } + + tcan4x5x->reset_gpio = devm_gpiod_get_optional(cdev->dev, "reset", + GPIOD_OUT_LOW); + if (IS_ERR(tcan4x5x->reset_gpio)) + tcan4x5x->reset_gpio = NULL; + + ret = tcan4x5x_reset(tcan4x5x); + if (ret) + return ret; + + tcan4x5x->device_state_gpio = devm_gpiod_get_optional(cdev->dev, + "device-state", + GPIOD_IN); + if (IS_ERR(tcan4x5x->device_state_gpio)) + tcan4x5x->device_state_gpio = NULL; + + return 0; +} + +static int tcan4x5x_check_gpios(struct m_can_classdev *cdev, + const struct tcan4x5x_version_info *version_info) +{ + struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev); + int ret; + + if (version_info->has_wake_pin && !tcan4x5x->device_wake_gpio) { + ret = tcan4x5x_disable_wake(cdev); + if (ret) + return ret; + } + + if (version_info->has_state_pin && !tcan4x5x->device_state_gpio) { + ret = tcan4x5x_disable_state(cdev); + if (ret) + return ret; + } + + return 0; +} + +static const struct m_can_ops tcan4x5x_ops = { + .init = tcan4x5x_init, + .deinit = tcan4x5x_deinit, + .read_reg = tcan4x5x_read_reg, + .write_reg = tcan4x5x_write_reg, + .write_fifo = tcan4x5x_write_fifo, + .read_fifo = tcan4x5x_read_fifo, + .clear_interrupts = tcan4x5x_clear_interrupts, +}; + +static int tcan4x5x_can_probe(struct spi_device *spi) +{ + const struct tcan4x5x_version_info *version_info; + struct tcan4x5x_priv *priv; + struct m_can_classdev *mcan_class; + int freq, ret; + + mcan_class = m_can_class_allocate_dev(&spi->dev, + sizeof(struct tcan4x5x_priv)); + if (IS_ERR(mcan_class)) + return PTR_ERR(mcan_class); + + ret = m_can_check_mram_cfg(mcan_class, TCAN4X5X_MRAM_SIZE); + if (ret) + goto out_m_can_class_free_dev; + + priv = cdev_to_priv(mcan_class); + + priv->power = 
devm_regulator_get_optional(&spi->dev, "vsup"); + if (IS_ERR(priv->power)) { + if (PTR_ERR(priv->power) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto out_m_can_class_free_dev; + } + priv->power = NULL; + } + + mcan_class->cclk = devm_clk_get(mcan_class->dev, "cclk"); + if (IS_ERR(mcan_class->cclk)) { + dev_err(&spi->dev, "no CAN clock source defined\n"); + freq = TCAN4X5X_EXT_CLK_DEF; + } else { + freq = clk_get_rate(mcan_class->cclk); + } + + /* Sanity check */ + if (freq < 20000000 || freq > TCAN4X5X_EXT_CLK_DEF) { + dev_err(&spi->dev, "Clock frequency is out of supported range %d\n", + freq); + ret = -ERANGE; + goto out_m_can_class_free_dev; + } + + priv->spi = spi; + + mcan_class->pm_clock_support = 0; + mcan_class->pm_wake_source = device_property_read_bool(&spi->dev, "wakeup-source"); + mcan_class->can.clock.freq = freq; + mcan_class->dev = &spi->dev; + mcan_class->ops = &tcan4x5x_ops; + mcan_class->is_peripheral = true; + mcan_class->net->irq = spi->irq; + + spi_set_drvdata(spi, priv); + + /* Configure the SPI bus */ + spi->bits_per_word = 8; + ret = spi_setup(spi); + if (ret) { + dev_err(&spi->dev, "SPI setup failed %pe\n", ERR_PTR(ret)); + goto out_m_can_class_free_dev; + } + + ret = tcan4x5x_regmap_init(priv); + if (ret) { + dev_err(&spi->dev, "regmap init failed %pe\n", ERR_PTR(ret)); + goto out_m_can_class_free_dev; + } + + ret = tcan4x5x_power_enable(priv->power, 1); + if (ret) { + dev_err(&spi->dev, "Enabling regulator failed %pe\n", + ERR_PTR(ret)); + goto out_m_can_class_free_dev; + } + + ret = tcan4x5x_get_gpios(mcan_class); + if (ret) { + dev_err(&spi->dev, "Getting gpios failed %pe\n", ERR_PTR(ret)); + goto out_power; + } + + version_info = tcan4x5x_find_version(priv); + if (IS_ERR(version_info)) { + ret = PTR_ERR(version_info); + goto out_power; + } + + ret = tcan4x5x_check_gpios(mcan_class, version_info); + if (ret) { + dev_err(&spi->dev, "Checking gpios failed %pe\n", ERR_PTR(ret)); + goto out_power; + } + + tcan4x5x_get_dt_data(mcan_class); + + tcan4x5x_check_wake(priv); + + ret = tcan4x5x_write_tcan_reg(mcan_class, TCAN4X5X_INT_EN, 0); + if (ret) { + dev_err(&spi->dev, "Disabling interrupts failed %pe\n", ERR_PTR(ret)); + goto out_power; + } + + ret = tcan4x5x_clear_interrupts(mcan_class); + if (ret) { + dev_err(&spi->dev, "Clearing interrupts failed %pe\n", ERR_PTR(ret)); + goto out_power; + } + + if (mcan_class->pm_wake_source) + device_init_wakeup(&spi->dev, true); + + ret = m_can_class_register(mcan_class); + if (ret) { + dev_err(&spi->dev, "Failed registering m_can device %pe\n", + ERR_PTR(ret)); + goto out_power; + } + + netdev_info(mcan_class->net, "TCAN4X5X successfully initialized.\n"); + return 0; + +out_power: + tcan4x5x_power_enable(priv->power, 0); + out_m_can_class_free_dev: + m_can_class_free_dev(mcan_class->net); + return ret; +} + +static void tcan4x5x_can_remove(struct spi_device *spi) +{ + struct tcan4x5x_priv *priv = spi_get_drvdata(spi); + + m_can_class_unregister(&priv->cdev); + + tcan4x5x_power_enable(priv->power, 0); + + m_can_class_free_dev(priv->cdev.net); +} + +static int __maybe_unused tcan4x5x_suspend(struct device *dev) +{ + struct m_can_classdev *cdev = dev_get_drvdata(dev); + struct spi_device *spi = to_spi_device(dev); + + if (cdev->pm_wake_source) + enable_irq_wake(spi->irq); + + return m_can_class_suspend(dev); +} + +static int __maybe_unused tcan4x5x_resume(struct device *dev) +{ + struct m_can_classdev *cdev = dev_get_drvdata(dev); + struct spi_device *spi = to_spi_device(dev); + int ret = m_can_class_resume(dev); + + if 
(cdev->pm_wake_source) + disable_irq_wake(spi->irq); + + return ret; +} + +static const struct of_device_id tcan4x5x_of_match[] = { + { + .compatible = "ti,tcan4x5x", + }, { + /* sentinel */ + }, +}; +MODULE_DEVICE_TABLE(of, tcan4x5x_of_match); + +static const struct spi_device_id tcan4x5x_id_table[] = { + { + .name = "tcan4x5x", + }, { + /* sentinel */ + }, +}; +MODULE_DEVICE_TABLE(spi, tcan4x5x_id_table); + +static const struct dev_pm_ops tcan4x5x_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(tcan4x5x_suspend, tcan4x5x_resume) +}; + +static struct spi_driver tcan4x5x_can_driver = { + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = tcan4x5x_of_match, + .pm = &tcan4x5x_pm_ops, + }, + .id_table = tcan4x5x_id_table, + .probe = tcan4x5x_can_probe, + .remove = tcan4x5x_can_remove, +}; +module_spi_driver(tcan4x5x_can_driver); + +MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>"); +MODULE_DESCRIPTION("Texas Instruments TCAN4x5x CAN driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/can/m_can/tcan4x5x-regmap.c b/drivers/net/can/m_can/tcan4x5x-regmap.c new file mode 100644 index 000000000000..fafa6daa67e6 --- /dev/null +++ b/drivers/net/can/m_can/tcan4x5x-regmap.c @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// tcan4x5x - Texas Instruments TCAN4x5x Family CAN controller driver +// +// Copyright (c) 2020 Pengutronix, +// Marc Kleine-Budde <kernel@pengutronix.de> +// Copyright (c) 2018-2019 Texas Instruments Incorporated +// http://www.ti.com/ + +#include "tcan4x5x.h" + +#define TCAN4X5X_SPI_INSTRUCTION_WRITE (0x61 << 24) +#define TCAN4X5X_SPI_INSTRUCTION_READ (0x41 << 24) + +#define TCAN4X5X_MAX_REGISTER 0x87fc + +static int tcan4x5x_regmap_gather_write(void *context, + const void *reg, size_t reg_len, + const void *val, size_t val_len) +{ + struct spi_device *spi = context; + struct tcan4x5x_priv *priv = spi_get_drvdata(spi); + struct tcan4x5x_map_buf *buf_tx = &priv->map_buf_tx; + struct spi_transfer xfer[] = { + { + .tx_buf = buf_tx, + .len = sizeof(buf_tx->cmd) + val_len, + }, + }; + + memcpy(&buf_tx->cmd, reg, sizeof(buf_tx->cmd.cmd) + + sizeof(buf_tx->cmd.addr)); + tcan4x5x_spi_cmd_set_len(&buf_tx->cmd, val_len); + memcpy(buf_tx->data, val, val_len); + + return spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer)); +} + +static int tcan4x5x_regmap_write(void *context, const void *data, size_t count) +{ + return tcan4x5x_regmap_gather_write(context, data, sizeof(__be32), + data + sizeof(__be32), + count - sizeof(__be32)); +} + +static int tcan4x5x_regmap_read(void *context, + const void *reg_buf, size_t reg_len, + void *val_buf, size_t val_len) +{ + struct spi_device *spi = context; + struct tcan4x5x_priv *priv = spi_get_drvdata(spi); + struct tcan4x5x_map_buf *buf_rx = &priv->map_buf_rx; + struct tcan4x5x_map_buf *buf_tx = &priv->map_buf_tx; + struct spi_transfer xfer[2] = { + { + .tx_buf = buf_tx, + } + }; + struct spi_message msg; + int err; + + spi_message_init(&msg); + spi_message_add_tail(&xfer[0], &msg); + + memcpy(&buf_tx->cmd, reg_buf, sizeof(buf_tx->cmd.cmd) + + sizeof(buf_tx->cmd.addr)); + tcan4x5x_spi_cmd_set_len(&buf_tx->cmd, val_len); + + if (spi->controller->flags & SPI_CONTROLLER_HALF_DUPLEX) { + xfer[0].len = sizeof(buf_tx->cmd); + + xfer[1].rx_buf = val_buf; + xfer[1].len = val_len; + spi_message_add_tail(&xfer[1], &msg); + } else { + xfer[0].rx_buf = buf_rx; + xfer[0].len = sizeof(buf_tx->cmd) + val_len; + + if (TCAN4X5X_SANITIZE_SPI) + memset(buf_tx->data, 0x0, val_len); + } + + err = spi_sync(spi, &msg); + if (err) + return err; + + if 
(!(spi->controller->flags & SPI_CONTROLLER_HALF_DUPLEX)) + memcpy(val_buf, buf_rx->data, val_len); + + return 0; +} + +static const struct regmap_range tcan4x5x_reg_table_wr_range[] = { + /* Device ID and SPI Registers */ + regmap_reg_range(0x000c, 0x0010), + /* Device configuration registers and Interrupt Flags*/ + regmap_reg_range(0x0800, 0x080c), + regmap_reg_range(0x0820, 0x0820), + regmap_reg_range(0x0830, 0x0830), + /* M_CAN */ + regmap_reg_range(0x100c, 0x102c), + regmap_reg_range(0x1048, 0x1048), + regmap_reg_range(0x1050, 0x105c), + regmap_reg_range(0x1080, 0x1088), + regmap_reg_range(0x1090, 0x1090), + regmap_reg_range(0x1098, 0x10a0), + regmap_reg_range(0x10a8, 0x10b0), + regmap_reg_range(0x10b8, 0x10c0), + regmap_reg_range(0x10c8, 0x10c8), + regmap_reg_range(0x10d0, 0x10d4), + regmap_reg_range(0x10e0, 0x10e4), + regmap_reg_range(0x10f0, 0x10f0), + regmap_reg_range(0x10f8, 0x10f8), + /* MRAM */ + regmap_reg_range(0x8000, 0x87fc), +}; + +static const struct regmap_range tcan4x5x_reg_table_rd_range[] = { + regmap_reg_range(0x0000, 0x0010), /* Device ID and SPI Registers */ + regmap_reg_range(0x0800, 0x0830), /* Device configuration registers and Interrupt Flags*/ + regmap_reg_range(0x1000, 0x10fc), /* M_CAN */ + regmap_reg_range(0x8000, 0x87fc), /* MRAM */ +}; + +static const struct regmap_access_table tcan4x5x_reg_table_wr = { + .yes_ranges = tcan4x5x_reg_table_wr_range, + .n_yes_ranges = ARRAY_SIZE(tcan4x5x_reg_table_wr_range), +}; + +static const struct regmap_access_table tcan4x5x_reg_table_rd = { + .yes_ranges = tcan4x5x_reg_table_rd_range, + .n_yes_ranges = ARRAY_SIZE(tcan4x5x_reg_table_rd_range), +}; + +static const struct regmap_config tcan4x5x_regmap = { + .reg_bits = 24, + .reg_stride = 4, + .pad_bits = 8, + .val_bits = 32, + .wr_table = &tcan4x5x_reg_table_wr, + .rd_table = &tcan4x5x_reg_table_rd, + .max_register = TCAN4X5X_MAX_REGISTER, + .cache_type = REGCACHE_NONE, + .read_flag_mask = (__force unsigned long) + cpu_to_be32(TCAN4X5X_SPI_INSTRUCTION_READ), + .write_flag_mask = (__force unsigned long) + cpu_to_be32(TCAN4X5X_SPI_INSTRUCTION_WRITE), +}; + +static const struct regmap_bus tcan4x5x_bus = { + .write = tcan4x5x_regmap_write, + .gather_write = tcan4x5x_regmap_gather_write, + .read = tcan4x5x_regmap_read, + .reg_format_endian_default = REGMAP_ENDIAN_BIG, + .val_format_endian_default = REGMAP_ENDIAN_BIG, + .max_raw_read = 256, + .max_raw_write = 256, +}; + +int tcan4x5x_regmap_init(struct tcan4x5x_priv *priv) +{ + priv->regmap = devm_regmap_init(&priv->spi->dev, &tcan4x5x_bus, + priv->spi, &tcan4x5x_regmap); + return PTR_ERR_OR_ZERO(priv->regmap); +} diff --git a/drivers/net/can/m_can/tcan4x5x.h b/drivers/net/can/m_can/tcan4x5x.h new file mode 100644 index 000000000000..203399d5e8cc --- /dev/null +++ b/drivers/net/can/m_can/tcan4x5x.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * tcan4x5x - Texas Instruments TCAN4x5x Family CAN controller driver + * + * Copyright (c) 2020 Pengutronix, + * Marc Kleine-Budde <kernel@pengutronix.de> + */ + +#ifndef _TCAN4X5X_H +#define _TCAN4X5X_H + +#include <linux/gpio/consumer.h> +#include <linux/regmap.h> +#include <linux/regulator/consumer.h> +#include <linux/spi/spi.h> + +#include "m_can.h" + +#define TCAN4X5X_SANITIZE_SPI 1 + +struct __packed tcan4x5x_buf_cmd { + u8 cmd; + __be16 addr; + u8 len; +}; + +struct tcan4x5x_map_buf { + struct tcan4x5x_buf_cmd cmd; + u8 data[256 * sizeof(u32)]; +} ____cacheline_aligned; + +struct tcan4x5x_priv { + struct m_can_classdev cdev; + + struct regmap *regmap; + struct 
spi_device *spi; + + struct gpio_desc *reset_gpio; + struct gpio_desc *device_wake_gpio; + struct gpio_desc *device_state_gpio; + struct regulator *power; + + struct tcan4x5x_map_buf map_buf_rx; + struct tcan4x5x_map_buf map_buf_tx; + + bool nwkrq_voltage_vio; +}; + +static inline void +tcan4x5x_spi_cmd_set_len(struct tcan4x5x_buf_cmd *cmd, u8 len) +{ + /* number of u32 */ + cmd->len = len >> 2; +} + +int tcan4x5x_regmap_init(struct tcan4x5x_priv *priv); + +#endif diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig index 81c711719490..dfe6bd9947bb 100644 --- a/drivers/net/can/mscan/Kconfig +++ b/drivers/net/can/mscan/Kconfig @@ -1,7 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0-only config CAN_MSCAN depends on PPC tristate "Support for Freescale MSCAN based chips" - ---help--- + help The Motorola Scalable Controller Area Network (MSCAN) definition is based on the MSCAN12 definition which is the specific implementation of the Motorola Scalable CAN concept targeted for @@ -12,7 +13,7 @@ if CAN_MSCAN config CAN_MPC5XXX tristate "Freescale MPC5xxx onboard CAN controller" depends on (PPC_MPC52xx || PPC_MPC512x) - ---help--- + help If you say yes here you get support for Freescale's MPC5xxx onboard CAN controller. Currently, the MPC5200, MPC5200B and MPC5121 (Rev. 2 and later) are supported. diff --git a/drivers/net/can/mscan/Makefile b/drivers/net/can/mscan/Makefile index 58903b45f5fb..6c114bed439f 100644 --- a/drivers/net/can/mscan/Makefile +++ b/drivers/net/can/mscan/Makefile @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_CAN_MPC5XXX) += mscan-mpc5xxx.o mscan-mpc5xxx-objs := mscan.o mpc5xxx_can.o diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c index 2949a381a94d..0080c39ee182 100644 --- a/drivers/net/can/mscan/mpc5xxx_can.c +++ b/drivers/net/can/mscan/mpc5xxx_can.c @@ -1,30 +1,23 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * CAN bus driver for the Freescale MPC5xxx embedded CPU. * * Copyright (C) 2004-2005 Andrey Volkov <avolkov@varma-el.com>, * Varma Electronics Oy * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com> - * Copyright (C) 2009 Wolfram Sang, Pengutronix <w.sang@pengutronix.de> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the version 2 of the GNU General Public License - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. 
+ * Copyright (C) 2009 Wolfram Sang, Pengutronix <kernel@pengutronix.de> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/platform_device.h> +#include <linux/property.h> #include <linux/netdevice.h> #include <linux/can/dev.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/of_platform.h> #include <sysdev/fsl_soc.h> #include <linux/clk.h> @@ -72,7 +65,7 @@ static u32 mpc52xx_can_get_clock(struct platform_device *ofdev, else *mscan_clksrc = MSCAN_CLKSRC_XTAL; - freq = mpc5xxx_get_bus_frequency(ofdev->dev.of_node); + freq = mpc5xxx_get_bus_frequency(&ofdev->dev); if (!freq) return 0; @@ -290,7 +283,6 @@ static u32 mpc512x_can_get_clock(struct platform_device *ofdev, static const struct of_device_id mpc5xxx_can_table[]; static int mpc5xxx_can_probe(struct platform_device *ofdev) { - const struct of_device_id *match; const struct mpc5xxx_can_data *data; struct device_node *np = ofdev->dev.of_node; struct net_device *dev; @@ -300,16 +292,13 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev) int irq, mscan_clksrc = 0; int err = -ENOMEM; - match = of_match_device(mpc5xxx_can_table, &ofdev->dev); - if (!match) + data = device_get_match_data(&ofdev->dev); + if (!data) return -EINVAL; - data = match->data; base = of_iomap(np, 0); - if (!base) { - dev_err(&ofdev->dev, "couldn't ioremap\n"); - return err; - } + if (!base) + return dev_err_probe(&ofdev->dev, err, "couldn't ioremap\n"); irq = irq_of_parse_and_map(np, 0); if (!irq) { @@ -330,20 +319,19 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev) clock_name = of_get_property(np, "fsl,mscan-clock-source", NULL); - BUG_ON(!data); priv->type = data->type; priv->can.clock.freq = data->get_clock(ofdev, clock_name, &mscan_clksrc); if (!priv->can.clock.freq) { dev_err(&ofdev->dev, "couldn't get MSCAN clock properties\n"); - goto exit_free_mscan; + goto exit_put_clock; } err = register_mscandev(dev, mscan_clksrc); if (err) { dev_err(&ofdev->dev, "registering %s failed (err=%d)\n", DRV_NAME, err); - goto exit_free_mscan; + goto exit_put_clock; } dev_info(&ofdev->dev, "MSCAN at 0x%p, irq %d, clock %d Hz\n", @@ -351,7 +339,9 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev) return 0; -exit_free_mscan: +exit_put_clock: + if (data->put_clock) + data->put_clock(ofdev); free_candev(dev); exit_dispose_irq: irq_dispose_mapping(irq); @@ -361,15 +351,13 @@ exit_unmap_mem: return err; } -static int mpc5xxx_can_remove(struct platform_device *ofdev) +static void mpc5xxx_can_remove(struct platform_device *ofdev) { - const struct of_device_id *match; const struct mpc5xxx_can_data *data; struct net_device *dev = platform_get_drvdata(ofdev); struct mscan_priv *priv = netdev_priv(dev); - match = of_match_device(mpc5xxx_can_table, &ofdev->dev); - data = match ? match->data : NULL; + data = device_get_match_data(&ofdev->dev); unregister_mscandev(dev); if (data && data->put_clock) @@ -377,8 +365,6 @@ static int mpc5xxx_can_remove(struct platform_device *ofdev) iounmap(priv->reg_base); irq_dispose_mapping(dev->irq); free_candev(dev); - - return 0; } #ifdef CONFIG_PM diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c index acb708fc1463..39c7aa2a0b2f 100644 --- a/drivers/net/can/mscan/mscan.c +++ b/drivers/net/can/mscan/mscan.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * CAN bus driver for the alone generic (as possible as) MSCAN controller. 
* @@ -5,18 +6,6 @@ * Varma Electronics Oy * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com> * Copyright (C) 2008-2009 Pengutronix <kernel@pengutronix.de> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the version 2 of the GNU General Public License - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. */ #include <linux/kernel.h> @@ -45,12 +34,6 @@ static const struct can_bittiming_const mscan_bittiming_const = { .brp_inc = 1, }; -struct mscan_state { - u8 mode; - u8 canrier; - u8 cantier; -}; - static enum can_state state_map[] = { CAN_STATE_ERROR_ACTIVE, CAN_STATE_ERROR_WARNING, @@ -202,7 +185,7 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev) int i, rtr, buf_id; u32 can_id; - if (can_dropped_invalid_skb(dev, skb)) + if (can_dev_dropped_skb(dev, skb)) return NETDEV_TX_OK; out_8(&regs->cantier, 0); @@ -220,6 +203,7 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev) * since buffer with lower id have higher priority (hell..) */ netif_stop_queue(dev); + fallthrough; case 2: if (buf_id < priv->prev_buf_id) { priv->cur_pri++; @@ -260,16 +244,16 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev) void __iomem *data = &regs->tx.dsr1_0; u16 *payload = (u16 *)frame->data; - for (i = 0; i < frame->can_dlc / 2; i++) { + for (i = 0; i < frame->len / 2; i++) { out_be16(data, *payload++); data += 2 + _MSCAN_RESERVED_DSR_SIZE; } /* write remaining byte if necessary */ - if (frame->can_dlc & 1) - out_8(data, frame->data[frame->can_dlc - 1]); + if (frame->len & 1) + out_8(data, frame->data[frame->len - 1]); } - out_8(&regs->tx.dlr, frame->can_dlc); + out_8(&regs->tx.dlr, frame->len); out_8(&regs->tx.tbpr, priv->cur_pri); /* Start transmission. */ @@ -280,7 +264,7 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev) list_add_tail(&priv->tx_queue[buf_id].list, &priv->tx_head); - can_put_echo_skb(skb, dev, buf_id); + can_put_echo_skb(skb, dev, buf_id, 0); /* Enable interrupt. 
*/ priv->tx_active |= 1 << buf_id; @@ -322,19 +306,19 @@ static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame) if (can_id & 1) frame->can_id |= CAN_RTR_FLAG; - frame->can_dlc = get_can_dlc(in_8(&regs->rx.dlr) & 0xf); + frame->len = can_cc_dlc2len(in_8(&regs->rx.dlr) & 0xf); if (!(frame->can_id & CAN_RTR_FLAG)) { void __iomem *data = &regs->rx.dsr1_0; u16 *payload = (u16 *)frame->data; - for (i = 0; i < frame->can_dlc / 2; i++) { + for (i = 0; i < frame->len / 2; i++) { *payload++ = in_be16(data); data += 2 + _MSCAN_RESERVED_DSR_SIZE; } /* read remaining byte if necessary */ - if (frame->can_dlc & 1) - frame->data[frame->can_dlc - 1] = in_8(data); + if (frame->len & 1) + frame->data[frame->len - 1] = in_8(data); } out_8(&regs->canrflg, MSCAN_RXF); @@ -382,7 +366,7 @@ static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame, } } priv->shadow_statflg = canrflg & MSCAN_STAT_MSK; - frame->can_dlc = CAN_ERR_DLC; + frame->len = CAN_ERR_DLC; out_8(&regs->canrflg, MSCAN_ERR_IF); } @@ -392,13 +376,12 @@ static int mscan_rx_poll(struct napi_struct *napi, int quota) struct net_device *dev = napi->dev; struct mscan_regs __iomem *regs = priv->reg_base; struct net_device_stats *stats = &dev->stats; - int npackets = 0; - int ret = 1; + int work_done = 0; struct sk_buff *skb; struct can_frame *frame; u8 canrflg; - while (npackets < quota) { + while (work_done < quota) { canrflg = in_8(&regs->canrflg); if (!(canrflg & (MSCAN_RXF | MSCAN_ERR_IF))) break; @@ -412,25 +395,27 @@ static int mscan_rx_poll(struct napi_struct *napi, int quota) continue; } - if (canrflg & MSCAN_RXF) + if (canrflg & MSCAN_RXF) { mscan_get_rx_frame(dev, frame); - else if (canrflg & MSCAN_ERR_IF) + stats->rx_packets++; + if (!(frame->can_id & CAN_RTR_FLAG)) + stats->rx_bytes += frame->len; + } else if (canrflg & MSCAN_ERR_IF) { mscan_get_err_frame(dev, frame, canrflg); + } - stats->rx_packets++; - stats->rx_bytes += frame->can_dlc; - npackets++; + work_done++; netif_receive_skb(skb); } - if (!(in_8(&regs->canrflg) & (MSCAN_RXF | MSCAN_ERR_IF))) { - napi_complete(&priv->napi); - clear_bit(F_RX_PROGRESS, &priv->flags); - if (priv->can.state < CAN_STATE_BUS_OFF) - out_8(&regs->canrier, priv->shadow_canrier); - ret = 0; + if (work_done < quota) { + if (likely(napi_complete_done(&priv->napi, work_done))) { + clear_bit(F_RX_PROGRESS, &priv->flags); + if (priv->can.state < CAN_STATE_BUS_OFF) + out_8(&regs->canrier, priv->shadow_canrier); + } } - return ret; + return work_done; } static irqreturn_t mscan_isr(int irq, void *dev_id) @@ -457,9 +442,9 @@ static irqreturn_t mscan_isr(int irq, void *dev_id) continue; out_8(&regs->cantbsel, mask); - stats->tx_bytes += in_8(&regs->tx.dlr); + stats->tx_bytes += can_get_echo_skb(dev, entry->id, + NULL); stats->tx_packets++; - can_get_echo_skb(dev, entry->id); priv->tx_active &= ~mask; list_del(pos); } @@ -552,16 +537,12 @@ static int mscan_open(struct net_device *dev) struct mscan_priv *priv = netdev_priv(dev); struct mscan_regs __iomem *regs = priv->reg_base; - if (priv->clk_ipg) { - ret = clk_prepare_enable(priv->clk_ipg); - if (ret) - goto exit_retcode; - } - if (priv->clk_can) { - ret = clk_prepare_enable(priv->clk_can); - if (ret) - goto exit_dis_ipg_clock; - } + ret = clk_prepare_enable(priv->clk_ipg); + if (ret) + goto exit_retcode; + ret = clk_prepare_enable(priv->clk_can); + if (ret) + goto exit_dis_ipg_clock; /* common open */ ret = open_candev(dev); @@ -595,11 +576,9 @@ exit_napi_disable: napi_disable(&priv->napi); close_candev(dev); exit_dis_can_clock: - if (priv->clk_can) - 
clk_disable_unprepare(priv->clk_can); + clk_disable_unprepare(priv->clk_can); exit_dis_ipg_clock: - if (priv->clk_ipg) - clk_disable_unprepare(priv->clk_ipg); + clk_disable_unprepare(priv->clk_ipg); exit_retcode: return ret; } @@ -618,10 +597,8 @@ static int mscan_close(struct net_device *dev) close_candev(dev); free_irq(dev->irq, dev); - if (priv->clk_can) - clk_disable_unprepare(priv->clk_can); - if (priv->clk_ipg) - clk_disable_unprepare(priv->clk_ipg); + clk_disable_unprepare(priv->clk_can); + clk_disable_unprepare(priv->clk_ipg); return 0; } @@ -630,7 +607,10 @@ static const struct net_device_ops mscan_netdev_ops = { .ndo_open = mscan_open, .ndo_stop = mscan_close, .ndo_start_xmit = mscan_start_xmit, - .ndo_change_mtu = can_change_mtu, +}; + +static const struct ethtool_ops mscan_ethtool_ops = { + .get_ts_info = ethtool_op_get_ts_info, }; int register_mscandev(struct net_device *dev, int mscan_clksrc) @@ -693,10 +673,11 @@ struct net_device *alloc_mscandev(void) priv = netdev_priv(dev); dev->netdev_ops = &mscan_netdev_ops; + dev->ethtool_ops = &mscan_ethtool_ops; dev->flags |= IFF_ECHO; /* we support local echo */ - netif_napi_add(dev, &priv->napi, mscan_rx_poll, 8); + netif_napi_add_weight(dev, &priv->napi, mscan_rx_poll, 8); priv->can.bittiming_const = &mscan_bittiming_const; priv->can.do_set_bittiming = mscan_do_set_bittiming; diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h index ad8e08f9c496..25639a5e1ca9 100644 --- a/drivers/net/can/mscan/mscan.h +++ b/drivers/net/can/mscan/mscan.h @@ -1,20 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * Definitions of consts/structs to drive the Freescale MSCAN. * * Copyright (C) 2005-2006 Andrey Volkov <avolkov@varma-el.com>, * Varma Electronics Oy - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the version 2 of the GNU General Public License - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. */ #ifndef __MSCAN_H__ diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c deleted file mode 100644 index c1317889d3d8..000000000000 --- a/drivers/net/can/pch_can.c +++ /dev/null @@ -1,1279 +0,0 @@ -/* - * Copyright (C) 1999 - 2010 Intel Corporation. - * Copyright (C) 2010 LAPIS SEMICONDUCTOR CO., LTD. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. 
- */ - -#include <linux/interrupt.h> -#include <linux/delay.h> -#include <linux/io.h> -#include <linux/module.h> -#include <linux/sched.h> -#include <linux/pci.h> -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/errno.h> -#include <linux/netdevice.h> -#include <linux/skbuff.h> -#include <linux/can.h> -#include <linux/can/dev.h> -#include <linux/can/error.h> - -#define PCH_CTRL_INIT BIT(0) /* The INIT bit of CANCONT register. */ -#define PCH_CTRL_IE BIT(1) /* The IE bit of CAN control register */ -#define PCH_CTRL_IE_SIE_EIE (BIT(3) | BIT(2) | BIT(1)) -#define PCH_CTRL_CCE BIT(6) -#define PCH_CTRL_OPT BIT(7) /* The OPT bit of CANCONT register. */ -#define PCH_OPT_SILENT BIT(3) /* The Silent bit of CANOPT reg. */ -#define PCH_OPT_LBACK BIT(4) /* The LoopBack bit of CANOPT reg. */ - -#define PCH_CMASK_RX_TX_SET 0x00f3 -#define PCH_CMASK_RX_TX_GET 0x0073 -#define PCH_CMASK_ALL 0xff -#define PCH_CMASK_NEWDAT BIT(2) -#define PCH_CMASK_CLRINTPND BIT(3) -#define PCH_CMASK_CTRL BIT(4) -#define PCH_CMASK_ARB BIT(5) -#define PCH_CMASK_MASK BIT(6) -#define PCH_CMASK_RDWR BIT(7) -#define PCH_IF_MCONT_NEWDAT BIT(15) -#define PCH_IF_MCONT_MSGLOST BIT(14) -#define PCH_IF_MCONT_INTPND BIT(13) -#define PCH_IF_MCONT_UMASK BIT(12) -#define PCH_IF_MCONT_TXIE BIT(11) -#define PCH_IF_MCONT_RXIE BIT(10) -#define PCH_IF_MCONT_RMTEN BIT(9) -#define PCH_IF_MCONT_TXRQXT BIT(8) -#define PCH_IF_MCONT_EOB BIT(7) -#define PCH_IF_MCONT_DLC (BIT(0) | BIT(1) | BIT(2) | BIT(3)) -#define PCH_MASK2_MDIR_MXTD (BIT(14) | BIT(15)) -#define PCH_ID2_DIR BIT(13) -#define PCH_ID2_XTD BIT(14) -#define PCH_ID_MSGVAL BIT(15) -#define PCH_IF_CREQ_BUSY BIT(15) - -#define PCH_STATUS_INT 0x8000 -#define PCH_RP 0x00008000 -#define PCH_REC 0x00007f00 -#define PCH_TEC 0x000000ff - -#define PCH_TX_OK BIT(3) -#define PCH_RX_OK BIT(4) -#define PCH_EPASSIV BIT(5) -#define PCH_EWARN BIT(6) -#define PCH_BUS_OFF BIT(7) - -/* bit position of certain controller bits. */ -#define PCH_BIT_BRP_SHIFT 0 -#define PCH_BIT_SJW_SHIFT 6 -#define PCH_BIT_TSEG1_SHIFT 8 -#define PCH_BIT_TSEG2_SHIFT 12 -#define PCH_BIT_BRPE_BRPE_SHIFT 6 - -#define PCH_MSK_BITT_BRP 0x3f -#define PCH_MSK_BRPE_BRPE 0x3c0 -#define PCH_MSK_CTRL_IE_SIE_EIE 0x07 -#define PCH_COUNTER_LIMIT 10 - -#define PCH_CAN_CLK 50000000 /* 50MHz */ - -/* - * Define the number of message object. - * PCH CAN communications are done via Message RAM. - * The Message RAM consists of 32 message objects. 
- */ -#define PCH_RX_OBJ_NUM 26 -#define PCH_TX_OBJ_NUM 6 -#define PCH_RX_OBJ_START 1 -#define PCH_RX_OBJ_END PCH_RX_OBJ_NUM -#define PCH_TX_OBJ_START (PCH_RX_OBJ_END + 1) -#define PCH_TX_OBJ_END (PCH_RX_OBJ_NUM + PCH_TX_OBJ_NUM) - -#define PCH_FIFO_THRESH 16 - -/* TxRqst2 show status of MsgObjNo.17~32 */ -#define PCH_TREQ2_TX_MASK (((1 << PCH_TX_OBJ_NUM) - 1) <<\ - (PCH_RX_OBJ_END - 16)) - -enum pch_ifreg { - PCH_RX_IFREG, - PCH_TX_IFREG, -}; - -enum pch_can_err { - PCH_STUF_ERR = 1, - PCH_FORM_ERR, - PCH_ACK_ERR, - PCH_BIT1_ERR, - PCH_BIT0_ERR, - PCH_CRC_ERR, - PCH_LEC_ALL, -}; - -enum pch_can_mode { - PCH_CAN_ENABLE, - PCH_CAN_DISABLE, - PCH_CAN_ALL, - PCH_CAN_NONE, - PCH_CAN_STOP, - PCH_CAN_RUN, -}; - -struct pch_can_if_regs { - u32 creq; - u32 cmask; - u32 mask1; - u32 mask2; - u32 id1; - u32 id2; - u32 mcont; - u32 data[4]; - u32 rsv[13]; -}; - -struct pch_can_regs { - u32 cont; - u32 stat; - u32 errc; - u32 bitt; - u32 intr; - u32 opt; - u32 brpe; - u32 reserve; - struct pch_can_if_regs ifregs[2]; /* [0]=if1 [1]=if2 */ - u32 reserve1[8]; - u32 treq1; - u32 treq2; - u32 reserve2[6]; - u32 data1; - u32 data2; - u32 reserve3[6]; - u32 canipend1; - u32 canipend2; - u32 reserve4[6]; - u32 canmval1; - u32 canmval2; - u32 reserve5[37]; - u32 srst; -}; - -struct pch_can_priv { - struct can_priv can; - struct pci_dev *dev; - u32 tx_enable[PCH_TX_OBJ_END]; - u32 rx_enable[PCH_TX_OBJ_END]; - u32 rx_link[PCH_TX_OBJ_END]; - u32 int_enables; - struct net_device *ndev; - struct pch_can_regs __iomem *regs; - struct napi_struct napi; - int tx_obj; /* Point next Tx Obj index */ - int use_msi; -}; - -static const struct can_bittiming_const pch_can_bittiming_const = { - .name = KBUILD_MODNAME, - .tseg1_min = 2, - .tseg1_max = 16, - .tseg2_min = 1, - .tseg2_max = 8, - .sjw_max = 4, - .brp_min = 1, - .brp_max = 1024, /* 6bit + extended 4bit */ - .brp_inc = 1, -}; - -static const struct pci_device_id pch_pci_tbl[] = { - {PCI_VENDOR_ID_INTEL, 0x8818, PCI_ANY_ID, PCI_ANY_ID,}, - {0,} -}; -MODULE_DEVICE_TABLE(pci, pch_pci_tbl); - -static inline void pch_can_bit_set(void __iomem *addr, u32 mask) -{ - iowrite32(ioread32(addr) | mask, addr); -} - -static inline void pch_can_bit_clear(void __iomem *addr, u32 mask) -{ - iowrite32(ioread32(addr) & ~mask, addr); -} - -static void pch_can_set_run_mode(struct pch_can_priv *priv, - enum pch_can_mode mode) -{ - switch (mode) { - case PCH_CAN_RUN: - pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_INIT); - break; - - case PCH_CAN_STOP: - pch_can_bit_set(&priv->regs->cont, PCH_CTRL_INIT); - break; - - default: - netdev_err(priv->ndev, "%s -> Invalid Mode.\n", __func__); - break; - } -} - -static void pch_can_set_optmode(struct pch_can_priv *priv) -{ - u32 reg_val = ioread32(&priv->regs->opt); - - if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) - reg_val |= PCH_OPT_SILENT; - - if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) - reg_val |= PCH_OPT_LBACK; - - pch_can_bit_set(&priv->regs->cont, PCH_CTRL_OPT); - iowrite32(reg_val, &priv->regs->opt); -} - -static void pch_can_rw_msg_obj(void __iomem *creq_addr, u32 num) -{ - int counter = PCH_COUNTER_LIMIT; - u32 ifx_creq; - - iowrite32(num, creq_addr); - while (counter) { - ifx_creq = ioread32(creq_addr) & PCH_IF_CREQ_BUSY; - if (!ifx_creq) - break; - counter--; - udelay(1); - } - if (!counter) - pr_err("%s:IF1 BUSY Flag is set forever.\n", __func__); -} - -static void pch_can_set_int_enables(struct pch_can_priv *priv, - enum pch_can_mode interrupt_no) -{ - switch (interrupt_no) { - case PCH_CAN_DISABLE: - 
pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE); - break; - - case PCH_CAN_ALL: - pch_can_bit_set(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE); - break; - - case PCH_CAN_NONE: - pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE); - break; - - default: - netdev_err(priv->ndev, "Invalid interrupt number.\n"); - break; - } -} - -static void pch_can_set_rxtx(struct pch_can_priv *priv, u32 buff_num, - int set, enum pch_ifreg dir) -{ - u32 ie; - - if (dir) - ie = PCH_IF_MCONT_TXIE; - else - ie = PCH_IF_MCONT_RXIE; - - /* Reading the Msg buffer from Message RAM to IF1/2 registers. */ - iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[dir].cmask); - pch_can_rw_msg_obj(&priv->regs->ifregs[dir].creq, buff_num); - - /* Setting the IF1/2MASK1 register to access MsgVal and RxIE bits */ - iowrite32(PCH_CMASK_RDWR | PCH_CMASK_ARB | PCH_CMASK_CTRL, - &priv->regs->ifregs[dir].cmask); - - if (set) { - /* Setting the MsgVal and RxIE/TxIE bits */ - pch_can_bit_set(&priv->regs->ifregs[dir].mcont, ie); - pch_can_bit_set(&priv->regs->ifregs[dir].id2, PCH_ID_MSGVAL); - } else { - /* Clearing the MsgVal and RxIE/TxIE bits */ - pch_can_bit_clear(&priv->regs->ifregs[dir].mcont, ie); - pch_can_bit_clear(&priv->regs->ifregs[dir].id2, PCH_ID_MSGVAL); - } - - pch_can_rw_msg_obj(&priv->regs->ifregs[dir].creq, buff_num); -} - -static void pch_can_set_rx_all(struct pch_can_priv *priv, int set) -{ - int i; - - /* Traversing to obtain the object configured as receivers. */ - for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) - pch_can_set_rxtx(priv, i, set, PCH_RX_IFREG); -} - -static void pch_can_set_tx_all(struct pch_can_priv *priv, int set) -{ - int i; - - /* Traversing to obtain the object configured as transmit object. */ - for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++) - pch_can_set_rxtx(priv, i, set, PCH_TX_IFREG); -} - -static u32 pch_can_int_pending(struct pch_can_priv *priv) -{ - return ioread32(&priv->regs->intr) & 0xffff; -} - -static void pch_can_clear_if_buffers(struct pch_can_priv *priv) -{ - int i; /* Msg Obj ID (1~32) */ - - for (i = PCH_RX_OBJ_START; i <= PCH_TX_OBJ_END; i++) { - iowrite32(PCH_CMASK_RX_TX_SET, &priv->regs->ifregs[0].cmask); - iowrite32(0xffff, &priv->regs->ifregs[0].mask1); - iowrite32(0xffff, &priv->regs->ifregs[0].mask2); - iowrite32(0x0, &priv->regs->ifregs[0].id1); - iowrite32(0x0, &priv->regs->ifregs[0].id2); - iowrite32(0x0, &priv->regs->ifregs[0].mcont); - iowrite32(0x0, &priv->regs->ifregs[0].data[0]); - iowrite32(0x0, &priv->regs->ifregs[0].data[1]); - iowrite32(0x0, &priv->regs->ifregs[0].data[2]); - iowrite32(0x0, &priv->regs->ifregs[0].data[3]); - iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK | - PCH_CMASK_ARB | PCH_CMASK_CTRL, - &priv->regs->ifregs[0].cmask); - pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, i); - } -} - -static void pch_can_config_rx_tx_buffers(struct pch_can_priv *priv) -{ - int i; - - for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) { - iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask); - pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, i); - - iowrite32(0x0, &priv->regs->ifregs[0].id1); - iowrite32(0x0, &priv->regs->ifregs[0].id2); - - pch_can_bit_set(&priv->regs->ifregs[0].mcont, - PCH_IF_MCONT_UMASK); - - /* In case FIFO mode, Last EoB of Rx Obj must be 1 */ - if (i == PCH_RX_OBJ_END) - pch_can_bit_set(&priv->regs->ifregs[0].mcont, - PCH_IF_MCONT_EOB); - else - pch_can_bit_clear(&priv->regs->ifregs[0].mcont, - PCH_IF_MCONT_EOB); - - iowrite32(0, &priv->regs->ifregs[0].mask1); - pch_can_bit_clear(&priv->regs->ifregs[0].mask2, - 
0x1fff | PCH_MASK2_MDIR_MXTD); - - /* Setting CMASK for writing */ - iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK | PCH_CMASK_ARB | - PCH_CMASK_CTRL, &priv->regs->ifregs[0].cmask); - - pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, i); - } - - for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++) { - iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[1].cmask); - pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, i); - - /* Resetting DIR bit for reception */ - iowrite32(0x0, &priv->regs->ifregs[1].id1); - iowrite32(PCH_ID2_DIR, &priv->regs->ifregs[1].id2); - - /* Setting EOB bit for transmitter */ - iowrite32(PCH_IF_MCONT_EOB | PCH_IF_MCONT_UMASK, - &priv->regs->ifregs[1].mcont); - - iowrite32(0, &priv->regs->ifregs[1].mask1); - pch_can_bit_clear(&priv->regs->ifregs[1].mask2, 0x1fff); - - /* Setting CMASK for writing */ - iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK | PCH_CMASK_ARB | - PCH_CMASK_CTRL, &priv->regs->ifregs[1].cmask); - - pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, i); - } -} - -static void pch_can_init(struct pch_can_priv *priv) -{ - /* Stopping the Can device. */ - pch_can_set_run_mode(priv, PCH_CAN_STOP); - - /* Clearing all the message object buffers. */ - pch_can_clear_if_buffers(priv); - - /* Configuring the respective message object as either rx/tx object. */ - pch_can_config_rx_tx_buffers(priv); - - /* Enabling the interrupts. */ - pch_can_set_int_enables(priv, PCH_CAN_ALL); -} - -static void pch_can_release(struct pch_can_priv *priv) -{ - /* Stooping the CAN device. */ - pch_can_set_run_mode(priv, PCH_CAN_STOP); - - /* Disabling the interrupts. */ - pch_can_set_int_enables(priv, PCH_CAN_NONE); - - /* Disabling all the receive object. */ - pch_can_set_rx_all(priv, 0); - - /* Disabling all the transmit object. */ - pch_can_set_tx_all(priv, 0); -} - -/* This function clears interrupt(s) from the CAN device. */ -static void pch_can_int_clr(struct pch_can_priv *priv, u32 mask) -{ - /* Clear interrupt for transmit object */ - if ((mask >= PCH_RX_OBJ_START) && (mask <= PCH_RX_OBJ_END)) { - /* Setting CMASK for clearing the reception interrupts. */ - iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL | PCH_CMASK_ARB, - &priv->regs->ifregs[0].cmask); - - /* Clearing the Dir bit. */ - pch_can_bit_clear(&priv->regs->ifregs[0].id2, PCH_ID2_DIR); - - /* Clearing NewDat & IntPnd */ - pch_can_bit_clear(&priv->regs->ifregs[0].mcont, - PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND); - - pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, mask); - } else if ((mask >= PCH_TX_OBJ_START) && (mask <= PCH_TX_OBJ_END)) { - /* - * Setting CMASK for clearing interrupts for frame transmission. - */ - iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL | PCH_CMASK_ARB, - &priv->regs->ifregs[1].cmask); - - /* Resetting the ID registers. 
*/ - pch_can_bit_set(&priv->regs->ifregs[1].id2, - PCH_ID2_DIR | (0x7ff << 2)); - iowrite32(0x0, &priv->regs->ifregs[1].id1); - - /* Claring NewDat, TxRqst & IntPnd */ - pch_can_bit_clear(&priv->regs->ifregs[1].mcont, - PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND | - PCH_IF_MCONT_TXRQXT); - pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, mask); - } -} - -static void pch_can_reset(struct pch_can_priv *priv) -{ - /* write to sw reset register */ - iowrite32(1, &priv->regs->srst); - iowrite32(0, &priv->regs->srst); -} - -static void pch_can_error(struct net_device *ndev, u32 status) -{ - struct sk_buff *skb; - struct pch_can_priv *priv = netdev_priv(ndev); - struct can_frame *cf; - u32 errc, lec; - struct net_device_stats *stats = &(priv->ndev->stats); - enum can_state state = priv->can.state; - - skb = alloc_can_err_skb(ndev, &cf); - if (!skb) - return; - - if (status & PCH_BUS_OFF) { - pch_can_set_tx_all(priv, 0); - pch_can_set_rx_all(priv, 0); - state = CAN_STATE_BUS_OFF; - cf->can_id |= CAN_ERR_BUSOFF; - priv->can.can_stats.bus_off++; - can_bus_off(ndev); - } - - errc = ioread32(&priv->regs->errc); - /* Warning interrupt. */ - if (status & PCH_EWARN) { - state = CAN_STATE_ERROR_WARNING; - priv->can.can_stats.error_warning++; - cf->can_id |= CAN_ERR_CRTL; - if (((errc & PCH_REC) >> 8) > 96) - cf->data[1] |= CAN_ERR_CRTL_RX_WARNING; - if ((errc & PCH_TEC) > 96) - cf->data[1] |= CAN_ERR_CRTL_TX_WARNING; - netdev_dbg(ndev, - "%s -> Error Counter is more than 96.\n", __func__); - } - /* Error passive interrupt. */ - if (status & PCH_EPASSIV) { - priv->can.can_stats.error_passive++; - state = CAN_STATE_ERROR_PASSIVE; - cf->can_id |= CAN_ERR_CRTL; - if (errc & PCH_RP) - cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; - if ((errc & PCH_TEC) > 127) - cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE; - netdev_dbg(ndev, - "%s -> CAN controller is ERROR PASSIVE .\n", __func__); - } - - lec = status & PCH_LEC_ALL; - switch (lec) { - case PCH_STUF_ERR: - cf->data[2] |= CAN_ERR_PROT_STUFF; - priv->can.can_stats.bus_error++; - stats->rx_errors++; - break; - case PCH_FORM_ERR: - cf->data[2] |= CAN_ERR_PROT_FORM; - priv->can.can_stats.bus_error++; - stats->rx_errors++; - break; - case PCH_ACK_ERR: - cf->can_id |= CAN_ERR_ACK; - priv->can.can_stats.bus_error++; - stats->rx_errors++; - break; - case PCH_BIT1_ERR: - case PCH_BIT0_ERR: - cf->data[2] |= CAN_ERR_PROT_BIT; - priv->can.can_stats.bus_error++; - stats->rx_errors++; - break; - case PCH_CRC_ERR: - cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; - priv->can.can_stats.bus_error++; - stats->rx_errors++; - break; - case PCH_LEC_ALL: /* Written by CPU. No error status */ - break; - } - - cf->data[6] = errc & PCH_TEC; - cf->data[7] = (errc & PCH_REC) >> 8; - - priv->can.state = state; - netif_receive_skb(skb); - - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; -} - -static irqreturn_t pch_can_interrupt(int irq, void *dev_id) -{ - struct net_device *ndev = (struct net_device *)dev_id; - struct pch_can_priv *priv = netdev_priv(ndev); - - if (!pch_can_int_pending(priv)) - return IRQ_NONE; - - pch_can_set_int_enables(priv, PCH_CAN_NONE); - napi_schedule(&priv->napi); - return IRQ_HANDLED; -} - -static void pch_fifo_thresh(struct pch_can_priv *priv, int obj_id) -{ - if (obj_id < PCH_FIFO_THRESH) { - iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL | - PCH_CMASK_ARB, &priv->regs->ifregs[0].cmask); - - /* Clearing the Dir bit. 
*/ - pch_can_bit_clear(&priv->regs->ifregs[0].id2, PCH_ID2_DIR); - - /* Clearing NewDat & IntPnd */ - pch_can_bit_clear(&priv->regs->ifregs[0].mcont, - PCH_IF_MCONT_INTPND); - pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, obj_id); - } else if (obj_id > PCH_FIFO_THRESH) { - pch_can_int_clr(priv, obj_id); - } else if (obj_id == PCH_FIFO_THRESH) { - int cnt; - for (cnt = 0; cnt < PCH_FIFO_THRESH; cnt++) - pch_can_int_clr(priv, cnt + 1); - } -} - -static void pch_can_rx_msg_lost(struct net_device *ndev, int obj_id) -{ - struct pch_can_priv *priv = netdev_priv(ndev); - struct net_device_stats *stats = &(priv->ndev->stats); - struct sk_buff *skb; - struct can_frame *cf; - - netdev_dbg(priv->ndev, "Msg Obj is overwritten.\n"); - pch_can_bit_clear(&priv->regs->ifregs[0].mcont, - PCH_IF_MCONT_MSGLOST); - iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL, - &priv->regs->ifregs[0].cmask); - pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, obj_id); - - skb = alloc_can_err_skb(ndev, &cf); - if (!skb) - return; - - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; - stats->rx_over_errors++; - stats->rx_errors++; - - netif_receive_skb(skb); -} - -static int pch_can_rx_normal(struct net_device *ndev, u32 obj_num, int quota) -{ - u32 reg; - canid_t id; - int rcv_pkts = 0; - struct sk_buff *skb; - struct can_frame *cf; - struct pch_can_priv *priv = netdev_priv(ndev); - struct net_device_stats *stats = &(priv->ndev->stats); - int i; - u32 id2; - u16 data_reg; - - do { - /* Reading the message object from the Message RAM */ - iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask); - pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, obj_num); - - /* Reading the MCONT register. */ - reg = ioread32(&priv->regs->ifregs[0].mcont); - - if (reg & PCH_IF_MCONT_EOB) - break; - - /* If MsgLost bit set. 
*/ - if (reg & PCH_IF_MCONT_MSGLOST) { - pch_can_rx_msg_lost(ndev, obj_num); - rcv_pkts++; - quota--; - obj_num++; - continue; - } else if (!(reg & PCH_IF_MCONT_NEWDAT)) { - obj_num++; - continue; - } - - skb = alloc_can_skb(priv->ndev, &cf); - if (!skb) { - netdev_err(ndev, "alloc_can_skb Failed\n"); - return rcv_pkts; - } - - /* Get Received data */ - id2 = ioread32(&priv->regs->ifregs[0].id2); - if (id2 & PCH_ID2_XTD) { - id = (ioread32(&priv->regs->ifregs[0].id1) & 0xffff); - id |= (((id2) & 0x1fff) << 16); - cf->can_id = id | CAN_EFF_FLAG; - } else { - id = (id2 >> 2) & CAN_SFF_MASK; - cf->can_id = id; - } - - if (id2 & PCH_ID2_DIR) - cf->can_id |= CAN_RTR_FLAG; - - cf->can_dlc = get_can_dlc((ioread32(&priv->regs-> - ifregs[0].mcont)) & 0xF); - - for (i = 0; i < cf->can_dlc; i += 2) { - data_reg = ioread16(&priv->regs->ifregs[0].data[i / 2]); - cf->data[i] = data_reg; - cf->data[i + 1] = data_reg >> 8; - } - - netif_receive_skb(skb); - rcv_pkts++; - stats->rx_packets++; - quota--; - stats->rx_bytes += cf->can_dlc; - - pch_fifo_thresh(priv, obj_num); - obj_num++; - } while (quota > 0); - - return rcv_pkts; -} - -static void pch_can_tx_complete(struct net_device *ndev, u32 int_stat) -{ - struct pch_can_priv *priv = netdev_priv(ndev); - struct net_device_stats *stats = &(priv->ndev->stats); - u32 dlc; - - can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_END - 1); - iowrite32(PCH_CMASK_RX_TX_GET | PCH_CMASK_CLRINTPND, - &priv->regs->ifregs[1].cmask); - pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, int_stat); - dlc = get_can_dlc(ioread32(&priv->regs->ifregs[1].mcont) & - PCH_IF_MCONT_DLC); - stats->tx_bytes += dlc; - stats->tx_packets++; - if (int_stat == PCH_TX_OBJ_END) - netif_wake_queue(ndev); -} - -static int pch_can_poll(struct napi_struct *napi, int quota) -{ - struct net_device *ndev = napi->dev; - struct pch_can_priv *priv = netdev_priv(ndev); - u32 int_stat; - u32 reg_stat; - int quota_save = quota; - - int_stat = pch_can_int_pending(priv); - if (!int_stat) - goto end; - - if (int_stat == PCH_STATUS_INT) { - reg_stat = ioread32(&priv->regs->stat); - - if ((reg_stat & (PCH_BUS_OFF | PCH_LEC_ALL)) && - ((reg_stat & PCH_LEC_ALL) != PCH_LEC_ALL)) { - pch_can_error(ndev, reg_stat); - quota--; - } - - if (reg_stat & (PCH_TX_OK | PCH_RX_OK)) - pch_can_bit_clear(&priv->regs->stat, - reg_stat & (PCH_TX_OK | PCH_RX_OK)); - - int_stat = pch_can_int_pending(priv); - } - - if (quota == 0) - goto end; - - if ((int_stat >= PCH_RX_OBJ_START) && (int_stat <= PCH_RX_OBJ_END)) { - quota -= pch_can_rx_normal(ndev, int_stat, quota); - } else if ((int_stat >= PCH_TX_OBJ_START) && - (int_stat <= PCH_TX_OBJ_END)) { - /* Handle transmission interrupt */ - pch_can_tx_complete(ndev, int_stat); - } - -end: - napi_complete(napi); - pch_can_set_int_enables(priv, PCH_CAN_ALL); - - return quota_save - quota; -} - -static int pch_set_bittiming(struct net_device *ndev) -{ - struct pch_can_priv *priv = netdev_priv(ndev); - const struct can_bittiming *bt = &priv->can.bittiming; - u32 canbit; - u32 bepe; - - /* Setting the CCE bit for accessing the Can Timing register. 
*/ - pch_can_bit_set(&priv->regs->cont, PCH_CTRL_CCE); - - canbit = (bt->brp - 1) & PCH_MSK_BITT_BRP; - canbit |= (bt->sjw - 1) << PCH_BIT_SJW_SHIFT; - canbit |= (bt->phase_seg1 + bt->prop_seg - 1) << PCH_BIT_TSEG1_SHIFT; - canbit |= (bt->phase_seg2 - 1) << PCH_BIT_TSEG2_SHIFT; - bepe = ((bt->brp - 1) & PCH_MSK_BRPE_BRPE) >> PCH_BIT_BRPE_BRPE_SHIFT; - iowrite32(canbit, &priv->regs->bitt); - iowrite32(bepe, &priv->regs->brpe); - pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_CCE); - - return 0; -} - -static void pch_can_start(struct net_device *ndev) -{ - struct pch_can_priv *priv = netdev_priv(ndev); - - if (priv->can.state != CAN_STATE_STOPPED) - pch_can_reset(priv); - - pch_set_bittiming(ndev); - pch_can_set_optmode(priv); - - pch_can_set_tx_all(priv, 1); - pch_can_set_rx_all(priv, 1); - - /* Setting the CAN to run mode. */ - pch_can_set_run_mode(priv, PCH_CAN_RUN); - - priv->can.state = CAN_STATE_ERROR_ACTIVE; - - return; -} - -static int pch_can_do_set_mode(struct net_device *ndev, enum can_mode mode) -{ - int ret = 0; - - switch (mode) { - case CAN_MODE_START: - pch_can_start(ndev); - netif_wake_queue(ndev); - break; - default: - ret = -EOPNOTSUPP; - break; - } - - return ret; -} - -static int pch_can_open(struct net_device *ndev) -{ - struct pch_can_priv *priv = netdev_priv(ndev); - int retval; - - /* Regstering the interrupt. */ - retval = request_irq(priv->dev->irq, pch_can_interrupt, IRQF_SHARED, - ndev->name, ndev); - if (retval) { - netdev_err(ndev, "request_irq failed.\n"); - goto req_irq_err; - } - - /* Open common can device */ - retval = open_candev(ndev); - if (retval) { - netdev_err(ndev, "open_candev() failed %d\n", retval); - goto err_open_candev; - } - - pch_can_init(priv); - pch_can_start(ndev); - napi_enable(&priv->napi); - netif_start_queue(ndev); - - return 0; - -err_open_candev: - free_irq(priv->dev->irq, ndev); -req_irq_err: - pch_can_release(priv); - - return retval; -} - -static int pch_close(struct net_device *ndev) -{ - struct pch_can_priv *priv = netdev_priv(ndev); - - netif_stop_queue(ndev); - napi_disable(&priv->napi); - pch_can_release(priv); - free_irq(priv->dev->irq, ndev); - close_candev(ndev); - priv->can.state = CAN_STATE_STOPPED; - return 0; -} - -static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev) -{ - struct pch_can_priv *priv = netdev_priv(ndev); - struct can_frame *cf = (struct can_frame *)skb->data; - int tx_obj_no; - int i; - u32 id2; - - if (can_dropped_invalid_skb(ndev, skb)) - return NETDEV_TX_OK; - - tx_obj_no = priv->tx_obj; - if (priv->tx_obj == PCH_TX_OBJ_END) { - if (ioread32(&priv->regs->treq2) & PCH_TREQ2_TX_MASK) - netif_stop_queue(ndev); - - priv->tx_obj = PCH_TX_OBJ_START; - } else { - priv->tx_obj++; - } - - /* Setting the CMASK register. */ - pch_can_bit_set(&priv->regs->ifregs[1].cmask, PCH_CMASK_ALL); - - /* If ID extended is set. */ - if (cf->can_id & CAN_EFF_FLAG) { - iowrite32(cf->can_id & 0xffff, &priv->regs->ifregs[1].id1); - id2 = ((cf->can_id >> 16) & 0x1fff) | PCH_ID2_XTD; - } else { - iowrite32(0, &priv->regs->ifregs[1].id1); - id2 = (cf->can_id & CAN_SFF_MASK) << 2; - } - - id2 |= PCH_ID_MSGVAL; - - /* If remote frame has to be transmitted.. 
*/ - if (!(cf->can_id & CAN_RTR_FLAG)) - id2 |= PCH_ID2_DIR; - - iowrite32(id2, &priv->regs->ifregs[1].id2); - - /* Copy data to register */ - for (i = 0; i < cf->can_dlc; i += 2) { - iowrite16(cf->data[i] | (cf->data[i + 1] << 8), - &priv->regs->ifregs[1].data[i / 2]); - } - - can_put_echo_skb(skb, ndev, tx_obj_no - PCH_RX_OBJ_END - 1); - - /* Set the size of the data. Update if2_mcont */ - iowrite32(cf->can_dlc | PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_TXRQXT | - PCH_IF_MCONT_TXIE, &priv->regs->ifregs[1].mcont); - - pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, tx_obj_no); - - return NETDEV_TX_OK; -} - -static const struct net_device_ops pch_can_netdev_ops = { - .ndo_open = pch_can_open, - .ndo_stop = pch_close, - .ndo_start_xmit = pch_xmit, - .ndo_change_mtu = can_change_mtu, -}; - -static void pch_can_remove(struct pci_dev *pdev) -{ - struct net_device *ndev = pci_get_drvdata(pdev); - struct pch_can_priv *priv = netdev_priv(ndev); - - unregister_candev(priv->ndev); - if (priv->use_msi) - pci_disable_msi(priv->dev); - pci_release_regions(pdev); - pci_disable_device(pdev); - pch_can_reset(priv); - pci_iounmap(pdev, priv->regs); - free_candev(priv->ndev); -} - -#ifdef CONFIG_PM -static void pch_can_set_int_custom(struct pch_can_priv *priv) -{ - /* Clearing the IE, SIE and EIE bits of Can control register. */ - pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE); - - /* Appropriately setting them. */ - pch_can_bit_set(&priv->regs->cont, - ((priv->int_enables & PCH_MSK_CTRL_IE_SIE_EIE) << 1)); -} - -/* This function retrieves interrupt enabled for the CAN device. */ -static u32 pch_can_get_int_enables(struct pch_can_priv *priv) -{ - /* Obtaining the status of IE, SIE and EIE interrupt bits. */ - return (ioread32(&priv->regs->cont) & PCH_CTRL_IE_SIE_EIE) >> 1; -} - -static u32 pch_can_get_rxtx_ir(struct pch_can_priv *priv, u32 buff_num, - enum pch_ifreg dir) -{ - u32 ie, enable; - - if (dir) - ie = PCH_IF_MCONT_RXIE; - else - ie = PCH_IF_MCONT_TXIE; - - iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[dir].cmask); - pch_can_rw_msg_obj(&priv->regs->ifregs[dir].creq, buff_num); - - if (((ioread32(&priv->regs->ifregs[dir].id2)) & PCH_ID_MSGVAL) && - ((ioread32(&priv->regs->ifregs[dir].mcont)) & ie)) - enable = 1; - else - enable = 0; - - return enable; -} - -static void pch_can_set_rx_buffer_link(struct pch_can_priv *priv, - u32 buffer_num, int set) -{ - iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask); - pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num); - iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL, - &priv->regs->ifregs[0].cmask); - if (set) - pch_can_bit_clear(&priv->regs->ifregs[0].mcont, - PCH_IF_MCONT_EOB); - else - pch_can_bit_set(&priv->regs->ifregs[0].mcont, PCH_IF_MCONT_EOB); - - pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num); -} - -static u32 pch_can_get_rx_buffer_link(struct pch_can_priv *priv, u32 buffer_num) -{ - u32 link; - - iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask); - pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num); - - if (ioread32(&priv->regs->ifregs[0].mcont) & PCH_IF_MCONT_EOB) - link = 0; - else - link = 1; - return link; -} - -static int pch_can_get_buffer_status(struct pch_can_priv *priv) -{ - return (ioread32(&priv->regs->treq1) & 0xffff) | - (ioread32(&priv->regs->treq2) << 16); -} - -static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state) -{ - int i; - int retval; - u32 buf_stat; /* Variable for reading the transmit buffer status. 
*/ - int counter = PCH_COUNTER_LIMIT; - - struct net_device *dev = pci_get_drvdata(pdev); - struct pch_can_priv *priv = netdev_priv(dev); - - /* Stop the CAN controller */ - pch_can_set_run_mode(priv, PCH_CAN_STOP); - - /* Indicate that we are aboutto/in suspend */ - priv->can.state = CAN_STATE_STOPPED; - - /* Waiting for all transmission to complete. */ - while (counter) { - buf_stat = pch_can_get_buffer_status(priv); - if (!buf_stat) - break; - counter--; - udelay(1); - } - if (!counter) - dev_err(&pdev->dev, "%s -> Transmission time out.\n", __func__); - - /* Save interrupt configuration and then disable them */ - priv->int_enables = pch_can_get_int_enables(priv); - pch_can_set_int_enables(priv, PCH_CAN_DISABLE); - - /* Save Tx buffer enable state */ - for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++) - priv->tx_enable[i - 1] = pch_can_get_rxtx_ir(priv, i, - PCH_TX_IFREG); - - /* Disable all Transmit buffers */ - pch_can_set_tx_all(priv, 0); - - /* Save Rx buffer enable state */ - for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) { - priv->rx_enable[i - 1] = pch_can_get_rxtx_ir(priv, i, - PCH_RX_IFREG); - priv->rx_link[i - 1] = pch_can_get_rx_buffer_link(priv, i); - } - - /* Disable all Receive buffers */ - pch_can_set_rx_all(priv, 0); - retval = pci_save_state(pdev); - if (retval) { - dev_err(&pdev->dev, "pci_save_state failed.\n"); - } else { - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_disable_device(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); - } - - return retval; -} - -static int pch_can_resume(struct pci_dev *pdev) -{ - int i; - int retval; - struct net_device *dev = pci_get_drvdata(pdev); - struct pch_can_priv *priv = netdev_priv(dev); - - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - retval = pci_enable_device(pdev); - if (retval) { - dev_err(&pdev->dev, "pci_enable_device failed.\n"); - return retval; - } - - pci_enable_wake(pdev, PCI_D3hot, 0); - - priv->can.state = CAN_STATE_ERROR_ACTIVE; - - /* Disabling all interrupts. */ - pch_can_set_int_enables(priv, PCH_CAN_DISABLE); - - /* Setting the CAN device in Stop Mode. */ - pch_can_set_run_mode(priv, PCH_CAN_STOP); - - /* Configuring the transmit and receive buffers. */ - pch_can_config_rx_tx_buffers(priv); - - /* Restore the CAN state */ - pch_set_bittiming(dev); - - /* Listen/Active */ - pch_can_set_optmode(priv); - - /* Enabling the transmit buffer. */ - for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++) - pch_can_set_rxtx(priv, i, priv->tx_enable[i - 1], PCH_TX_IFREG); - - /* Configuring the receive buffer and enabling them. 
*/ - for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) { - /* Restore buffer link */ - pch_can_set_rx_buffer_link(priv, i, priv->rx_link[i - 1]); - - /* Restore buffer enables */ - pch_can_set_rxtx(priv, i, priv->rx_enable[i - 1], PCH_RX_IFREG); - } - - /* Enable CAN Interrupts */ - pch_can_set_int_custom(priv); - - /* Restore Run Mode */ - pch_can_set_run_mode(priv, PCH_CAN_RUN); - - return retval; -} -#else -#define pch_can_suspend NULL -#define pch_can_resume NULL -#endif - -static int pch_can_get_berr_counter(const struct net_device *dev, - struct can_berr_counter *bec) -{ - struct pch_can_priv *priv = netdev_priv(dev); - u32 errc = ioread32(&priv->regs->errc); - - bec->txerr = errc & PCH_TEC; - bec->rxerr = (errc & PCH_REC) >> 8; - - return 0; -} - -static int pch_can_probe(struct pci_dev *pdev, - const struct pci_device_id *id) -{ - struct net_device *ndev; - struct pch_can_priv *priv; - int rc; - void __iomem *addr; - - rc = pci_enable_device(pdev); - if (rc) { - dev_err(&pdev->dev, "Failed pci_enable_device %d\n", rc); - goto probe_exit_endev; - } - - rc = pci_request_regions(pdev, KBUILD_MODNAME); - if (rc) { - dev_err(&pdev->dev, "Failed pci_request_regions %d\n", rc); - goto probe_exit_pcireq; - } - - addr = pci_iomap(pdev, 1, 0); - if (!addr) { - rc = -EIO; - dev_err(&pdev->dev, "Failed pci_iomap\n"); - goto probe_exit_ipmap; - } - - ndev = alloc_candev(sizeof(struct pch_can_priv), PCH_TX_OBJ_END); - if (!ndev) { - rc = -ENOMEM; - dev_err(&pdev->dev, "Failed alloc_candev\n"); - goto probe_exit_alloc_candev; - } - - priv = netdev_priv(ndev); - priv->ndev = ndev; - priv->regs = addr; - priv->dev = pdev; - priv->can.bittiming_const = &pch_can_bittiming_const; - priv->can.do_set_mode = pch_can_do_set_mode; - priv->can.do_get_berr_counter = pch_can_get_berr_counter; - priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY | - CAN_CTRLMODE_LOOPBACK; - priv->tx_obj = PCH_TX_OBJ_START; /* Point head of Tx Obj */ - - ndev->irq = pdev->irq; - ndev->flags |= IFF_ECHO; - - pci_set_drvdata(pdev, ndev); - SET_NETDEV_DEV(ndev, &pdev->dev); - ndev->netdev_ops = &pch_can_netdev_ops; - priv->can.clock.freq = PCH_CAN_CLK; /* Hz */ - - netif_napi_add(ndev, &priv->napi, pch_can_poll, PCH_RX_OBJ_END); - - rc = pci_enable_msi(priv->dev); - if (rc) { - netdev_err(ndev, "PCH CAN opened without MSI\n"); - priv->use_msi = 0; - } else { - netdev_err(ndev, "PCH CAN opened with MSI\n"); - pci_set_master(pdev); - priv->use_msi = 1; - } - - rc = register_candev(ndev); - if (rc) { - dev_err(&pdev->dev, "Failed register_candev %d\n", rc); - goto probe_exit_reg_candev; - } - - return 0; - -probe_exit_reg_candev: - if (priv->use_msi) - pci_disable_msi(priv->dev); - free_candev(ndev); -probe_exit_alloc_candev: - pci_iounmap(pdev, addr); -probe_exit_ipmap: - pci_release_regions(pdev); -probe_exit_pcireq: - pci_disable_device(pdev); -probe_exit_endev: - return rc; -} - -static struct pci_driver pch_can_pci_driver = { - .name = "pch_can", - .id_table = pch_pci_tbl, - .probe = pch_can_probe, - .remove = pch_can_remove, - .suspend = pch_can_suspend, - .resume = pch_can_resume, -}; - -module_pci_driver(pch_can_pci_driver); - -MODULE_DESCRIPTION("Intel EG20T PCH CAN(Controller Area Network) Driver"); -MODULE_LICENSE("GPL v2"); -MODULE_VERSION("0.94"); diff --git a/drivers/net/can/peak_canfd/Kconfig b/drivers/net/can/peak_canfd/Kconfig index 84b30978a19f..f7e412766dbd 100644 --- a/drivers/net/can/peak_canfd/Kconfig +++ b/drivers/net/can/peak_canfd/Kconfig @@ -1,7 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0-only config 
CAN_PEAK_PCIEFD depends on PCI tristate "PEAK-System PCAN-PCIe FD cards" - ---help--- + help This driver adds support for the PEAK-System PCI Express FD CAN-FD cards family. These 1x or 2x CAN-FD channels cards offer CAN 2.0 a/b as well as diff --git a/drivers/net/can/peak_canfd/Makefile b/drivers/net/can/peak_canfd/Makefile index 3dc7a6a0ba59..14719b35e0b9 100644 --- a/drivers/net/can/peak_canfd/Makefile +++ b/drivers/net/can/peak_canfd/Makefile @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only # # Makefile for the PEAK-System CAN-FD IP module drivers # diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c index 5696d7e80751..06cb2629f66a 100644 --- a/drivers/net/can/peak_canfd/peak_canfd.c +++ b/drivers/net/can/peak_canfd/peak_canfd.c @@ -1,21 +1,13 @@ -/* - * Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com> - * Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com> +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com> * - * Copyright (C) 2016 PEAK System-Technik GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the version 2 of the GNU General Public License - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. + * Copyright (C) 2016-2025 PEAK System-Technik GmbH + * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com> */ #include <linux/can.h> #include <linux/can/dev.h> +#include <linux/ethtool.h> #include "peak_canfd_user.h" @@ -130,7 +122,8 @@ static int pucan_set_timing_slow(struct peak_canfd_priv *priv, cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TIMING_SLOW); cmd->sjw_t = PUCAN_TSLOW_SJW_T(pbt->sjw - 1, - priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES); + priv->can.ctrlmode & + CAN_CTRLMODE_3_SAMPLES); cmd->tseg1 = PUCAN_TSLOW_TSEG1(pbt->prop_seg + pbt->phase_seg1 - 1); cmd->tseg2 = PUCAN_TSLOW_TSEG2(pbt->phase_seg2 - 1); cmd->brp = cpu_to_le16(PUCAN_TSLOW_BRP(pbt->brp - 1)); @@ -240,6 +233,20 @@ static int pucan_setup_rx_barrier(struct peak_canfd_priv *priv) return pucan_write_cmd(priv); } +static int pucan_netif_rx(struct sk_buff *skb, __le32 ts_low, __le32 ts_high) +{ + struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb); + u64 ts_us; + + ts_us = (u64)le32_to_cpu(ts_high) << 32; + ts_us |= le32_to_cpu(ts_low); + + /* IP core timestamps are µs. 
*/ + hwts->hwtstamp = ns_to_ktime(ts_us * NSEC_PER_USEC); + + return netif_rx(skb); +} + /* handle the reception of one CAN frame */ static int pucan_handle_can_rx(struct peak_canfd_priv *priv, struct pucan_rx_msg *msg) @@ -251,27 +258,31 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv, u8 cf_len; if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN) - cf_len = can_dlc2len(get_canfd_dlc(pucan_msg_get_dlc(msg))); + cf_len = can_fd_dlc2len(pucan_msg_get_dlc(msg)); else - cf_len = get_can_dlc(pucan_msg_get_dlc(msg)); + cf_len = can_cc_dlc2len(pucan_msg_get_dlc(msg)); /* if this frame is an echo, */ - if ((rx_msg_flags & PUCAN_MSG_LOOPED_BACK) && - !(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE)) { + if (rx_msg_flags & PUCAN_MSG_LOOPED_BACK) { unsigned long flags; spin_lock_irqsave(&priv->echo_lock, flags); - can_get_echo_skb(priv->ndev, msg->client); /* count bytes of the echo instead of skb */ - stats->tx_bytes += cf_len; + stats->tx_bytes += can_get_echo_skb(priv->ndev, msg->client, NULL); stats->tx_packets++; /* restart tx queue (a slot is free) */ netif_wake_queue(priv->ndev); spin_unlock_irqrestore(&priv->echo_lock, flags); - return 0; + + /* if this frame is only an echo, stop here. Otherwise, + * continue to push this application self-received frame into + * its own rx queue. + */ + if (!(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE)) + return 0; } /* otherwise, it should be pushed into rx fifo */ @@ -299,15 +310,16 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv, if (rx_msg_flags & PUCAN_MSG_EXT_ID) cf->can_id |= CAN_EFF_FLAG; - if (rx_msg_flags & PUCAN_MSG_RTR) + if (rx_msg_flags & PUCAN_MSG_RTR) { cf->can_id |= CAN_RTR_FLAG; - else + } else { memcpy(cf->data, msg->d, cf->len); - stats->rx_bytes += cf->len; + stats->rx_bytes += cf->len; + } stats->rx_packets++; - netif_rx(skb); + pucan_netif_rx(skb, msg->ts_low, msg->ts_high); return 0; } @@ -333,7 +345,6 @@ static int pucan_handle_status(struct peak_canfd_priv *priv, /* this STATUS is the CNF of the RX_BARRIER: Tx path can be setup */ if (pucan_status_is_rx_barrier(msg)) { - if (priv->enable_tx_path) { int err = priv->enable_tx_path(priv); @@ -341,8 +352,8 @@ static int pucan_handle_status(struct peak_canfd_priv *priv, return err; } - /* start network queue (echo_skb array is empty) */ - netif_start_queue(ndev); + /* wake network queue up (echo_skb array is empty) */ + netif_wake_queue(ndev); return 0; } @@ -363,7 +374,7 @@ static int pucan_handle_status(struct peak_canfd_priv *priv, priv->can.state = CAN_STATE_ERROR_PASSIVE; priv->can.can_stats.error_passive++; if (skb) { - cf->can_id |= CAN_ERR_CRTL; + cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT; cf->data[1] = (priv->bec.txerr > priv->bec.rxerr) ? CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE; @@ -376,7 +387,7 @@ static int pucan_handle_status(struct peak_canfd_priv *priv, priv->can.state = CAN_STATE_ERROR_WARNING; priv->can.can_stats.error_warning++; if (skb) { - cf->can_id |= CAN_ERR_CRTL; + cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT; cf->data[1] = (priv->bec.txerr > priv->bec.rxerr) ? 
CAN_ERR_CRTL_TX_WARNING : CAN_ERR_CRTL_RX_WARNING; @@ -399,9 +410,7 @@ static int pucan_handle_status(struct peak_canfd_priv *priv, return -ENOMEM; } - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; - netif_rx(skb); + pucan_netif_rx(skb, msg->ts_low, msg->ts_high); return 0; } @@ -422,14 +431,12 @@ static int pucan_handle_cache_critical(struct peak_canfd_priv *priv) return -ENOMEM; } - cf->can_id |= CAN_ERR_CRTL; + cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; cf->data[6] = priv->bec.txerr; cf->data[7] = priv->bec.rxerr; - stats->rx_bytes += cf->can_dlc; - stats->rx_packets++; netif_rx(skb); return 0; @@ -617,7 +624,7 @@ static int peak_canfd_set_data_bittiming(struct net_device *ndev) { struct peak_canfd_priv *priv = netdev_priv(ndev); - return pucan_set_timing_fast(priv, &priv->can.data_bittiming); + return pucan_set_timing_fast(priv, &priv->can.fd.data_bittiming); } static int peak_canfd_close(struct net_device *ndev) @@ -642,9 +649,9 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb, unsigned long flags; bool should_stop_tx_queue; int room_left; - u8 can_dlc; + u8 len; - if (can_dropped_invalid_skb(ndev, skb)) + if (can_dev_dropped_skb(ndev, skb)) return NETDEV_TX_OK; msg_size = ALIGN(sizeof(*msg) + cf->len, 4); @@ -672,7 +679,7 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb, if (can_is_canfd_skb(skb)) { /* CAN FD frame format */ - can_dlc = can_len2dlc(cf->len); + len = can_fd_len2dlc(cf->len); msg_flags |= PUCAN_MSG_EXT_DATA_LEN; @@ -683,7 +690,7 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb, msg_flags |= PUCAN_MSG_ERROR_STATE_IND; } else { /* CAN 2.0 frame format */ - can_dlc = cf->len; + len = cf->len; if (cf->can_id & CAN_RTR_FLAG) msg_flags |= PUCAN_MSG_RTR; @@ -697,7 +704,7 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb, msg_flags |= PUCAN_MSG_SELF_RECEIVE; msg->flags = cpu_to_le16(msg_flags); - msg->channel_dlc = PUCAN_MSG_CHANNEL_DLC(priv->index, can_dlc); + msg->channel_dlc = PUCAN_MSG_CHANNEL_DLC(priv->index, len); memcpy(msg->d, cf->data, cf->len); /* struct msg client field is used as an index in the echo skbs ring */ @@ -706,7 +713,7 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb, spin_lock_irqsave(&priv->echo_lock, flags); /* prepare and save echo skb in internal slot */ - can_put_echo_skb(skb, ndev, priv->echo_idx); + can_put_echo_skb(skb, ndev, priv->echo_idx, 0); /* move echo index to the next slot */ priv->echo_idx = (priv->echo_idx + 1) % priv->can.echo_skb_max; @@ -736,11 +743,50 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } +static int peak_eth_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config) +{ + config->tx_type = HWTSTAMP_TX_OFF; + config->rx_filter = HWTSTAMP_FILTER_ALL; + + return 0; +} + +static int peak_eth_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + if (config->tx_type == HWTSTAMP_TX_OFF && + config->rx_filter == HWTSTAMP_FILTER_ALL) + return 0; + + NL_SET_ERR_MSG_MOD(extack, "Only RX HWTSTAMP_FILTER_ALL is supported"); + return -ERANGE; +} + static const struct net_device_ops peak_canfd_netdev_ops = { .ndo_open = peak_canfd_open, .ndo_stop = peak_canfd_close, .ndo_start_xmit = peak_canfd_start_xmit, - .ndo_change_mtu = can_change_mtu, + .ndo_hwtstamp_get = peak_eth_hwtstamp_get, + .ndo_hwtstamp_set = peak_eth_hwtstamp_set, +}; + +static int peak_get_ts_info(struct net_device *dev, + 
struct kernel_ethtool_ts_info *info) +{ + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + info->tx_types = BIT(HWTSTAMP_TX_OFF); + info->rx_filters = BIT(HWTSTAMP_FILTER_ALL); + + return 0; +} + +static const struct ethtool_ops peak_canfd_ethtool_ops = { + .get_ts_info = peak_get_ts_info, }; struct net_device *alloc_peak_canfd_dev(int sizeof_priv, int index, @@ -763,12 +809,12 @@ struct net_device *alloc_peak_canfd_dev(int sizeof_priv, int index, /* complete now socket-can initialization side */ priv->can.state = CAN_STATE_STOPPED; priv->can.bittiming_const = &peak_canfd_nominal_const; - priv->can.data_bittiming_const = &peak_canfd_data_const; + priv->can.fd.data_bittiming_const = &peak_canfd_data_const; priv->can.do_set_mode = peak_canfd_set_mode; priv->can.do_get_berr_counter = peak_canfd_get_berr_counter; priv->can.do_set_bittiming = peak_canfd_set_bittiming; - priv->can.do_set_data_bittiming = peak_canfd_set_data_bittiming; + priv->can.fd.do_set_data_bittiming = peak_canfd_set_data_bittiming; priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_3_SAMPLES | @@ -783,6 +829,7 @@ struct net_device *alloc_peak_canfd_dev(int sizeof_priv, int index, ndev->flags |= IFF_ECHO; ndev->netdev_ops = &peak_canfd_netdev_ops; + ndev->ethtool_ops = &peak_canfd_ethtool_ops; ndev->dev_id = index; return ndev; diff --git a/drivers/net/can/peak_canfd/peak_canfd_user.h b/drivers/net/can/peak_canfd/peak_canfd_user.h index bf6de47f69c2..60c6542028cf 100644 --- a/drivers/net/can/peak_canfd/peak_canfd_user.h +++ b/drivers/net/can/peak_canfd/peak_canfd_user.h @@ -1,17 +1,8 @@ -/* - * CAN driver for PEAK System micro-CAN based adapters +/* SPDX-License-Identifier: GPL-2.0-only */ +/* CAN driver for PEAK System micro-CAN based adapters * - * Copyright (C) 2003-2011 PEAK System-Technik GmbH - * Copyright (C) 2011-2013 Stephane Grosjean <s.grosjean@peak-system.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published - * by the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
+ * Copyright (C) 2003-2025 PEAK System-Technik GmbH + * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com> */ #ifndef PEAK_CANFD_USER_H #define PEAK_CANFD_USER_H diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c index c458d5fdc8d3..93558e33bc02 100644 --- a/drivers/net/can/peak_canfd/peak_pciefd_main.c +++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c @@ -1,19 +1,10 @@ -/* - * Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com> - * Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com> +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com> * * Derived from the PCAN project file driver/src/pcan_pci.c: * - * Copyright (C) 2001-2006 PEAK System-Technik GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the version 2 of the GNU General Public License - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. + * Copyright (C) 2001-2025 PEAK System-Technik GmbH + * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com> */ #include <linux/kernel.h> @@ -28,9 +19,8 @@ #include "peak_canfd_user.h" -MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>"); +MODULE_AUTHOR("Stéphane Grosjean <stephane.grosjean@hms-networks.com>"); MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCIe/M.2 FD family cards"); -MODULE_SUPPORTED_DEVICE("PEAK PCAN PCIe/M.2 FD CAN cards"); MODULE_LICENSE("GPL v2"); #define PCIEFD_DRV_NAME "peak_pciefd" @@ -125,8 +115,6 @@ MODULE_LICENSE("GPL v2"); #define CANFD_CTL_IRQ_CL_DEF 16 /* Rx msg max nb per IRQ in Rx DMA */ #define CANFD_CTL_IRQ_TL_DEF 10 /* Time before IRQ if < CL (x100 µs) */ -#define CANFD_OPTIONS_SET (CANFD_OPTION_ERROR | CANFD_OPTION_BUSLOAD) - /* Tx anticipation window (link logical address should be aligned on 2K * boundary) */ @@ -155,7 +143,7 @@ struct pciefd_rx_dma { __le32 irq_status; __le32 sys_time_low; __le32 sys_time_high; - struct pucan_rx_msg msg[0]; + struct pucan_rx_msg msg[]; } __packed __aligned(4); /* Tx Link record */ @@ -203,7 +191,7 @@ struct pciefd_board { struct pci_dev *pci_dev; int can_count; spinlock_t cmd_lock; /* 64-bits cmds must be atomic */ - struct pciefd_can *can[0]; /* array of network devices */ + struct pciefd_can *can[]; /* array of network devices */ }; /* supported device ids. 
*/ @@ -668,7 +656,7 @@ static int pciefd_can_probe(struct pciefd_board *pciefd) pciefd_can_writereg(priv, CANFD_CLK_SEL_80MHZ, PCIEFD_REG_CAN_CLK_SEL); - /* fallthough */ + fallthrough; case CANFD_CLK_SEL_80MHZ: priv->ucan.can.clock.freq = 80 * 1000 * 1000; break; @@ -849,7 +837,8 @@ err_disable_pci: /* pci_xxx_config_word() return positive PCIBIOS_xxx error codes while * the probe() function must return a negative errno in case of failure - * (err is unchanged if negative) */ + * (err is unchanged if negative) + */ return pcibios_err_to_errno(err); } diff --git a/drivers/net/can/rcar/Kconfig b/drivers/net/can/rcar/Kconfig index bd5a8fcd83e1..c66762ef631b 100644 --- a/drivers/net/can/rcar/Kconfig +++ b/drivers/net/can/rcar/Kconfig @@ -1,18 +1,18 @@ # SPDX-License-Identifier: GPL-2.0 config CAN_RCAR - tristate "Renesas R-Car CAN controller" - depends on ARCH_RENESAS || ARM - ---help--- + tristate "Renesas R-Car and RZ/G CAN controller" + depends on ARCH_RENESAS || COMPILE_TEST + help Say Y here if you want to use CAN controller found on Renesas R-Car - SoCs. + or RZ/G SoCs. To compile this driver as a module, choose M here: the module will be called rcar_can. config CAN_RCAR_CANFD tristate "Renesas R-Car CAN FD controller" - depends on ARCH_RENESAS || ARM - ---help--- + depends on ARCH_RENESAS || COMPILE_TEST + help Say Y here if you want to use CAN FD controller found on Renesas R-Car SoCs. The driver puts the controller in CAN FD only mode, which can interoperate with CAN2.0 nodes but does not support diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c index 13e66297b65f..fc3df328e877 100644 --- a/drivers/net/can/rcar/rcar_can.c +++ b/drivers/net/can/rcar/rcar_can.c @@ -5,21 +5,30 @@ * Copyright (C) 2013 Renesas Solutions Corp. 
*/ +#include <linux/bitfield.h> +#include <linux/bits.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/errno.h> +#include <linux/ethtool.h> #include <linux/netdevice.h> #include <linux/platform_device.h> -#include <linux/can/led.h> #include <linux/can/dev.h> #include <linux/clk.h> -#include <linux/can/platform/rcar_can.h> #include <linux/of.h> +#include <linux/pm_runtime.h> #define RCAR_CAN_DRV_NAME "rcar_can" +/* Clock Select Register settings */ +enum CLKR { + CLKR_CLKP1 = 0, /* Peripheral clock (clkp1) */ + CLKR_CLKP2 = 1, /* Peripheral clock (clkp2) */ + CLKR_CLKEXT = 3, /* Externally input clock */ +}; + #define RCAR_SUPPORTED_CLOCKS (BIT(CLKR_CLKP1) | BIT(CLKR_CLKP2) | \ BIT(CLKR_CLKEXT)) @@ -86,9 +95,7 @@ struct rcar_can_priv { struct net_device *ndev; struct napi_struct napi; struct rcar_can_regs __iomem *regs; - struct clk *clk; struct clk *can_clk; - u8 tx_dlc[RCAR_CAN_FIFO_DEPTH]; u32 tx_head; u32 tx_tail; u8 clock_select; @@ -108,100 +115,102 @@ static const struct can_bittiming_const rcar_can_bittiming_const = { }; /* Control Register bits */ -#define RCAR_CAN_CTLR_BOM (3 << 11) /* Bus-Off Recovery Mode Bits */ -#define RCAR_CAN_CTLR_BOM_ENT (1 << 11) /* Entry to halt mode */ - /* at bus-off entry */ -#define RCAR_CAN_CTLR_SLPM (1 << 10) -#define RCAR_CAN_CTLR_CANM (3 << 8) /* Operating Mode Select Bit */ -#define RCAR_CAN_CTLR_CANM_HALT (1 << 9) -#define RCAR_CAN_CTLR_CANM_RESET (1 << 8) -#define RCAR_CAN_CTLR_CANM_FORCE_RESET (3 << 8) -#define RCAR_CAN_CTLR_MLM (1 << 3) /* Message Lost Mode Select */ -#define RCAR_CAN_CTLR_IDFM (3 << 1) /* ID Format Mode Select Bits */ -#define RCAR_CAN_CTLR_IDFM_MIXED (1 << 2) /* Mixed ID mode */ -#define RCAR_CAN_CTLR_MBM (1 << 0) /* Mailbox Mode select */ +#define RCAR_CAN_CTLR_BOM GENMASK(12, 11) /* Bus-Off Recovery Mode Bits */ +#define RCAR_CAN_CTLR_BOM_ENT 1 /* Entry to halt mode */ + /* at bus-off entry */ +#define RCAR_CAN_CTLR_SLPM BIT(10) /* Sleep Mode */ +#define RCAR_CAN_CTLR_CANM GENMASK(9, 8) /* Operating Mode Select Bit */ +#define RCAR_CAN_CTLR_CANM_OPER 0 /* Operation Mode */ +#define RCAR_CAN_CTLR_CANM_RESET 1 /* Reset Mode */ +#define RCAR_CAN_CTLR_CANM_HALT 2 /* Halt Mode */ +#define RCAR_CAN_CTLR_CANM_FORCE_RESET 3 /* Reset Mode (forcible) */ +#define RCAR_CAN_CTLR_MLM BIT(3) /* Message Lost Mode Select */ +#define RCAR_CAN_CTLR_IDFM GENMASK(2, 1) /* ID Format Mode Select Bits */ +#define RCAR_CAN_CTLR_IDFM_STD 0 /* Standard ID mode */ +#define RCAR_CAN_CTLR_IDFM_EXT 1 /* Extended ID mode */ +#define RCAR_CAN_CTLR_IDFM_MIXED 2 /* Mixed ID mode */ +#define RCAR_CAN_CTLR_MBM BIT(0) /* Mailbox Mode select */ /* Status Register bits */ -#define RCAR_CAN_STR_RSTST (1 << 8) /* Reset Status Bit */ +#define RCAR_CAN_STR_RSTST BIT(8) /* Reset Status Bit */ /* FIFO Received ID Compare Registers 0 and 1 bits */ -#define RCAR_CAN_FIDCR_IDE (1 << 31) /* ID Extension Bit */ -#define RCAR_CAN_FIDCR_RTR (1 << 30) /* Remote Transmission Request Bit */ +#define RCAR_CAN_FIDCR_IDE BIT(31) /* ID Extension Bit */ +#define RCAR_CAN_FIDCR_RTR BIT(30) /* Remote Transmission Request Bit */ /* Receive FIFO Control Register bits */ -#define RCAR_CAN_RFCR_RFEST (1 << 7) /* Receive FIFO Empty Status Flag */ -#define RCAR_CAN_RFCR_RFE (1 << 0) /* Receive FIFO Enable */ +#define RCAR_CAN_RFCR_RFEST BIT(7) /* Receive FIFO Empty Status Flag */ +#define RCAR_CAN_RFCR_RFE BIT(0) /* Receive FIFO Enable */ /* Transmit FIFO Control Register bits */ -#define RCAR_CAN_TFCR_TFUST (7 << 
1) /* Transmit FIFO Unsent Message */ - /* Number Status Bits */ -#define RCAR_CAN_TFCR_TFUST_SHIFT 1 /* Offset of Transmit FIFO Unsent */ - /* Message Number Status Bits */ -#define RCAR_CAN_TFCR_TFE (1 << 0) /* Transmit FIFO Enable */ - -#define RCAR_CAN_N_RX_MKREGS1 2 /* Number of mask registers */ - /* for Rx mailboxes 0-31 */ +#define RCAR_CAN_TFCR_TFUST GENMASK(3, 1) /* Transmit FIFO Unsent Message */ + /* Number Status Bits */ +#define RCAR_CAN_TFCR_TFE BIT(0) /* Transmit FIFO Enable */ + +#define RCAR_CAN_N_RX_MKREGS1 2 /* Number of mask registers */ + /* for Rx mailboxes 0-31 */ #define RCAR_CAN_N_RX_MKREGS2 8 /* Bit Configuration Register settings */ -#define RCAR_CAN_BCR_TSEG1(x) (((x) & 0x0f) << 20) -#define RCAR_CAN_BCR_BPR(x) (((x) & 0x3ff) << 8) -#define RCAR_CAN_BCR_SJW(x) (((x) & 0x3) << 4) -#define RCAR_CAN_BCR_TSEG2(x) ((x) & 0x07) +#define RCAR_CAN_BCR_TSEG1 GENMASK(23, 20) +#define RCAR_CAN_BCR_BRP GENMASK(17, 8) +#define RCAR_CAN_BCR_SJW GENMASK(5, 4) +#define RCAR_CAN_BCR_TSEG2 GENMASK(2, 0) /* Mailbox and Mask Registers bits */ -#define RCAR_CAN_IDE (1 << 31) -#define RCAR_CAN_RTR (1 << 30) -#define RCAR_CAN_SID_SHIFT 18 +#define RCAR_CAN_IDE BIT(31) /* ID Extension */ +#define RCAR_CAN_RTR BIT(30) /* Remote Transmission Request */ +#define RCAR_CAN_SID GENMASK(28, 18) /* Standard ID */ +#define RCAR_CAN_EID GENMASK(28, 0) /* Extended ID */ /* Mailbox Interrupt Enable Register 1 bits */ -#define RCAR_CAN_MIER1_RXFIE (1 << 28) /* Receive FIFO Interrupt Enable */ -#define RCAR_CAN_MIER1_TXFIE (1 << 24) /* Transmit FIFO Interrupt Enable */ +#define RCAR_CAN_MIER1_RXFIE BIT(28) /* Receive FIFO Interrupt Enable */ +#define RCAR_CAN_MIER1_TXFIE BIT(24) /* Transmit FIFO Interrupt Enable */ /* Interrupt Enable Register bits */ -#define RCAR_CAN_IER_ERSIE (1 << 5) /* Error (ERS) Interrupt Enable Bit */ -#define RCAR_CAN_IER_RXFIE (1 << 4) /* Reception FIFO Interrupt */ - /* Enable Bit */ -#define RCAR_CAN_IER_TXFIE (1 << 3) /* Transmission FIFO Interrupt */ - /* Enable Bit */ +#define RCAR_CAN_IER_ERSIE BIT(5) /* Error (ERS) Interrupt Enable Bit */ +#define RCAR_CAN_IER_RXFIE BIT(4) /* Reception FIFO Interrupt */ + /* Enable Bit */ +#define RCAR_CAN_IER_TXFIE BIT(3) /* Transmission FIFO Interrupt */ + /* Enable Bit */ /* Interrupt Status Register bits */ -#define RCAR_CAN_ISR_ERSF (1 << 5) /* Error (ERS) Interrupt Status Bit */ -#define RCAR_CAN_ISR_RXFF (1 << 4) /* Reception FIFO Interrupt */ - /* Status Bit */ -#define RCAR_CAN_ISR_TXFF (1 << 3) /* Transmission FIFO Interrupt */ - /* Status Bit */ +#define RCAR_CAN_ISR_ERSF BIT(5) /* Error (ERS) Interrupt Status Bit */ +#define RCAR_CAN_ISR_RXFF BIT(4) /* Reception FIFO Interrupt */ + /* Status Bit */ +#define RCAR_CAN_ISR_TXFF BIT(3) /* Transmission FIFO Interrupt */ + /* Status Bit */ /* Error Interrupt Enable Register bits */ -#define RCAR_CAN_EIER_BLIE (1 << 7) /* Bus Lock Interrupt Enable */ -#define RCAR_CAN_EIER_OLIE (1 << 6) /* Overload Frame Transmit */ - /* Interrupt Enable */ -#define RCAR_CAN_EIER_ORIE (1 << 5) /* Receive Overrun Interrupt Enable */ -#define RCAR_CAN_EIER_BORIE (1 << 4) /* Bus-Off Recovery Interrupt Enable */ -#define RCAR_CAN_EIER_BOEIE (1 << 3) /* Bus-Off Entry Interrupt Enable */ -#define RCAR_CAN_EIER_EPIE (1 << 2) /* Error Passive Interrupt Enable */ -#define RCAR_CAN_EIER_EWIE (1 << 1) /* Error Warning Interrupt Enable */ -#define RCAR_CAN_EIER_BEIE (1 << 0) /* Bus Error Interrupt Enable */ +#define RCAR_CAN_EIER_BLIE BIT(7) /* Bus Lock Interrupt Enable */ +#define RCAR_CAN_EIER_OLIE 
BIT(6) /* Overload Frame Transmit */ + /* Interrupt Enable */ +#define RCAR_CAN_EIER_ORIE BIT(5) /* Receive Overrun Interrupt Enable */ +#define RCAR_CAN_EIER_BORIE BIT(4) /* Bus-Off Recovery Interrupt Enable */ +#define RCAR_CAN_EIER_BOEIE BIT(3) /* Bus-Off Entry Interrupt Enable */ +#define RCAR_CAN_EIER_EPIE BIT(2) /* Error Passive Interrupt Enable */ +#define RCAR_CAN_EIER_EWIE BIT(1) /* Error Warning Interrupt Enable */ +#define RCAR_CAN_EIER_BEIE BIT(0) /* Bus Error Interrupt Enable */ /* Error Interrupt Factor Judge Register bits */ -#define RCAR_CAN_EIFR_BLIF (1 << 7) /* Bus Lock Detect Flag */ -#define RCAR_CAN_EIFR_OLIF (1 << 6) /* Overload Frame Transmission */ - /* Detect Flag */ -#define RCAR_CAN_EIFR_ORIF (1 << 5) /* Receive Overrun Detect Flag */ -#define RCAR_CAN_EIFR_BORIF (1 << 4) /* Bus-Off Recovery Detect Flag */ -#define RCAR_CAN_EIFR_BOEIF (1 << 3) /* Bus-Off Entry Detect Flag */ -#define RCAR_CAN_EIFR_EPIF (1 << 2) /* Error Passive Detect Flag */ -#define RCAR_CAN_EIFR_EWIF (1 << 1) /* Error Warning Detect Flag */ -#define RCAR_CAN_EIFR_BEIF (1 << 0) /* Bus Error Detect Flag */ +#define RCAR_CAN_EIFR_BLIF BIT(7) /* Bus Lock Detect Flag */ +#define RCAR_CAN_EIFR_OLIF BIT(6) /* Overload Frame Transmission */ + /* Detect Flag */ +#define RCAR_CAN_EIFR_ORIF BIT(5) /* Receive Overrun Detect Flag */ +#define RCAR_CAN_EIFR_BORIF BIT(4) /* Bus-Off Recovery Detect Flag */ +#define RCAR_CAN_EIFR_BOEIF BIT(3) /* Bus-Off Entry Detect Flag */ +#define RCAR_CAN_EIFR_EPIF BIT(2) /* Error Passive Detect Flag */ +#define RCAR_CAN_EIFR_EWIF BIT(1) /* Error Warning Detect Flag */ +#define RCAR_CAN_EIFR_BEIF BIT(0) /* Bus Error Detect Flag */ /* Error Code Store Register bits */ -#define RCAR_CAN_ECSR_EDPM (1 << 7) /* Error Display Mode Select Bit */ -#define RCAR_CAN_ECSR_ADEF (1 << 6) /* ACK Delimiter Error Flag */ -#define RCAR_CAN_ECSR_BE0F (1 << 5) /* Bit Error (dominant) Flag */ -#define RCAR_CAN_ECSR_BE1F (1 << 4) /* Bit Error (recessive) Flag */ -#define RCAR_CAN_ECSR_CEF (1 << 3) /* CRC Error Flag */ -#define RCAR_CAN_ECSR_AEF (1 << 2) /* ACK Error Flag */ -#define RCAR_CAN_ECSR_FEF (1 << 1) /* Form Error Flag */ -#define RCAR_CAN_ECSR_SEF (1 << 0) /* Stuff Error Flag */ +#define RCAR_CAN_ECSR_EDPM BIT(7) /* Error Display Mode Select Bit */ +#define RCAR_CAN_ECSR_ADEF BIT(6) /* ACK Delimiter Error Flag */ +#define RCAR_CAN_ECSR_BE0F BIT(5) /* Bit Error (dominant) Flag */ +#define RCAR_CAN_ECSR_BE1F BIT(4) /* Bit Error (recessive) Flag */ +#define RCAR_CAN_ECSR_CEF BIT(3) /* CRC Error Flag */ +#define RCAR_CAN_ECSR_AEF BIT(2) /* ACK Error Flag */ +#define RCAR_CAN_ECSR_FEF BIT(1) /* Form Error Flag */ +#define RCAR_CAN_ECSR_SEF BIT(0) /* Stuff Error Flag */ #define RCAR_CAN_NAPI_WEIGHT 4 #define MAX_STR_READS 0x100 @@ -211,13 +220,12 @@ static void tx_failure_cleanup(struct net_device *ndev) int i; for (i = 0; i < RCAR_CAN_FIFO_DEPTH; i++) - can_free_echo_skb(ndev, i); + can_free_echo_skb(ndev, i, NULL); } static void rcar_can_error(struct net_device *ndev) { struct rcar_can_priv *priv = netdev_priv(ndev); - struct net_device_stats *stats = &ndev->stats; struct can_frame *cf; struct sk_buff *skb; u8 eifr, txerr = 0, rxerr = 0; @@ -229,11 +237,8 @@ static void rcar_can_error(struct net_device *ndev) if (eifr & (RCAR_CAN_EIFR_EWIF | RCAR_CAN_EIFR_EPIF)) { txerr = readb(&priv->regs->tecr); rxerr = readb(&priv->regs->recr); - if (skb) { + if (skb) cf->can_id |= CAN_ERR_CRTL; - cf->data[6] = txerr; - cf->data[7] = rxerr; - } } if (eifr & RCAR_CAN_EIFR_BEIF) { int rx_errors = 0, 
tx_errors = 0; @@ -247,35 +252,35 @@ static void rcar_can_error(struct net_device *ndev) if (ecsr & RCAR_CAN_ECSR_ADEF) { netdev_dbg(priv->ndev, "ACK Delimiter Error\n"); tx_errors++; - writeb(~RCAR_CAN_ECSR_ADEF, &priv->regs->ecsr); + writeb((u8)~RCAR_CAN_ECSR_ADEF, &priv->regs->ecsr); if (skb) cf->data[3] = CAN_ERR_PROT_LOC_ACK_DEL; } if (ecsr & RCAR_CAN_ECSR_BE0F) { netdev_dbg(priv->ndev, "Bit Error (dominant)\n"); tx_errors++; - writeb(~RCAR_CAN_ECSR_BE0F, &priv->regs->ecsr); + writeb((u8)~RCAR_CAN_ECSR_BE0F, &priv->regs->ecsr); if (skb) cf->data[2] |= CAN_ERR_PROT_BIT0; } if (ecsr & RCAR_CAN_ECSR_BE1F) { netdev_dbg(priv->ndev, "Bit Error (recessive)\n"); tx_errors++; - writeb(~RCAR_CAN_ECSR_BE1F, &priv->regs->ecsr); + writeb((u8)~RCAR_CAN_ECSR_BE1F, &priv->regs->ecsr); if (skb) cf->data[2] |= CAN_ERR_PROT_BIT1; } if (ecsr & RCAR_CAN_ECSR_CEF) { netdev_dbg(priv->ndev, "CRC Error\n"); rx_errors++; - writeb(~RCAR_CAN_ECSR_CEF, &priv->regs->ecsr); + writeb((u8)~RCAR_CAN_ECSR_CEF, &priv->regs->ecsr); if (skb) cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; } if (ecsr & RCAR_CAN_ECSR_AEF) { netdev_dbg(priv->ndev, "ACK Error\n"); tx_errors++; - writeb(~RCAR_CAN_ECSR_AEF, &priv->regs->ecsr); + writeb((u8)~RCAR_CAN_ECSR_AEF, &priv->regs->ecsr); if (skb) { cf->can_id |= CAN_ERR_ACK; cf->data[3] = CAN_ERR_PROT_LOC_ACK; @@ -284,14 +289,14 @@ static void rcar_can_error(struct net_device *ndev) if (ecsr & RCAR_CAN_ECSR_FEF) { netdev_dbg(priv->ndev, "Form Error\n"); rx_errors++; - writeb(~RCAR_CAN_ECSR_FEF, &priv->regs->ecsr); + writeb((u8)~RCAR_CAN_ECSR_FEF, &priv->regs->ecsr); if (skb) cf->data[2] |= CAN_ERR_PROT_FORM; } if (ecsr & RCAR_CAN_ECSR_SEF) { netdev_dbg(priv->ndev, "Stuff Error\n"); rx_errors++; - writeb(~RCAR_CAN_ECSR_SEF, &priv->regs->ecsr); + writeb((u8)~RCAR_CAN_ECSR_SEF, &priv->regs->ecsr); if (skb) cf->data[2] |= CAN_ERR_PROT_STUFF; } @@ -299,14 +304,14 @@ static void rcar_can_error(struct net_device *ndev) priv->can.can_stats.bus_error++; ndev->stats.rx_errors += rx_errors; ndev->stats.tx_errors += tx_errors; - writeb(~RCAR_CAN_EIFR_BEIF, &priv->regs->eifr); + writeb((u8)~RCAR_CAN_EIFR_BEIF, &priv->regs->eifr); } if (eifr & RCAR_CAN_EIFR_EWIF) { netdev_dbg(priv->ndev, "Error warning interrupt\n"); priv->can.state = CAN_STATE_ERROR_WARNING; priv->can.can_stats.error_warning++; /* Clear interrupt condition */ - writeb(~RCAR_CAN_EIFR_EWIF, &priv->regs->eifr); + writeb((u8)~RCAR_CAN_EIFR_EWIF, &priv->regs->eifr); if (skb) cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_WARNING : CAN_ERR_CRTL_RX_WARNING; @@ -316,7 +321,7 @@ static void rcar_can_error(struct net_device *ndev) priv->can.state = CAN_STATE_ERROR_PASSIVE; priv->can.can_stats.error_passive++; /* Clear interrupt condition */ - writeb(~RCAR_CAN_EIFR_EPIF, &priv->regs->eifr); + writeb((u8)~RCAR_CAN_EIFR_EPIF, &priv->regs->eifr); if (skb) cf->data[1] = txerr > rxerr ? 
CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE; @@ -328,17 +333,21 @@ static void rcar_can_error(struct net_device *ndev) writeb(priv->ier, &priv->regs->ier); priv->can.state = CAN_STATE_BUS_OFF; /* Clear interrupt condition */ - writeb(~RCAR_CAN_EIFR_BOEIF, &priv->regs->eifr); + writeb((u8)~RCAR_CAN_EIFR_BOEIF, &priv->regs->eifr); priv->can.can_stats.bus_off++; can_bus_off(ndev); if (skb) cf->can_id |= CAN_ERR_BUSOFF; + } else if (skb) { + cf->can_id |= CAN_ERR_CNT; + cf->data[6] = txerr; + cf->data[7] = rxerr; } if (eifr & RCAR_CAN_EIFR_ORIF) { netdev_dbg(priv->ndev, "Receive overrun error interrupt\n"); ndev->stats.rx_over_errors++; ndev->stats.rx_errors++; - writeb(~RCAR_CAN_EIFR_ORIF, &priv->regs->eifr); + writeb((u8)~RCAR_CAN_EIFR_ORIF, &priv->regs->eifr); if (skb) { cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; @@ -349,18 +358,15 @@ static void rcar_can_error(struct net_device *ndev) "Overload Frame Transmission error interrupt\n"); ndev->stats.rx_over_errors++; ndev->stats.rx_errors++; - writeb(~RCAR_CAN_EIFR_OLIF, &priv->regs->eifr); + writeb((u8)~RCAR_CAN_EIFR_OLIF, &priv->regs->eifr); if (skb) { cf->can_id |= CAN_ERR_PROT; cf->data[2] |= CAN_ERR_PROT_OVERLOAD; } } - if (skb) { - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + if (skb) netif_rx(skb); - } } static void rcar_can_tx_done(struct net_device *ndev) @@ -370,24 +376,23 @@ static void rcar_can_tx_done(struct net_device *ndev) u8 isr; while (1) { - u8 unsent = readb(&priv->regs->tfcr); + u8 unsent = FIELD_GET(RCAR_CAN_TFCR_TFUST, + readb(&priv->regs->tfcr)); - unsent = (unsent & RCAR_CAN_TFCR_TFUST) >> - RCAR_CAN_TFCR_TFUST_SHIFT; if (priv->tx_head - priv->tx_tail <= unsent) break; stats->tx_packets++; - stats->tx_bytes += priv->tx_dlc[priv->tx_tail % - RCAR_CAN_FIFO_DEPTH]; - priv->tx_dlc[priv->tx_tail % RCAR_CAN_FIFO_DEPTH] = 0; - can_get_echo_skb(ndev, priv->tx_tail % RCAR_CAN_FIFO_DEPTH); + stats->tx_bytes += + can_get_echo_skb(ndev, + priv->tx_tail % RCAR_CAN_FIFO_DEPTH, + NULL); + priv->tx_tail++; netif_wake_queue(ndev); } /* Clear interrupt */ isr = readb(&priv->regs->isr); writeb(isr & ~RCAR_CAN_ISR_TXFF, &priv->regs->isr); - can_led_event(ndev, CAN_LED_EVENT_TX); } static irqreturn_t rcar_can_interrupt(int irq, void *dev_id) @@ -418,15 +423,16 @@ static irqreturn_t rcar_can_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static void rcar_can_set_bittiming(struct net_device *dev) +static void rcar_can_set_bittiming(struct net_device *ndev) { - struct rcar_can_priv *priv = netdev_priv(dev); + struct rcar_can_priv *priv = netdev_priv(ndev); struct can_bittiming *bt = &priv->can.bittiming; u32 bcr; - bcr = RCAR_CAN_BCR_TSEG1(bt->phase_seg1 + bt->prop_seg - 1) | - RCAR_CAN_BCR_BPR(bt->brp - 1) | RCAR_CAN_BCR_SJW(bt->sjw - 1) | - RCAR_CAN_BCR_TSEG2(bt->phase_seg2 - 1); + bcr = FIELD_PREP(RCAR_CAN_BCR_TSEG1, bt->phase_seg1 + bt->prop_seg - 1) | + FIELD_PREP(RCAR_CAN_BCR_BRP, bt->brp - 1) | + FIELD_PREP(RCAR_CAN_BCR_SJW, bt->sjw - 1) | + FIELD_PREP(RCAR_CAN_BCR_TSEG2, bt->phase_seg2 - 1); /* Don't overwrite CLKR with 32-bit BCR access; CLKR has 8-bit access. * All the registers are big-endian but they get byte-swapped on 32-bit * read/write (but not on 8-bit, contrary to the manuals)... 
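
[Editor's note] The hunk above is typical of this patch: the driver's hand-rolled shift macros are replaced by GENMASK()/FIELD_PREP() when composing the BCR bittiming value. Below is a minimal, self-contained userspace sketch of that packing scheme. The GENMASK/FIELD_PREP stand-ins and the bittiming numbers are illustrative assumptions for this note only; they are not the kernel's <linux/bitfield.h> implementation and not values taken from this patch.

/*
 * Stand-in helpers with the same semantics as the kernel's GENMASK()
 * and FIELD_PREP() for 32-bit values (assumes 32-bit unsigned int and
 * a GCC/Clang compiler for __builtin_ctz()).
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)       (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, v) (((v) << __builtin_ctz(mask)) & (mask))

/* Same field layout as the RCAR_CAN_BCR_* definitions in the hunk above. */
#define BCR_TSEG1 GENMASK(23, 20)
#define BCR_BRP   GENMASK(17, 8)
#define BCR_SJW   GENMASK(5, 4)
#define BCR_TSEG2 GENMASK(2, 0)

int main(void)
{
	/* Hypothetical bittiming values, for illustration only. */
	unsigned int prop_seg = 1, phase_seg1 = 7, phase_seg2 = 4;
	unsigned int sjw = 1, brp = 10;

	/* Pack the register image the same way rcar_can_set_bittiming() does. */
	uint32_t bcr = FIELD_PREP(BCR_TSEG1, phase_seg1 + prop_seg - 1) |
		       FIELD_PREP(BCR_BRP, brp - 1) |
		       FIELD_PREP(BCR_SJW, sjw - 1) |
		       FIELD_PREP(BCR_TSEG2, phase_seg2 - 1);

	printf("BCR = 0x%08x\n", bcr);
	return 0;
}

The point of the conversion is that each field is described once, as a GENMASK() mask carrying both position and width, and FIELD_PREP()/FIELD_GET() derive the shift from it, instead of every macro repeating its own "& mask, << shift" arithmetic.
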
@@ -450,16 +456,17 @@ static void rcar_can_start(struct net_device *ndev) ctlr &= ~RCAR_CAN_CTLR_SLPM; writew(ctlr, &priv->regs->ctlr); /* Go to reset mode */ - ctlr |= RCAR_CAN_CTLR_CANM_FORCE_RESET; + ctlr |= FIELD_PREP(RCAR_CAN_CTLR_CANM, RCAR_CAN_CTLR_CANM_FORCE_RESET); writew(ctlr, &priv->regs->ctlr); for (i = 0; i < MAX_STR_READS; i++) { if (readw(&priv->regs->str) & RCAR_CAN_STR_RSTST) break; } rcar_can_set_bittiming(ndev); - ctlr |= RCAR_CAN_CTLR_IDFM_MIXED; /* Select mixed ID mode */ - ctlr |= RCAR_CAN_CTLR_BOM_ENT; /* Entry to halt mode automatically */ - /* at bus-off */ + /* Select mixed ID mode */ + ctlr |= FIELD_PREP(RCAR_CAN_CTLR_IDFM, RCAR_CAN_CTLR_IDFM_MIXED); + /* Entry to halt mode automatically at bus-off */ + ctlr |= FIELD_PREP(RCAR_CAN_CTLR_BOM, RCAR_CAN_CTLR_BOM_ENT); ctlr |= RCAR_CAN_CTLR_MBM; /* Select FIFO mailbox mode */ ctlr |= RCAR_CAN_CTLR_MLM; /* Overrun mode */ writew(ctlr, &priv->regs->ctlr); @@ -489,7 +496,9 @@ static void rcar_can_start(struct net_device *ndev) priv->can.state = CAN_STATE_ERROR_ACTIVE; /* Go to operation mode */ - writew(ctlr & ~RCAR_CAN_CTLR_CANM, &priv->regs->ctlr); + ctlr &= ~RCAR_CAN_CTLR_CANM; + ctlr |= FIELD_PREP(RCAR_CAN_CTLR_CANM, RCAR_CAN_CTLR_CANM_OPER); + writew(ctlr, &priv->regs->ctlr); for (i = 0; i < MAX_STR_READS; i++) { if (!(readw(&priv->regs->str) & RCAR_CAN_STR_RSTST)) break; @@ -504,32 +513,30 @@ static int rcar_can_open(struct net_device *ndev) struct rcar_can_priv *priv = netdev_priv(ndev); int err; - err = clk_prepare_enable(priv->clk); + err = pm_runtime_resume_and_get(ndev->dev.parent); if (err) { - netdev_err(ndev, - "failed to enable peripheral clock, error %d\n", - err); + netdev_err(ndev, "pm_runtime_resume_and_get() failed %pe\n", + ERR_PTR(err)); goto out; } err = clk_prepare_enable(priv->can_clk); if (err) { - netdev_err(ndev, "failed to enable CAN clock, error %d\n", - err); - goto out_clock; + netdev_err(ndev, "failed to enable CAN clock: %pe\n", + ERR_PTR(err)); + goto out_rpm; } err = open_candev(ndev); if (err) { - netdev_err(ndev, "open_candev() failed, error %d\n", err); + netdev_err(ndev, "open_candev() failed %pe\n", ERR_PTR(err)); goto out_can_clock; } napi_enable(&priv->napi); err = request_irq(ndev->irq, rcar_can_interrupt, 0, ndev->name, ndev); if (err) { - netdev_err(ndev, "request_irq(%d) failed, error %d\n", - ndev->irq, err); + netdev_err(ndev, "request_irq(%d) failed %pe\n", ndev->irq, + ERR_PTR(err)); goto out_close; } - can_led_event(ndev, CAN_LED_EVENT_OPEN); rcar_can_start(ndev); netif_start_queue(ndev); return 0; @@ -538,8 +545,8 @@ out_close: close_candev(ndev); out_can_clock: clk_disable_unprepare(priv->can_clk); -out_clock: - clk_disable_unprepare(priv->clk); +out_rpm: + pm_runtime_put(ndev->dev.parent); out: return err; } @@ -552,7 +559,7 @@ static void rcar_can_stop(struct net_device *ndev) /* Go to (force) reset mode */ ctlr = readw(&priv->regs->ctlr); - ctlr |= RCAR_CAN_CTLR_CANM_FORCE_RESET; + ctlr |= FIELD_PREP(RCAR_CAN_CTLR_CANM, RCAR_CAN_CTLR_CANM_FORCE_RESET); writew(ctlr, &priv->regs->ctlr); for (i = 0; i < MAX_STR_READS; i++) { if (readw(&priv->regs->str) & RCAR_CAN_STR_RSTST) @@ -577,9 +584,8 @@ static int rcar_can_close(struct net_device *ndev) free_irq(ndev->irq, ndev); napi_disable(&priv->napi); clk_disable_unprepare(priv->can_clk); - clk_disable_unprepare(priv->clk); + pm_runtime_put(ndev->dev.parent); close_candev(ndev); - can_led_event(ndev, CAN_LED_EVENT_STOP); return 0; } @@ -590,28 +596,28 @@ static netdev_tx_t rcar_can_start_xmit(struct sk_buff *skb, struct 
can_frame *cf = (struct can_frame *)skb->data; u32 data, i; - if (can_dropped_invalid_skb(ndev, skb)) + if (can_dev_dropped_skb(ndev, skb)) return NETDEV_TX_OK; if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */ - data = (cf->can_id & CAN_EFF_MASK) | RCAR_CAN_IDE; + data = FIELD_PREP(RCAR_CAN_EID, cf->can_id & CAN_EFF_MASK) | + RCAR_CAN_IDE; else /* Standard frame format */ - data = (cf->can_id & CAN_SFF_MASK) << RCAR_CAN_SID_SHIFT; + data = FIELD_PREP(RCAR_CAN_SID, cf->can_id & CAN_SFF_MASK); if (cf->can_id & CAN_RTR_FLAG) { /* Remote transmission request */ data |= RCAR_CAN_RTR; } else { - for (i = 0; i < cf->can_dlc; i++) + for (i = 0; i < cf->len; i++) writeb(cf->data[i], &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].data[i]); } writel(data, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].id); - writeb(cf->can_dlc, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].dlc); + writeb(cf->len, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].dlc); - priv->tx_dlc[priv->tx_head % RCAR_CAN_FIFO_DEPTH] = cf->can_dlc; - can_put_echo_skb(skb, ndev, priv->tx_head % RCAR_CAN_FIFO_DEPTH); + can_put_echo_skb(skb, ndev, priv->tx_head % RCAR_CAN_FIFO_DEPTH, 0); priv->tx_head++; /* Start Tx: write 0xff to the TFPCR register to increment * the CPU-side pointer for the transmit FIFO to the next @@ -629,7 +635,10 @@ static const struct net_device_ops rcar_can_netdev_ops = { .ndo_open = rcar_can_open, .ndo_stop = rcar_can_close, .ndo_start_xmit = rcar_can_start_xmit, - .ndo_change_mtu = can_change_mtu, +}; + +static const struct ethtool_ops rcar_can_ethtool_ops = { + .get_ts_info = ethtool_op_get_ts_info, }; static void rcar_can_rx_pkt(struct rcar_can_priv *priv) @@ -648,24 +657,23 @@ static void rcar_can_rx_pkt(struct rcar_can_priv *priv) data = readl(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].id); if (data & RCAR_CAN_IDE) - cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG; + cf->can_id = FIELD_GET(RCAR_CAN_EID, data) | CAN_EFF_FLAG; else - cf->can_id = (data >> RCAR_CAN_SID_SHIFT) & CAN_SFF_MASK; + cf->can_id = FIELD_GET(RCAR_CAN_SID, data); dlc = readb(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].dlc); - cf->can_dlc = get_can_dlc(dlc); + cf->len = can_cc_dlc2len(dlc); if (data & RCAR_CAN_RTR) { cf->can_id |= CAN_RTR_FLAG; } else { - for (dlc = 0; dlc < cf->can_dlc; dlc++) + for (dlc = 0; dlc < cf->len; dlc++) cf->data[dlc] = readb(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].data[dlc]); - } - can_led_event(priv->ndev, CAN_LED_EVENT_RX); - - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; + } stats->rx_packets++; + netif_receive_skb(skb); } @@ -713,18 +721,21 @@ static int rcar_can_do_set_mode(struct net_device *ndev, enum can_mode mode) } } -static int rcar_can_get_berr_counter(const struct net_device *dev, +static int rcar_can_get_berr_counter(const struct net_device *ndev, struct can_berr_counter *bec) { - struct rcar_can_priv *priv = netdev_priv(dev); + struct rcar_can_priv *priv = netdev_priv(ndev); int err; - err = clk_prepare_enable(priv->clk); + err = pm_runtime_resume_and_get(ndev->dev.parent); if (err) return err; + bec->txerr = readb(&priv->regs->tecr); bec->rxerr = readb(&priv->regs->recr); - clk_disable_unprepare(priv->clk); + + pm_runtime_put(ndev->dev.parent); + return 0; } @@ -736,36 +747,24 @@ static const char * const clock_names[] = { static int rcar_can_probe(struct platform_device *pdev) { - struct rcar_can_platform_data *pdata; + struct device *dev = &pdev->dev; struct rcar_can_priv *priv; struct net_device *ndev; - struct resource *mem; void __iomem *addr; u32 clock_select = CLKR_CLKP1; int err = -ENODEV; int irq; - if 
(pdev->dev.of_node) { - of_property_read_u32(pdev->dev.of_node, - "renesas,can-clock-select", &clock_select); - } else { - pdata = dev_get_platdata(&pdev->dev); - if (!pdata) { - dev_err(&pdev->dev, "No platform data provided!\n"); - goto fail; - } - clock_select = pdata->clock_select; - } + of_property_read_u32(dev->of_node, "renesas,can-clock-select", + &clock_select); irq = platform_get_irq(pdev, 0); if (irq < 0) { - dev_err(&pdev->dev, "No IRQ resource\n"); err = irq; goto fail; } - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - addr = devm_ioremap_resource(&pdev->dev, mem); + addr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(addr)) { err = PTR_ERR(addr); goto fail; @@ -773,34 +772,26 @@ static int rcar_can_probe(struct platform_device *pdev) ndev = alloc_candev(sizeof(struct rcar_can_priv), RCAR_CAN_FIFO_DEPTH); if (!ndev) { - dev_err(&pdev->dev, "alloc_candev() failed\n"); err = -ENOMEM; goto fail; } priv = netdev_priv(ndev); - priv->clk = devm_clk_get(&pdev->dev, "clkp1"); - if (IS_ERR(priv->clk)) { - err = PTR_ERR(priv->clk); - dev_err(&pdev->dev, "cannot get peripheral clock, error %d\n", - err); - goto fail_clk; - } - if (!(BIT(clock_select) & RCAR_SUPPORTED_CLOCKS)) { err = -EINVAL; - dev_err(&pdev->dev, "invalid CAN clock selected\n"); + dev_err(dev, "invalid CAN clock selected\n"); goto fail_clk; } - priv->can_clk = devm_clk_get(&pdev->dev, clock_names[clock_select]); + priv->can_clk = devm_clk_get(dev, clock_names[clock_select]); if (IS_ERR(priv->can_clk)) { + dev_err(dev, "cannot get CAN clock: %pe\n", priv->can_clk); err = PTR_ERR(priv->can_clk); - dev_err(&pdev->dev, "cannot get CAN clock, error %d\n", err); goto fail_clk; } ndev->netdev_ops = &rcar_can_netdev_ops; + ndev->ethtool_ops = &rcar_can_ethtool_ops; ndev->irq = irq; ndev->flags |= IFF_ECHO; priv->ndev = ndev; @@ -812,23 +803,24 @@ static int rcar_can_probe(struct platform_device *pdev) priv->can.do_get_berr_counter = rcar_can_get_berr_counter; priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING; platform_set_drvdata(pdev, ndev); - SET_NETDEV_DEV(ndev, &pdev->dev); + SET_NETDEV_DEV(ndev, dev); + + netif_napi_add_weight(ndev, &priv->napi, rcar_can_rx_poll, + RCAR_CAN_NAPI_WEIGHT); + + pm_runtime_enable(dev); - netif_napi_add(ndev, &priv->napi, rcar_can_rx_poll, - RCAR_CAN_NAPI_WEIGHT); err = register_candev(ndev); if (err) { - dev_err(&pdev->dev, "register_candev() failed, error %d\n", - err); - goto fail_candev; + dev_err(dev, "register_candev() failed %pe\n", ERR_PTR(err)); + goto fail_rpm; } - devm_can_led_init(ndev); - - dev_info(&pdev->dev, "device registered (IRQ%d)\n", ndev->irq); + dev_info(dev, "device registered (IRQ%d)\n", ndev->irq); return 0; -fail_candev: +fail_rpm: + pm_runtime_disable(dev); netif_napi_del(&priv->napi); fail_clk: free_candev(ndev); @@ -836,66 +828,65 @@ fail: return err; } -static int rcar_can_remove(struct platform_device *pdev) +static void rcar_can_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct rcar_can_priv *priv = netdev_priv(ndev); unregister_candev(ndev); + pm_runtime_disable(&pdev->dev); netif_napi_del(&priv->napi); free_candev(ndev); - return 0; } -static int __maybe_unused rcar_can_suspend(struct device *dev) +static int rcar_can_suspend(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct rcar_can_priv *priv = netdev_priv(ndev); u16 ctlr; - if (netif_running(ndev)) { - netif_stop_queue(ndev); - netif_device_detach(ndev); - } + if (!netif_running(ndev)) + return 0; + + 
netif_stop_queue(ndev); + netif_device_detach(ndev); + ctlr = readw(&priv->regs->ctlr); - ctlr |= RCAR_CAN_CTLR_CANM_HALT; + ctlr |= FIELD_PREP(RCAR_CAN_CTLR_CANM, RCAR_CAN_CTLR_CANM_HALT); writew(ctlr, &priv->regs->ctlr); ctlr |= RCAR_CAN_CTLR_SLPM; writew(ctlr, &priv->regs->ctlr); priv->can.state = CAN_STATE_SLEEPING; - clk_disable(priv->clk); + pm_runtime_put(dev); return 0; } -static int __maybe_unused rcar_can_resume(struct device *dev) +static int rcar_can_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); - struct rcar_can_priv *priv = netdev_priv(ndev); - u16 ctlr; int err; - err = clk_enable(priv->clk); + if (!netif_running(ndev)) + return 0; + + err = pm_runtime_resume_and_get(dev); if (err) { - netdev_err(ndev, "clk_enable() failed, error %d\n", err); + netdev_err(ndev, "pm_runtime_resume_and_get() failed %pe\n", + ERR_PTR(err)); return err; } - ctlr = readw(&priv->regs->ctlr); - ctlr &= ~RCAR_CAN_CTLR_SLPM; - writew(ctlr, &priv->regs->ctlr); - ctlr &= ~RCAR_CAN_CTLR_CANM; - writew(ctlr, &priv->regs->ctlr); - priv->can.state = CAN_STATE_ERROR_ACTIVE; + rcar_can_start(ndev); + + netif_device_attach(ndev); + netif_start_queue(ndev); - if (netif_running(ndev)) { - netif_device_attach(ndev); - netif_start_queue(ndev); - } return 0; } -static SIMPLE_DEV_PM_OPS(rcar_can_pm_ops, rcar_can_suspend, rcar_can_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(rcar_can_pm_ops, rcar_can_suspend, + rcar_can_resume); static const struct of_device_id rcar_can_of_table[] __maybe_unused = { { .compatible = "renesas,can-r8a7778" }, @@ -913,7 +904,7 @@ static struct platform_driver rcar_can_driver = { .driver = { .name = RCAR_CAN_DRV_NAME, .of_match_table = of_match_ptr(rcar_can_of_table), - .pm = &rcar_can_pm_ops, + .pm = pm_sleep_ptr(&rcar_can_pm_ops), }, .probe = rcar_can_probe, .remove = rcar_can_remove, diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c index 05410008aa6b..7895e1fdea1c 100644 --- a/drivers/net/can/rcar/rcar_canfd.c +++ b/drivers/net/can/rcar/rcar_canfd.c @@ -21,22 +21,24 @@ * wherever it is modified to a readable name. 
*/ +#include <linux/bitfield.h> +#include <linux/bitmap.h> +#include <linux/bitops.h> +#include <linux/can/dev.h> +#include <linux/clk.h> +#include <linux/errno.h> +#include <linux/ethtool.h> +#include <linux/interrupt.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/interrupt.h> -#include <linux/errno.h> #include <linux/netdevice.h> -#include <linux/platform_device.h> -#include <linux/can/led.h> -#include <linux/can/dev.h> -#include <linux/clk.h> #include <linux/of.h> -#include <linux/of_device.h> -#include <linux/bitmap.h> -#include <linux/bitops.h> -#include <linux/iopoll.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/reset.h> +#include <linux/types.h> #define RCANFD_DRV_NAME "rcar_canfd" @@ -73,27 +75,24 @@ #define RCANFD_GSTS_GNOPM (BIT(0) | BIT(1) | BIT(2) | BIT(3)) /* RSCFDnCFDGERFL / RSCFDnGERFL */ -#define RCANFD_GERFL_EEF1 BIT(17) -#define RCANFD_GERFL_EEF0 BIT(16) +#define RCANFD_GERFL_EEF GENMASK(23, 16) #define RCANFD_GERFL_CMPOF BIT(3) /* CAN FD only */ #define RCANFD_GERFL_THLES BIT(2) #define RCANFD_GERFL_MES BIT(1) #define RCANFD_GERFL_DEF BIT(0) -#define RCANFD_GERFL_ERR(gpriv, x) ((x) & (RCANFD_GERFL_EEF1 |\ - RCANFD_GERFL_EEF0 | RCANFD_GERFL_MES |\ - (gpriv->fdmode ?\ - RCANFD_GERFL_CMPOF : 0))) +#define RCANFD_GERFL_ERR(gpriv, x) \ +({\ + typeof(gpriv) (_gpriv) = (gpriv); \ + ((x) & ((FIELD_PREP(RCANFD_GERFL_EEF, (_gpriv)->channels_mask)) | \ + RCANFD_GERFL_MES | ((_gpriv)->fdmode ? RCANFD_GERFL_CMPOF : 0))); \ +}) /* AFL Rx rules registers */ -/* RSCFDnCFDGAFLCFG0 / RSCFDnGAFLCFG0 */ -#define RCANFD_GAFLCFG_SETRNC(n, x) (((x) & 0xff) << (24 - n * 8)) -#define RCANFD_GAFLCFG_GETRNC(n, x) (((x) >> (24 - n * 8)) & 0xff) - /* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */ #define RCANFD_GAFLECTR_AFLDAE BIT(8) -#define RCANFD_GAFLECTR_AFLPN(x) ((x) & 0x1f) +#define RCANFD_GAFLECTR_AFLPN(gpriv, page_num) ((page_num) & (gpriv)->info->max_aflpn) /* RSCFDnCFDGAFLIDj / RSCFDnGAFLIDj */ #define RCANFD_GAFLID_GAFLLB BIT(29) @@ -104,16 +103,13 @@ /* Channel register bits */ /* RSCFDnCmCFG - Classical CAN only */ -#define RCANFD_CFG_SJW(x) (((x) & 0x3) << 24) -#define RCANFD_CFG_TSEG2(x) (((x) & 0x7) << 20) -#define RCANFD_CFG_TSEG1(x) (((x) & 0xf) << 16) -#define RCANFD_CFG_BRP(x) (((x) & 0x3ff) << 0) +#define RCANFD_CFG_SJW GENMASK(25, 24) +#define RCANFD_CFG_TSEG2 GENMASK(22, 20) +#define RCANFD_CFG_TSEG1 GENMASK(19, 16) +#define RCANFD_CFG_BRP GENMASK(9, 0) /* RSCFDnCFDCmNCFG - CAN FD only */ -#define RCANFD_NCFG_NTSEG2(x) (((x) & 0x1f) << 24) -#define RCANFD_NCFG_NTSEG1(x) (((x) & 0x7f) << 16) -#define RCANFD_NCFG_NSJW(x) (((x) & 0x1f) << 11) -#define RCANFD_NCFG_NBRP(x) (((x) & 0x3ff) << 0) +#define RCANFD_NCFG_NBRP GENMASK(9, 0) /* RSCFDnCFDCmCTR / RSCFDnCmCTR */ #define RCANFD_CCTR_CTME BIT(24) @@ -173,15 +169,24 @@ #define RCANFD_CERFL_ERR(x) ((x) & (0x7fff)) /* above bits 14:0 */ /* RSCFDnCFDCmDCFG */ -#define RCANFD_DCFG_DSJW(x) (((x) & 0x7) << 24) -#define RCANFD_DCFG_DTSEG2(x) (((x) & 0x7) << 20) -#define RCANFD_DCFG_DTSEG1(x) (((x) & 0xf) << 16) -#define RCANFD_DCFG_DBRP(x) (((x) & 0xff) << 0) +#define RCANFD_DCFG_DBRP GENMASK(7, 0) /* RSCFDnCFDCmFDCFG */ +#define RCANFD_GEN4_FDCFG_CLOE BIT(30) +#define RCANFD_GEN4_FDCFG_FDOE BIT(28) +#define RCANFD_FDCFG_TDCO GENMASK(23, 16) #define RCANFD_FDCFG_TDCE BIT(9) #define RCANFD_FDCFG_TDCOC BIT(8) -#define RCANFD_FDCFG_TDCO(x) (((x) & 0x7f) >> 16) + +/* 
RSCFDnCFDCmFDSTS */ +#define RCANFD_FDSTS_SOC GENMASK(31, 24) +#define RCANFD_FDSTS_EOC GENMASK(23, 16) +#define RCANFD_GEN4_FDSTS_TDCVF BIT(15) +#define RCANFD_GEN4_FDSTS_PNSTS GENMASK(13, 12) +#define RCANFD_FDSTS_SOCO BIT(9) +#define RCANFD_FDSTS_EOCO BIT(8) +#define RCANFD_FDSTS_TDCVF BIT(7) +#define RCANFD_FDSTS_TDCR GENMASK(7, 0) /* RSCFDnCFDRFCCx */ #define RCANFD_RFCC_RFIM BIT(12) @@ -202,8 +207,6 @@ /* RSCFDnCFDRFPTRx */ #define RCANFD_RFPTR_RFDLC(x) (((x) >> 28) & 0xf) -#define RCANFD_RFPTR_RFPTR(x) (((x) >> 16) & 0xfff) -#define RCANFD_RFPTR_RFTS(x) (((x) >> 0) & 0xffff) /* RSCFDnCFDRFFDSTSx */ #define RCANFD_RFFDSTS_RFFDF BIT(2) @@ -213,10 +216,14 @@ /* Common FIFO bits */ /* RSCFDnCFDCFCCk */ -#define RCANFD_CFCC_CFTML(x) (((x) & 0xf) << 20) -#define RCANFD_CFCC_CFM(x) (((x) & 0x3) << 16) +#define RCANFD_CFCC_CFTML(gpriv, cftml) \ +({\ + typeof(gpriv) (_gpriv) = (gpriv); \ + (((cftml) & (_gpriv)->info->max_cftml) << (_gpriv)->info->sh->cftml); \ +}) +#define RCANFD_CFCC_CFM(gpriv, x) (((x) & 0x3) << (gpriv)->info->sh->cfm) #define RCANFD_CFCC_CFIM BIT(12) -#define RCANFD_CFCC_CFDC(x) (((x) & 0x7) << 8) +#define RCANFD_CFCC_CFDC(gpriv, x) (((x) & 0x7) << (gpriv)->info->sh->cfdc) #define RCANFD_CFCC_CFPLS(x) (((x) & 0x7) << 4) #define RCANFD_CFCC_CFTXIE BIT(2) #define RCANFD_CFCC_CFE BIT(0) @@ -231,12 +238,9 @@ /* RSCFDnCFDCFIDk */ #define RCANFD_CFID_CFIDE BIT(31) #define RCANFD_CFID_CFRTR BIT(30) -#define RCANFD_CFID_CFID_MASK(x) ((x) & 0x1fffffff) /* RSCFDnCFDCFPTRk */ #define RCANFD_CFPTR_CFDLC(x) (((x) & 0xf) << 28) -#define RCANFD_CFPTR_CFPTR(x) (((x) & 0xfff) << 16) -#define RCANFD_CFPTR_CFTS(x) (((x) & 0xff) << 0) /* RSCFDnCFDCFFDCSTSk */ #define RCANFD_CFFDCSTS_CFFDF BIT(2) @@ -276,87 +280,32 @@ #define RCANFD_GTSC (0x0094) /* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */ #define RCANFD_GAFLECTR (0x0098) -/* RSCFDnCFDGAFLCFG0 / RSCFDnGAFLCFG0 */ -#define RCANFD_GAFLCFG0 (0x009c) -/* RSCFDnCFDGAFLCFG1 / RSCFDnGAFLCFG1 */ -#define RCANFD_GAFLCFG1 (0x00a0) +/* RSCFDnCFDGAFLCFG / RSCFDnGAFLCFG */ +#define RCANFD_GAFLCFG(w) (0x009c + (0x04 * (w))) /* RSCFDnCFDRMNB / RSCFDnRMNB */ #define RCANFD_RMNB (0x00a4) /* RSCFDnCFDRMND / RSCFDnRMND */ #define RCANFD_RMND(y) (0x00a8 + (0x04 * (y))) /* RSCFDnCFDRFCCx / RSCFDnRFCCx */ -#define RCANFD_RFCC(x) (0x00b8 + (0x04 * (x))) +#define RCANFD_RFCC(gpriv, x) ((gpriv)->info->regs->rfcc + (0x04 * (x))) /* RSCFDnCFDRFSTSx / RSCFDnRFSTSx */ -#define RCANFD_RFSTS(x) (0x00d8 + (0x04 * (x))) +#define RCANFD_RFSTS(gpriv, x) (RCANFD_RFCC(gpriv, x) + 0x20) /* RSCFDnCFDRFPCTRx / RSCFDnRFPCTRx */ -#define RCANFD_RFPCTR(x) (0x00f8 + (0x04 * (x))) +#define RCANFD_RFPCTR(gpriv, x) (RCANFD_RFCC(gpriv, x) + 0x40) /* Common FIFO Control registers */ /* RSCFDnCFDCFCCx / RSCFDnCFCCx */ -#define RCANFD_CFCC(ch, idx) (0x0118 + (0x0c * (ch)) + \ - (0x04 * (idx))) +#define RCANFD_CFCC(gpriv, ch, idx) \ + ((gpriv)->info->regs->cfcc + (0x0c * (ch)) + (0x04 * (idx))) /* RSCFDnCFDCFSTSx / RSCFDnCFSTSx */ -#define RCANFD_CFSTS(ch, idx) (0x0178 + (0x0c * (ch)) + \ - (0x04 * (idx))) +#define RCANFD_CFSTS(gpriv, ch, idx) \ + ((gpriv)->info->regs->cfsts + (0x0c * (ch)) + (0x04 * (idx))) /* RSCFDnCFDCFPCTRx / RSCFDnCFPCTRx */ -#define RCANFD_CFPCTR(ch, idx) (0x01d8 + (0x0c * (ch)) + \ - (0x04 * (idx))) - -/* RSCFDnCFDFESTS / RSCFDnFESTS */ -#define RCANFD_FESTS (0x0238) -/* RSCFDnCFDFFSTS / RSCFDnFFSTS */ -#define RCANFD_FFSTS (0x023c) -/* RSCFDnCFDFMSTS / RSCFDnFMSTS */ -#define RCANFD_FMSTS (0x0240) -/* RSCFDnCFDRFISTS / RSCFDnRFISTS */ -#define RCANFD_RFISTS (0x0244) -/* 
RSCFDnCFDCFRISTS / RSCFDnCFRISTS */ -#define RCANFD_CFRISTS (0x0248) -/* RSCFDnCFDCFTISTS / RSCFDnCFTISTS */ -#define RCANFD_CFTISTS (0x024c) - -/* RSCFDnCFDTMCp / RSCFDnTMCp */ -#define RCANFD_TMC(p) (0x0250 + (0x01 * (p))) -/* RSCFDnCFDTMSTSp / RSCFDnTMSTSp */ -#define RCANFD_TMSTS(p) (0x02d0 + (0x01 * (p))) - -/* RSCFDnCFDTMTRSTSp / RSCFDnTMTRSTSp */ -#define RCANFD_TMTRSTS(y) (0x0350 + (0x04 * (y))) -/* RSCFDnCFDTMTARSTSp / RSCFDnTMTARSTSp */ -#define RCANFD_TMTARSTS(y) (0x0360 + (0x04 * (y))) -/* RSCFDnCFDTMTCSTSp / RSCFDnTMTCSTSp */ -#define RCANFD_TMTCSTS(y) (0x0370 + (0x04 * (y))) -/* RSCFDnCFDTMTASTSp / RSCFDnTMTASTSp */ -#define RCANFD_TMTASTS(y) (0x0380 + (0x04 * (y))) -/* RSCFDnCFDTMIECy / RSCFDnTMIECy */ -#define RCANFD_TMIEC(y) (0x0390 + (0x04 * (y))) - -/* RSCFDnCFDTXQCCm / RSCFDnTXQCCm */ -#define RCANFD_TXQCC(m) (0x03a0 + (0x04 * (m))) -/* RSCFDnCFDTXQSTSm / RSCFDnTXQSTSm */ -#define RCANFD_TXQSTS(m) (0x03c0 + (0x04 * (m))) -/* RSCFDnCFDTXQPCTRm / RSCFDnTXQPCTRm */ -#define RCANFD_TXQPCTR(m) (0x03e0 + (0x04 * (m))) - -/* RSCFDnCFDTHLCCm / RSCFDnTHLCCm */ -#define RCANFD_THLCC(m) (0x0400 + (0x04 * (m))) -/* RSCFDnCFDTHLSTSm / RSCFDnTHLSTSm */ -#define RCANFD_THLSTS(m) (0x0420 + (0x04 * (m))) -/* RSCFDnCFDTHLPCTRm / RSCFDnTHLPCTRm */ -#define RCANFD_THLPCTR(m) (0x0440 + (0x04 * (m))) - -/* RSCFDnCFDGTINTSTS0 / RSCFDnGTINTSTS0 */ -#define RCANFD_GTINTSTS0 (0x0460) -/* RSCFDnCFDGTINTSTS1 / RSCFDnGTINTSTS1 */ -#define RCANFD_GTINTSTS1 (0x0464) -/* RSCFDnCFDGTSTCFG / RSCFDnGTSTCFG */ -#define RCANFD_GTSTCFG (0x0468) -/* RSCFDnCFDGTSTCTR / RSCFDnGTSTCTR */ -#define RCANFD_GTSTCTR (0x046c) -/* RSCFDnCFDGLOCKK / RSCFDnGLOCKK */ -#define RCANFD_GLOCKK (0x047c) +#define RCANFD_CFPCTR(gpriv, ch, idx) \ + ((gpriv)->info->regs->cfpctr + (0x0c * (ch)) + (0x04 * (idx))) + /* RSCFDnCFDGRMCFG */ #define RCANFD_GRMCFG (0x04fc) @@ -374,98 +323,72 @@ /* RSCFDnGAFLXXXj offset */ #define RCANFD_C_GAFL_OFFSET (0x0500) -/* RSCFDnRMXXXq -> RCANFD_C_RMXXX(q) */ -#define RCANFD_C_RMID(q) (0x0600 + (0x10 * (q))) -#define RCANFD_C_RMPTR(q) (0x0604 + (0x10 * (q))) -#define RCANFD_C_RMDF0(q) (0x0608 + (0x10 * (q))) -#define RCANFD_C_RMDF1(q) (0x060c + (0x10 * (q))) - /* RSCFDnRFXXx -> RCANFD_C_RFXX(x) */ -#define RCANFD_C_RFOFFSET (0x0e00) -#define RCANFD_C_RFID(x) (RCANFD_C_RFOFFSET + (0x10 * (x))) -#define RCANFD_C_RFPTR(x) (RCANFD_C_RFOFFSET + 0x04 + \ - (0x10 * (x))) -#define RCANFD_C_RFDF(x, df) (RCANFD_C_RFOFFSET + 0x08 + \ - (0x10 * (x)) + (0x04 * (df))) +#define RCANFD_C_RFOFFSET (0x0e00) +#define RCANFD_C_RFID(x) (RCANFD_C_RFOFFSET + (0x10 * (x))) +#define RCANFD_C_RFPTR(x) (RCANFD_C_RFOFFSET + 0x04 + (0x10 * (x))) +#define RCANFD_C_RFDF(x, df) \ + (RCANFD_C_RFOFFSET + 0x08 + (0x10 * (x)) + (0x04 * (df))) /* RSCFDnCFXXk -> RCANFD_C_CFXX(ch, k) */ #define RCANFD_C_CFOFFSET (0x0e80) -#define RCANFD_C_CFID(ch, idx) (RCANFD_C_CFOFFSET + (0x30 * (ch)) + \ - (0x10 * (idx))) -#define RCANFD_C_CFPTR(ch, idx) (RCANFD_C_CFOFFSET + 0x04 + \ - (0x30 * (ch)) + (0x10 * (idx))) -#define RCANFD_C_CFDF(ch, idx, df) (RCANFD_C_CFOFFSET + 0x08 + \ - (0x30 * (ch)) + (0x10 * (idx)) + \ - (0x04 * (df))) - -/* RSCFDnTMXXp -> RCANFD_C_TMXX(p) */ -#define RCANFD_C_TMID(p) (0x1000 + (0x10 * (p))) -#define RCANFD_C_TMPTR(p) (0x1004 + (0x10 * (p))) -#define RCANFD_C_TMDF0(p) (0x1008 + (0x10 * (p))) -#define RCANFD_C_TMDF1(p) (0x100c + (0x10 * (p))) - -/* RSCFDnTHLACCm */ -#define RCANFD_C_THLACC(m) (0x1800 + (0x04 * (m))) -/* RSCFDnRPGACCr */ -#define RCANFD_C_RPGACC(r) (0x1900 + (0x04 * (r))) + +#define RCANFD_C_CFID(ch, 
idx) \ + (RCANFD_C_CFOFFSET + (0x30 * (ch)) + (0x10 * (idx))) + +#define RCANFD_C_CFPTR(ch, idx) \ + (RCANFD_C_CFOFFSET + 0x04 + (0x30 * (ch)) + (0x10 * (idx))) + +#define RCANFD_C_CFDF(ch, idx, df) \ + (RCANFD_C_CFOFFSET + 0x08 + (0x30 * (ch)) + (0x10 * (idx)) + (0x04 * (df))) + +/* R-Car Gen4 Classical and CAN FD mode specific register map */ +#define RCANFD_GEN4_GAFL_OFFSET (0x1800) /* CAN FD mode specific register map */ -/* RSCFDnCFDCmXXX -> RCANFD_F_XXX(m) */ -#define RCANFD_F_DCFG(m) (0x0500 + (0x20 * (m))) -#define RCANFD_F_CFDCFG(m) (0x0504 + (0x20 * (m))) -#define RCANFD_F_CFDCTR(m) (0x0508 + (0x20 * (m))) -#define RCANFD_F_CFDSTS(m) (0x050c + (0x20 * (m))) -#define RCANFD_F_CFDCRC(m) (0x0510 + (0x20 * (m))) +/* RSCFDnCFDCmXXX -> gpriv->fcbase[m].xxx */ +struct rcar_canfd_f_c { + u32 dcfg; + u32 cfdcfg; + u32 cfdctr; + u32 cfdsts; + u32 cfdcrc; + u32 pad[3]; +}; /* RSCFDnCFDGAFLXXXj offset */ #define RCANFD_F_GAFL_OFFSET (0x1000) -/* RSCFDnCFDRMXXXq -> RCANFD_F_RMXXX(q) */ -#define RCANFD_F_RMID(q) (0x2000 + (0x20 * (q))) -#define RCANFD_F_RMPTR(q) (0x2004 + (0x20 * (q))) -#define RCANFD_F_RMFDSTS(q) (0x2008 + (0x20 * (q))) -#define RCANFD_F_RMDF(q, b) (0x200c + (0x04 * (b)) + (0x20 * (q))) - /* RSCFDnCFDRFXXx -> RCANFD_F_RFXX(x) */ -#define RCANFD_F_RFOFFSET (0x3000) -#define RCANFD_F_RFID(x) (RCANFD_F_RFOFFSET + (0x80 * (x))) -#define RCANFD_F_RFPTR(x) (RCANFD_F_RFOFFSET + 0x04 + \ - (0x80 * (x))) -#define RCANFD_F_RFFDSTS(x) (RCANFD_F_RFOFFSET + 0x08 + \ - (0x80 * (x))) -#define RCANFD_F_RFDF(x, df) (RCANFD_F_RFOFFSET + 0x0c + \ - (0x80 * (x)) + (0x04 * (df))) +#define RCANFD_F_RFOFFSET(gpriv) ((gpriv)->info->regs->rfoffset) +#define RCANFD_F_RFID(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + (0x80 * (x))) +#define RCANFD_F_RFPTR(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + 0x04 + (0x80 * (x))) +#define RCANFD_F_RFFDSTS(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + 0x08 + (0x80 * (x))) +#define RCANFD_F_RFDF(gpriv, x, df) \ + (RCANFD_F_RFOFFSET(gpriv) + 0x0c + (0x80 * (x)) + (0x04 * (df))) /* RSCFDnCFDCFXXk -> RCANFD_F_CFXX(ch, k) */ -#define RCANFD_F_CFOFFSET (0x3400) -#define RCANFD_F_CFID(ch, idx) (RCANFD_F_CFOFFSET + (0x180 * (ch)) + \ - (0x80 * (idx))) -#define RCANFD_F_CFPTR(ch, idx) (RCANFD_F_CFOFFSET + 0x04 + \ - (0x180 * (ch)) + (0x80 * (idx))) -#define RCANFD_F_CFFDCSTS(ch, idx) (RCANFD_F_CFOFFSET + 0x08 + \ - (0x180 * (ch)) + (0x80 * (idx))) -#define RCANFD_F_CFDF(ch, idx, df) (RCANFD_F_CFOFFSET + 0x0c + \ - (0x180 * (ch)) + (0x80 * (idx)) + \ - (0x04 * (df))) - -/* RSCFDnCFDTMXXp -> RCANFD_F_TMXX(p) */ -#define RCANFD_F_TMID(p) (0x4000 + (0x20 * (p))) -#define RCANFD_F_TMPTR(p) (0x4004 + (0x20 * (p))) -#define RCANFD_F_TMFDCTR(p) (0x4008 + (0x20 * (p))) -#define RCANFD_F_TMDF(p, b) (0x400c + (0x20 * (p)) + (0x04 * (b))) - -/* RSCFDnCFDTHLACCm */ -#define RCANFD_F_THLACC(m) (0x6000 + (0x04 * (m))) -/* RSCFDnCFDRPGACCr */ -#define RCANFD_F_RPGACC(r) (0x6400 + (0x04 * (r))) +#define RCANFD_F_CFOFFSET(gpriv) ((gpriv)->info->regs->cfoffset) + +#define RCANFD_F_CFID(gpriv, ch, idx) \ + (RCANFD_F_CFOFFSET(gpriv) + (0x180 * (ch)) + (0x80 * (idx))) + +#define RCANFD_F_CFPTR(gpriv, ch, idx) \ + (RCANFD_F_CFOFFSET(gpriv) + 0x04 + (0x180 * (ch)) + (0x80 * (idx))) + +#define RCANFD_F_CFFDCSTS(gpriv, ch, idx) \ + (RCANFD_F_CFOFFSET(gpriv) + 0x08 + (0x180 * (ch)) + (0x80 * (idx))) + +#define RCANFD_F_CFDF(gpriv, ch, idx, df) \ + (RCANFD_F_CFOFFSET(gpriv) + 0x0c + (0x180 * (ch)) + (0x80 * (idx)) + \ + (0x04 * (df))) /* Constants */ #define RCANFD_FIFO_DEPTH 8 /* Tx FIFO depth */ #define 
RCANFD_NAPI_WEIGHT 8 /* Rx poll quota */ -#define RCANFD_NUM_CHANNELS 2 /* Two channels max */ -#define RCANFD_CHANNELS_MASK BIT((RCANFD_NUM_CHANNELS) - 1) +#define RCANFD_NUM_CHANNELS 8 /* Eight channels max */ #define RCANFD_GAFL_PAGENUM(entry) ((entry) / 16) #define RCANFD_CHANNEL_NUMRULES 1 /* only one rule per channel */ @@ -481,13 +404,47 @@ */ #define RCANFD_CFFIFO_IDX 0 -/* fCAN clock select register settings */ -enum rcar_canfd_fcanclk { - RCANFD_CANFDCLK = 0, /* CANFD clock */ - RCANFD_EXTCLK, /* Externally input clock */ +struct rcar_canfd_global; + +struct rcar_canfd_regs { + u16 rfcc; /* RX FIFO Configuration/Control Register */ + u16 cfcc; /* Common FIFO Configuration/Control Register */ + u16 cfsts; /* Common FIFO Status Register */ + u16 cfpctr; /* Common FIFO Pointer Control Register */ + u16 coffset; /* Channel Data Bitrate Configuration Register */ + u16 rfoffset; /* Receive FIFO buffer access ID register */ + u16 cfoffset; /* Transmit/receive FIFO buffer access ID register */ }; -struct rcar_canfd_global; +struct rcar_canfd_shift_data { + u8 ntseg2; /* Nominal Bit Rate Time Segment 2 Control */ + u8 ntseg1; /* Nominal Bit Rate Time Segment 1 Control */ + u8 nsjw; /* Nominal Bit Rate Resynchronization Jump Width Control */ + u8 dtseg2; /* Data Bit Rate Time Segment 2 Control */ + u8 dtseg1; /* Data Bit Rate Time Segment 1 Control */ + u8 cftml; /* Common FIFO TX Message Buffer Link */ + u8 cfm; /* Common FIFO Mode */ + u8 cfdc; /* Common FIFO Depth Configuration */ +}; + +struct rcar_canfd_hw_info { + const struct can_bittiming_const *nom_bittiming; + const struct can_bittiming_const *data_bittiming; + const struct can_tdc_const *tdc_const; + const struct rcar_canfd_regs *regs; + const struct rcar_canfd_shift_data *sh; + u8 rnc_field_width; + u8 max_aflpn; + u8 max_cftml; + u8 max_channels; + u8 postdiv; + /* hardware features */ + unsigned shared_global_irqs:1; /* Has shared global irqs */ + unsigned multi_channel_irqs:1; /* Has multiple channel irqs */ + unsigned ch_interface_mode:1; /* Has channel interface mode */ + unsigned shared_can_regs:1; /* Has shared classical can registers */ + unsigned external_clk:1; /* Has external clock */ +}; /* Channel priv data */ struct rcar_canfd_channel { @@ -495,8 +452,8 @@ struct rcar_canfd_channel { struct net_device *ndev; struct rcar_canfd_global *gpriv; /* Controller reference */ void __iomem *base; /* Register base address */ + struct phy *transceiver; /* Optional transceiver */ struct napi_struct napi; - u8 tx_len[RCANFD_FIFO_DEPTH]; /* For net stats */ u32 tx_head; /* Incremented on xmit */ u32 tx_tail; /* Incremented on xmit done */ u32 channel; /* Channel number */ @@ -507,16 +464,21 @@ struct rcar_canfd_channel { struct rcar_canfd_global { struct rcar_canfd_channel *ch[RCANFD_NUM_CHANNELS]; void __iomem *base; /* Register base address */ + struct rcar_canfd_f_c __iomem *fcbase; struct platform_device *pdev; /* Respective platform device */ struct clk *clkp; /* Peripheral clock */ struct clk *can_clk; /* fCAN clock */ - enum rcar_canfd_fcanclk fcan; /* CANFD or Ext clock */ + struct clk *clk_ram; /* Clock RAM */ unsigned long channels_mask; /* Enabled channels mask */ + bool extclk; /* CANFD or Ext clock */ bool fdmode; /* CAN FD or Classical CAN only mode */ + struct reset_control *rstc1; + struct reset_control *rstc2; + const struct rcar_canfd_hw_info *info; }; /* CAN FD mode nominal rate constants */ -static const struct can_bittiming_const rcar_canfd_nom_bittiming_const = { +static const struct can_bittiming_const 
rcar_canfd_gen3_nom_bittiming_const = { .name = RCANFD_DRV_NAME, .tseg1_min = 2, .tseg1_max = 128, @@ -528,8 +490,20 @@ static const struct can_bittiming_const rcar_canfd_nom_bittiming_const = { .brp_inc = 1, }; +static const struct can_bittiming_const rcar_canfd_gen4_nom_bittiming_const = { + .name = RCANFD_DRV_NAME, + .tseg1_min = 2, + .tseg1_max = 256, + .tseg2_min = 2, + .tseg2_max = 128, + .sjw_max = 128, + .brp_min = 1, + .brp_max = 1024, + .brp_inc = 1, +}; + /* CAN FD mode data rate constants */ -static const struct can_bittiming_const rcar_canfd_data_bittiming_const = { +static const struct can_bittiming_const rcar_canfd_gen3_data_bittiming_const = { .name = RCANFD_DRV_NAME, .tseg1_min = 2, .tseg1_max = 16, @@ -541,6 +515,18 @@ static const struct can_bittiming_const rcar_canfd_data_bittiming_const = { .brp_inc = 1, }; +static const struct can_bittiming_const rcar_canfd_gen4_data_bittiming_const = { + .name = RCANFD_DRV_NAME, + .tseg1_min = 2, + .tseg1_max = 32, + .tseg2_min = 2, + .tseg2_max = 16, + .sjw_max = 16, + .brp_min = 1, + .brp_max = 256, + .brp_inc = 1, +}; + /* Classical CAN mode bitrate constants */ static const struct can_bittiming_const rcar_canfd_bittiming_const = { .name = RCANFD_DRV_NAME, @@ -554,6 +540,135 @@ static const struct can_bittiming_const rcar_canfd_bittiming_const = { .brp_inc = 1, }; +/* CAN FD Transmission Delay Compensation constants */ +static const struct can_tdc_const rcar_canfd_gen3_tdc_const = { + .tdcv_min = 1, + .tdcv_max = 128, + .tdco_min = 1, + .tdco_max = 128, + .tdcf_min = 0, /* Filter window not supported */ + .tdcf_max = 0, +}; + +static const struct can_tdc_const rcar_canfd_gen4_tdc_const = { + .tdcv_min = 1, + .tdcv_max = 256, + .tdco_min = 1, + .tdco_max = 256, + .tdcf_min = 0, /* Filter window not supported */ + .tdcf_max = 0, +}; + +static const struct rcar_canfd_regs rcar_gen3_regs = { + .rfcc = 0x00b8, + .cfcc = 0x0118, + .cfsts = 0x0178, + .cfpctr = 0x01d8, + .coffset = 0x0500, + .rfoffset = 0x3000, + .cfoffset = 0x3400, +}; + +static const struct rcar_canfd_regs rcar_gen4_regs = { + .rfcc = 0x00c0, + .cfcc = 0x0120, + .cfsts = 0x01e0, + .cfpctr = 0x0240, + .coffset = 0x1400, + .rfoffset = 0x6000, + .cfoffset = 0x6400, +}; + +static const struct rcar_canfd_shift_data rcar_gen3_shift_data = { + .ntseg2 = 24, + .ntseg1 = 16, + .nsjw = 11, + .dtseg2 = 20, + .dtseg1 = 16, + .cftml = 20, + .cfm = 16, + .cfdc = 8, +}; + +static const struct rcar_canfd_shift_data rcar_gen4_shift_data = { + .ntseg2 = 25, + .ntseg1 = 17, + .nsjw = 10, + .dtseg2 = 16, + .dtseg1 = 8, + .cftml = 16, + .cfm = 8, + .cfdc = 21, +}; + +static const struct rcar_canfd_hw_info rcar_gen3_hw_info = { + .nom_bittiming = &rcar_canfd_gen3_nom_bittiming_const, + .data_bittiming = &rcar_canfd_gen3_data_bittiming_const, + .tdc_const = &rcar_canfd_gen3_tdc_const, + .regs = &rcar_gen3_regs, + .sh = &rcar_gen3_shift_data, + .rnc_field_width = 8, + .max_aflpn = 31, + .max_cftml = 15, + .max_channels = 2, + .postdiv = 2, + .shared_global_irqs = 1, + .ch_interface_mode = 0, + .shared_can_regs = 0, + .external_clk = 1, +}; + +static const struct rcar_canfd_hw_info rcar_gen4_hw_info = { + .nom_bittiming = &rcar_canfd_gen4_nom_bittiming_const, + .data_bittiming = &rcar_canfd_gen4_data_bittiming_const, + .tdc_const = &rcar_canfd_gen4_tdc_const, + .regs = &rcar_gen4_regs, + .sh = &rcar_gen4_shift_data, + .rnc_field_width = 16, + .max_aflpn = 127, + .max_cftml = 31, + .max_channels = 8, + .postdiv = 2, + .shared_global_irqs = 1, + .ch_interface_mode = 1, + .shared_can_regs = 1, + 
.external_clk = 1, +}; + +static const struct rcar_canfd_hw_info rzg2l_hw_info = { + .nom_bittiming = &rcar_canfd_gen3_nom_bittiming_const, + .data_bittiming = &rcar_canfd_gen3_data_bittiming_const, + .tdc_const = &rcar_canfd_gen3_tdc_const, + .regs = &rcar_gen3_regs, + .sh = &rcar_gen3_shift_data, + .rnc_field_width = 8, + .max_aflpn = 31, + .max_cftml = 15, + .max_channels = 2, + .postdiv = 1, + .multi_channel_irqs = 1, + .ch_interface_mode = 0, + .shared_can_regs = 0, + .external_clk = 1, +}; + +static const struct rcar_canfd_hw_info r9a09g047_hw_info = { + .nom_bittiming = &rcar_canfd_gen4_nom_bittiming_const, + .data_bittiming = &rcar_canfd_gen4_data_bittiming_const, + .tdc_const = &rcar_canfd_gen4_tdc_const, + .regs = &rcar_gen4_regs, + .sh = &rcar_gen4_shift_data, + .rnc_field_width = 16, + .max_aflpn = 63, + .max_cftml = 31, + .max_channels = 6, + .postdiv = 1, + .multi_channel_irqs = 1, + .ch_interface_mode = 1, + .shared_can_regs = 1, + .external_clk = 0, +}; + /* Helper functions */ static inline void rcar_canfd_update(u32 mask, u32 val, u32 __iomem *reg) { @@ -566,50 +681,65 @@ static inline void rcar_canfd_update(u32 mask, u32 val, u32 __iomem *reg) static inline u32 rcar_canfd_read(void __iomem *base, u32 offset) { - return readl(base + (offset)); + return readl(base + offset); } static inline void rcar_canfd_write(void __iomem *base, u32 offset, u32 val) { - writel(val, base + (offset)); + writel(val, base + offset); } static void rcar_canfd_set_bit(void __iomem *base, u32 reg, u32 val) { - rcar_canfd_update(val, val, base + (reg)); + rcar_canfd_update(val, val, base + reg); } static void rcar_canfd_clear_bit(void __iomem *base, u32 reg, u32 val) { - rcar_canfd_update(val, 0, base + (reg)); + rcar_canfd_update(val, 0, base + reg); } static void rcar_canfd_update_bit(void __iomem *base, u32 reg, u32 mask, u32 val) { - rcar_canfd_update(mask, val, base + (reg)); + rcar_canfd_update(mask, val, base + reg); +} + +static void rcar_canfd_set_bit_reg(void __iomem *addr, u32 val) +{ + rcar_canfd_update(val, val, addr); +} + +static void rcar_canfd_clear_bit_reg(void __iomem *addr, u32 val) +{ + rcar_canfd_update(val, 0, addr); +} + +static void rcar_canfd_update_bit_reg(void __iomem *addr, u32 mask, u32 val) +{ + rcar_canfd_update(mask, val, addr); } static void rcar_canfd_get_data(struct rcar_canfd_channel *priv, struct canfd_frame *cf, u32 off) { + u32 *data = (u32 *)cf->data; u32 i, lwords; lwords = DIV_ROUND_UP(cf->len, sizeof(u32)); for (i = 0; i < lwords; i++) - *((u32 *)cf->data + i) = - rcar_canfd_read(priv->base, off + (i * sizeof(u32))); + data[i] = rcar_canfd_read(priv->base, off + i * sizeof(u32)); } static void rcar_canfd_put_data(struct rcar_canfd_channel *priv, struct canfd_frame *cf, u32 off) { + const u32 *data = (u32 *)cf->data; u32 i, lwords; lwords = DIV_ROUND_UP(cf->len, sizeof(u32)); for (i = 0; i < lwords; i++) - rcar_canfd_write(priv->base, off + (i * sizeof(u32)), - *((u32 *)cf->data + i)); + rcar_canfd_write(priv->base, off + i * sizeof(u32), data[i]); } static void rcar_canfd_tx_failure_cleanup(struct net_device *ndev) @@ -617,11 +747,23 @@ static void rcar_canfd_tx_failure_cleanup(struct net_device *ndev) u32 i; for (i = 0; i < RCANFD_FIFO_DEPTH; i++) - can_free_echo_skb(ndev, i); + can_free_echo_skb(ndev, i, NULL); +} + +static void rcar_canfd_set_rnc(struct rcar_canfd_global *gpriv, unsigned int ch, + unsigned int num_rules) +{ + unsigned int rnc_stride = 32 / gpriv->info->rnc_field_width; + unsigned int shift = 32 - (ch % rnc_stride + 1) * 
gpriv->info->rnc_field_width; + unsigned int w = ch / rnc_stride; + u32 rnc = num_rules << shift; + + rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG(w), rnc); } static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv) { + struct device *dev = &gpriv->pdev->dev; u32 sts, ch; int err; @@ -631,7 +773,7 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv) err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts, !(sts & RCANFD_GSTS_GRAMINIT), 2, 500000); if (err) { - dev_dbg(&gpriv->pdev->dev, "global raminit failed\n"); + dev_dbg(dev, "global raminit failed\n"); return err; } @@ -644,7 +786,7 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv) err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts, (sts & RCANFD_GSTS_GRSTSTS), 2, 500000); if (err) { - dev_dbg(&gpriv->pdev->dev, "global reset failed\n"); + dev_dbg(dev, "global reset failed\n"); return err; } @@ -652,15 +794,17 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv) rcar_canfd_write(gpriv->base, RCANFD_GERFL, 0x0); /* Set the controller into appropriate mode */ - if (gpriv->fdmode) - rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG, - RCANFD_GRMCFG_RCMC); - else - rcar_canfd_clear_bit(gpriv->base, RCANFD_GRMCFG, - RCANFD_GRMCFG_RCMC); + if (!gpriv->info->ch_interface_mode) { + if (gpriv->fdmode) + rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG, + RCANFD_GRMCFG_RCMC); + else + rcar_canfd_clear_bit(gpriv->base, RCANFD_GRMCFG, + RCANFD_GRMCFG_RCMC); + } /* Transition all Channels to reset mode */ - for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) { + for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) { rcar_canfd_clear_bit(gpriv->base, RCANFD_CCTR(ch), RCANFD_CCTR_CSLPR); @@ -673,11 +817,27 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv) (sts & RCANFD_CSTS_CRSTSTS), 2, 500000); if (err) { - dev_dbg(&gpriv->pdev->dev, - "channel %u reset failed\n", ch); + dev_dbg(dev, "channel %u reset failed\n", ch); return err; } + + /* Set the controller into appropriate mode */ + if (gpriv->info->ch_interface_mode) { + /* Do not set CLOE and FDOE simultaneously */ + if (!gpriv->fdmode) { + rcar_canfd_clear_bit_reg(&gpriv->fcbase[ch].cfdcfg, + RCANFD_GEN4_FDCFG_FDOE); + rcar_canfd_set_bit_reg(&gpriv->fcbase[ch].cfdcfg, + RCANFD_GEN4_FDCFG_CLOE); + } else { + rcar_canfd_clear_bit_reg(&gpriv->fcbase[ch].cfdcfg, + RCANFD_GEN4_FDCFG_FDOE); + rcar_canfd_clear_bit_reg(&gpriv->fcbase[ch].cfdcfg, + RCANFD_GEN4_FDCFG_CLOE); + } + } } + return 0; } @@ -695,13 +855,13 @@ static void rcar_canfd_configure_controller(struct rcar_canfd_global *gpriv) cfg |= RCANFD_GCFG_CMPOC; /* Set External Clock if selected */ - if (gpriv->fcan != RCANFD_CANFDCLK) + if (gpriv->extclk) cfg |= RCANFD_GCFG_DCS; rcar_canfd_set_bit(gpriv->base, RCANFD_GCFG, cfg); /* Channel configuration settings */ - for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) { + for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) { rcar_canfd_set_bit(gpriv->base, RCANFD_CCTR(ch), RCANFD_CCTR_ERRD); rcar_canfd_update_bit(gpriv->base, RCANFD_CCTR(ch), @@ -711,43 +871,36 @@ static void rcar_canfd_configure_controller(struct rcar_canfd_global *gpriv) } static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv, - u32 ch) + u32 ch, u32 rule_entry) { - u32 cfg; - int offset, start, page, num_rules = RCANFD_CHANNEL_NUMRULES; + unsigned int offset, page, num_rules = RCANFD_CHANNEL_NUMRULES; + u32 rule_entry_index = 
rule_entry % 16; u32 ridx = ch + RCANFD_RFFIFO_IDX; - if (ch == 0) { - start = 0; /* Channel 0 always starts from 0th rule */ - } else { - /* Get number of Channel 0 rules and adjust */ - cfg = rcar_canfd_read(gpriv->base, RCANFD_GAFLCFG0); - start = RCANFD_GAFLCFG_GETRNC(0, cfg); - } - /* Enable write access to entry */ - page = RCANFD_GAFL_PAGENUM(start); + page = RCANFD_GAFL_PAGENUM(rule_entry); rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLECTR, - (RCANFD_GAFLECTR_AFLPN(page) | + (RCANFD_GAFLECTR_AFLPN(gpriv, page) | RCANFD_GAFLECTR_AFLDAE)); /* Write number of rules for channel */ - rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG0, - RCANFD_GAFLCFG_SETRNC(ch, num_rules)); - if (gpriv->fdmode) + rcar_canfd_set_rnc(gpriv, ch, num_rules); + if (gpriv->info->shared_can_regs) + offset = RCANFD_GEN4_GAFL_OFFSET; + else if (gpriv->fdmode) offset = RCANFD_F_GAFL_OFFSET; else offset = RCANFD_C_GAFL_OFFSET; /* Accept all IDs */ - rcar_canfd_write(gpriv->base, RCANFD_GAFLID(offset, start), 0); + rcar_canfd_write(gpriv->base, RCANFD_GAFLID(offset, rule_entry_index), 0); /* IDE or RTR is not considered for matching */ - rcar_canfd_write(gpriv->base, RCANFD_GAFLM(offset, start), 0); + rcar_canfd_write(gpriv->base, RCANFD_GAFLM(offset, rule_entry_index), 0); /* Any data length accepted */ - rcar_canfd_write(gpriv->base, RCANFD_GAFLP0(offset, start), 0); + rcar_canfd_write(gpriv->base, RCANFD_GAFLP0(offset, rule_entry_index), 0); /* Place the msg in corresponding Rx FIFO entry */ - rcar_canfd_write(gpriv->base, RCANFD_GAFLP1(offset, start), - RCANFD_GAFLP1_GAFLFDP(ridx)); + rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLP1(offset, rule_entry_index), + RCANFD_GAFLP1_GAFLFDP(ridx)); /* Disable write access to page */ rcar_canfd_clear_bit(gpriv->base, @@ -771,7 +924,7 @@ static void rcar_canfd_configure_rx(struct rcar_canfd_global *gpriv, u32 ch) cfg = (RCANFD_RFCC_RFIM | RCANFD_RFCC_RFDC(rfdc) | RCANFD_RFCC_RFPLS(rfpls) | RCANFD_RFCC_RFIE); - rcar_canfd_write(gpriv->base, RCANFD_RFCC(ridx), cfg); + rcar_canfd_write(gpriv->base, RCANFD_RFCC(gpriv, ridx), cfg); } static void rcar_canfd_configure_tx(struct rcar_canfd_global *gpriv, u32 ch) @@ -793,15 +946,15 @@ static void rcar_canfd_configure_tx(struct rcar_canfd_global *gpriv, u32 ch) else cfpls = 0; /* b000 - Max 8 bytes payload */ - cfg = (RCANFD_CFCC_CFTML(cftml) | RCANFD_CFCC_CFM(cfm) | - RCANFD_CFCC_CFIM | RCANFD_CFCC_CFDC(cfdc) | + cfg = (RCANFD_CFCC_CFTML(gpriv, cftml) | RCANFD_CFCC_CFM(gpriv, cfm) | + RCANFD_CFCC_CFIM | RCANFD_CFCC_CFDC(gpriv, cfdc) | RCANFD_CFCC_CFPLS(cfpls) | RCANFD_CFCC_CFTXIE); - rcar_canfd_write(gpriv->base, RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX), cfg); + rcar_canfd_write(gpriv->base, RCANFD_CFCC(gpriv, ch, RCANFD_CFFIFO_IDX), cfg); if (gpriv->fdmode) /* Clear FD mode specific control/status register */ rcar_canfd_write(gpriv->base, - RCANFD_F_CFFDCSTS(ch, RCANFD_CFFIFO_IDX), 0); + RCANFD_F_CFFDCSTS(gpriv, ch, RCANFD_CFFIFO_IDX), 0); } static void rcar_canfd_enable_global_interrupts(struct rcar_canfd_global *gpriv) @@ -872,30 +1025,26 @@ static void rcar_canfd_global_error(struct net_device *ndev) u32 ridx = ch + RCANFD_RFFIFO_IDX; gerfl = rcar_canfd_read(priv->base, RCANFD_GERFL); - if ((gerfl & RCANFD_GERFL_EEF0) && (ch == 0)) { - netdev_dbg(ndev, "Ch0: ECC Error flag\n"); - stats->tx_dropped++; - } - if ((gerfl & RCANFD_GERFL_EEF1) && (ch == 1)) { - netdev_dbg(ndev, "Ch1: ECC Error flag\n"); + if (gerfl & FIELD_PREP(RCANFD_GERFL_EEF, BIT(ch))) { + netdev_dbg(ndev, "Ch%u: ECC Error flag\n", ch); stats->tx_dropped++; } if (gerfl & 
RCANFD_GERFL_MES) { sts = rcar_canfd_read(priv->base, - RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX)); + RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX)); if (sts & RCANFD_CFSTS_CFMLT) { netdev_dbg(ndev, "Tx Message Lost flag\n"); stats->tx_dropped++; rcar_canfd_write(priv->base, - RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX), + RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX), sts & ~RCANFD_CFSTS_CFMLT); } - sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx)); + sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(gpriv, ridx)); if (sts & RCANFD_RFSTS_RFMLT) { netdev_dbg(ndev, "Rx Message Lost flag\n"); stats->rx_dropped++; - rcar_canfd_write(priv->base, RCANFD_RFSTS(ridx), + rcar_canfd_write(priv->base, RCANFD_RFSTS(gpriv, ridx), sts & ~RCANFD_RFSTS_RFMLT); } } @@ -989,7 +1138,7 @@ static void rcar_canfd_error(struct net_device *ndev, u32 cerfl, netdev_dbg(ndev, "Error warning interrupt\n"); priv->can.state = CAN_STATE_ERROR_WARNING; priv->can.can_stats.error_warning++; - cf->can_id |= CAN_ERR_CRTL; + cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT; cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_WARNING : CAN_ERR_CRTL_RX_WARNING; cf->data[6] = txerr; @@ -999,7 +1148,7 @@ static void rcar_canfd_error(struct net_device *ndev, u32 cerfl, netdev_dbg(ndev, "Error passive interrupt\n"); priv->can.state = CAN_STATE_ERROR_PASSIVE; priv->can.can_stats.error_passive++; - cf->can_id |= CAN_ERR_CRTL; + cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT; cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE; cf->data[6] = txerr; @@ -1024,14 +1173,13 @@ static void rcar_canfd_error(struct net_device *ndev, u32 cerfl, /* Clear channel error interrupts that are handled */ rcar_canfd_write(priv->base, RCANFD_CERFL(ch), RCANFD_CERFL_ERR(~cerfl)); - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; netif_rx(skb); } static void rcar_canfd_tx_done(struct net_device *ndev) { struct rcar_canfd_channel *priv = netdev_priv(ndev); + struct rcar_canfd_global *gpriv = priv->gpriv; struct net_device_stats *stats = &ndev->stats; u32 sts; unsigned long flags; @@ -1042,14 +1190,12 @@ static void rcar_canfd_tx_done(struct net_device *ndev) sent = priv->tx_tail % RCANFD_FIFO_DEPTH; stats->tx_packets++; - stats->tx_bytes += priv->tx_len[sent]; - priv->tx_len[sent] = 0; - can_get_echo_skb(ndev, sent); + stats->tx_bytes += can_get_echo_skb(ndev, sent, NULL); spin_lock_irqsave(&priv->tx_lock, flags); priv->tx_tail++; sts = rcar_canfd_read(priv->base, - RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX)); + RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX)); unsent = RCANFD_CFSTS_CFMC(sts); /* Wake producer only when there is room */ @@ -1065,43 +1211,76 @@ static void rcar_canfd_tx_done(struct net_device *ndev) } while (1); /* Clear interrupt */ - rcar_canfd_write(priv->base, RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX), + rcar_canfd_write(priv->base, RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX), sts & ~RCANFD_CFSTS_CFTXIF); - can_led_event(ndev, CAN_LED_EVENT_TX); +} + +static void rcar_canfd_handle_global_err(struct rcar_canfd_global *gpriv, u32 ch) +{ + struct rcar_canfd_channel *priv = gpriv->ch[ch]; + struct net_device *ndev = priv->ndev; + u32 gerfl; + + /* Handle global error interrupts */ + gerfl = rcar_canfd_read(priv->base, RCANFD_GERFL); + if (unlikely(RCANFD_GERFL_ERR(gpriv, gerfl))) + rcar_canfd_global_error(ndev); +} + +static irqreturn_t rcar_canfd_global_err_interrupt(int irq, void *dev_id) +{ + struct rcar_canfd_global *gpriv = dev_id; + u32 ch; + + for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) + rcar_canfd_handle_global_err(gpriv, 
ch); + + return IRQ_HANDLED; +} + +static void rcar_canfd_handle_global_receive(struct rcar_canfd_global *gpriv, u32 ch) +{ + struct rcar_canfd_channel *priv = gpriv->ch[ch]; + u32 ridx = ch + RCANFD_RFFIFO_IDX; + u32 sts, cc; + + /* Handle Rx interrupts */ + sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(gpriv, ridx)); + cc = rcar_canfd_read(priv->base, RCANFD_RFCC(gpriv, ridx)); + if (likely(sts & RCANFD_RFSTS_RFIF && + cc & RCANFD_RFCC_RFIE)) { + if (napi_schedule_prep(&priv->napi)) { + /* Disable Rx FIFO interrupts */ + rcar_canfd_clear_bit(priv->base, + RCANFD_RFCC(gpriv, ridx), + RCANFD_RFCC_RFIE); + __napi_schedule(&priv->napi); + } + } +} + +static irqreturn_t rcar_canfd_global_receive_fifo_interrupt(int irq, void *dev_id) +{ + struct rcar_canfd_global *gpriv = dev_id; + u32 ch; + + for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) + rcar_canfd_handle_global_receive(gpriv, ch); + + return IRQ_HANDLED; } static irqreturn_t rcar_canfd_global_interrupt(int irq, void *dev_id) { struct rcar_canfd_global *gpriv = dev_id; - struct net_device *ndev; - struct rcar_canfd_channel *priv; - u32 sts, gerfl; - u32 ch, ridx; + u32 ch; /* Global error interrupts still indicate a condition specific * to a channel. RxFIFO interrupt is a global interrupt. */ - for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) { - priv = gpriv->ch[ch]; - ndev = priv->ndev; - ridx = ch + RCANFD_RFFIFO_IDX; - - /* Global error interrupts */ - gerfl = rcar_canfd_read(priv->base, RCANFD_GERFL); - if (unlikely(RCANFD_GERFL_ERR(gpriv, gerfl))) - rcar_canfd_global_error(ndev); - - /* Handle Rx interrupts */ - sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx)); - if (likely(sts & RCANFD_RFSTS_RFIF)) { - if (napi_schedule_prep(&priv->napi)) { - /* Disable Rx FIFO interrupts */ - rcar_canfd_clear_bit(priv->base, - RCANFD_RFCC(ridx), - RCANFD_RFCC_RFIE); - __napi_schedule(&priv->napi); - } - } + for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) { + rcar_canfd_handle_global_err(gpriv, ch); + rcar_canfd_handle_global_receive(gpriv, ch); } return IRQ_HANDLED; } @@ -1133,54 +1312,122 @@ static void rcar_canfd_state_change(struct net_device *ndev, rx_state = txerr <= rxerr ? 
state : 0; can_change_state(ndev, cf, tx_state, rx_state); - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; netif_rx(skb); } } +static void rcar_canfd_handle_channel_tx(struct rcar_canfd_global *gpriv, u32 ch) +{ + struct rcar_canfd_channel *priv = gpriv->ch[ch]; + struct net_device *ndev = priv->ndev; + u32 sts; + + /* Handle Tx interrupts */ + sts = rcar_canfd_read(priv->base, + RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX)); + if (likely(sts & RCANFD_CFSTS_CFTXIF)) + rcar_canfd_tx_done(ndev); +} + +static irqreturn_t rcar_canfd_channel_tx_interrupt(int irq, void *dev_id) +{ + struct rcar_canfd_channel *priv = dev_id; + + rcar_canfd_handle_channel_tx(priv->gpriv, priv->channel); + + return IRQ_HANDLED; +} + +static void rcar_canfd_handle_channel_err(struct rcar_canfd_global *gpriv, u32 ch) +{ + struct rcar_canfd_channel *priv = gpriv->ch[ch]; + struct net_device *ndev = priv->ndev; + u16 txerr, rxerr; + u32 sts, cerfl; + + /* Handle channel error interrupts */ + cerfl = rcar_canfd_read(priv->base, RCANFD_CERFL(ch)); + sts = rcar_canfd_read(priv->base, RCANFD_CSTS(ch)); + txerr = RCANFD_CSTS_TECCNT(sts); + rxerr = RCANFD_CSTS_RECCNT(sts); + if (unlikely(RCANFD_CERFL_ERR(cerfl))) + rcar_canfd_error(ndev, cerfl, txerr, rxerr); + + /* Handle state change to lower states */ + if (unlikely(priv->can.state != CAN_STATE_ERROR_ACTIVE && + priv->can.state != CAN_STATE_BUS_OFF)) + rcar_canfd_state_change(ndev, txerr, rxerr); +} + +static irqreturn_t rcar_canfd_channel_err_interrupt(int irq, void *dev_id) +{ + struct rcar_canfd_channel *priv = dev_id; + + rcar_canfd_handle_channel_err(priv->gpriv, priv->channel); + + return IRQ_HANDLED; +} + static irqreturn_t rcar_canfd_channel_interrupt(int irq, void *dev_id) { struct rcar_canfd_global *gpriv = dev_id; - struct net_device *ndev; - struct rcar_canfd_channel *priv; - u32 sts, ch, cerfl; - u16 txerr, rxerr; + u32 ch; /* Common FIFO is a per channel resource */ - for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) { - priv = gpriv->ch[ch]; - ndev = priv->ndev; - - /* Channel error interrupts */ - cerfl = rcar_canfd_read(priv->base, RCANFD_CERFL(ch)); - sts = rcar_canfd_read(priv->base, RCANFD_CSTS(ch)); - txerr = RCANFD_CSTS_TECCNT(sts); - rxerr = RCANFD_CSTS_RECCNT(sts); - if (unlikely(RCANFD_CERFL_ERR(cerfl))) - rcar_canfd_error(ndev, cerfl, txerr, rxerr); - - /* Handle state change to lower states */ - if (unlikely((priv->can.state != CAN_STATE_ERROR_ACTIVE) && - (priv->can.state != CAN_STATE_BUS_OFF))) - rcar_canfd_state_change(ndev, txerr, rxerr); - - /* Handle Tx interrupts */ - sts = rcar_canfd_read(priv->base, - RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX)); - if (likely(sts & RCANFD_CFSTS_CFTXIF)) - rcar_canfd_tx_done(ndev); + for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) { + rcar_canfd_handle_channel_err(gpriv, ch); + rcar_canfd_handle_channel_tx(gpriv, ch); } + return IRQ_HANDLED; } -static void rcar_canfd_set_bittiming(struct net_device *dev) +static inline u32 rcar_canfd_compute_nominal_bit_rate_cfg(struct rcar_canfd_channel *priv, + u16 tseg1, u16 tseg2, u16 sjw, u16 brp) +{ + struct rcar_canfd_global *gpriv = priv->gpriv; + const struct rcar_canfd_hw_info *info = gpriv->info; + u32 ntseg1, ntseg2, nsjw, nbrp; + + if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || gpriv->info->shared_can_regs) { + ntseg1 = (tseg1 & (info->nom_bittiming->tseg1_max - 1)) << info->sh->ntseg1; + ntseg2 = (tseg2 & (info->nom_bittiming->tseg2_max - 1)) << info->sh->ntseg2; + nsjw = (sjw & (info->nom_bittiming->sjw_max - 1)) << 
info->sh->nsjw; + nbrp = FIELD_PREP(RCANFD_NCFG_NBRP, brp); + } else { + ntseg1 = FIELD_PREP(RCANFD_CFG_TSEG1, tseg1); + ntseg2 = FIELD_PREP(RCANFD_CFG_TSEG2, tseg2); + nsjw = FIELD_PREP(RCANFD_CFG_SJW, sjw); + nbrp = FIELD_PREP(RCANFD_CFG_BRP, brp); + } + + return (ntseg1 | ntseg2 | nsjw | nbrp); +} + +static inline u32 rcar_canfd_compute_data_bit_rate_cfg(const struct rcar_canfd_hw_info *info, + u16 tseg1, u16 tseg2, u16 sjw, u16 brp) { - struct rcar_canfd_channel *priv = netdev_priv(dev); + u32 dtseg1, dtseg2, dsjw, dbrp; + + dtseg1 = (tseg1 & (info->data_bittiming->tseg1_max - 1)) << info->sh->dtseg1; + dtseg2 = (tseg2 & (info->data_bittiming->tseg2_max - 1)) << info->sh->dtseg2; + dsjw = (sjw & (info->data_bittiming->sjw_max - 1)) << 24; + dbrp = FIELD_PREP(RCANFD_DCFG_DBRP, brp); + + return (dtseg1 | dtseg2 | dsjw | dbrp); +} + +static void rcar_canfd_set_bittiming(struct net_device *ndev) +{ + u32 mask = RCANFD_FDCFG_TDCO | RCANFD_FDCFG_TDCE | RCANFD_FDCFG_TDCOC; + struct rcar_canfd_channel *priv = netdev_priv(ndev); + struct rcar_canfd_global *gpriv = priv->gpriv; const struct can_bittiming *bt = &priv->can.bittiming; - const struct can_bittiming *dbt = &priv->can.data_bittiming; + const struct can_bittiming *dbt = &priv->can.fd.data_bittiming; + const struct can_tdc_const *tdc_const = priv->can.fd.tdc_const; + const struct can_tdc *tdc = &priv->can.fd.tdc; + u32 cfg, tdcmode = 0, tdco = 0; u16 brp, sjw, tseg1, tseg2; - u32 cfg; u32 ch = priv->channel; /* Nominal bit timing settings */ @@ -1188,43 +1435,39 @@ static void rcar_canfd_set_bittiming(struct net_device *dev) sjw = bt->sjw - 1; tseg1 = bt->prop_seg + bt->phase_seg1 - 1; tseg2 = bt->phase_seg2 - 1; + cfg = rcar_canfd_compute_nominal_bit_rate_cfg(priv, tseg1, tseg2, sjw, brp); + rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg); - if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { - /* CAN FD only mode */ - cfg = (RCANFD_NCFG_NTSEG1(tseg1) | RCANFD_NCFG_NBRP(brp) | - RCANFD_NCFG_NSJW(sjw) | RCANFD_NCFG_NTSEG2(tseg2)); - - rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg); - netdev_dbg(priv->ndev, "nrate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n", - brp, sjw, tseg1, tseg2); - - /* Data bit timing settings */ - brp = dbt->brp - 1; - sjw = dbt->sjw - 1; - tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1; - tseg2 = dbt->phase_seg2 - 1; - - cfg = (RCANFD_DCFG_DTSEG1(tseg1) | RCANFD_DCFG_DBRP(brp) | - RCANFD_DCFG_DSJW(sjw) | RCANFD_DCFG_DTSEG2(tseg2)); - - rcar_canfd_write(priv->base, RCANFD_F_DCFG(ch), cfg); - netdev_dbg(priv->ndev, "drate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n", - brp, sjw, tseg1, tseg2); - } else { - /* Classical CAN only mode */ - cfg = (RCANFD_CFG_TSEG1(tseg1) | RCANFD_CFG_BRP(brp) | - RCANFD_CFG_SJW(sjw) | RCANFD_CFG_TSEG2(tseg2)); - - rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg); - netdev_dbg(priv->ndev, - "rate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n", - brp, sjw, tseg1, tseg2); + if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD)) + return; + + /* Data bit timing settings */ + brp = dbt->brp - 1; + sjw = dbt->sjw - 1; + tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1; + tseg2 = dbt->phase_seg2 - 1; + cfg = rcar_canfd_compute_data_bit_rate_cfg(gpriv->info, tseg1, tseg2, sjw, brp); + writel(cfg, &gpriv->fcbase[ch].dcfg); + + /* Transceiver Delay Compensation */ + if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_AUTO) { + /* TDC enabled, measured + offset */ + tdcmode = RCANFD_FDCFG_TDCE; + tdco = tdc->tdco - 1; + } else if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_MANUAL) { + /* TDC enabled, offset only */ + tdcmode = RCANFD_FDCFG_TDCE | 
RCANFD_FDCFG_TDCOC; + tdco = min(tdc->tdcv + tdc->tdco, tdc_const->tdco_max) - 1; } + + rcar_canfd_update_bit_reg(&gpriv->fcbase[ch].cfdcfg, mask, + tdcmode | FIELD_PREP(RCANFD_FDCFG_TDCO, tdco)); } static int rcar_canfd_start(struct net_device *ndev) { struct rcar_canfd_channel *priv = netdev_priv(ndev); + struct rcar_canfd_global *gpriv = priv->gpriv; int err = -EOPNOTSUPP; u32 sts, ch = priv->channel; u32 ridx = ch + RCANFD_RFFIFO_IDX; @@ -1246,9 +1489,9 @@ static int rcar_canfd_start(struct net_device *ndev) } /* Enable Common & Rx FIFO */ - rcar_canfd_set_bit(priv->base, RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX), + rcar_canfd_set_bit(priv->base, RCANFD_CFCC(gpriv, ch, RCANFD_CFFIFO_IDX), RCANFD_CFCC_CFE); - rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx), RCANFD_RFCC_RFE); + rcar_canfd_set_bit(priv->base, RCANFD_RFCC(gpriv, ridx), RCANFD_RFCC_RFE); priv->can.state = CAN_STATE_ERROR_ACTIVE; return 0; @@ -1264,16 +1507,22 @@ static int rcar_canfd_open(struct net_device *ndev) struct rcar_canfd_global *gpriv = priv->gpriv; int err; + err = phy_power_on(priv->transceiver); + if (err) { + netdev_err(ndev, "failed to power on PHY: %pe\n", ERR_PTR(err)); + return err; + } + /* Peripheral clock is already enabled in probe */ err = clk_prepare_enable(gpriv->can_clk); if (err) { - netdev_err(ndev, "failed to enable CAN clock, error %d\n", err); - goto out_clock; + netdev_err(ndev, "failed to enable CAN clock: %pe\n", ERR_PTR(err)); + goto out_phy; } err = open_candev(ndev); if (err) { - netdev_err(ndev, "open_candev() failed, error %d\n", err); + netdev_err(ndev, "open_candev() failed: %pe\n", ERR_PTR(err)); goto out_can_clock; } @@ -1282,20 +1531,21 @@ static int rcar_canfd_open(struct net_device *ndev) if (err) goto out_close; netif_start_queue(ndev); - can_led_event(ndev, CAN_LED_EVENT_OPEN); return 0; out_close: napi_disable(&priv->napi); close_candev(ndev); out_can_clock: clk_disable_unprepare(gpriv->can_clk); -out_clock: +out_phy: + phy_power_off(priv->transceiver); return err; } static void rcar_canfd_stop(struct net_device *ndev) { struct rcar_canfd_channel *priv = netdev_priv(ndev); + struct rcar_canfd_global *gpriv = priv->gpriv; int err; u32 sts, ch = priv->channel; u32 ridx = ch + RCANFD_RFFIFO_IDX; @@ -1313,9 +1563,9 @@ static void rcar_canfd_stop(struct net_device *ndev) rcar_canfd_disable_channel_interrupts(priv); /* Disable Common & Rx FIFO */ - rcar_canfd_clear_bit(priv->base, RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX), + rcar_canfd_clear_bit(priv->base, RCANFD_CFCC(gpriv, ch, RCANFD_CFFIFO_IDX), RCANFD_CFCC_CFE); - rcar_canfd_clear_bit(priv->base, RCANFD_RFCC(ridx), RCANFD_RFCC_RFE); + rcar_canfd_clear_bit(priv->base, RCANFD_RFCC(gpriv, ridx), RCANFD_RFCC_RFE); /* Set the state as STOPPED */ priv->can.state = CAN_STATE_STOPPED; @@ -1329,9 +1579,9 @@ static int rcar_canfd_close(struct net_device *ndev) netif_stop_queue(ndev); rcar_canfd_stop(ndev); napi_disable(&priv->napi); - clk_disable_unprepare(gpriv->can_clk); close_candev(ndev); - can_led_event(ndev, CAN_LED_EVENT_STOP); + clk_disable_unprepare(gpriv->can_clk); + phy_power_off(priv->transceiver); return 0; } @@ -1339,12 +1589,13 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct rcar_canfd_channel *priv = netdev_priv(ndev); + struct rcar_canfd_global *gpriv = priv->gpriv; struct canfd_frame *cf = (struct canfd_frame *)skb->data; u32 sts = 0, id, dlc; unsigned long flags; u32 ch = priv->channel; - if (can_dropped_invalid_skb(ndev, skb)) + if (can_dev_dropped_skb(ndev, skb)) return 
NETDEV_TX_OK; if (cf->can_id & CAN_EFF_FLAG) { @@ -1357,13 +1608,13 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb, if (cf->can_id & CAN_RTR_FLAG) id |= RCANFD_CFID_CFRTR; - dlc = RCANFD_CFPTR_CFDLC(can_len2dlc(cf->len)); + dlc = RCANFD_CFPTR_CFDLC(can_fd_len2dlc(cf->len)); - if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { + if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || gpriv->info->shared_can_regs) { rcar_canfd_write(priv->base, - RCANFD_F_CFID(ch, RCANFD_CFFIFO_IDX), id); + RCANFD_F_CFID(gpriv, ch, RCANFD_CFFIFO_IDX), id); rcar_canfd_write(priv->base, - RCANFD_F_CFPTR(ch, RCANFD_CFFIFO_IDX), dlc); + RCANFD_F_CFPTR(gpriv, ch, RCANFD_CFFIFO_IDX), dlc); if (can_is_canfd_skb(skb)) { /* CAN FD frame format */ @@ -1376,10 +1627,10 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb, } rcar_canfd_write(priv->base, - RCANFD_F_CFFDCSTS(ch, RCANFD_CFFIFO_IDX), sts); + RCANFD_F_CFFDCSTS(gpriv, ch, RCANFD_CFFIFO_IDX), sts); rcar_canfd_put_data(priv, cf, - RCANFD_F_CFDF(ch, RCANFD_CFFIFO_IDX, 0)); + RCANFD_F_CFDF(gpriv, ch, RCANFD_CFFIFO_IDX, 0)); } else { rcar_canfd_write(priv->base, RCANFD_C_CFID(ch, RCANFD_CFFIFO_IDX), id); @@ -1389,8 +1640,7 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb, RCANFD_C_CFDF(ch, RCANFD_CFFIFO_IDX, 0)); } - priv->tx_len[priv->tx_head % RCANFD_FIFO_DEPTH] = cf->len; - can_put_echo_skb(skb, ndev, priv->tx_head % RCANFD_FIFO_DEPTH); + can_put_echo_skb(skb, ndev, priv->tx_head % RCANFD_FIFO_DEPTH, 0); spin_lock_irqsave(&priv->tx_lock, flags); priv->tx_head++; @@ -1403,7 +1653,7 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb, * pointer for the Common FIFO */ rcar_canfd_write(priv->base, - RCANFD_CFPCTR(ch, RCANFD_CFFIFO_IDX), 0xff); + RCANFD_CFPCTR(gpriv, ch, RCANFD_CFFIFO_IDX), 0xff); spin_unlock_irqrestore(&priv->tx_lock, flags); return NETDEV_TX_OK; @@ -1411,27 +1661,30 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb, static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv) { - struct net_device_stats *stats = &priv->ndev->stats; + struct net_device *ndev = priv->ndev; + struct net_device_stats *stats = &ndev->stats; + struct rcar_canfd_global *gpriv = priv->gpriv; struct canfd_frame *cf; struct sk_buff *skb; u32 sts = 0, id, dlc; u32 ch = priv->channel; u32 ridx = ch + RCANFD_RFFIFO_IDX; - if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { - id = rcar_canfd_read(priv->base, RCANFD_F_RFID(ridx)); - dlc = rcar_canfd_read(priv->base, RCANFD_F_RFPTR(ridx)); + if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || gpriv->info->shared_can_regs) { + id = rcar_canfd_read(priv->base, RCANFD_F_RFID(gpriv, ridx)); + dlc = rcar_canfd_read(priv->base, RCANFD_F_RFPTR(gpriv, ridx)); - sts = rcar_canfd_read(priv->base, RCANFD_F_RFFDSTS(ridx)); - if (sts & RCANFD_RFFDSTS_RFFDF) - skb = alloc_canfd_skb(priv->ndev, &cf); + sts = rcar_canfd_read(priv->base, RCANFD_F_RFFDSTS(gpriv, ridx)); + + if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) && + sts & RCANFD_RFFDSTS_RFFDF) + skb = alloc_canfd_skb(ndev, &cf); else - skb = alloc_can_skb(priv->ndev, - (struct can_frame **)&cf); + skb = alloc_can_skb(ndev, (struct can_frame **)&cf); } else { id = rcar_canfd_read(priv->base, RCANFD_C_RFID(ridx)); dlc = rcar_canfd_read(priv->base, RCANFD_C_RFPTR(ridx)); - skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cf); + skb = alloc_can_skb(ndev, (struct can_frame **)&cf); } if (!skb) { @@ -1446,13 +1699,13 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv) if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { if (sts & 
RCANFD_RFFDSTS_RFFDF) - cf->len = can_dlc2len(RCANFD_RFPTR_RFDLC(dlc)); + cf->len = can_fd_dlc2len(RCANFD_RFPTR_RFDLC(dlc)); else - cf->len = get_can_dlc(RCANFD_RFPTR_RFDLC(dlc)); + cf->len = can_cc_dlc2len(RCANFD_RFPTR_RFDLC(dlc)); if (sts & RCANFD_RFFDSTS_RFESI) { cf->flags |= CANFD_ESI; - netdev_dbg(priv->ndev, "ESI Error\n"); + netdev_dbg(ndev, "ESI Error\n"); } if (!(sts & RCANFD_RFFDSTS_RFFDF) && (id & RCANFD_RFID_RFRTR)) { @@ -1461,12 +1714,14 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv) if (sts & RCANFD_RFFDSTS_RFBRS) cf->flags |= CANFD_BRS; - rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(ridx, 0)); + rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(gpriv, ridx, 0)); } } else { - cf->len = get_can_dlc(RCANFD_RFPTR_RFDLC(dlc)); + cf->len = can_cc_dlc2len(RCANFD_RFPTR_RFDLC(dlc)); if (id & RCANFD_RFID_RFRTR) cf->can_id |= CAN_RTR_FLAG; + else if (gpriv->info->shared_can_regs) + rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(gpriv, ridx, 0)); else rcar_canfd_get_data(priv, cf, RCANFD_C_RFDF(ridx, 0)); } @@ -1474,11 +1729,10 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv) /* Write 0xff to RFPC to increment the CPU-side * pointer of the Rx FIFO */ - rcar_canfd_write(priv->base, RCANFD_RFPCTR(ridx), 0xff); - - can_led_event(priv->ndev, CAN_LED_EVENT_RX); + rcar_canfd_write(priv->base, RCANFD_RFPCTR(gpriv, ridx), 0xff); - stats->rx_bytes += cf->len; + if (!(cf->can_id & CAN_RTR_FLAG)) + stats->rx_bytes += cf->len; stats->rx_packets++; netif_receive_skb(skb); } @@ -1487,13 +1741,14 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota) { struct rcar_canfd_channel *priv = container_of(napi, struct rcar_canfd_channel, napi); + struct rcar_canfd_global *gpriv = priv->gpriv; int num_pkts; u32 sts; u32 ch = priv->channel; u32 ridx = ch + RCANFD_RFFIFO_IDX; for (num_pkts = 0; num_pkts < quota; num_pkts++) { - sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx)); + sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(gpriv, ridx)); /* Check FIFO empty condition */ if (sts & RCANFD_RFSTS_RFEMP) break; @@ -1502,20 +1757,44 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota) /* Clear interrupt bit */ if (sts & RCANFD_RFSTS_RFIF) - rcar_canfd_write(priv->base, RCANFD_RFSTS(ridx), + rcar_canfd_write(priv->base, RCANFD_RFSTS(gpriv, ridx), sts & ~RCANFD_RFSTS_RFIF); } /* All packets processed */ if (num_pkts < quota) { - napi_complete_done(napi, num_pkts); - /* Enable Rx FIFO interrupts */ - rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx), - RCANFD_RFCC_RFIE); + if (napi_complete_done(napi, num_pkts)) { + /* Enable Rx FIFO interrupts */ + rcar_canfd_set_bit(priv->base, RCANFD_RFCC(gpriv, ridx), + RCANFD_RFCC_RFIE); + } } return num_pkts; } +static unsigned int rcar_canfd_get_tdcr(struct rcar_canfd_global *gpriv, + unsigned int ch) +{ + u32 sts = readl(&gpriv->fcbase[ch].cfdsts); + u32 tdcr = FIELD_GET(RCANFD_FDSTS_TDCR, sts); + + return tdcr & (gpriv->info->tdc_const->tdcv_max - 1); +} + +static int rcar_canfd_get_auto_tdcv(const struct net_device *ndev, u32 *tdcv) +{ + struct rcar_canfd_channel *priv = netdev_priv(ndev); + u32 tdco = priv->can.fd.tdc.tdco; + u32 tdcr; + + /* Transceiver Delay Compensation Result */ + tdcr = rcar_canfd_get_tdcr(priv->gpriv, priv->channel) + 1; + + *tdcv = tdcr < tdco ? 
0 : tdcr - tdco; + + return 0; +} + static int rcar_canfd_do_set_mode(struct net_device *ndev, enum can_mode mode) { int err; @@ -1532,10 +1811,10 @@ static int rcar_canfd_do_set_mode(struct net_device *ndev, enum can_mode mode) } } -static int rcar_canfd_get_berr_counter(const struct net_device *dev, +static int rcar_canfd_get_berr_counter(const struct net_device *ndev, struct can_berr_counter *bec) { - struct rcar_canfd_channel *priv = netdev_priv(dev); + struct rcar_canfd_channel *priv = netdev_priv(ndev); u32 val, ch = priv->channel; /* Peripheral clock is already enabled in probe */ @@ -1549,70 +1828,134 @@ static const struct net_device_ops rcar_canfd_netdev_ops = { .ndo_open = rcar_canfd_open, .ndo_stop = rcar_canfd_close, .ndo_start_xmit = rcar_canfd_start_xmit, - .ndo_change_mtu = can_change_mtu, +}; + +static const struct ethtool_ops rcar_canfd_ethtool_ops = { + .get_ts_info = ethtool_op_get_ts_info, }; static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch, - u32 fcan_freq) + u32 fcan_freq, struct phy *transceiver) { + const struct rcar_canfd_hw_info *info = gpriv->info; struct platform_device *pdev = gpriv->pdev; + struct device *dev = &pdev->dev; struct rcar_canfd_channel *priv; struct net_device *ndev; int err = -ENODEV; ndev = alloc_candev(sizeof(*priv), RCANFD_FIFO_DEPTH); - if (!ndev) { - dev_err(&pdev->dev, "alloc_candev() failed\n"); - err = -ENOMEM; - goto fail; - } + if (!ndev) + return -ENOMEM; + priv = netdev_priv(ndev); ndev->netdev_ops = &rcar_canfd_netdev_ops; + ndev->ethtool_ops = &rcar_canfd_ethtool_ops; ndev->flags |= IFF_ECHO; priv->ndev = ndev; priv->base = gpriv->base; + priv->transceiver = transceiver; priv->channel = ch; + priv->gpriv = gpriv; + if (transceiver) + priv->can.bitrate_max = transceiver->attrs.max_link_rate; priv->can.clock.freq = fcan_freq; - dev_info(&pdev->dev, "can_clk rate is %u\n", priv->can.clock.freq); + dev_info(dev, "can_clk rate is %u\n", priv->can.clock.freq); + + if (info->multi_channel_irqs) { + char *irq_name; + char name[10]; + int err_irq; + int tx_irq; + + scnprintf(name, sizeof(name), "ch%u_err", ch); + err_irq = platform_get_irq_byname(pdev, name); + if (err_irq < 0) { + err = err_irq; + goto fail; + } + + scnprintf(name, sizeof(name), "ch%u_trx", ch); + tx_irq = platform_get_irq_byname(pdev, name); + if (tx_irq < 0) { + err = tx_irq; + goto fail; + } + + irq_name = devm_kasprintf(dev, GFP_KERNEL, "canfd.ch%d_err", + ch); + if (!irq_name) { + err = -ENOMEM; + goto fail; + } + err = devm_request_irq(dev, err_irq, + rcar_canfd_channel_err_interrupt, 0, + irq_name, priv); + if (err) { + dev_err(dev, "devm_request_irq CH Err %d failed: %pe\n", + err_irq, ERR_PTR(err)); + goto fail; + } + irq_name = devm_kasprintf(dev, GFP_KERNEL, "canfd.ch%d_trx", + ch); + if (!irq_name) { + err = -ENOMEM; + goto fail; + } + err = devm_request_irq(dev, tx_irq, + rcar_canfd_channel_tx_interrupt, 0, + irq_name, priv); + if (err) { + dev_err(dev, "devm_request_irq Tx %d failed: %pe\n", + tx_irq, ERR_PTR(err)); + goto fail; + } + } if (gpriv->fdmode) { - priv->can.bittiming_const = &rcar_canfd_nom_bittiming_const; - priv->can.data_bittiming_const = - &rcar_canfd_data_bittiming_const; + priv->can.bittiming_const = gpriv->info->nom_bittiming; + priv->can.fd.data_bittiming_const = gpriv->info->data_bittiming; + priv->can.fd.tdc_const = gpriv->info->tdc_const; /* Controller starts in CAN FD only mode */ - can_set_static_ctrlmode(ndev, CAN_CTRLMODE_FD); - priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING; + err = 
can_set_static_ctrlmode(ndev, CAN_CTRLMODE_FD); + if (err) + goto fail; + + priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING | + CAN_CTRLMODE_TDC_AUTO | + CAN_CTRLMODE_TDC_MANUAL; + priv->can.fd.do_get_auto_tdcv = rcar_canfd_get_auto_tdcv; } else { /* Controller starts in Classical CAN only mode */ - priv->can.bittiming_const = &rcar_canfd_bittiming_const; + if (gpriv->info->shared_can_regs) + priv->can.bittiming_const = gpriv->info->nom_bittiming; + else + priv->can.bittiming_const = &rcar_canfd_bittiming_const; priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING; } priv->can.do_set_mode = rcar_canfd_do_set_mode; priv->can.do_get_berr_counter = rcar_canfd_get_berr_counter; - priv->gpriv = gpriv; - SET_NETDEV_DEV(ndev, &pdev->dev); + SET_NETDEV_DEV(ndev, dev); - netif_napi_add(ndev, &priv->napi, rcar_canfd_rx_poll, - RCANFD_NAPI_WEIGHT); + netif_napi_add_weight(ndev, &priv->napi, rcar_canfd_rx_poll, + RCANFD_NAPI_WEIGHT); + spin_lock_init(&priv->tx_lock); + gpriv->ch[priv->channel] = priv; err = register_candev(ndev); if (err) { - dev_err(&pdev->dev, - "register_candev() failed, error %d\n", err); + dev_err(dev, "register_candev() failed: %pe\n", ERR_PTR(err)); goto fail_candev; } - spin_lock_init(&priv->tx_lock); - devm_can_led_init(ndev); - gpriv->ch[priv->channel] = priv; - dev_info(&pdev->dev, "device registered (channel %u)\n", priv->channel); + dev_info(dev, "device registered (channel %u)\n", priv->channel); return 0; fail_candev: netif_napi_del(&priv->napi); - free_candev(ndev); fail: + free_candev(ndev); return err; } @@ -1627,210 +1970,372 @@ static void rcar_canfd_channel_remove(struct rcar_canfd_global *gpriv, u32 ch) } } +static int rcar_canfd_global_init(struct rcar_canfd_global *gpriv) +{ + struct device *dev = &gpriv->pdev->dev; + u32 rule_entry = 0; + u32 ch, sts; + int err; + + err = reset_control_reset(gpriv->rstc1); + if (err) + return err; + + err = reset_control_reset(gpriv->rstc2); + if (err) + goto fail_reset1; + + /* Enable peripheral clock for register access */ + err = clk_prepare_enable(gpriv->clkp); + if (err) { + dev_err(dev, "failed to enable peripheral clock: %pe\n", + ERR_PTR(err)); + goto fail_reset2; + } + + /* Enable RAM clock */ + err = clk_prepare_enable(gpriv->clk_ram); + if (err) { + dev_err(dev, + "failed to enable RAM clock, error %d\n", err); + goto fail_clk; + } + + err = rcar_canfd_reset_controller(gpriv); + if (err) { + dev_err(dev, "reset controller failed: %pe\n", ERR_PTR(err)); + goto fail_ram_clk; + } + + /* Controller in Global reset & Channel reset mode */ + rcar_canfd_configure_controller(gpriv); + + /* Configure per channel attributes */ + for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) { + /* Configure Channel's Rx fifo */ + rcar_canfd_configure_rx(gpriv, ch); + + /* Configure Channel's Tx (Common) fifo */ + rcar_canfd_configure_tx(gpriv, ch); + + /* Configure receive rules */ + rcar_canfd_configure_afl_rules(gpriv, ch, rule_entry); + rule_entry += RCANFD_CHANNEL_NUMRULES; + } + + /* Configure common interrupts */ + rcar_canfd_enable_global_interrupts(gpriv); + + /* Start Global operation mode */ + rcar_canfd_update_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GMDC_MASK, + RCANFD_GCTR_GMDC_GOPM); + + /* Verify mode change */ + err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts, + !(sts & RCANFD_GSTS_GNOPM), 2, 500000); + if (err) { + dev_err(dev, "global operational mode failed\n"); + goto fail_mode; + } + + return 0; + +fail_mode: + rcar_canfd_disable_global_interrupts(gpriv); +fail_ram_clk: 
+ clk_disable_unprepare(gpriv->clk_ram); +fail_clk: + clk_disable_unprepare(gpriv->clkp); +fail_reset2: + reset_control_assert(gpriv->rstc2); +fail_reset1: + reset_control_assert(gpriv->rstc1); + return err; +} + +static void rcar_canfd_global_deinit(struct rcar_canfd_global *gpriv, bool full) +{ + rcar_canfd_disable_global_interrupts(gpriv); + + if (full) { + rcar_canfd_reset_controller(gpriv); + + /* Enter global sleep mode */ + rcar_canfd_set_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GSLPR); + } + + clk_disable_unprepare(gpriv->clk_ram); + clk_disable_unprepare(gpriv->clkp); + reset_control_assert(gpriv->rstc2); + reset_control_assert(gpriv->rstc1); +} + static int rcar_canfd_probe(struct platform_device *pdev) { - struct resource *mem; + struct phy *transceivers[RCANFD_NUM_CHANNELS] = { NULL, }; + const struct rcar_canfd_hw_info *info; + struct device *dev = &pdev->dev; void __iomem *addr; - u32 sts, ch, fcan_freq; struct rcar_canfd_global *gpriv; struct device_node *of_child; unsigned long channels_mask = 0; int err, ch_irq, g_irq; + int g_err_irq, g_recc_irq; bool fdmode = true; /* CAN FD only mode - default */ + char name[9] = "channelX"; + u32 ch, fcan_freq; + int i; + + info = of_device_get_match_data(dev); - if (of_property_read_bool(pdev->dev.of_node, "renesas,no-can-fd")) + if (of_property_read_bool(dev->of_node, "renesas,no-can-fd")) fdmode = false; /* Classical CAN only mode */ - of_child = of_get_child_by_name(pdev->dev.of_node, "channel0"); - if (of_child && of_device_is_available(of_child)) - channels_mask |= BIT(0); /* Channel 0 */ + for (i = 0; i < info->max_channels; ++i) { + name[7] = '0' + i; + of_child = of_get_available_child_by_name(dev->of_node, name); + if (of_child) { + channels_mask |= BIT(i); + transceivers[i] = devm_of_phy_optional_get(dev, + of_child, NULL); + of_node_put(of_child); + } + if (IS_ERR(transceivers[i])) + return PTR_ERR(transceivers[i]); + } - of_child = of_get_child_by_name(pdev->dev.of_node, "channel1"); - if (of_child && of_device_is_available(of_child)) - channels_mask |= BIT(1); /* Channel 1 */ + if (info->shared_global_irqs) { + ch_irq = platform_get_irq_byname_optional(pdev, "ch_int"); + if (ch_irq < 0) { + /* For backward compatibility get irq by index */ + ch_irq = platform_get_irq(pdev, 0); + if (ch_irq < 0) + return ch_irq; + } - ch_irq = platform_get_irq(pdev, 0); - if (ch_irq < 0) { - dev_err(&pdev->dev, "no Channel IRQ resource\n"); - err = ch_irq; - goto fail_dev; - } + g_irq = platform_get_irq_byname_optional(pdev, "g_int"); + if (g_irq < 0) { + /* For backward compatibility get irq by index */ + g_irq = platform_get_irq(pdev, 1); + if (g_irq < 0) + return g_irq; + } + } else { + g_err_irq = platform_get_irq_byname(pdev, "g_err"); + if (g_err_irq < 0) + return g_err_irq; - g_irq = platform_get_irq(pdev, 1); - if (g_irq < 0) { - dev_err(&pdev->dev, "no Global IRQ resource\n"); - err = g_irq; - goto fail_dev; + g_recc_irq = platform_get_irq_byname(pdev, "g_recc"); + if (g_recc_irq < 0) + return g_recc_irq; } /* Global controller context */ - gpriv = devm_kzalloc(&pdev->dev, sizeof(*gpriv), GFP_KERNEL); - if (!gpriv) { - err = -ENOMEM; - goto fail_dev; - } + gpriv = devm_kzalloc(dev, sizeof(*gpriv), GFP_KERNEL); + if (!gpriv) + return -ENOMEM; + gpriv->pdev = pdev; gpriv->channels_mask = channels_mask; gpriv->fdmode = fdmode; + gpriv->info = info; + + gpriv->rstc1 = devm_reset_control_get_optional_exclusive(dev, "rstp_n"); + if (IS_ERR(gpriv->rstc1)) + return dev_err_probe(dev, PTR_ERR(gpriv->rstc1), + "failed to get rstp_n\n"); + 
+ gpriv->rstc2 = devm_reset_control_get_optional_exclusive(dev, "rstc_n"); + if (IS_ERR(gpriv->rstc2)) + return dev_err_probe(dev, PTR_ERR(gpriv->rstc2), + "failed to get rstc_n\n"); /* Peripheral clock */ - gpriv->clkp = devm_clk_get(&pdev->dev, "fck"); - if (IS_ERR(gpriv->clkp)) { - err = PTR_ERR(gpriv->clkp); - dev_err(&pdev->dev, "cannot get peripheral clock, error %d\n", - err); - goto fail_dev; - } + gpriv->clkp = devm_clk_get(dev, "fck"); + if (IS_ERR(gpriv->clkp)) + return dev_err_probe(dev, PTR_ERR(gpriv->clkp), + "cannot get peripheral clock\n"); /* fCAN clock: Pick External clock. If not available fallback to * CANFD clock */ - gpriv->can_clk = devm_clk_get(&pdev->dev, "can_clk"); + gpriv->can_clk = devm_clk_get(dev, "can_clk"); if (IS_ERR(gpriv->can_clk) || (clk_get_rate(gpriv->can_clk) == 0)) { - gpriv->can_clk = devm_clk_get(&pdev->dev, "canfd"); - if (IS_ERR(gpriv->can_clk)) { - err = PTR_ERR(gpriv->can_clk); - dev_err(&pdev->dev, - "cannot get canfd clock, error %d\n", err); - goto fail_dev; - } - gpriv->fcan = RCANFD_CANFDCLK; + gpriv->can_clk = devm_clk_get(dev, "canfd"); + if (IS_ERR(gpriv->can_clk)) + return dev_err_probe(dev, PTR_ERR(gpriv->can_clk), + "cannot get canfd clock\n"); + /* CANFD clock may be further divided within the IP */ + fcan_freq = clk_get_rate(gpriv->can_clk) / info->postdiv; } else { - gpriv->fcan = RCANFD_EXTCLK; + fcan_freq = clk_get_rate(gpriv->can_clk); + gpriv->extclk = gpriv->info->external_clk; } - fcan_freq = clk_get_rate(gpriv->can_clk); - if (gpriv->fcan == RCANFD_CANFDCLK) - /* CANFD clock is further divided by (1/2) within the IP */ - fcan_freq /= 2; + gpriv->clk_ram = devm_clk_get_optional(dev, "ram_clk"); + if (IS_ERR(gpriv->clk_ram)) + return dev_err_probe(dev, PTR_ERR(gpriv->clk_ram), + "cannot get ram clock\n"); - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - addr = devm_ioremap_resource(&pdev->dev, mem); + addr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(addr)) { err = PTR_ERR(addr); goto fail_dev; } gpriv->base = addr; + gpriv->fcbase = addr + gpriv->info->regs->coffset; /* Request IRQ that's common for both channels */ - err = devm_request_irq(&pdev->dev, ch_irq, - rcar_canfd_channel_interrupt, 0, - "canfd.chn", gpriv); - if (err) { - dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n", - ch_irq, err); - goto fail_dev; - } - err = devm_request_irq(&pdev->dev, g_irq, - rcar_canfd_global_interrupt, 0, - "canfd.gbl", gpriv); - if (err) { - dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n", - g_irq, err); - goto fail_dev; - } - - /* Enable peripheral clock for register access */ - err = clk_prepare_enable(gpriv->clkp); - if (err) { - dev_err(&pdev->dev, - "failed to enable peripheral clock, error %d\n", err); - goto fail_dev; - } - - err = rcar_canfd_reset_controller(gpriv); - if (err) { - dev_err(&pdev->dev, "reset controller failed\n"); - goto fail_clk; - } - - /* Controller in Global reset & Channel reset mode */ - rcar_canfd_configure_controller(gpriv); + if (info->shared_global_irqs) { + err = devm_request_irq(dev, ch_irq, + rcar_canfd_channel_interrupt, 0, + "canfd.ch_int", gpriv); + if (err) { + dev_err(dev, "devm_request_irq %d failed: %pe\n", + ch_irq, ERR_PTR(err)); + goto fail_dev; + } - /* Configure per channel attributes */ - for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) { - /* Configure Channel's Rx fifo */ - rcar_canfd_configure_rx(gpriv, ch); + err = devm_request_irq(dev, g_irq, rcar_canfd_global_interrupt, + 0, "canfd.g_int", gpriv); + if (err) { + 
dev_err(dev, "devm_request_irq %d failed: %pe\n", + g_irq, ERR_PTR(err)); + goto fail_dev; + } + } else { + err = devm_request_irq(dev, g_recc_irq, + rcar_canfd_global_receive_fifo_interrupt, 0, + "canfd.g_recc", gpriv); - /* Configure Channel's Tx (Common) fifo */ - rcar_canfd_configure_tx(gpriv, ch); + if (err) { + dev_err(dev, "devm_request_irq %d failed: %pe\n", + g_recc_irq, ERR_PTR(err)); + goto fail_dev; + } - /* Configure receive rules */ - rcar_canfd_configure_afl_rules(gpriv, ch); + err = devm_request_irq(dev, g_err_irq, + rcar_canfd_global_err_interrupt, 0, + "canfd.g_err", gpriv); + if (err) { + dev_err(dev, "devm_request_irq %d failed: %pe\n", + g_err_irq, ERR_PTR(err)); + goto fail_dev; + } } - /* Configure common interrupts */ - rcar_canfd_enable_global_interrupts(gpriv); - - /* Start Global operation mode */ - rcar_canfd_update_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GMDC_MASK, - RCANFD_GCTR_GMDC_GOPM); - - /* Verify mode change */ - err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts, - !(sts & RCANFD_GSTS_GNOPM), 2, 500000); - if (err) { - dev_err(&pdev->dev, "global operational mode failed\n"); + err = rcar_canfd_global_init(gpriv); + if (err) goto fail_mode; - } - for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) { - err = rcar_canfd_channel_probe(gpriv, ch, fcan_freq); + for_each_set_bit(ch, &gpriv->channels_mask, info->max_channels) { + err = rcar_canfd_channel_probe(gpriv, ch, fcan_freq, + transceivers[ch]); if (err) goto fail_channel; } platform_set_drvdata(pdev, gpriv); - dev_info(&pdev->dev, "global operational state (clk %d, fdmode %d)\n", - gpriv->fcan, gpriv->fdmode); + dev_info(dev, "global operational state (%s clk, %s mode)\n", + gpriv->extclk ? "ext" : "canfd", + gpriv->fdmode ? "fd" : "classical"); return 0; fail_channel: - for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) + for_each_set_bit(ch, &gpriv->channels_mask, info->max_channels) rcar_canfd_channel_remove(gpriv, ch); fail_mode: - rcar_canfd_disable_global_interrupts(gpriv); -fail_clk: - clk_disable_unprepare(gpriv->clkp); + rcar_canfd_global_deinit(gpriv, false); fail_dev: return err; } -static int rcar_canfd_remove(struct platform_device *pdev) +static void rcar_canfd_remove(struct platform_device *pdev) { struct rcar_canfd_global *gpriv = platform_get_drvdata(pdev); u32 ch; - rcar_canfd_reset_controller(gpriv); - rcar_canfd_disable_global_interrupts(gpriv); - - for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) { + for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) { rcar_canfd_disable_channel_interrupts(gpriv->ch[ch]); rcar_canfd_channel_remove(gpriv, ch); } - /* Enter global sleep mode */ - rcar_canfd_set_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GSLPR); - clk_disable_unprepare(gpriv->clkp); - return 0; + rcar_canfd_global_deinit(gpriv, true); } -static int __maybe_unused rcar_canfd_suspend(struct device *dev) +static int rcar_canfd_suspend(struct device *dev) { + struct rcar_canfd_global *gpriv = dev_get_drvdata(dev); + int err; + u32 ch; + + for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) { + struct rcar_canfd_channel *priv = gpriv->ch[ch]; + struct net_device *ndev = priv->ndev; + + if (!netif_running(ndev)) + continue; + + netif_device_detach(ndev); + + err = rcar_canfd_close(ndev); + if (err) { + netdev_err(ndev, "rcar_canfd_close() failed %pe\n", + ERR_PTR(err)); + return err; + } + + priv->can.state = CAN_STATE_SLEEPING; + } + + /* TODO Skip if wake-up (which is not yet supported) is enabled */ 
+ rcar_canfd_global_deinit(gpriv, false); + return 0; } -static int __maybe_unused rcar_canfd_resume(struct device *dev) +static int rcar_canfd_resume(struct device *dev) { + struct rcar_canfd_global *gpriv = dev_get_drvdata(dev); + int err; + u32 ch; + + err = rcar_canfd_global_init(gpriv); + if (err) { + dev_err(dev, "rcar_canfd_global_init() failed %pe\n", ERR_PTR(err)); + return err; + } + + for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) { + struct rcar_canfd_channel *priv = gpriv->ch[ch]; + struct net_device *ndev = priv->ndev; + + if (!netif_running(ndev)) + continue; + + err = rcar_canfd_open(ndev); + if (err) { + netdev_err(ndev, "rcar_canfd_open() failed %pe\n", + ERR_PTR(err)); + return err; + } + + netif_device_attach(ndev); + } + return 0; } -static SIMPLE_DEV_PM_OPS(rcar_canfd_pm_ops, rcar_canfd_suspend, - rcar_canfd_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(rcar_canfd_pm_ops, rcar_canfd_suspend, + rcar_canfd_resume); -static const struct of_device_id rcar_canfd_of_table[] = { - { .compatible = "renesas,rcar-gen3-canfd" }, +static const __maybe_unused struct of_device_id rcar_canfd_of_table[] = { + { .compatible = "renesas,r8a779a0-canfd", .data = &rcar_gen4_hw_info }, + { .compatible = "renesas,r9a09g047-canfd", .data = &r9a09g047_hw_info }, + { .compatible = "renesas,rcar-gen3-canfd", .data = &rcar_gen3_hw_info }, + { .compatible = "renesas,rcar-gen4-canfd", .data = &rcar_gen4_hw_info }, + { .compatible = "renesas,rzg2l-canfd", .data = &rzg2l_hw_info }, { } }; @@ -1840,7 +2345,7 @@ static struct platform_driver rcar_canfd_driver = { .driver = { .name = RCANFD_DRV_NAME, .of_match_table = of_match_ptr(rcar_canfd_of_table), - .pm = &rcar_canfd_pm_ops, + .pm = pm_sleep_ptr(&rcar_canfd_pm_ops), }, .probe = rcar_canfd_probe, .remove = rcar_canfd_remove, diff --git a/drivers/net/can/rockchip/Kconfig b/drivers/net/can/rockchip/Kconfig new file mode 100644 index 000000000000..d203c530551f --- /dev/null +++ b/drivers/net/can/rockchip/Kconfig @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 + +config CAN_ROCKCHIP_CANFD + tristate "Rockchip CAN-FD controller" + depends on OF + depends on ARCH_ROCKCHIP || COMPILE_TEST + select CAN_RX_OFFLOAD + help + Say Y here if you want to use CAN-FD controller found on + Rockchip SoCs. diff --git a/drivers/net/can/rockchip/Makefile b/drivers/net/can/rockchip/Makefile new file mode 100644 index 000000000000..3760d3e1baa3 --- /dev/null +++ b/drivers/net/can/rockchip/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_CAN_ROCKCHIP_CANFD) += rockchip_canfd.o + +rockchip_canfd-objs := +rockchip_canfd-objs += rockchip_canfd-core.o +rockchip_canfd-objs += rockchip_canfd-ethtool.o +rockchip_canfd-objs += rockchip_canfd-rx.o +rockchip_canfd-objs += rockchip_canfd-timestamp.o +rockchip_canfd-objs += rockchip_canfd-tx.o diff --git a/drivers/net/can/rockchip/rockchip_canfd-core.c b/drivers/net/can/rockchip/rockchip_canfd-core.c new file mode 100644 index 000000000000..29de0c01e4ed --- /dev/null +++ b/drivers/net/can/rockchip/rockchip_canfd-core.c @@ -0,0 +1,962 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (c) 2023, 2024 Pengutronix, +// Marc Kleine-Budde <kernel@pengutronix.de> +// +// Based on: +// +// Rockchip CANFD driver +// +// Copyright (c) 2020 Rockchip Electronics Co. Ltd. 
+// + +#include <linux/delay.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/string.h> + +#include "rockchip_canfd.h" + +static const struct rkcanfd_devtype_data rkcanfd_devtype_data_rk3568v2 = { + .model = RKCANFD_MODEL_RK3568V2, + .quirks = RKCANFD_QUIRK_RK3568_ERRATUM_1 | RKCANFD_QUIRK_RK3568_ERRATUM_2 | + RKCANFD_QUIRK_RK3568_ERRATUM_3 | RKCANFD_QUIRK_RK3568_ERRATUM_4 | + RKCANFD_QUIRK_RK3568_ERRATUM_5 | RKCANFD_QUIRK_RK3568_ERRATUM_6 | + RKCANFD_QUIRK_RK3568_ERRATUM_7 | RKCANFD_QUIRK_RK3568_ERRATUM_8 | + RKCANFD_QUIRK_RK3568_ERRATUM_9 | RKCANFD_QUIRK_RK3568_ERRATUM_10 | + RKCANFD_QUIRK_RK3568_ERRATUM_11 | RKCANFD_QUIRK_RK3568_ERRATUM_12 | + RKCANFD_QUIRK_CANFD_BROKEN, +}; + +/* The rk3568 CAN-FD errata sheet as of Tue 07 Nov 2023 11:25:31 +08:00 + * states that only the rk3568v2 is affected by erratum 5, but tests + * with the rk3568v2 and rk3568v3 show that the RX_FIFO_CNT is + * sometimes too high. In contrast to the errata sheet, mark rk3568v3 + * as affected by erratum 5, too. + */ +static const struct rkcanfd_devtype_data rkcanfd_devtype_data_rk3568v3 = { + .model = RKCANFD_MODEL_RK3568V3, + .quirks = RKCANFD_QUIRK_RK3568_ERRATUM_1 | RKCANFD_QUIRK_RK3568_ERRATUM_2 | + RKCANFD_QUIRK_RK3568_ERRATUM_5 | RKCANFD_QUIRK_RK3568_ERRATUM_7 | + RKCANFD_QUIRK_RK3568_ERRATUM_8 | RKCANFD_QUIRK_RK3568_ERRATUM_10 | + RKCANFD_QUIRK_RK3568_ERRATUM_11 | RKCANFD_QUIRK_RK3568_ERRATUM_12 | + RKCANFD_QUIRK_CANFD_BROKEN, +}; + +static const char *__rkcanfd_get_model_str(enum rkcanfd_model model) +{ + switch (model) { + case RKCANFD_MODEL_RK3568V2: + return "rk3568v2"; + case RKCANFD_MODEL_RK3568V3: + return "rk3568v3"; + } + + return "<unknown>"; +} + +static inline const char * +rkcanfd_get_model_str(const struct rkcanfd_priv *priv) +{ + return __rkcanfd_get_model_str(priv->devtype_data.model); +} + +/* Note: + * + * The formula to calculate the CAN System Clock is: + * + * Tsclk = 2 x Tclk x (brp + 1) + * + * Double the data sheet's brp_min, brp_max and brp_inc values (both + * for the arbitration and data bit timing) to take the "2 x" into + * account. 
+ */ +static const struct can_bittiming_const rkcanfd_bittiming_const = { + .name = DEVICE_NAME, + .tseg1_min = 1, + .tseg1_max = 256, + .tseg2_min = 1, + .tseg2_max = 128, + .sjw_max = 128, + .brp_min = 2, /* value from data sheet x2 */ + .brp_max = 512, /* value from data sheet x2 */ + .brp_inc = 2, /* value from data sheet x2 */ +}; + +static const struct can_bittiming_const rkcanfd_data_bittiming_const = { + .name = DEVICE_NAME, + .tseg1_min = 1, + .tseg1_max = 32, + .tseg2_min = 1, + .tseg2_max = 16, + .sjw_max = 16, + .brp_min = 2, /* value from data sheet x2 */ + .brp_max = 512, /* value from data sheet x2 */ + .brp_inc = 2, /* value from data sheet x2 */ +}; + +static void rkcanfd_chip_set_reset_mode(const struct rkcanfd_priv *priv) +{ + reset_control_assert(priv->reset); + udelay(2); + reset_control_deassert(priv->reset); + + rkcanfd_write(priv, RKCANFD_REG_MODE, 0x0); +} + +static void rkcanfd_chip_set_work_mode(const struct rkcanfd_priv *priv) +{ + rkcanfd_write(priv, RKCANFD_REG_MODE, priv->reg_mode_default); +} + +static int rkcanfd_set_bittiming(struct rkcanfd_priv *priv) +{ + const struct can_bittiming *dbt = &priv->can.fd.data_bittiming; + const struct can_bittiming *bt = &priv->can.bittiming; + u32 reg_nbt, reg_dbt, reg_tdc; + u32 tdco; + + reg_nbt = FIELD_PREP(RKCANFD_REG_FD_NOMINAL_BITTIMING_SJW, + bt->sjw - 1) | + FIELD_PREP(RKCANFD_REG_FD_NOMINAL_BITTIMING_BRP, + (bt->brp / 2) - 1) | + FIELD_PREP(RKCANFD_REG_FD_NOMINAL_BITTIMING_TSEG2, + bt->phase_seg2 - 1) | + FIELD_PREP(RKCANFD_REG_FD_NOMINAL_BITTIMING_TSEG1, + bt->prop_seg + bt->phase_seg1 - 1); + + rkcanfd_write(priv, RKCANFD_REG_FD_NOMINAL_BITTIMING, reg_nbt); + + if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD)) + return 0; + + reg_dbt = FIELD_PREP(RKCANFD_REG_FD_DATA_BITTIMING_SJW, + dbt->sjw - 1) | + FIELD_PREP(RKCANFD_REG_FD_DATA_BITTIMING_BRP, + (dbt->brp / 2) - 1) | + FIELD_PREP(RKCANFD_REG_FD_DATA_BITTIMING_TSEG2, + dbt->phase_seg2 - 1) | + FIELD_PREP(RKCANFD_REG_FD_DATA_BITTIMING_TSEG1, + dbt->prop_seg + dbt->phase_seg1 - 1); + + rkcanfd_write(priv, RKCANFD_REG_FD_DATA_BITTIMING, reg_dbt); + + tdco = (priv->can.clock.freq / dbt->bitrate) * 2 / 3; + tdco = min(tdco, FIELD_MAX(RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION_TDC_OFFSET)); + + reg_tdc = FIELD_PREP(RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION_TDC_OFFSET, tdco) | + RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION_TDC_ENABLE; + rkcanfd_write(priv, RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION, + reg_tdc); + + return 0; +} + +static void rkcanfd_get_berr_counter_corrected(struct rkcanfd_priv *priv, + struct can_berr_counter *bec) +{ + struct can_berr_counter bec_raw; + u32 reg_state; + + bec->rxerr = rkcanfd_read(priv, RKCANFD_REG_RXERRORCNT); + bec->txerr = rkcanfd_read(priv, RKCANFD_REG_TXERRORCNT); + bec_raw = *bec; + + /* Tests show that sometimes both CAN bus error counters read + * 0x0, even if the controller is in warning mode + * (RKCANFD_REG_STATE_ERROR_WARNING_STATE in RKCANFD_REG_STATE + * set). + * + * In case both error counters read 0x0, use the struct + * priv->bec, otherwise save the read value to priv->bec. + * + * rkcanfd_handle_rx_int_one() handles the decrementing of + * priv->bec.rxerr for successfully RX'ed CAN frames. + * + * Luckily the controller doesn't decrement the RX CAN bus + * error counter in hardware for self received TX'ed CAN + * frames (RKCANFD_REG_MODE_RXSTX_MODE), so RXSTX doesn't + * interfere with proper RX CAN bus error counters. 
+ * + * rkcanfd_handle_tx_done_one() handles the decrementing of + * priv->bec.txerr for successfully TX'ed CAN frames. + */ + if (!bec->rxerr && !bec->txerr) + *bec = priv->bec; + else + priv->bec = *bec; + + reg_state = rkcanfd_read(priv, RKCANFD_REG_STATE); + netdev_vdbg(priv->ndev, + "%s: Raw/Cor: txerr=%3u/%3u rxerr=%3u/%3u Bus Off=%u Warning=%u\n", + __func__, + bec_raw.txerr, bec->txerr, bec_raw.rxerr, bec->rxerr, + !!(reg_state & RKCANFD_REG_STATE_BUS_OFF_STATE), + !!(reg_state & RKCANFD_REG_STATE_ERROR_WARNING_STATE)); +} + +static int rkcanfd_get_berr_counter(const struct net_device *ndev, + struct can_berr_counter *bec) +{ + struct rkcanfd_priv *priv = netdev_priv(ndev); + int err; + + err = pm_runtime_resume_and_get(ndev->dev.parent); + if (err) + return err; + + rkcanfd_get_berr_counter_corrected(priv, bec); + + pm_runtime_put(ndev->dev.parent); + + return 0; +} + +static void rkcanfd_chip_interrupts_enable(const struct rkcanfd_priv *priv) +{ + rkcanfd_write(priv, RKCANFD_REG_INT_MASK, priv->reg_int_mask_default); + + netdev_dbg(priv->ndev, "%s: reg_int_mask=0x%08x\n", __func__, + rkcanfd_read(priv, RKCANFD_REG_INT_MASK)); +} + +static void rkcanfd_chip_interrupts_disable(const struct rkcanfd_priv *priv) +{ + rkcanfd_write(priv, RKCANFD_REG_INT_MASK, RKCANFD_REG_INT_ALL); +} + +static void rkcanfd_chip_fifo_setup(struct rkcanfd_priv *priv) +{ + u32 reg; + + /* RX FIFO */ + reg = rkcanfd_read(priv, RKCANFD_REG_RX_FIFO_CTRL); + reg |= RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_ENABLE; + rkcanfd_write(priv, RKCANFD_REG_RX_FIFO_CTRL, reg); + + WRITE_ONCE(priv->tx_head, 0); + WRITE_ONCE(priv->tx_tail, 0); + netdev_reset_queue(priv->ndev); +} + +static void rkcanfd_chip_start(struct rkcanfd_priv *priv) +{ + u32 reg; + + rkcanfd_chip_set_reset_mode(priv); + + /* Receiving Filter: accept all */ + rkcanfd_write(priv, RKCANFD_REG_IDCODE, 0x0); + rkcanfd_write(priv, RKCANFD_REG_IDMASK, RKCANFD_REG_IDCODE_EXTENDED_FRAME_ID); + + /* enable: + * - CAN_FD: enable CAN-FD + * - AUTO_RETX_MODE: auto retransmission on TX error + * - COVER_MODE: RX-FIFO overwrite mode, do not send OVERLOAD frames + * - RXSTX_MODE: Receive Self Transmit data mode + * - WORK_MODE: transition from reset to working mode + */ + reg = rkcanfd_read(priv, RKCANFD_REG_MODE); + priv->reg_mode_default = reg | + RKCANFD_REG_MODE_CAN_FD_MODE_ENABLE | + RKCANFD_REG_MODE_AUTO_RETX_MODE | + RKCANFD_REG_MODE_COVER_MODE | + RKCANFD_REG_MODE_RXSTX_MODE | + RKCANFD_REG_MODE_WORK_MODE; + + if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) + priv->reg_mode_default |= RKCANFD_REG_MODE_LBACK_MODE | + RKCANFD_REG_MODE_SILENT_MODE | + RKCANFD_REG_MODE_SELF_TEST; + + /* mask, i.e. ignore: + * - TIMESTAMP_COUNTER_OVERFLOW_INT - timestamp counter overflow interrupt + * - TX_ARBIT_FAIL_INT - TX arbitration fail interrupt + * - OVERLOAD_INT - CAN bus overload interrupt + * - TX_FINISH_INT - Transmit finish interrupt + */ + priv->reg_int_mask_default = + RKCANFD_REG_INT_TIMESTAMP_COUNTER_OVERFLOW_INT | + RKCANFD_REG_INT_TX_ARBIT_FAIL_INT | + RKCANFD_REG_INT_OVERLOAD_INT | + RKCANFD_REG_INT_TX_FINISH_INT; + + /* Do not mask the bus error interrupt if the bus error + * reporting is requested. 
+ */ + if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) + priv->reg_int_mask_default |= RKCANFD_REG_INT_ERROR_INT; + + memset(&priv->bec, 0x0, sizeof(priv->bec)); + + rkcanfd_chip_fifo_setup(priv); + rkcanfd_timestamp_init(priv); + rkcanfd_timestamp_start(priv); + + rkcanfd_set_bittiming(priv); + + rkcanfd_chip_interrupts_disable(priv); + rkcanfd_chip_set_work_mode(priv); + + priv->can.state = CAN_STATE_ERROR_ACTIVE; + + netdev_dbg(priv->ndev, "%s: reg_mode=0x%08x\n", __func__, + rkcanfd_read(priv, RKCANFD_REG_MODE)); +} + +static void __rkcanfd_chip_stop(struct rkcanfd_priv *priv, const enum can_state state) +{ + priv->can.state = state; + + rkcanfd_chip_set_reset_mode(priv); + rkcanfd_chip_interrupts_disable(priv); +} + +static void rkcanfd_chip_stop(struct rkcanfd_priv *priv, const enum can_state state) +{ + priv->can.state = state; + + rkcanfd_timestamp_stop(priv); + __rkcanfd_chip_stop(priv, state); +} + +static void rkcanfd_chip_stop_sync(struct rkcanfd_priv *priv, const enum can_state state) +{ + priv->can.state = state; + + rkcanfd_timestamp_stop_sync(priv); + __rkcanfd_chip_stop(priv, state); +} + +static int rkcanfd_set_mode(struct net_device *ndev, + enum can_mode mode) +{ + struct rkcanfd_priv *priv = netdev_priv(ndev); + + switch (mode) { + case CAN_MODE_START: + rkcanfd_chip_start(priv); + rkcanfd_chip_interrupts_enable(priv); + netif_wake_queue(ndev); + break; + + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static struct sk_buff * +rkcanfd_alloc_can_err_skb(struct rkcanfd_priv *priv, + struct can_frame **cf, u32 *timestamp) +{ + struct sk_buff *skb; + + *timestamp = rkcanfd_get_timestamp(priv); + + skb = alloc_can_err_skb(priv->ndev, cf); + if (skb) + rkcanfd_skb_set_timestamp(priv, skb, *timestamp); + + return skb; +} + +static const char *rkcanfd_get_error_type_str(unsigned int type) +{ + switch (type) { + case RKCANFD_REG_ERROR_CODE_TYPE_BIT: + return "Bit"; + case RKCANFD_REG_ERROR_CODE_TYPE_STUFF: + return "Stuff"; + case RKCANFD_REG_ERROR_CODE_TYPE_FORM: + return "Form"; + case RKCANFD_REG_ERROR_CODE_TYPE_ACK: + return "ACK"; + case RKCANFD_REG_ERROR_CODE_TYPE_CRC: + return "CRC"; + } + + return "<unknown>"; +} + +#define RKCAN_ERROR_CODE(reg_ec, code) \ + ((reg_ec) & RKCANFD_REG_ERROR_CODE_##code ? __stringify(code) " " : "") + +static void +rkcanfd_handle_error_int_reg_ec(struct rkcanfd_priv *priv, struct can_frame *cf, + const u32 reg_ec) +{ + struct net_device_stats *stats = &priv->ndev->stats; + unsigned int type; + u32 reg_state, reg_cmd; + + type = FIELD_GET(RKCANFD_REG_ERROR_CODE_TYPE, reg_ec); + reg_cmd = rkcanfd_read(priv, RKCANFD_REG_CMD); + reg_state = rkcanfd_read(priv, RKCANFD_REG_STATE); + + netdev_dbg(priv->ndev, "%s Error in %s %s Phase: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s(0x%08x) CMD=%u RX=%u TX=%u Error-Warning=%u Bus-Off=%u\n", + rkcanfd_get_error_type_str(type), + reg_ec & RKCANFD_REG_ERROR_CODE_DIRECTION_RX ? "RX" : "TX", + reg_ec & RKCANFD_REG_ERROR_CODE_PHASE ? 
"Data" : "Arbitration", + RKCAN_ERROR_CODE(reg_ec, TX_OVERLOAD), + RKCAN_ERROR_CODE(reg_ec, TX_ERROR), + RKCAN_ERROR_CODE(reg_ec, TX_ACK), + RKCAN_ERROR_CODE(reg_ec, TX_ACK_EOF), + RKCAN_ERROR_CODE(reg_ec, TX_CRC), + RKCAN_ERROR_CODE(reg_ec, TX_STUFF_COUNT), + RKCAN_ERROR_CODE(reg_ec, TX_DATA), + RKCAN_ERROR_CODE(reg_ec, TX_SOF_DLC), + RKCAN_ERROR_CODE(reg_ec, TX_IDLE), + RKCAN_ERROR_CODE(reg_ec, RX_BUF_INT), + RKCAN_ERROR_CODE(reg_ec, RX_SPACE), + RKCAN_ERROR_CODE(reg_ec, RX_EOF), + RKCAN_ERROR_CODE(reg_ec, RX_ACK_LIM), + RKCAN_ERROR_CODE(reg_ec, RX_ACK), + RKCAN_ERROR_CODE(reg_ec, RX_CRC_LIM), + RKCAN_ERROR_CODE(reg_ec, RX_CRC), + RKCAN_ERROR_CODE(reg_ec, RX_STUFF_COUNT), + RKCAN_ERROR_CODE(reg_ec, RX_DATA), + RKCAN_ERROR_CODE(reg_ec, RX_DLC), + RKCAN_ERROR_CODE(reg_ec, RX_BRS_ESI), + RKCAN_ERROR_CODE(reg_ec, RX_RES), + RKCAN_ERROR_CODE(reg_ec, RX_FDF), + RKCAN_ERROR_CODE(reg_ec, RX_ID2_RTR), + RKCAN_ERROR_CODE(reg_ec, RX_SOF_IDE), + RKCAN_ERROR_CODE(reg_ec, RX_IDLE), + reg_ec, reg_cmd, + !!(reg_state & RKCANFD_REG_STATE_RX_PERIOD), + !!(reg_state & RKCANFD_REG_STATE_TX_PERIOD), + !!(reg_state & RKCANFD_REG_STATE_ERROR_WARNING_STATE), + !!(reg_state & RKCANFD_REG_STATE_BUS_OFF_STATE)); + + priv->can.can_stats.bus_error++; + + if (reg_ec & RKCANFD_REG_ERROR_CODE_DIRECTION_RX) + stats->rx_errors++; + else + stats->tx_errors++; + + if (!cf) + return; + + if (reg_ec & RKCANFD_REG_ERROR_CODE_DIRECTION_RX) { + if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_SOF_IDE) + cf->data[3] = CAN_ERR_PROT_LOC_SOF; + else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_ID2_RTR) + cf->data[3] = CAN_ERR_PROT_LOC_RTR; + /* RKCANFD_REG_ERROR_CODE_RX_FDF */ + else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_RES) + cf->data[3] = CAN_ERR_PROT_LOC_RES0; + /* RKCANFD_REG_ERROR_CODE_RX_BRS_ESI */ + else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_DLC) + cf->data[3] = CAN_ERR_PROT_LOC_DLC; + else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_DATA) + cf->data[3] = CAN_ERR_PROT_LOC_DATA; + /* RKCANFD_REG_ERROR_CODE_RX_STUFF_COUNT */ + else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_CRC) + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; + else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_CRC_LIM) + cf->data[3] = CAN_ERR_PROT_LOC_ACK_DEL; + else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_ACK) + cf->data[3] = CAN_ERR_PROT_LOC_ACK; + else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_ACK_LIM) + cf->data[3] = CAN_ERR_PROT_LOC_ACK_DEL; + else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_EOF) + cf->data[3] = CAN_ERR_PROT_LOC_EOF; + else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_SPACE) + cf->data[3] = CAN_ERR_PROT_LOC_EOF; + else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_BUF_INT) + cf->data[3] = CAN_ERR_PROT_LOC_INTERM; + } else { + cf->data[2] |= CAN_ERR_PROT_TX; + + if (reg_ec & RKCANFD_REG_ERROR_CODE_TX_SOF_DLC) + cf->data[3] = CAN_ERR_PROT_LOC_SOF; + else if (reg_ec & RKCANFD_REG_ERROR_CODE_TX_DATA) + cf->data[3] = CAN_ERR_PROT_LOC_DATA; + /* RKCANFD_REG_ERROR_CODE_TX_STUFF_COUNT */ + else if (reg_ec & RKCANFD_REG_ERROR_CODE_TX_CRC) + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; + else if (reg_ec & RKCANFD_REG_ERROR_CODE_TX_ACK_EOF) + cf->data[3] = CAN_ERR_PROT_LOC_ACK_DEL; + else if (reg_ec & RKCANFD_REG_ERROR_CODE_TX_ACK) + cf->data[3] = CAN_ERR_PROT_LOC_ACK; + /* RKCANFD_REG_ERROR_CODE_TX_ERROR */ + else if (reg_ec & RKCANFD_REG_ERROR_CODE_TX_OVERLOAD) + cf->data[2] |= CAN_ERR_PROT_OVERLOAD; + } + + switch (reg_ec & RKCANFD_REG_ERROR_CODE_TYPE) { + case FIELD_PREP_CONST(RKCANFD_REG_ERROR_CODE_TYPE, + RKCANFD_REG_ERROR_CODE_TYPE_BIT): + + cf->data[2] |= CAN_ERR_PROT_BIT; + break; + case 
FIELD_PREP_CONST(RKCANFD_REG_ERROR_CODE_TYPE, + RKCANFD_REG_ERROR_CODE_TYPE_STUFF): + cf->data[2] |= CAN_ERR_PROT_STUFF; + break; + case FIELD_PREP_CONST(RKCANFD_REG_ERROR_CODE_TYPE, + RKCANFD_REG_ERROR_CODE_TYPE_FORM): + cf->data[2] |= CAN_ERR_PROT_FORM; + break; + case FIELD_PREP_CONST(RKCANFD_REG_ERROR_CODE_TYPE, + RKCANFD_REG_ERROR_CODE_TYPE_ACK): + cf->can_id |= CAN_ERR_ACK; + break; + case FIELD_PREP_CONST(RKCANFD_REG_ERROR_CODE_TYPE, + RKCANFD_REG_ERROR_CODE_TYPE_CRC): + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; + break; + } +} + +static int rkcanfd_handle_error_int(struct rkcanfd_priv *priv) +{ + struct net_device_stats *stats = &priv->ndev->stats; + struct can_frame *cf = NULL; + u32 reg_ec, timestamp; + struct sk_buff *skb; + int err; + + reg_ec = rkcanfd_read(priv, RKCANFD_REG_ERROR_CODE); + + if (!reg_ec) + return 0; + + if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) { + skb = rkcanfd_alloc_can_err_skb(priv, &cf, ×tamp); + if (cf) { + struct can_berr_counter bec; + + rkcanfd_get_berr_counter_corrected(priv, &bec); + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR | CAN_ERR_CNT; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + } + } + + rkcanfd_handle_error_int_reg_ec(priv, cf, reg_ec); + + if (!cf) + return 0; + + err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); + if (err) + stats->rx_fifo_errors++; + + return 0; +} + +static int rkcanfd_handle_state_error_int(struct rkcanfd_priv *priv) +{ + struct net_device_stats *stats = &priv->ndev->stats; + enum can_state new_state, rx_state, tx_state; + struct net_device *ndev = priv->ndev; + struct can_berr_counter bec; + struct can_frame *cf = NULL; + struct sk_buff *skb; + u32 timestamp; + int err; + + rkcanfd_get_berr_counter_corrected(priv, &bec); + can_state_get_by_berr_counter(ndev, &bec, &tx_state, &rx_state); + + new_state = max(tx_state, rx_state); + if (new_state == priv->can.state) + return 0; + + /* The skb allocation might fail, but can_change_state() + * handles cf == NULL. + */ + skb = rkcanfd_alloc_can_err_skb(priv, &cf, ×tamp); + can_change_state(ndev, cf, tx_state, rx_state); + + if (new_state == CAN_STATE_BUS_OFF) { + rkcanfd_chip_stop(priv, CAN_STATE_BUS_OFF); + can_bus_off(ndev); + } + + if (!skb) + return 0; + + if (new_state != CAN_STATE_BUS_OFF) { + cf->can_id |= CAN_ERR_CNT; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + } + + err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); + if (err) + stats->rx_fifo_errors++; + + return 0; +} + +static int +rkcanfd_handle_rx_fifo_overflow_int(struct rkcanfd_priv *priv) +{ + struct net_device_stats *stats = &priv->ndev->stats; + struct can_berr_counter bec; + struct can_frame *cf = NULL; + struct sk_buff *skb; + u32 timestamp; + int err; + + stats->rx_over_errors++; + stats->rx_errors++; + + netdev_dbg(priv->ndev, "RX-FIFO overflow\n"); + + skb = rkcanfd_alloc_can_err_skb(priv, &cf, ×tamp); + if (!skb) + return 0; + + rkcanfd_get_berr_counter_corrected(priv, &bec); + + cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT; + cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + + err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); + if (err) + stats->rx_fifo_errors++; + + return 0; +} + +#define rkcanfd_handle(priv, irq, ...) 
\ +({ \ + struct rkcanfd_priv *_priv = (priv); \ + int err; \ +\ + err = rkcanfd_handle_##irq(_priv, ## __VA_ARGS__); \ + if (err) \ + netdev_err(_priv->ndev, \ + "IRQ handler rkcanfd_handle_%s() returned error: %pe\n", \ + __stringify(irq), ERR_PTR(err)); \ + err; \ +}) + +static irqreturn_t rkcanfd_irq(int irq, void *dev_id) +{ + struct rkcanfd_priv *priv = dev_id; + u32 reg_int_unmasked, reg_int; + + reg_int_unmasked = rkcanfd_read(priv, RKCANFD_REG_INT); + reg_int = reg_int_unmasked & ~priv->reg_int_mask_default; + + if (!reg_int) + return IRQ_NONE; + + /* First ACK then handle, to avoid lost-IRQ race condition on + * fast re-occurring interrupts. + */ + rkcanfd_write(priv, RKCANFD_REG_INT, reg_int); + + if (reg_int & RKCANFD_REG_INT_RX_FINISH_INT) + rkcanfd_handle(priv, rx_int); + + if (reg_int & RKCANFD_REG_INT_ERROR_INT) + rkcanfd_handle(priv, error_int); + + if (reg_int & (RKCANFD_REG_INT_BUS_OFF_INT | + RKCANFD_REG_INT_PASSIVE_ERROR_INT | + RKCANFD_REG_INT_ERROR_WARNING_INT) || + priv->can.state > CAN_STATE_ERROR_ACTIVE) + rkcanfd_handle(priv, state_error_int); + + if (reg_int & RKCANFD_REG_INT_RX_FIFO_OVERFLOW_INT) + rkcanfd_handle(priv, rx_fifo_overflow_int); + + if (reg_int & ~(RKCANFD_REG_INT_ALL_ERROR | + RKCANFD_REG_INT_RX_FIFO_OVERFLOW_INT | + RKCANFD_REG_INT_RX_FINISH_INT)) + netdev_err(priv->ndev, "%s: int=0x%08x\n", __func__, reg_int); + + if (reg_int & RKCANFD_REG_INT_WAKEUP_INT) + netdev_info(priv->ndev, "%s: WAKEUP_INT\n", __func__); + + if (reg_int & RKCANFD_REG_INT_TXE_FIFO_FULL_INT) + netdev_info(priv->ndev, "%s: TXE_FIFO_FULL_INT\n", __func__); + + if (reg_int & RKCANFD_REG_INT_TXE_FIFO_OV_INT) + netdev_info(priv->ndev, "%s: TXE_FIFO_OV_INT\n", __func__); + + if (reg_int & RKCANFD_REG_INT_BUS_OFF_RECOVERY_INT) + netdev_info(priv->ndev, "%s: BUS_OFF_RECOVERY_INT\n", __func__); + + if (reg_int & RKCANFD_REG_INT_RX_FIFO_FULL_INT) + netdev_info(priv->ndev, "%s: RX_FIFO_FULL_INT\n", __func__); + + if (reg_int & RKCANFD_REG_INT_OVERLOAD_INT) + netdev_info(priv->ndev, "%s: OVERLOAD_INT\n", __func__); + + can_rx_offload_irq_finish(&priv->offload); + + return IRQ_HANDLED; +} + +static int rkcanfd_open(struct net_device *ndev) +{ + struct rkcanfd_priv *priv = netdev_priv(ndev); + int err; + + err = open_candev(ndev); + if (err) + return err; + + err = pm_runtime_resume_and_get(ndev->dev.parent); + if (err) + goto out_close_candev; + + rkcanfd_chip_start(priv); + can_rx_offload_enable(&priv->offload); + + err = request_irq(ndev->irq, rkcanfd_irq, IRQF_SHARED, ndev->name, priv); + if (err) + goto out_rkcanfd_chip_stop; + + rkcanfd_chip_interrupts_enable(priv); + + netif_start_queue(ndev); + + return 0; + +out_rkcanfd_chip_stop: + rkcanfd_chip_stop_sync(priv, CAN_STATE_STOPPED); + pm_runtime_put(ndev->dev.parent); +out_close_candev: + close_candev(ndev); + return err; +} + +static int rkcanfd_stop(struct net_device *ndev) +{ + struct rkcanfd_priv *priv = netdev_priv(ndev); + + netif_stop_queue(ndev); + + rkcanfd_chip_interrupts_disable(priv); + free_irq(ndev->irq, priv); + can_rx_offload_disable(&priv->offload); + rkcanfd_chip_stop_sync(priv, CAN_STATE_STOPPED); + close_candev(ndev); + + pm_runtime_put(ndev->dev.parent); + + return 0; +} + +static const struct net_device_ops rkcanfd_netdev_ops = { + .ndo_open = rkcanfd_open, + .ndo_stop = rkcanfd_stop, + .ndo_start_xmit = rkcanfd_start_xmit, +}; + +static int __maybe_unused rkcanfd_runtime_suspend(struct device *dev) +{ + struct rkcanfd_priv *priv = dev_get_drvdata(dev); + + clk_bulk_disable_unprepare(priv->clks_num, 
priv->clks); + + return 0; +} + +static int __maybe_unused rkcanfd_runtime_resume(struct device *dev) +{ + struct rkcanfd_priv *priv = dev_get_drvdata(dev); + + return clk_bulk_prepare_enable(priv->clks_num, priv->clks); +} + +static void rkcanfd_register_done(const struct rkcanfd_priv *priv) +{ + u32 dev_id; + + dev_id = rkcanfd_read(priv, RKCANFD_REG_RTL_VERSION); + + netdev_info(priv->ndev, + "Rockchip-CANFD %s rev%lu.%lu (errata 0x%04x) found\n", + rkcanfd_get_model_str(priv), + FIELD_GET(RKCANFD_REG_RTL_VERSION_MAJOR, dev_id), + FIELD_GET(RKCANFD_REG_RTL_VERSION_MINOR, dev_id), + priv->devtype_data.quirks); + + if (priv->devtype_data.quirks & RKCANFD_QUIRK_RK3568_ERRATUM_5 && + priv->can.clock.freq < RKCANFD_ERRATUM_5_SYSCLOCK_HZ_MIN) + netdev_info(priv->ndev, + "Erratum 5: CAN clock frequency (%luMHz) lower than known good (%luMHz), expect degraded performance\n", + priv->can.clock.freq / MEGA, + RKCANFD_ERRATUM_5_SYSCLOCK_HZ_MIN / MEGA); +} + +static int rkcanfd_register(struct rkcanfd_priv *priv) +{ + struct net_device *ndev = priv->ndev; + int err; + + pm_runtime_enable(ndev->dev.parent); + + err = pm_runtime_resume_and_get(ndev->dev.parent); + if (err) + goto out_pm_runtime_disable; + + rkcanfd_ethtool_init(priv); + + err = register_candev(ndev); + if (err) + goto out_pm_runtime_put_sync; + + rkcanfd_register_done(priv); + + pm_runtime_put(ndev->dev.parent); + + return 0; + +out_pm_runtime_put_sync: + pm_runtime_put_sync(ndev->dev.parent); +out_pm_runtime_disable: + pm_runtime_disable(ndev->dev.parent); + + return err; +} + +static inline void rkcanfd_unregister(struct rkcanfd_priv *priv) +{ + struct net_device *ndev = priv->ndev; + + unregister_candev(ndev); + pm_runtime_disable(ndev->dev.parent); +} + +static const struct of_device_id rkcanfd_of_match[] = { + { + .compatible = "rockchip,rk3568v2-canfd", + .data = &rkcanfd_devtype_data_rk3568v2, + }, { + .compatible = "rockchip,rk3568v3-canfd", + .data = &rkcanfd_devtype_data_rk3568v3, + }, { + /* sentinel */ + }, +}; +MODULE_DEVICE_TABLE(of, rkcanfd_of_match); + +static int rkcanfd_probe(struct platform_device *pdev) +{ + struct rkcanfd_priv *priv; + struct net_device *ndev; + const void *match; + int err; + + ndev = alloc_candev(sizeof(struct rkcanfd_priv), RKCANFD_TXFIFO_DEPTH); + if (!ndev) + return -ENOMEM; + + priv = netdev_priv(ndev); + + ndev->irq = platform_get_irq(pdev, 0); + if (ndev->irq < 0) { + err = ndev->irq; + goto out_free_candev; + } + + priv->clks_num = devm_clk_bulk_get_all(&pdev->dev, &priv->clks); + if (priv->clks_num < 0) { + err = priv->clks_num; + goto out_free_candev; + } + + priv->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->regs)) { + err = PTR_ERR(priv->regs); + goto out_free_candev; + } + + priv->reset = devm_reset_control_array_get_exclusive(&pdev->dev); + if (IS_ERR(priv->reset)) { + err = dev_err_probe(&pdev->dev, PTR_ERR(priv->reset), + "Failed to get reset line\n"); + goto out_free_candev; + } + + SET_NETDEV_DEV(ndev, &pdev->dev); + + ndev->netdev_ops = &rkcanfd_netdev_ops; + ndev->flags |= IFF_ECHO; + + platform_set_drvdata(pdev, priv); + priv->can.clock.freq = clk_get_rate(priv->clks[0].clk); + priv->can.bittiming_const = &rkcanfd_bittiming_const; + priv->can.fd.data_bittiming_const = &rkcanfd_data_bittiming_const; + priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | + CAN_CTRLMODE_BERR_REPORTING; + priv->can.do_set_mode = rkcanfd_set_mode; + priv->can.do_get_berr_counter = rkcanfd_get_berr_counter; + priv->ndev = ndev; + + match = device_get_match_data(&pdev->dev); 
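+	/* The devtype match data is optional: without it no SoC quirks are
+	 * known and only Classical CAN is advertised. CAN_CTRLMODE_FD is
+	 * added below only if the RKCANFD_QUIRK_CANFD_BROKEN quirk is not
+	 * set for the matched SoC.
+	 */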
+ if (match) { + priv->devtype_data = *(struct rkcanfd_devtype_data *)match; + if (!(priv->devtype_data.quirks & RKCANFD_QUIRK_CANFD_BROKEN)) + priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD; + } + + err = can_rx_offload_add_manual(ndev, &priv->offload, + RKCANFD_NAPI_WEIGHT); + if (err) + goto out_free_candev; + + err = rkcanfd_register(priv); + if (err) + goto out_can_rx_offload_del; + + return 0; + +out_can_rx_offload_del: + can_rx_offload_del(&priv->offload); +out_free_candev: + free_candev(ndev); + + return err; +} + +static void rkcanfd_remove(struct platform_device *pdev) +{ + struct rkcanfd_priv *priv = platform_get_drvdata(pdev); + struct net_device *ndev = priv->ndev; + + rkcanfd_unregister(priv); + can_rx_offload_del(&priv->offload); + free_candev(ndev); +} + +static const struct dev_pm_ops rkcanfd_pm_ops = { + SET_RUNTIME_PM_OPS(rkcanfd_runtime_suspend, + rkcanfd_runtime_resume, NULL) +}; + +static struct platform_driver rkcanfd_driver = { + .driver = { + .name = DEVICE_NAME, + .pm = &rkcanfd_pm_ops, + .of_match_table = rkcanfd_of_match, + }, + .probe = rkcanfd_probe, + .remove = rkcanfd_remove, +}; +module_platform_driver(rkcanfd_driver); + +MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>"); +MODULE_DESCRIPTION("Rockchip CAN-FD Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/can/rockchip/rockchip_canfd-ethtool.c b/drivers/net/can/rockchip/rockchip_canfd-ethtool.c new file mode 100644 index 000000000000..5aeeef64a67a --- /dev/null +++ b/drivers/net/can/rockchip/rockchip_canfd-ethtool.c @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (c) 2023, 2024 Pengutronix, +// Marc Kleine-Budde <kernel@pengutronix.de> +// + +#include <linux/ethtool.h> + +#include "rockchip_canfd.h" + +enum rkcanfd_stats_type { + RKCANFD_STATS_TYPE_RX_FIFO_EMPTY_ERRORS, + RKCANFD_STATS_TYPE_TX_EXTENDED_AS_STANDARD_ERRORS, +}; + +static const char rkcanfd_stats_strings[][ETH_GSTRING_LEN] = { + [RKCANFD_STATS_TYPE_RX_FIFO_EMPTY_ERRORS] = "rx_fifo_empty_errors", + [RKCANFD_STATS_TYPE_TX_EXTENDED_AS_STANDARD_ERRORS] = "tx_extended_as_standard_errors", +}; + +static void +rkcanfd_ethtool_get_strings(struct net_device *ndev, u32 stringset, u8 *buf) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(buf, rkcanfd_stats_strings, + sizeof(rkcanfd_stats_strings)); + } +} + +static int rkcanfd_ethtool_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(rkcanfd_stats_strings); + default: + return -EOPNOTSUPP; + } +} + +static void +rkcanfd_ethtool_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *stats, u64 *data) +{ + struct rkcanfd_priv *priv = netdev_priv(ndev); + struct rkcanfd_stats *rkcanfd_stats; + unsigned int start; + + rkcanfd_stats = &priv->stats; + + do { + start = u64_stats_fetch_begin(&rkcanfd_stats->syncp); + + data[RKCANFD_STATS_TYPE_RX_FIFO_EMPTY_ERRORS] = + u64_stats_read(&rkcanfd_stats->rx_fifo_empty_errors); + data[RKCANFD_STATS_TYPE_TX_EXTENDED_AS_STANDARD_ERRORS] = + u64_stats_read(&rkcanfd_stats->tx_extended_as_standard_errors); + } while (u64_stats_fetch_retry(&rkcanfd_stats->syncp, start)); +} + +static const struct ethtool_ops rkcanfd_ethtool_ops = { + .get_ts_info = can_ethtool_op_get_ts_info_hwts, + .get_strings = rkcanfd_ethtool_get_strings, + .get_sset_count = rkcanfd_ethtool_get_sset_count, + .get_ethtool_stats = rkcanfd_ethtool_get_ethtool_stats, +}; + +void rkcanfd_ethtool_init(struct rkcanfd_priv *priv) +{ + priv->ndev->ethtool_ops = &rkcanfd_ethtool_ops; + + 
u64_stats_init(&priv->stats.syncp); +} diff --git a/drivers/net/can/rockchip/rockchip_canfd-rx.c b/drivers/net/can/rockchip/rockchip_canfd-rx.c new file mode 100644 index 000000000000..475c0409e215 --- /dev/null +++ b/drivers/net/can/rockchip/rockchip_canfd-rx.c @@ -0,0 +1,299 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (c) 2023, 2024 Pengutronix, +// Marc Kleine-Budde <kernel@pengutronix.de> +// + +#include <net/netdev_queues.h> + +#include "rockchip_canfd.h" + +static bool rkcanfd_can_frame_header_equal(const struct canfd_frame *const cfd1, + const struct canfd_frame *const cfd2, + const bool is_canfd) +{ + const u8 mask_flags = CANFD_BRS | CANFD_ESI | CANFD_FDF; + canid_t mask = CAN_EFF_FLAG; + + if (canfd_sanitize_len(cfd1->len) != canfd_sanitize_len(cfd2->len)) + return false; + + if (!is_canfd) + mask |= CAN_RTR_FLAG; + + if (cfd1->can_id & CAN_EFF_FLAG) + mask |= CAN_EFF_MASK; + else + mask |= CAN_SFF_MASK; + + if ((cfd1->can_id & mask) != (cfd2->can_id & mask)) + return false; + + if (is_canfd && + (cfd1->flags & mask_flags) != (cfd2->flags & mask_flags)) + return false; + + return true; +} + +static bool rkcanfd_can_frame_data_equal(const struct canfd_frame *cfd1, + const struct canfd_frame *cfd2, + const bool is_canfd) +{ + u8 len; + + if (!is_canfd && (cfd1->can_id & CAN_RTR_FLAG)) + return true; + + len = canfd_sanitize_len(cfd1->len); + + return !memcmp(cfd1->data, cfd2->data, len); +} + +static unsigned int +rkcanfd_fifo_header_to_cfd_header(const struct rkcanfd_priv *priv, + const struct rkcanfd_fifo_header *header, + struct canfd_frame *cfd) +{ + unsigned int len = sizeof(*cfd) - sizeof(cfd->data); + u8 dlc; + + if (header->frameinfo & RKCANFD_REG_FD_FRAMEINFO_FRAME_FORMAT) + cfd->can_id = FIELD_GET(RKCANFD_REG_FD_ID_EFF, header->id) | + CAN_EFF_FLAG; + else + cfd->can_id = FIELD_GET(RKCANFD_REG_FD_ID_SFF, header->id); + + dlc = FIELD_GET(RKCANFD_REG_FD_FRAMEINFO_DATA_LENGTH, + header->frameinfo); + + /* CAN-FD */ + if (header->frameinfo & RKCANFD_REG_FD_FRAMEINFO_FDF) { + cfd->len = can_fd_dlc2len(dlc); + + /* The cfd is not allocated by alloc_canfd_skb(), so + * set CANFD_FDF here. + */ + cfd->flags |= CANFD_FDF; + + if (header->frameinfo & RKCANFD_REG_FD_FRAMEINFO_BRS) + cfd->flags |= CANFD_BRS; + } else { + cfd->len = can_cc_dlc2len(dlc); + + if (header->frameinfo & RKCANFD_REG_FD_FRAMEINFO_RTR) { + cfd->can_id |= CAN_RTR_FLAG; + + return len; + } + } + + return len + cfd->len; +} + +static int rkcanfd_rxstx_filter(struct rkcanfd_priv *priv, + const struct canfd_frame *cfd_rx, const u32 ts, + bool *tx_done) +{ + struct net_device_stats *stats = &priv->ndev->stats; + struct rkcanfd_stats *rkcanfd_stats = &priv->stats; + const struct canfd_frame *cfd_nominal; + const struct sk_buff *skb; + unsigned int tx_tail; + + tx_tail = rkcanfd_get_tx_tail(priv); + skb = priv->can.echo_skb[tx_tail]; + if (!skb) { + netdev_err(priv->ndev, + "%s: echo_skb[%u]=NULL tx_head=0x%08x tx_tail=0x%08x\n", + __func__, tx_tail, + priv->tx_head, priv->tx_tail); + + return -ENOMSG; + } + cfd_nominal = (struct canfd_frame *)skb->data; + + /* We RX'ed a frame identical to our pending TX frame. 
*/ + if (rkcanfd_can_frame_header_equal(cfd_rx, cfd_nominal, + cfd_rx->flags & CANFD_FDF) && + rkcanfd_can_frame_data_equal(cfd_rx, cfd_nominal, + cfd_rx->flags & CANFD_FDF)) { + unsigned int frame_len; + + rkcanfd_handle_tx_done_one(priv, ts, &frame_len); + + WRITE_ONCE(priv->tx_tail, priv->tx_tail + 1); + netif_subqueue_completed_wake(priv->ndev, 0, 1, frame_len, + rkcanfd_get_effective_tx_free(priv), + RKCANFD_TX_START_THRESHOLD); + + *tx_done = true; + + return 0; + } + + if (!(priv->devtype_data.quirks & RKCANFD_QUIRK_RK3568_ERRATUM_6)) + return 0; + + /* Erratum 6: Extended frames may be send as standard frames. + * + * Not affected if: + * - TX'ed a standard frame -or- + * - RX'ed an extended frame + */ + if (!(cfd_nominal->can_id & CAN_EFF_FLAG) || + (cfd_rx->can_id & CAN_EFF_FLAG)) + return 0; + + /* Not affected if: + * - standard part and RTR flag of the TX'ed frame + * is not equal the CAN-ID and RTR flag of the RX'ed frame. + */ + if ((cfd_nominal->can_id & (CAN_RTR_FLAG | CAN_SFF_MASK)) != + (cfd_rx->can_id & (CAN_RTR_FLAG | CAN_SFF_MASK))) + return 0; + + /* Not affected if: + * - length is not the same + */ + if (cfd_nominal->len != cfd_rx->len) + return 0; + + /* Not affected if: + * - the data of non RTR frames is different + */ + if (!(cfd_nominal->can_id & CAN_RTR_FLAG) && + memcmp(cfd_nominal->data, cfd_rx->data, cfd_nominal->len)) + return 0; + + /* Affected by Erratum 6 */ + u64_stats_update_begin(&rkcanfd_stats->syncp); + u64_stats_inc(&rkcanfd_stats->tx_extended_as_standard_errors); + u64_stats_update_end(&rkcanfd_stats->syncp); + + /* Manual handling of CAN Bus Error counters. See + * rkcanfd_get_corrected_berr_counter() for detailed + * explanation. + */ + if (priv->bec.txerr) + priv->bec.txerr--; + + *tx_done = true; + + stats->tx_packets++; + stats->tx_errors++; + + rkcanfd_xmit_retry(priv); + + return 0; +} + +static inline bool +rkcanfd_fifo_header_empty(const struct rkcanfd_fifo_header *header) +{ + /* Erratum 5: If the FIFO is empty, we read the same value for + * all elements. + */ + return header->frameinfo == header->id && + header->frameinfo == header->ts; +} + +static int rkcanfd_handle_rx_int_one(struct rkcanfd_priv *priv) +{ + struct net_device_stats *stats = &priv->ndev->stats; + struct canfd_frame cfd[1] = { }, *skb_cfd; + struct rkcanfd_fifo_header header[1] = { }; + struct sk_buff *skb; + unsigned int len; + int err; + + /* read header into separate struct and convert it later */ + rkcanfd_read_rep(priv, RKCANFD_REG_RX_FIFO_RDATA, + header, sizeof(*header)); + /* read data directly into cfd */ + rkcanfd_read_rep(priv, RKCANFD_REG_RX_FIFO_RDATA, + cfd->data, sizeof(cfd->data)); + + /* Erratum 5: Counters for TXEFIFO and RXFIFO may be wrong */ + if (rkcanfd_fifo_header_empty(header)) { + struct rkcanfd_stats *rkcanfd_stats = &priv->stats; + + u64_stats_update_begin(&rkcanfd_stats->syncp); + u64_stats_inc(&rkcanfd_stats->rx_fifo_empty_errors); + u64_stats_update_end(&rkcanfd_stats->syncp); + + return 0; + } + + len = rkcanfd_fifo_header_to_cfd_header(priv, header, cfd); + + /* Drop any received CAN-FD frames if CAN-FD mode is not + * requested. 
+ */ + if (header->frameinfo & RKCANFD_REG_FD_FRAMEINFO_FDF && + !(priv->can.ctrlmode & CAN_CTRLMODE_FD)) { + stats->rx_dropped++; + + return 0; + } + + if (rkcanfd_get_tx_pending(priv)) { + bool tx_done = false; + + err = rkcanfd_rxstx_filter(priv, cfd, header->ts, &tx_done); + if (err) + return err; + if (tx_done && !(priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) + return 0; + } + + /* Manual handling of CAN Bus Error counters. See + * rkcanfd_get_corrected_berr_counter() for detailed + * explanation. + */ + if (priv->bec.rxerr) + priv->bec.rxerr = min(CAN_ERROR_PASSIVE_THRESHOLD, + priv->bec.rxerr) - 1; + + if (header->frameinfo & RKCANFD_REG_FD_FRAMEINFO_FDF) + skb = alloc_canfd_skb(priv->ndev, &skb_cfd); + else + skb = alloc_can_skb(priv->ndev, (struct can_frame **)&skb_cfd); + + if (!skb) { + stats->rx_dropped++; + + return 0; + } + + memcpy(skb_cfd, cfd, len); + rkcanfd_skb_set_timestamp(priv, skb, header->ts); + + err = can_rx_offload_queue_timestamp(&priv->offload, skb, header->ts); + if (err) + stats->rx_fifo_errors++; + + return 0; +} + +static inline unsigned int +rkcanfd_rx_fifo_get_len(const struct rkcanfd_priv *priv) +{ + const u32 reg = rkcanfd_read(priv, RKCANFD_REG_RX_FIFO_CTRL); + + return FIELD_GET(RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_CNT, reg); +} + +int rkcanfd_handle_rx_int(struct rkcanfd_priv *priv) +{ + unsigned int len; + int err; + + while ((len = rkcanfd_rx_fifo_get_len(priv))) { + err = rkcanfd_handle_rx_int_one(priv); + if (err) + return err; + } + + return 0; +} diff --git a/drivers/net/can/rockchip/rockchip_canfd-timestamp.c b/drivers/net/can/rockchip/rockchip_canfd-timestamp.c new file mode 100644 index 000000000000..72774cd2f94b --- /dev/null +++ b/drivers/net/can/rockchip/rockchip_canfd-timestamp.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (c) 2023, 2024 Pengutronix, +// Marc Kleine-Budde <kernel@pengutronix.de> +// + +#include <linux/clocksource.h> + +#include "rockchip_canfd.h" + +static u64 rkcanfd_timestamp_read(struct cyclecounter *cc) +{ + const struct rkcanfd_priv *priv = container_of(cc, struct rkcanfd_priv, cc); + + return rkcanfd_get_timestamp(priv); +} + +void rkcanfd_skb_set_timestamp(const struct rkcanfd_priv *priv, + struct sk_buff *skb, const u32 timestamp) +{ + struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb); + u64 ns; + + ns = timecounter_cyc2time(&priv->tc, timestamp); + + hwtstamps->hwtstamp = ns_to_ktime(ns); +} + +static void rkcanfd_timestamp_work(struct work_struct *work) +{ + const struct delayed_work *delayed_work = to_delayed_work(work); + struct rkcanfd_priv *priv; + + priv = container_of(delayed_work, struct rkcanfd_priv, timestamp); + timecounter_read(&priv->tc); + + schedule_delayed_work(&priv->timestamp, priv->work_delay_jiffies); +} + +void rkcanfd_timestamp_init(struct rkcanfd_priv *priv) +{ + const struct can_bittiming *dbt = &priv->can.fd.data_bittiming; + const struct can_bittiming *bt = &priv->can.bittiming; + struct cyclecounter *cc = &priv->cc; + u32 bitrate, div, reg, rate; + u64 work_delay_ns; + u64 max_cycles; + + /* At the standard clock rate of 300Mhz on the rk3658, the 32 + * bit timer overflows every 14s. This means that we have to + * poll it quite often to avoid missing a wrap around. + * + * Divide it down to a reasonable rate, at least twice the bit + * rate. 
+ */ + bitrate = max(bt->bitrate, dbt->bitrate); + div = min(DIV_ROUND_UP(priv->can.clock.freq, bitrate * 2), + FIELD_MAX(RKCANFD_REG_TIMESTAMP_CTRL_TIME_BASE_COUNTER_PRESCALE) + 1); + + reg = FIELD_PREP(RKCANFD_REG_TIMESTAMP_CTRL_TIME_BASE_COUNTER_PRESCALE, + div - 1) | + RKCANFD_REG_TIMESTAMP_CTRL_TIME_BASE_COUNTER_ENABLE; + rkcanfd_write(priv, RKCANFD_REG_TIMESTAMP_CTRL, reg); + + cc->read = rkcanfd_timestamp_read; + cc->mask = CYCLECOUNTER_MASK(32); + + rate = priv->can.clock.freq / div; + clocks_calc_mult_shift(&cc->mult, &cc->shift, rate, NSEC_PER_SEC, + RKCANFD_TIMESTAMP_WORK_MAX_DELAY_SEC); + + max_cycles = div_u64(ULLONG_MAX, cc->mult); + max_cycles = min(max_cycles, cc->mask); + work_delay_ns = clocksource_cyc2ns(max_cycles, cc->mult, cc->shift); + priv->work_delay_jiffies = div_u64(work_delay_ns, 3u * NSEC_PER_SEC / HZ); + INIT_DELAYED_WORK(&priv->timestamp, rkcanfd_timestamp_work); + + netdev_dbg(priv->ndev, "clock=%lu.%02luMHz bitrate=%lu.%02luMBit/s div=%u rate=%lu.%02luMHz mult=%u shift=%u delay=%lus\n", + priv->can.clock.freq / MEGA, + priv->can.clock.freq % MEGA / KILO / 10, + bitrate / MEGA, + bitrate % MEGA / KILO / 100, + div, + rate / MEGA, + rate % MEGA / KILO / 10, + cc->mult, cc->shift, + priv->work_delay_jiffies / HZ); +} + +void rkcanfd_timestamp_start(struct rkcanfd_priv *priv) +{ + timecounter_init(&priv->tc, &priv->cc, ktime_get_real_ns()); + + schedule_delayed_work(&priv->timestamp, priv->work_delay_jiffies); +} + +void rkcanfd_timestamp_stop(struct rkcanfd_priv *priv) +{ + cancel_delayed_work(&priv->timestamp); +} + +void rkcanfd_timestamp_stop_sync(struct rkcanfd_priv *priv) +{ + cancel_delayed_work_sync(&priv->timestamp); +} diff --git a/drivers/net/can/rockchip/rockchip_canfd-tx.c b/drivers/net/can/rockchip/rockchip_canfd-tx.c new file mode 100644 index 000000000000..12200dcfd338 --- /dev/null +++ b/drivers/net/can/rockchip/rockchip_canfd-tx.c @@ -0,0 +1,167 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (c) 2023, 2024 Pengutronix, +// Marc Kleine-Budde <kernel@pengutronix.de> +// + +#include <net/netdev_queues.h> + +#include "rockchip_canfd.h" + +static bool rkcanfd_tx_tail_is_eff(const struct rkcanfd_priv *priv) +{ + const struct canfd_frame *cfd; + const struct sk_buff *skb; + unsigned int tx_tail; + + if (!rkcanfd_get_tx_pending(priv)) + return false; + + tx_tail = rkcanfd_get_tx_tail(priv); + skb = priv->can.echo_skb[tx_tail]; + if (!skb) { + netdev_err(priv->ndev, + "%s: echo_skb[%u]=NULL tx_head=0x%08x tx_tail=0x%08x\n", + __func__, tx_tail, + priv->tx_head, priv->tx_tail); + + return false; + } + + cfd = (struct canfd_frame *)skb->data; + + return cfd->can_id & CAN_EFF_FLAG; +} + +unsigned int rkcanfd_get_effective_tx_free(const struct rkcanfd_priv *priv) +{ + if (priv->devtype_data.quirks & RKCANFD_QUIRK_RK3568_ERRATUM_6 && + rkcanfd_tx_tail_is_eff(priv)) + return 0; + + return rkcanfd_get_tx_free(priv); +} + +static void rkcanfd_start_xmit_write_cmd(const struct rkcanfd_priv *priv, + const u32 reg_cmd) +{ + if (priv->devtype_data.quirks & RKCANFD_QUIRK_RK3568_ERRATUM_12) + rkcanfd_write(priv, RKCANFD_REG_MODE, priv->reg_mode_default | + RKCANFD_REG_MODE_SPACE_RX_MODE); + + rkcanfd_write(priv, RKCANFD_REG_CMD, reg_cmd); + + if (priv->devtype_data.quirks & RKCANFD_QUIRK_RK3568_ERRATUM_12) + rkcanfd_write(priv, RKCANFD_REG_MODE, priv->reg_mode_default); +} + +void rkcanfd_xmit_retry(struct rkcanfd_priv *priv) +{ + const unsigned int tx_head = rkcanfd_get_tx_head(priv); + const u32 reg_cmd = RKCANFD_REG_CMD_TX_REQ(tx_head); + + 
rkcanfd_start_xmit_write_cmd(priv, reg_cmd); +} + +netdev_tx_t rkcanfd_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct rkcanfd_priv *priv = netdev_priv(ndev); + u32 reg_frameinfo, reg_id, reg_cmd; + unsigned int tx_head, frame_len; + const struct canfd_frame *cfd; + int err; + u8 i; + + if (can_dev_dropped_skb(ndev, skb)) + return NETDEV_TX_OK; + + if (!netif_subqueue_maybe_stop(priv->ndev, 0, + rkcanfd_get_effective_tx_free(priv), + RKCANFD_TX_STOP_THRESHOLD, + RKCANFD_TX_START_THRESHOLD)) { + if (net_ratelimit()) + netdev_info(priv->ndev, + "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, tx_pending=%d)\n", + priv->tx_head, priv->tx_tail, + rkcanfd_get_tx_pending(priv)); + + return NETDEV_TX_BUSY; + } + + cfd = (struct canfd_frame *)skb->data; + + if (cfd->can_id & CAN_EFF_FLAG) { + reg_frameinfo = RKCANFD_REG_FD_FRAMEINFO_FRAME_FORMAT; + reg_id = FIELD_PREP(RKCANFD_REG_FD_ID_EFF, cfd->can_id); + } else { + reg_frameinfo = 0; + reg_id = FIELD_PREP(RKCANFD_REG_FD_ID_SFF, cfd->can_id); + } + + if (cfd->can_id & CAN_RTR_FLAG) + reg_frameinfo |= RKCANFD_REG_FD_FRAMEINFO_RTR; + + if (can_is_canfd_skb(skb)) { + reg_frameinfo |= RKCANFD_REG_FD_FRAMEINFO_FDF; + + if (cfd->flags & CANFD_BRS) + reg_frameinfo |= RKCANFD_REG_FD_FRAMEINFO_BRS; + + reg_frameinfo |= FIELD_PREP(RKCANFD_REG_FD_FRAMEINFO_DATA_LENGTH, + can_fd_len2dlc(cfd->len)); + } else { + reg_frameinfo |= FIELD_PREP(RKCANFD_REG_FD_FRAMEINFO_DATA_LENGTH, + cfd->len); + } + + tx_head = rkcanfd_get_tx_head(priv); + reg_cmd = RKCANFD_REG_CMD_TX_REQ(tx_head); + + rkcanfd_write(priv, RKCANFD_REG_FD_TXFRAMEINFO, reg_frameinfo); + rkcanfd_write(priv, RKCANFD_REG_FD_TXID, reg_id); + for (i = 0; i < cfd->len; i += 4) + rkcanfd_write(priv, RKCANFD_REG_FD_TXDATA0 + i, + *(u32 *)(cfd->data + i)); + + frame_len = can_skb_get_frame_len(skb); + err = can_put_echo_skb(skb, ndev, tx_head, frame_len); + if (!err) + netdev_sent_queue(priv->ndev, frame_len); + + WRITE_ONCE(priv->tx_head, priv->tx_head + 1); + + rkcanfd_start_xmit_write_cmd(priv, reg_cmd); + + netif_subqueue_maybe_stop(priv->ndev, 0, + rkcanfd_get_effective_tx_free(priv), + RKCANFD_TX_STOP_THRESHOLD, + RKCANFD_TX_START_THRESHOLD); + + return NETDEV_TX_OK; +} + +void rkcanfd_handle_tx_done_one(struct rkcanfd_priv *priv, const u32 ts, + unsigned int *frame_len_p) +{ + struct net_device_stats *stats = &priv->ndev->stats; + unsigned int tx_tail; + struct sk_buff *skb; + + tx_tail = rkcanfd_get_tx_tail(priv); + skb = priv->can.echo_skb[tx_tail]; + + /* Manual handling of CAN Bus Error counters. See + * rkcanfd_get_corrected_berr_counter() for detailed + * explanation. 
+ */ + if (priv->bec.txerr) + priv->bec.txerr--; + + if (skb) + rkcanfd_skb_set_timestamp(priv, skb, ts); + stats->tx_bytes += + can_rx_offload_get_echo_skb_queue_timestamp(&priv->offload, + tx_tail, ts, + frame_len_p); + stats->tx_packets++; +} diff --git a/drivers/net/can/rockchip/rockchip_canfd.h b/drivers/net/can/rockchip/rockchip_canfd.h new file mode 100644 index 000000000000..93131c7d7f54 --- /dev/null +++ b/drivers/net/can/rockchip/rockchip_canfd.h @@ -0,0 +1,553 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (c) 2023, 2024 Pengutronix, + * Marc Kleine-Budde <kernel@pengutronix.de> + */ + +#ifndef _ROCKCHIP_CANFD_H +#define _ROCKCHIP_CANFD_H + +#include <linux/bitfield.h> +#include <linux/can/dev.h> +#include <linux/can/rx-offload.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/netdevice.h> +#include <linux/reset.h> +#include <linux/skbuff.h> +#include <linux/timecounter.h> +#include <linux/types.h> +#include <linux/u64_stats_sync.h> +#include <linux/units.h> + +#define RKCANFD_REG_MODE 0x000 +#define RKCANFD_REG_MODE_CAN_FD_MODE_ENABLE BIT(15) +#define RKCANFD_REG_MODE_DPEE BIT(14) +#define RKCANFD_REG_MODE_BRSD BIT(13) +#define RKCANFD_REG_MODE_SPACE_RX_MODE BIT(12) +#define RKCANFD_REG_MODE_AUTO_BUS_ON BIT(11) +#define RKCANFD_REG_MODE_AUTO_RETX_MODE BIT(10) +#define RKCANFD_REG_MODE_OVLD_MODE BIT(9) +#define RKCANFD_REG_MODE_COVER_MODE BIT(8) +#define RKCANFD_REG_MODE_RXSORT_MODE BIT(7) +#define RKCANFD_REG_MODE_TXORDER_MODE BIT(6) +#define RKCANFD_REG_MODE_RXSTX_MODE BIT(5) +#define RKCANFD_REG_MODE_LBACK_MODE BIT(4) +#define RKCANFD_REG_MODE_SILENT_MODE BIT(3) +#define RKCANFD_REG_MODE_SELF_TEST BIT(2) +#define RKCANFD_REG_MODE_SLEEP_MODE BIT(1) +#define RKCANFD_REG_MODE_WORK_MODE BIT(0) + +#define RKCANFD_REG_CMD 0x004 +#define RKCANFD_REG_CMD_TX1_REQ BIT(1) +#define RKCANFD_REG_CMD_TX0_REQ BIT(0) +#define RKCANFD_REG_CMD_TX_REQ(i) (RKCANFD_REG_CMD_TX0_REQ << (i)) + +#define RKCANFD_REG_STATE 0x008 +#define RKCANFD_REG_STATE_SLEEP_STATE BIT(6) +#define RKCANFD_REG_STATE_BUS_OFF_STATE BIT(5) +#define RKCANFD_REG_STATE_ERROR_WARNING_STATE BIT(4) +#define RKCANFD_REG_STATE_TX_PERIOD BIT(3) +#define RKCANFD_REG_STATE_RX_PERIOD BIT(2) +#define RKCANFD_REG_STATE_TX_BUFFER_FULL BIT(1) +#define RKCANFD_REG_STATE_RX_BUFFER_FULL BIT(0) + +#define RKCANFD_REG_INT 0x00c +#define RKCANFD_REG_INT_WAKEUP_INT BIT(14) +#define RKCANFD_REG_INT_TXE_FIFO_FULL_INT BIT(13) +#define RKCANFD_REG_INT_TXE_FIFO_OV_INT BIT(12) +#define RKCANFD_REG_INT_TIMESTAMP_COUNTER_OVERFLOW_INT BIT(11) +#define RKCANFD_REG_INT_BUS_OFF_RECOVERY_INT BIT(10) +#define RKCANFD_REG_INT_BUS_OFF_INT BIT(9) +#define RKCANFD_REG_INT_RX_FIFO_OVERFLOW_INT BIT(8) +#define RKCANFD_REG_INT_RX_FIFO_FULL_INT BIT(7) +#define RKCANFD_REG_INT_ERROR_INT BIT(6) +#define RKCANFD_REG_INT_TX_ARBIT_FAIL_INT BIT(5) +#define RKCANFD_REG_INT_PASSIVE_ERROR_INT BIT(4) +#define RKCANFD_REG_INT_OVERLOAD_INT BIT(3) +#define RKCANFD_REG_INT_ERROR_WARNING_INT BIT(2) +#define RKCANFD_REG_INT_TX_FINISH_INT BIT(1) +#define RKCANFD_REG_INT_RX_FINISH_INT BIT(0) + +#define RKCANFD_REG_INT_ALL \ + (RKCANFD_REG_INT_WAKEUP_INT | \ + RKCANFD_REG_INT_TXE_FIFO_FULL_INT | \ + RKCANFD_REG_INT_TXE_FIFO_OV_INT | \ + RKCANFD_REG_INT_TIMESTAMP_COUNTER_OVERFLOW_INT | \ + RKCANFD_REG_INT_BUS_OFF_RECOVERY_INT | \ + RKCANFD_REG_INT_BUS_OFF_INT | \ + RKCANFD_REG_INT_RX_FIFO_OVERFLOW_INT | \ + RKCANFD_REG_INT_RX_FIFO_FULL_INT | \ + RKCANFD_REG_INT_ERROR_INT | \ + RKCANFD_REG_INT_TX_ARBIT_FAIL_INT | \ + RKCANFD_REG_INT_PASSIVE_ERROR_INT | \ + 
RKCANFD_REG_INT_OVERLOAD_INT | \ + RKCANFD_REG_INT_ERROR_WARNING_INT | \ + RKCANFD_REG_INT_TX_FINISH_INT | \ + RKCANFD_REG_INT_RX_FINISH_INT) + +#define RKCANFD_REG_INT_ALL_ERROR \ + (RKCANFD_REG_INT_BUS_OFF_INT | \ + RKCANFD_REG_INT_ERROR_INT | \ + RKCANFD_REG_INT_PASSIVE_ERROR_INT | \ + RKCANFD_REG_INT_ERROR_WARNING_INT) + +#define RKCANFD_REG_INT_MASK 0x010 + +#define RKCANFD_REG_DMA_CTL 0x014 +#define RKCANFD_REG_DMA_CTL_DMA_RX_MODE BIT(1) +#define RKCANFD_REG_DMA_CTL_DMA_TX_MODE BIT(9) + +#define RKCANFD_REG_BITTIMING 0x018 +#define RKCANFD_REG_BITTIMING_SAMPLE_MODE BIT(16) +#define RKCANFD_REG_BITTIMING_SJW GENMASK(15, 14) +#define RKCANFD_REG_BITTIMING_BRP GENMASK(13, 8) +#define RKCANFD_REG_BITTIMING_TSEG2 GENMASK(6, 4) +#define RKCANFD_REG_BITTIMING_TSEG1 GENMASK(3, 0) + +#define RKCANFD_REG_ARBITFAIL 0x028 +#define RKCANFD_REG_ARBITFAIL_ARBIT_FAIL_CODE GENMASK(6, 0) + +/* Register seems to be clear or read */ +#define RKCANFD_REG_ERROR_CODE 0x02c +#define RKCANFD_REG_ERROR_CODE_PHASE BIT(29) +#define RKCANFD_REG_ERROR_CODE_TYPE GENMASK(28, 26) +#define RKCANFD_REG_ERROR_CODE_TYPE_BIT 0x0 +#define RKCANFD_REG_ERROR_CODE_TYPE_STUFF 0x1 +#define RKCANFD_REG_ERROR_CODE_TYPE_FORM 0x2 +#define RKCANFD_REG_ERROR_CODE_TYPE_ACK 0x3 +#define RKCANFD_REG_ERROR_CODE_TYPE_CRC 0x4 +#define RKCANFD_REG_ERROR_CODE_DIRECTION_RX BIT(25) +#define RKCANFD_REG_ERROR_CODE_TX GENMASK(24, 16) +#define RKCANFD_REG_ERROR_CODE_TX_OVERLOAD BIT(24) +#define RKCANFD_REG_ERROR_CODE_TX_ERROR BIT(23) +#define RKCANFD_REG_ERROR_CODE_TX_ACK BIT(22) +#define RKCANFD_REG_ERROR_CODE_TX_ACK_EOF BIT(21) +#define RKCANFD_REG_ERROR_CODE_TX_CRC BIT(20) +#define RKCANFD_REG_ERROR_CODE_TX_STUFF_COUNT BIT(19) +#define RKCANFD_REG_ERROR_CODE_TX_DATA BIT(18) +#define RKCANFD_REG_ERROR_CODE_TX_SOF_DLC BIT(17) +#define RKCANFD_REG_ERROR_CODE_TX_IDLE BIT(16) +#define RKCANFD_REG_ERROR_CODE_RX GENMASK(15, 0) +#define RKCANFD_REG_ERROR_CODE_RX_BUF_INT BIT(15) +#define RKCANFD_REG_ERROR_CODE_RX_SPACE BIT(14) +#define RKCANFD_REG_ERROR_CODE_RX_EOF BIT(13) +#define RKCANFD_REG_ERROR_CODE_RX_ACK_LIM BIT(12) +#define RKCANFD_REG_ERROR_CODE_RX_ACK BIT(11) +#define RKCANFD_REG_ERROR_CODE_RX_CRC_LIM BIT(10) +#define RKCANFD_REG_ERROR_CODE_RX_CRC BIT(9) +#define RKCANFD_REG_ERROR_CODE_RX_STUFF_COUNT BIT(8) +#define RKCANFD_REG_ERROR_CODE_RX_DATA BIT(7) +#define RKCANFD_REG_ERROR_CODE_RX_DLC BIT(6) +#define RKCANFD_REG_ERROR_CODE_RX_BRS_ESI BIT(5) +#define RKCANFD_REG_ERROR_CODE_RX_RES BIT(4) +#define RKCANFD_REG_ERROR_CODE_RX_FDF BIT(3) +#define RKCANFD_REG_ERROR_CODE_RX_ID2_RTR BIT(2) +#define RKCANFD_REG_ERROR_CODE_RX_SOF_IDE BIT(1) +#define RKCANFD_REG_ERROR_CODE_RX_IDLE BIT(0) + +#define RKCANFD_REG_ERROR_CODE_NOACK \ + (FIELD_PREP(RKCANFD_REG_ERROR_CODE_TYPE, \ + RKCANFD_REG_ERROR_CODE_TYPE_ACK) | \ + RKCANFD_REG_ERROR_CODE_TX_ACK_EOF | \ + RKCANFD_REG_ERROR_CODE_RX_ACK) + +#define RKCANFD_REG_RXERRORCNT 0x034 +#define RKCANFD_REG_RXERRORCNT_RX_ERR_CNT GENMASK(7, 0) + +#define RKCANFD_REG_TXERRORCNT 0x038 +#define RKCANFD_REG_TXERRORCNT_TX_ERR_CNT GENMASK(8, 0) + +#define RKCANFD_REG_IDCODE 0x03c +#define RKCANFD_REG_IDCODE_STANDARD_FRAME_ID GENMASK(10, 0) +#define RKCANFD_REG_IDCODE_EXTENDED_FRAME_ID GENMASK(28, 0) + +#define RKCANFD_REG_IDMASK 0x040 + +#define RKCANFD_REG_TXFRAMEINFO 0x050 +#define RKCANFD_REG_FRAMEINFO_FRAME_FORMAT BIT(7) +#define RKCANFD_REG_FRAMEINFO_RTR BIT(6) +#define RKCANFD_REG_FRAMEINFO_DATA_LENGTH GENMASK(3, 0) + +#define RKCANFD_REG_TXID 0x054 +#define RKCANFD_REG_TXID_TX_ID GENMASK(28, 0) + +#define 
RKCANFD_REG_TXDATA0 0x058 +#define RKCANFD_REG_TXDATA1 0x05C +#define RKCANFD_REG_RXFRAMEINFO 0x060 +#define RKCANFD_REG_RXID 0x064 +#define RKCANFD_REG_RXDATA0 0x068 +#define RKCANFD_REG_RXDATA1 0x06c + +#define RKCANFD_REG_RTL_VERSION 0x070 +#define RKCANFD_REG_RTL_VERSION_MAJOR GENMASK(7, 4) +#define RKCANFD_REG_RTL_VERSION_MINOR GENMASK(3, 0) + +#define RKCANFD_REG_FD_NOMINAL_BITTIMING 0x100 +#define RKCANFD_REG_FD_NOMINAL_BITTIMING_SAMPLE_MODE BIT(31) +#define RKCANFD_REG_FD_NOMINAL_BITTIMING_SJW GENMASK(30, 24) +#define RKCANFD_REG_FD_NOMINAL_BITTIMING_BRP GENMASK(23, 16) +#define RKCANFD_REG_FD_NOMINAL_BITTIMING_TSEG2 GENMASK(14, 8) +#define RKCANFD_REG_FD_NOMINAL_BITTIMING_TSEG1 GENMASK(7, 0) + +#define RKCANFD_REG_FD_DATA_BITTIMING 0x104 +#define RKCANFD_REG_FD_DATA_BITTIMING_SAMPLE_MODE BIT(21) +#define RKCANFD_REG_FD_DATA_BITTIMING_SJW GENMASK(20, 17) +#define RKCANFD_REG_FD_DATA_BITTIMING_BRP GENMASK(16, 9) +#define RKCANFD_REG_FD_DATA_BITTIMING_TSEG2 GENMASK(8, 5) +#define RKCANFD_REG_FD_DATA_BITTIMING_TSEG1 GENMASK(4, 0) + +#define RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION 0x108 +#define RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION_TDC_OFFSET GENMASK(6, 1) +#define RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION_TDC_ENABLE BIT(0) + +#define RKCANFD_REG_TIMESTAMP_CTRL 0x10c +/* datasheet says 6:1, which is wrong */ +#define RKCANFD_REG_TIMESTAMP_CTRL_TIME_BASE_COUNTER_PRESCALE GENMASK(5, 1) +#define RKCANFD_REG_TIMESTAMP_CTRL_TIME_BASE_COUNTER_ENABLE BIT(0) + +#define RKCANFD_REG_TIMESTAMP 0x110 + +#define RKCANFD_REG_TXEVENT_FIFO_CTRL 0x114 +#define RKCANFD_REG_TXEVENT_FIFO_CTRL_TXE_FIFO_CNT GENMASK(8, 5) +#define RKCANFD_REG_TXEVENT_FIFO_CTRL_TXE_FIFO_WATERMARK GENMASK(4, 1) +#define RKCANFD_REG_TXEVENT_FIFO_CTRL_TXE_FIFO_ENABLE BIT(0) + +#define RKCANFD_REG_RX_FIFO_CTRL 0x118 +#define RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_CNT GENMASK(6, 4) +#define RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_FULL_WATERMARK GENMASK(3, 1) +#define RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_ENABLE BIT(0) + +#define RKCANFD_REG_AFC_CTRL 0x11c +#define RKCANFD_REG_AFC_CTRL_UAF5 BIT(4) +#define RKCANFD_REG_AFC_CTRL_UAF4 BIT(3) +#define RKCANFD_REG_AFC_CTRL_UAF3 BIT(2) +#define RKCANFD_REG_AFC_CTRL_UAF2 BIT(1) +#define RKCANFD_REG_AFC_CTRL_UAF1 BIT(0) + +#define RKCANFD_REG_IDCODE0 0x120 +#define RKCANFD_REG_IDMASK0 0x124 +#define RKCANFD_REG_IDCODE1 0x128 +#define RKCANFD_REG_IDMASK1 0x12c +#define RKCANFD_REG_IDCODE2 0x130 +#define RKCANFD_REG_IDMASK2 0x134 +#define RKCANFD_REG_IDCODE3 0x138 +#define RKCANFD_REG_IDMASK3 0x13c +#define RKCANFD_REG_IDCODE4 0x140 +#define RKCANFD_REG_IDMASK4 0x144 + +#define RKCANFD_REG_FD_TXFRAMEINFO 0x200 +#define RKCANFD_REG_FD_FRAMEINFO_FRAME_FORMAT BIT(7) +#define RKCANFD_REG_FD_FRAMEINFO_RTR BIT(6) +#define RKCANFD_REG_FD_FRAMEINFO_FDF BIT(5) +#define RKCANFD_REG_FD_FRAMEINFO_BRS BIT(4) +#define RKCANFD_REG_FD_FRAMEINFO_DATA_LENGTH GENMASK(3, 0) + +#define RKCANFD_REG_FD_TXID 0x204 +#define RKCANFD_REG_FD_ID_EFF GENMASK(28, 0) +#define RKCANFD_REG_FD_ID_SFF GENMASK(11, 0) + +#define RKCANFD_REG_FD_TXDATA0 0x208 +#define RKCANFD_REG_FD_TXDATA1 0x20c +#define RKCANFD_REG_FD_TXDATA2 0x210 +#define RKCANFD_REG_FD_TXDATA3 0x214 +#define RKCANFD_REG_FD_TXDATA4 0x218 +#define RKCANFD_REG_FD_TXDATA5 0x21c +#define RKCANFD_REG_FD_TXDATA6 0x220 +#define RKCANFD_REG_FD_TXDATA7 0x224 +#define RKCANFD_REG_FD_TXDATA8 0x228 +#define RKCANFD_REG_FD_TXDATA9 0x22c +#define RKCANFD_REG_FD_TXDATA10 0x230 +#define RKCANFD_REG_FD_TXDATA11 0x234 +#define RKCANFD_REG_FD_TXDATA12 0x238 +#define RKCANFD_REG_FD_TXDATA13 0x23c 
+#define RKCANFD_REG_FD_TXDATA14 0x240
+#define RKCANFD_REG_FD_TXDATA15 0x244
+
+#define RKCANFD_REG_FD_RXFRAMEINFO 0x300
+#define RKCANFD_REG_FD_RXID 0x304
+#define RKCANFD_REG_FD_RXTIMESTAMP 0x308
+#define RKCANFD_REG_FD_RXDATA0 0x30c
+#define RKCANFD_REG_FD_RXDATA1 0x310
+#define RKCANFD_REG_FD_RXDATA2 0x314
+#define RKCANFD_REG_FD_RXDATA3 0x318
+#define RKCANFD_REG_FD_RXDATA4 0x31c
+#define RKCANFD_REG_FD_RXDATA5 0x320
+#define RKCANFD_REG_FD_RXDATA6 0x324
+#define RKCANFD_REG_FD_RXDATA7 0x328
+#define RKCANFD_REG_FD_RXDATA8 0x32c
+#define RKCANFD_REG_FD_RXDATA9 0x330
+#define RKCANFD_REG_FD_RXDATA10 0x334
+#define RKCANFD_REG_FD_RXDATA11 0x338
+#define RKCANFD_REG_FD_RXDATA12 0x33c
+#define RKCANFD_REG_FD_RXDATA13 0x340
+#define RKCANFD_REG_FD_RXDATA14 0x344
+#define RKCANFD_REG_FD_RXDATA15 0x348
+
+#define RKCANFD_REG_RX_FIFO_RDATA 0x400
+#define RKCANFD_REG_TXE_FIFO_RDATA 0x500
+
+#define DEVICE_NAME "rockchip_canfd"
+#define RKCANFD_NAPI_WEIGHT 32
+#define RKCANFD_TXFIFO_DEPTH 2
+#define RKCANFD_TX_STOP_THRESHOLD 1
+#define RKCANFD_TX_START_THRESHOLD 1
+
+#define RKCANFD_TIMESTAMP_WORK_MAX_DELAY_SEC 60
+#define RKCANFD_ERRATUM_5_SYSCLOCK_HZ_MIN (300 * MEGA)
+
+/* rk3568 CAN-FD Errata, as of Tue 07 Nov 2023 11:25:31 +08:00 */
+
+/* Erratum 1: The error frame sent by the CAN controller has an
+ * abnormal format.
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_1 BIT(0)
+
+/* Erratum 2: The error frame sent after detecting a CRC error has an
+ * abnormal position.
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_2 BIT(1)
+
+/* Erratum 3: Intermittent CRC calculation errors. */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_3 BIT(2)
+
+/* Erratum 4: Intermittent occurrence of stuffing errors. */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_4 BIT(3)
+
+/* Erratum 5: Counters related to the TXFIFO and RXFIFO exhibit
+ * abnormal counting behavior.
+ *
+ * The rk3568 CAN-FD errata sheet as of Tue 07 Nov 2023 11:25:31 +08:00
+ * states that only the rk3568v2 is affected by this erratum, but
+ * tests with the rk3568v2 and rk3568v3 show that the RX_FIFO_CNT is
+ * sometimes too high. This leads to CAN frames being read from the
+ * FIFO, which is then already empty.
+ *
+ * Further tests on the rk3568v2 and rk3568v3 show that in this
+ * situation (i.e. empty FIFO) all elements of the FIFO header
+ * (frameinfo, id, ts) contain the same data.
+ *
+ * On the rk3568v2 and rk3568v3, this problem only occurs extremely
+ * rarely with the standard clock of 300 MHz, but almost immediately
+ * at 80 MHz.
+ *
+ * To work around this problem, check for empty FIFO with
+ * rkcanfd_fifo_header_empty() in rkcanfd_handle_rx_int_one() and exit
+ * early.
+ *
+ * To reproduce:
+ * assigned-clocks = <&cru CLK_CANx>;
+ * assigned-clock-rates = <80000000>;
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_5 BIT(4)
+
+/* Erratum 6: The CAN controller's transmission of extended frames may
+ * intermittently change into standard frames.
+ *
+ * Work around this issue by activating self reception (RXSTX). If we
+ * have pending TX CAN frames, check all RX'ed CAN frames in
+ * rkcanfd_rxstx_filter().
+ *
+ * If it's a frame we've sent and it's OK, call the TX complete
+ * handler: rkcanfd_handle_tx_done_one(). Mask the TX complete IRQ.
+ *
+ * If it's a frame we've sent, but the CAN-ID is mangled, resend the
+ * original extended frame.
+ * + * To reproduce: + * host: + * canfdtest -evx -g can0 + * candump any,0:80000000 -cexdtA + * dut: + * canfdtest -evx can0 + * ethtool -S can0 + */ +#define RKCANFD_QUIRK_RK3568_ERRATUM_6 BIT(5) + +/* Erratum 7: In the passive error state, the CAN controller's + * interframe space segment counting is inaccurate. + */ +#define RKCANFD_QUIRK_RK3568_ERRATUM_7 BIT(6) + +/* Erratum 8: The Format-Error error flag is transmitted one bit + * later. + */ +#define RKCANFD_QUIRK_RK3568_ERRATUM_8 BIT(7) + +/* Erratum 9: In the arbitration segment, the CAN controller will + * identify stuffing errors as arbitration failures. + */ +#define RKCANFD_QUIRK_RK3568_ERRATUM_9 BIT(8) + +/* Erratum 10: Does not support the BUSOFF slow recovery mechanism. */ +#define RKCANFD_QUIRK_RK3568_ERRATUM_10 BIT(9) + +/* Erratum 11: Arbitration error. */ +#define RKCANFD_QUIRK_RK3568_ERRATUM_11 BIT(10) + +/* Erratum 12: A dominant bit at the third bit of the intermission may + * cause a transmission error. + */ +#define RKCANFD_QUIRK_RK3568_ERRATUM_12 BIT(11) + +/* Tests on the rk3568v2 and rk3568v3 show that receiving certain + * CAN-FD frames trigger an Error Interrupt. + * + * - Form Error in RX Arbitration Phase: TX_IDLE RX_STUFF_COUNT (0x0a010100) CMD=0 RX=0 TX=0 + * Error-Warning=1 Bus-Off=0 + * To reproduce: + * host: + * cansend can0 002##01f + * DUT: + * candump any,0:0,#FFFFFFFF -cexdHtA + * + * - Form Error in RX Arbitration Phase: TX_IDLE RX_CRC (0x0a010200) CMD=0 RX=0 TX=0 + * Error-Warning=1 Bus-Off=0 + * To reproduce: + * host: + * cansend can0 002##07217010000000000 + * DUT: + * candump any,0:0,#FFFFFFFF -cexdHtA + */ +#define RKCANFD_QUIRK_CANFD_BROKEN BIT(12) + +/* known issues with rk3568v3: + * + * - Overload situation during high bus load + * To reproduce: + * host: + * # add a 2nd CAN adapter to the CAN bus + * cangen can0 -I 1 -Li -Di -p10 -g 0.3 + * cansequence -rve + * DUT: + * cangen can0 -I2 -L1 -Di -p10 -c10 -g 1 -e + * cansequence -rv -i 1 + * + * - TX starvation after repeated Bus-Off + * To reproduce: + * host: + * sleep 3 && cangen can0 -I2 -Li -Di -p10 -g 0.0 + * DUT: + * cangen can0 -I2 -Li -Di -p10 -g 0.05 + */ + +enum rkcanfd_model { + RKCANFD_MODEL_RK3568V2 = 0x35682, + RKCANFD_MODEL_RK3568V3 = 0x35683, +}; + +struct rkcanfd_devtype_data { + enum rkcanfd_model model; + u32 quirks; +}; + +struct rkcanfd_fifo_header { + u32 frameinfo; + u32 id; + u32 ts; +}; + +struct rkcanfd_stats { + struct u64_stats_sync syncp; + + /* Erratum 5 */ + u64_stats_t rx_fifo_empty_errors; + + /* Erratum 6 */ + u64_stats_t tx_extended_as_standard_errors; +}; + +struct rkcanfd_priv { + struct can_priv can; + struct can_rx_offload offload; + struct net_device *ndev; + + void __iomem *regs; + unsigned int tx_head; + unsigned int tx_tail; + + u32 reg_mode_default; + u32 reg_int_mask_default; + struct rkcanfd_devtype_data devtype_data; + + struct cyclecounter cc; + struct timecounter tc; + struct delayed_work timestamp; + unsigned long work_delay_jiffies; + + struct can_berr_counter bec; + + struct rkcanfd_stats stats; + + struct reset_control *reset; + struct clk_bulk_data *clks; + int clks_num; +}; + +static inline u32 +rkcanfd_read(const struct rkcanfd_priv *priv, u32 reg) +{ + return readl(priv->regs + reg); +} + +static inline void +rkcanfd_read_rep(const struct rkcanfd_priv *priv, u32 reg, + void *buf, unsigned int len) +{ + readsl(priv->regs + reg, buf, len / sizeof(u32)); +} + +static inline void +rkcanfd_write(const struct rkcanfd_priv *priv, u32 reg, u32 val) +{ + writel(val, priv->regs + reg); +} + 
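The GENMASK()-based register field definitions earlier in this header are meant to be used with FIELD_GET()/FIELD_PREP() from <linux/bitfield.h> together with the rkcanfd_read()/rkcanfd_write() accessors above. As a rough illustration only (the helper name below is hypothetical and not part of the patch), extracting the receive error counter could look like this:

/* Hypothetical sketch, not part of the patch: read the RX error
 * counter via the accessor and field mask defined in this header.
 */
static inline u8 rkcanfd_example_get_rx_err_cnt(const struct rkcanfd_priv *priv)
{
	u32 reg = rkcanfd_read(priv, RKCANFD_REG_RXERRORCNT);

	return FIELD_GET(RKCANFD_REG_RXERRORCNT_RX_ERR_CNT, reg);
}

In the driver itself, the rkcanfd_get_berr_counter() callback wired up as do_get_berr_counter in probe covers this for user space.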
+static inline u32 +rkcanfd_get_timestamp(const struct rkcanfd_priv *priv) +{ + return rkcanfd_read(priv, RKCANFD_REG_TIMESTAMP); +} + +static inline unsigned int +rkcanfd_get_tx_head(const struct rkcanfd_priv *priv) +{ + return READ_ONCE(priv->tx_head) & (RKCANFD_TXFIFO_DEPTH - 1); +} + +static inline unsigned int +rkcanfd_get_tx_tail(const struct rkcanfd_priv *priv) +{ + return READ_ONCE(priv->tx_tail) & (RKCANFD_TXFIFO_DEPTH - 1); +} + +static inline unsigned int +rkcanfd_get_tx_pending(const struct rkcanfd_priv *priv) +{ + return READ_ONCE(priv->tx_head) - READ_ONCE(priv->tx_tail); +} + +static inline unsigned int +rkcanfd_get_tx_free(const struct rkcanfd_priv *priv) +{ + return RKCANFD_TXFIFO_DEPTH - rkcanfd_get_tx_pending(priv); +} + +void rkcanfd_ethtool_init(struct rkcanfd_priv *priv); + +int rkcanfd_handle_rx_int(struct rkcanfd_priv *priv); + +void rkcanfd_skb_set_timestamp(const struct rkcanfd_priv *priv, + struct sk_buff *skb, const u32 timestamp); +void rkcanfd_timestamp_init(struct rkcanfd_priv *priv); +void rkcanfd_timestamp_start(struct rkcanfd_priv *priv); +void rkcanfd_timestamp_stop(struct rkcanfd_priv *priv); +void rkcanfd_timestamp_stop_sync(struct rkcanfd_priv *priv); + +unsigned int rkcanfd_get_effective_tx_free(const struct rkcanfd_priv *priv); +void rkcanfd_xmit_retry(struct rkcanfd_priv *priv); +netdev_tx_t rkcanfd_start_xmit(struct sk_buff *skb, struct net_device *ndev); +void rkcanfd_handle_tx_done_one(struct rkcanfd_priv *priv, const u32 ts, + unsigned int *frame_len_p); + +#endif diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c deleted file mode 100644 index 2ce4fa8698c7..000000000000 --- a/drivers/net/can/rx-offload.c +++ /dev/null @@ -1,338 +0,0 @@ -/* - * Copyright (c) 2014 David Jander, Protonic Holland - * Copyright (C) 2014-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the version 2 of the GNU General Public License - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. 
- */ - -#include <linux/can/dev.h> -#include <linux/can/rx-offload.h> - -struct can_rx_offload_cb { - u32 timestamp; -}; - -static inline struct can_rx_offload_cb *can_rx_offload_get_cb(struct sk_buff *skb) -{ - BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb)); - - return (struct can_rx_offload_cb *)skb->cb; -} - -static inline bool can_rx_offload_le(struct can_rx_offload *offload, unsigned int a, unsigned int b) -{ - if (offload->inc) - return a <= b; - else - return a >= b; -} - -static inline unsigned int can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val) -{ - if (offload->inc) - return (*val)++; - else - return (*val)--; -} - -static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota) -{ - struct can_rx_offload *offload = container_of(napi, struct can_rx_offload, napi); - struct net_device *dev = offload->dev; - struct net_device_stats *stats = &dev->stats; - struct sk_buff *skb; - int work_done = 0; - - while ((work_done < quota) && - (skb = skb_dequeue(&offload->skb_queue))) { - struct can_frame *cf = (struct can_frame *)skb->data; - - work_done++; - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; - netif_receive_skb(skb); - } - - if (work_done < quota) { - napi_complete_done(napi, work_done); - - /* Check if there was another interrupt */ - if (!skb_queue_empty(&offload->skb_queue)) - napi_reschedule(&offload->napi); - } - - can_led_event(offload->dev, CAN_LED_EVENT_RX); - - return work_done; -} - -static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new, - int (*compare)(struct sk_buff *a, struct sk_buff *b)) -{ - struct sk_buff *pos, *insert = NULL; - - skb_queue_reverse_walk(head, pos) { - const struct can_rx_offload_cb *cb_pos, *cb_new; - - cb_pos = can_rx_offload_get_cb(pos); - cb_new = can_rx_offload_get_cb(new); - - netdev_dbg(new->dev, - "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n", - __func__, - cb_pos->timestamp, cb_new->timestamp, - cb_new->timestamp - cb_pos->timestamp, - skb_queue_len(head)); - - if (compare(pos, new) < 0) - continue; - insert = pos; - break; - } - if (!insert) - __skb_queue_head(head, new); - else - __skb_queue_after(head, insert, new); -} - -static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b) -{ - const struct can_rx_offload_cb *cb_a, *cb_b; - - cb_a = can_rx_offload_get_cb(a); - cb_b = can_rx_offload_get_cb(b); - - /* Substract two u32 and return result as int, to keep - * difference steady around the u32 overflow. 
- */ - return cb_b->timestamp - cb_a->timestamp; -} - -static struct sk_buff *can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n) -{ - struct sk_buff *skb = NULL; - struct can_rx_offload_cb *cb; - struct can_frame *cf; - int ret; - - /* If queue is full or skb not available, read to discard mailbox */ - if (likely(skb_queue_len(&offload->skb_queue) <= - offload->skb_queue_len_max)) - skb = alloc_can_skb(offload->dev, &cf); - - if (!skb) { - struct can_frame cf_overflow; - u32 timestamp; - - ret = offload->mailbox_read(offload, &cf_overflow, - ×tamp, n); - if (ret) - offload->dev->stats.rx_dropped++; - - return NULL; - } - - cb = can_rx_offload_get_cb(skb); - ret = offload->mailbox_read(offload, cf, &cb->timestamp, n); - if (!ret) { - kfree_skb(skb); - return NULL; - } - - return skb; -} - -int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pending) -{ - struct sk_buff_head skb_queue; - unsigned int i; - - __skb_queue_head_init(&skb_queue); - - for (i = offload->mb_first; - can_rx_offload_le(offload, i, offload->mb_last); - can_rx_offload_inc(offload, &i)) { - struct sk_buff *skb; - - if (!(pending & BIT_ULL(i))) - continue; - - skb = can_rx_offload_offload_one(offload, i); - if (!skb) - break; - - __skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare); - } - - if (!skb_queue_empty(&skb_queue)) { - unsigned long flags; - u32 queue_len; - - spin_lock_irqsave(&offload->skb_queue.lock, flags); - skb_queue_splice_tail(&skb_queue, &offload->skb_queue); - spin_unlock_irqrestore(&offload->skb_queue.lock, flags); - - if ((queue_len = skb_queue_len(&offload->skb_queue)) > - (offload->skb_queue_len_max / 8)) - netdev_dbg(offload->dev, "%s: queue_len=%d\n", - __func__, queue_len); - - can_rx_offload_schedule(offload); - } - - return skb_queue_len(&skb_queue); -} -EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp); - -int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload) -{ - struct sk_buff *skb; - int received = 0; - - while ((skb = can_rx_offload_offload_one(offload, 0))) { - skb_queue_tail(&offload->skb_queue, skb); - received++; - } - - if (received) - can_rx_offload_schedule(offload); - - return received; -} -EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo); - -int can_rx_offload_queue_sorted(struct can_rx_offload *offload, - struct sk_buff *skb, u32 timestamp) -{ - struct can_rx_offload_cb *cb; - unsigned long flags; - - if (skb_queue_len(&offload->skb_queue) > - offload->skb_queue_len_max) - return -ENOMEM; - - cb = can_rx_offload_get_cb(skb); - cb->timestamp = timestamp; - - spin_lock_irqsave(&offload->skb_queue.lock, flags); - __skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare); - spin_unlock_irqrestore(&offload->skb_queue.lock, flags); - - can_rx_offload_schedule(offload); - - return 0; -} -EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted); - -unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload, - unsigned int idx, u32 timestamp) -{ - struct net_device *dev = offload->dev; - struct net_device_stats *stats = &dev->stats; - struct sk_buff *skb; - u8 len; - int err; - - skb = __can_get_echo_skb(dev, idx, &len); - if (!skb) - return 0; - - err = can_rx_offload_queue_sorted(offload, skb, timestamp); - if (err) { - stats->rx_errors++; - stats->tx_fifo_errors++; - } - - return len; -} -EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb); - -int can_rx_offload_queue_tail(struct can_rx_offload *offload, - struct sk_buff *skb) -{ - if (skb_queue_len(&offload->skb_queue) > - 
offload->skb_queue_len_max) - return -ENOMEM; - - skb_queue_tail(&offload->skb_queue, skb); - can_rx_offload_schedule(offload); - - return 0; -} -EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail); - -static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight) -{ - offload->dev = dev; - - /* Limit queue len to 4x the weight (rounted to next power of two) */ - offload->skb_queue_len_max = 2 << fls(weight); - offload->skb_queue_len_max *= 4; - skb_queue_head_init(&offload->skb_queue); - - can_rx_offload_reset(offload); - netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight); - - dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n", - __func__, offload->skb_queue_len_max); - - return 0; -} - -int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload) -{ - unsigned int weight; - - if (offload->mb_first > BITS_PER_LONG_LONG || - offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read) - return -EINVAL; - - if (offload->mb_first < offload->mb_last) { - offload->inc = true; - weight = offload->mb_last - offload->mb_first; - } else { - offload->inc = false; - weight = offload->mb_first - offload->mb_last; - } - - return can_rx_offload_init_queue(dev, offload, weight); -} -EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp); - -int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight) -{ - if (!offload->mailbox_read) - return -EINVAL; - - return can_rx_offload_init_queue(dev, offload, weight); -} -EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo); - -void can_rx_offload_enable(struct can_rx_offload *offload) -{ - can_rx_offload_reset(offload); - napi_enable(&offload->napi); -} -EXPORT_SYMBOL_GPL(can_rx_offload_enable); - -void can_rx_offload_del(struct can_rx_offload *offload) -{ - netif_napi_del(&offload->napi); - skb_queue_purge(&offload->skb_queue); -} -EXPORT_SYMBOL_GPL(can_rx_offload_del); - -void can_rx_offload_reset(struct can_rx_offload *offload) -{ -} -EXPORT_SYMBOL_GPL(can_rx_offload_reset); diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig index f6dc89927ece..e061e35769bf 100644 --- a/drivers/net/can/sja1000/Kconfig +++ b/drivers/net/can/sja1000/Kconfig @@ -1,55 +1,47 @@ +# SPDX-License-Identifier: GPL-2.0-only + menuconfig CAN_SJA1000 tristate "Philips/NXP SJA1000 devices" depends on HAS_IOMEM if CAN_SJA1000 -config CAN_SJA1000_ISA - tristate "ISA Bus based legacy SJA1000 driver" - ---help--- - This driver adds legacy support for SJA1000 chips connected to - the ISA bus using I/O port, memory mapped or indirect access. - -config CAN_SJA1000_PLATFORM - tristate "Generic Platform Bus based SJA1000 driver" - ---help--- - This driver adds support for the SJA1000 chips connected to - the "platform bus" (Linux abstraction for directly to the - processor attached devices). Which can be found on various - boards from Phytec (http://www.phytec.de) like the PCM027, - PCM038. It also provides the OpenFirmware "platform bus" found - on embedded systems with OpenFirmware bindings, e.g. if you - have a PowerPC based system you may want to enable this option. +config CAN_EMS_PCI + tristate "EMS CPC-PCI, CPC-PCIe and CPC-104P Card" + depends on PCI + help + This driver is for the one, two or four channel CPC-PCI, + CPC-PCIe and CPC-104P cards from EMS Dr. Thomas Wuensche + (http://www.ems-wuensche.de). 
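For reference, the skb queue cap computed in the rx-offload.c helper removed above (can_rx_offload_init_queue()) is easier to follow with numbers plugged in; a minimal sketch of the same arithmetic, assuming the typical NAPI weight of 32:

#include <linux/bitops.h>

/* Illustrative only: reproduces the queue-length cap from the removed
 * can_rx_offload_init_queue(). For weight = 32: fls(32) = 6,
 * 2 << 6 = 128, and 128 * 4 = 512 queued skbs at most.
 */
static unsigned int example_skb_queue_len_max(unsigned int weight)
{
	return (2 << fls(weight)) * 4;
}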
config CAN_EMS_PCMCIA tristate "EMS CPC-CARD Card" depends on PCMCIA - ---help--- + help This driver is for the one or two channel CPC-CARD cards from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de). -config CAN_EMS_PCI - tristate "EMS CPC-PCI, CPC-PCIe and CPC-104P Card" +config CAN_F81601 + tristate "Fintek F81601 PCIE to 2 CAN Controller" depends on PCI - ---help--- - This driver is for the one, two or four channel CPC-PCI, - CPC-PCIe and CPC-104P cards from EMS Dr. Thomas Wuensche - (http://www.ems-wuensche.de). + help + This driver adds support for Fintek F81601 PCIE to 2 CAN + Controller. It had internal 24MHz clock source, but it can + be changed by manufacturer. Use modinfo to get usage for + parameters. Visit http://www.fintek.com.tw to get more + information. -config CAN_PEAK_PCMCIA - tristate "PEAK PCAN-PC Card" - depends on PCMCIA - depends on HAS_IOPORT_MAP - ---help--- - This driver is for the PCAN-PC Card PCMCIA adapter (1 or 2 channels) - from PEAK-System (http://www.peak-system.com). To compile this - driver as a module, choose M here: the module will be called - peak_pcmcia. +config CAN_KVASER_PCI + tristate "Kvaser PCIcanx and Kvaser PCIcan PCI Cards" + depends on PCI + help + This driver is for the PCIcanx and PCIcan cards (1, 2 or + 4 channel) from Kvaser (http://www.kvaser.com). config CAN_PEAK_PCI tristate "PEAK PCAN-PCI/PCIe/miniPCI Cards" depends on PCI - ---help--- + help This driver is for the PCAN-PCI/PCIe/miniPCI cards (1, 2, 3 or 4 channels) from PEAK-System Technik (http://www.peak-system.com). @@ -60,22 +52,25 @@ config CAN_PEAK_PCIEC select I2C select I2C_ALGOBIT default y - ---help--- + help Say Y here if you want to use a PCAN-ExpressCard from PEAK-System Technik. This will also automatically select I2C and I2C_ALGO configuration options. -config CAN_KVASER_PCI - tristate "Kvaser PCIcanx and Kvaser PCIcan PCI Cards" - depends on PCI - ---help--- - This driver is for the PCIcanx and PCIcan cards (1, 2 or - 4 channel) from Kvaser (http://www.kvaser.com). +config CAN_PEAK_PCMCIA + tristate "PEAK PCAN-PC Card" + depends on PCMCIA + depends on HAS_IOPORT_MAP + help + This driver is for the PCAN-PC Card PCMCIA adapter (1 or 2 channels) + from PEAK-System (http://www.peak-system.com). To compile this + driver as a module, choose M here: the module will be called + peak_pcmcia. config CAN_PLX_PCI tristate "PLX90xx PCI-bridge based Cards" depends on PCI - ---help--- + help This driver is for CAN interface cards based on the PLX90xx PCI bridge. Driver supports now: @@ -90,12 +85,30 @@ config CAN_PLX_PCI - Connect Tech Inc. CANpro/104-Plus Opto (CRG001) card (http://www.connecttech.com) - ASEM CAN raw - 2 isolated CAN channels (www.asem.it) +config CAN_SJA1000_ISA + tristate "ISA Bus based legacy SJA1000 driver" + depends on HAS_IOPORT + help + This driver adds legacy support for SJA1000 chips connected to + the ISA bus using I/O port, memory mapped or indirect access. + +config CAN_SJA1000_PLATFORM + tristate "Generic Platform Bus based SJA1000 driver" + help + This driver adds support for the SJA1000 chips connected to + the "platform bus" (Linux abstraction for directly to the + processor attached devices). Which can be found on various + boards from Phytec (http://www.phytec.de) like the PCM027, + PCM038. It also provides the OpenFirmware "platform bus" found + on embedded systems with OpenFirmware bindings, e.g. if you + have a PowerPC based system you may want to enable this option. 
+ config CAN_TSCAN1 tristate "TS-CAN1 PC104 boards" - depends on ISA + depends on (ISA && PC104) || (COMPILE_TEST && HAS_IOPORT) help This driver is for Technologic Systems' TSCAN-1 PC104 boards. - http://www.embeddedarm.com/products/board-detail.php?product=TS-CAN1 + https://www.embeddedts.com/products/TS-CAN1 The driver supports multiple boards and automatically configures them: PLD IO base addresses are read from jumpers JP1 and JP2, IRQ numbers are read from jumpers JP4 and JP5, diff --git a/drivers/net/can/sja1000/Makefile b/drivers/net/can/sja1000/Makefile index 9253aaf9e739..500ce1dddaec 100644 --- a/drivers/net/can/sja1000/Makefile +++ b/drivers/net/can/sja1000/Makefile @@ -3,13 +3,14 @@ # Makefile for the SJA1000 CAN controller drivers. # -obj-$(CONFIG_CAN_SJA1000) += sja1000.o -obj-$(CONFIG_CAN_SJA1000_ISA) += sja1000_isa.o -obj-$(CONFIG_CAN_SJA1000_PLATFORM) += sja1000_platform.o -obj-$(CONFIG_CAN_EMS_PCMCIA) += ems_pcmcia.o obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o +obj-$(CONFIG_CAN_EMS_PCMCIA) += ems_pcmcia.o +obj-$(CONFIG_CAN_F81601) += f81601.o obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o -obj-$(CONFIG_CAN_PEAK_PCMCIA) += peak_pcmcia.o obj-$(CONFIG_CAN_PEAK_PCI) += peak_pci.o +obj-$(CONFIG_CAN_PEAK_PCMCIA) += peak_pcmcia.o obj-$(CONFIG_CAN_PLX_PCI) += plx_pci.o +obj-$(CONFIG_CAN_SJA1000) += sja1000.o +obj-$(CONFIG_CAN_SJA1000_ISA) += sja1000_isa.o +obj-$(CONFIG_CAN_SJA1000_PLATFORM) += sja1000_platform.o obj-$(CONFIG_CAN_TSCAN1) += tscan1.o diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c index 7481c324a476..5bca719d61f5 100644 --- a/drivers/net/can/sja1000/ems_pci.c +++ b/drivers/net/can/sja1000/ems_pci.c @@ -1,19 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2007 Wolfgang Grandegger <wg@grandegger.com> * Copyright (C) 2008 Markus Plessing <plessing@ems-wuensche.com> * Copyright (C) 2008 Sebastian Haas <haas@ems-wuensche.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the version 2 of the GNU General Public License - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. + * Copyright (C) 2023 EMS Dr. Thomas Wuensche */ #include <linux/kernel.h> @@ -30,13 +20,14 @@ #define DRV_NAME "ems_pci" -MODULE_AUTHOR("Sebastian Haas <haas@ems-wuenche.com>"); +MODULE_AUTHOR("Sebastian Haas <support@ems-wuensche.com>"); +MODULE_AUTHOR("Gerhard Uttenthaler <uttenthaler@ems-wuensche.com>"); MODULE_DESCRIPTION("Socket-CAN driver for EMS CPC-PCI/PCIe/104P CAN cards"); -MODULE_SUPPORTED_DEVICE("EMS CPC-PCI/PCIe/104P CAN card"); MODULE_LICENSE("GPL v2"); #define EMS_PCI_V1_MAX_CHAN 2 #define EMS_PCI_V2_MAX_CHAN 4 +#define EMS_PCI_V3_MAX_CHAN 4 #define EMS_PCI_MAX_CHAN EMS_PCI_V2_MAX_CHAN struct ems_pci_card { @@ -52,8 +43,7 @@ struct ems_pci_card { #define EMS_PCI_CAN_CLOCK (16000000 / 2) -/* - * Register definitions and descriptions are from LinCAN 0.3.3. +/* Register definitions and descriptions are from LinCAN 0.3.3. 
* * PSB4610 PITA-2 bridge control registers */ @@ -64,8 +54,7 @@ struct ems_pci_card { #define PITA2_MISC 0x1c /* Miscellaneous Register */ #define PITA2_MISC_CONFIG 0x04000000 /* Multiplexed parallel interface */ -/* - * Register definitions for the PLX 9030 +/* Register definitions for the PLX 9030 */ #define PLX_ICSR 0x4c /* Interrupt Control/Status register */ #define PLX_ICSR_LINTI1_ENA 0x0001 /* LINTi1 Enable */ @@ -74,8 +63,16 @@ struct ems_pci_card { #define PLX_ICSR_ENA_CLR (PLX_ICSR_LINTI1_ENA | PLX_ICSR_PCIINT_ENA | \ PLX_ICSR_LINTI1_CLR) -/* - * The board configuration is probably following: +/* Register definitions for the ASIX99100 + */ +#define ASIX_LINTSR 0x28 /* Interrupt Control/Status register */ +#define ASIX_LINTSR_INT0AC BIT(0) /* Writing 1 enables or clears interrupt */ + +#define ASIX_LIEMR 0x24 /* Local Interrupt Enable / Miscellaneous Register */ +#define ASIX_LIEMR_L0EINTEN BIT(16) /* Local INT0 input assertion enable */ +#define ASIX_LIEMR_LRST BIT(14) /* Local Reset assert */ + +/* The board configuration is probably following: * RX1 is connected to ground. * TX1 is not connected. * CLKO is not connected. @@ -84,23 +81,35 @@ struct ems_pci_card { */ #define EMS_PCI_OCR (OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL) -/* - * In the CDR register, you should set CBP to 1. +/* In the CDR register, you should set CBP to 1. * You will probably also want to set the clock divider value to 7 * (meaning direct oscillator output) because the second SJA1000 chip * is driven by the first one CLKOUT output. */ #define EMS_PCI_CDR (CDR_CBP | CDR_CLKOUT_MASK) -#define EMS_PCI_V1_BASE_BAR 1 -#define EMS_PCI_V1_CONF_SIZE 4096 /* size of PITA control area */ -#define EMS_PCI_V2_BASE_BAR 2 -#define EMS_PCI_V2_CONF_SIZE 128 /* size of PLX control area */ -#define EMS_PCI_CAN_BASE_OFFSET 0x400 /* offset where the controllers starts */ -#define EMS_PCI_CAN_CTRL_SIZE 0x200 /* memory size for each controller */ +#define EMS_PCI_V1_BASE_BAR 1 +#define EMS_PCI_V1_CONF_BAR 0 +#define EMS_PCI_V1_CONF_SIZE 4096 /* size of PITA control area */ +#define EMS_PCI_V1_CAN_BASE_OFFSET 0x400 /* offset where the controllers start */ +#define EMS_PCI_V1_CAN_CTRL_SIZE 0x200 /* memory size for each controller */ + +#define EMS_PCI_V2_BASE_BAR 2 +#define EMS_PCI_V2_CONF_BAR 0 +#define EMS_PCI_V2_CONF_SIZE 128 /* size of PLX control area */ +#define EMS_PCI_V2_CAN_BASE_OFFSET 0x400 /* offset where the controllers start */ +#define EMS_PCI_V2_CAN_CTRL_SIZE 0x200 /* memory size for each controller */ + +#define EMS_PCI_V3_BASE_BAR 0 +#define EMS_PCI_V3_CONF_BAR 5 +#define EMS_PCI_V3_CONF_SIZE 128 /* size of ASIX control area */ +#define EMS_PCI_V3_CAN_BASE_OFFSET 0x00 /* offset where the controllers starts */ +#define EMS_PCI_V3_CAN_CTRL_SIZE 0x100 /* memory size for each controller */ #define EMS_PCI_BASE_SIZE 4096 /* size of controller area */ +#define PCI_SUBDEVICE_ID_EMS 0x4010 + static const struct pci_device_id ems_pci_tbl[] = { /* CPC-PCI v1 */ {PCI_VENDOR_ID_SIEMENS, 0x2104, PCI_ANY_ID, PCI_ANY_ID,}, @@ -108,12 +117,13 @@ static const struct pci_device_id ems_pci_tbl[] = { {PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_PLX, 0x4000}, /* CPC-104P v2 */ {PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_PLX, 0x4002}, + /* CPC-PCIe v3 */ + {PCI_VENDOR_ID_ASIX, PCI_DEVICE_ID_ASIX_AX99100_LB, 0xa000, PCI_SUBDEVICE_ID_EMS}, {0,} }; MODULE_DEVICE_TABLE(pci, ems_pci_tbl); -/* - * Helper to read internal registers from card logic (not CAN) +/* Helper to read internal registers from card logic (not 
CAN) */ static u8 ems_pci_v1_readb(struct ems_pci_card *card, unsigned int port) { @@ -133,7 +143,7 @@ static void ems_pci_v1_write_reg(const struct sja1000_priv *priv, static void ems_pci_v1_post_irq(const struct sja1000_priv *priv) { - struct ems_pci_card *card = (struct ems_pci_card *)priv->priv; + struct ems_pci_card *card = priv->priv; /* reset int flag of pita */ writel(PITA2_ICR_INT0_EN | PITA2_ICR_INT0, @@ -153,13 +163,30 @@ static void ems_pci_v2_write_reg(const struct sja1000_priv *priv, static void ems_pci_v2_post_irq(const struct sja1000_priv *priv) { - struct ems_pci_card *card = (struct ems_pci_card *)priv->priv; + struct ems_pci_card *card = priv->priv; writel(PLX_ICSR_ENA_CLR, card->conf_addr + PLX_ICSR); } -/* - * Check if a CAN controller is present at the specified location +static u8 ems_pci_v3_read_reg(const struct sja1000_priv *priv, int port) +{ + return readb(priv->reg_base + port); +} + +static void ems_pci_v3_write_reg(const struct sja1000_priv *priv, + int port, u8 val) +{ + writeb(val, priv->reg_base + port); +} + +static void ems_pci_v3_post_irq(const struct sja1000_priv *priv) +{ + struct ems_pci_card *card = priv->priv; + + writel(ASIX_LINTSR_INT0AC, card->conf_addr + ASIX_LINTSR); +} + +/* Check if a CAN controller is present at the specified location * by trying to set 'em into the PeliCAN mode */ static inline int ems_pci_check_chan(const struct sja1000_priv *priv) @@ -197,10 +224,10 @@ static void ems_pci_del_card(struct pci_dev *pdev) free_sja1000dev(dev); } - if (card->base_addr != NULL) + if (card->base_addr) pci_iounmap(card->pci_dev, card->base_addr); - if (card->conf_addr != NULL) + if (card->conf_addr) pci_iounmap(card->pci_dev, card->conf_addr); kfree(card); @@ -214,8 +241,7 @@ static void ems_pci_card_reset(struct ems_pci_card *card) writeb(0, card->base_addr); } -/* - * Probe PCI device for EMS CAN signature and register each available +/* Probe PCI device for EMS CAN signature and register each available * CAN channel to SJA1000 Socket-CAN subsystem. */ static int ems_pci_add_card(struct pci_dev *pdev, @@ -224,7 +250,7 @@ static int ems_pci_add_card(struct pci_dev *pdev, struct sja1000_priv *priv; struct net_device *dev; struct ems_pci_card *card; - int max_chan, conf_size, base_bar; + int max_chan, conf_size, base_bar, conf_bar; int err, i; /* Enabling PCI device */ @@ -234,8 +260,8 @@ static int ems_pci_add_card(struct pci_dev *pdev, } /* Allocating card structures to hold addresses, ... 
*/ - card = kzalloc(sizeof(struct ems_pci_card), GFP_KERNEL); - if (card == NULL) { + card = kzalloc(sizeof(*card), GFP_KERNEL); + if (!card) { pci_disable_device(pdev); return -ENOMEM; } @@ -246,27 +272,35 @@ static int ems_pci_add_card(struct pci_dev *pdev, card->channels = 0; - if (pdev->vendor == PCI_VENDOR_ID_PLX) { + if (pdev->vendor == PCI_VENDOR_ID_ASIX) { + card->version = 3; /* CPC-PCI v3 */ + max_chan = EMS_PCI_V3_MAX_CHAN; + base_bar = EMS_PCI_V3_BASE_BAR; + conf_bar = EMS_PCI_V3_CONF_BAR; + conf_size = EMS_PCI_V3_CONF_SIZE; + } else if (pdev->vendor == PCI_VENDOR_ID_PLX) { card->version = 2; /* CPC-PCI v2 */ max_chan = EMS_PCI_V2_MAX_CHAN; base_bar = EMS_PCI_V2_BASE_BAR; + conf_bar = EMS_PCI_V2_CONF_BAR; conf_size = EMS_PCI_V2_CONF_SIZE; } else { card->version = 1; /* CPC-PCI v1 */ max_chan = EMS_PCI_V1_MAX_CHAN; base_bar = EMS_PCI_V1_BASE_BAR; + conf_bar = EMS_PCI_V1_CONF_BAR; conf_size = EMS_PCI_V1_CONF_SIZE; } /* Remap configuration space and controller memory area */ - card->conf_addr = pci_iomap(pdev, 0, conf_size); - if (card->conf_addr == NULL) { + card->conf_addr = pci_iomap(pdev, conf_bar, conf_size); + if (!card->conf_addr) { err = -ENOMEM; goto failure_cleanup; } card->base_addr = pci_iomap(pdev, base_bar, EMS_PCI_BASE_SIZE); - if (card->base_addr == NULL) { + if (!card->base_addr) { err = -ENOMEM; goto failure_cleanup; } @@ -288,12 +322,20 @@ static int ems_pci_add_card(struct pci_dev *pdev, } } + if (card->version == 3) { + /* ASIX chip asserts local reset to CAN controllers + * after bootup until it is deasserted + */ + writel(readl(card->conf_addr + ASIX_LIEMR) & ~ASIX_LIEMR_LRST, + card->conf_addr + ASIX_LIEMR); + } + ems_pci_card_reset(card); /* Detect available channels */ for (i = 0; i < max_chan; i++) { dev = alloc_sja1000dev(0); - if (dev == NULL) { + if (!dev) { err = -ENOMEM; goto failure_cleanup; } @@ -304,16 +346,25 @@ static int ems_pci_add_card(struct pci_dev *pdev, priv->irq_flags = IRQF_SHARED; dev->irq = pdev->irq; - priv->reg_base = card->base_addr + EMS_PCI_CAN_BASE_OFFSET - + (i * EMS_PCI_CAN_CTRL_SIZE); + if (card->version == 1) { priv->read_reg = ems_pci_v1_read_reg; priv->write_reg = ems_pci_v1_write_reg; priv->post_irq = ems_pci_v1_post_irq; - } else { + priv->reg_base = card->base_addr + EMS_PCI_V1_CAN_BASE_OFFSET + + (i * EMS_PCI_V1_CAN_CTRL_SIZE); + } else if (card->version == 2) { priv->read_reg = ems_pci_v2_read_reg; priv->write_reg = ems_pci_v2_write_reg; priv->post_irq = ems_pci_v2_post_irq; + priv->reg_base = card->base_addr + EMS_PCI_V2_CAN_BASE_OFFSET + + (i * EMS_PCI_V2_CAN_CTRL_SIZE); + } else { + priv->read_reg = ems_pci_v3_read_reg; + priv->write_reg = ems_pci_v3_write_reg; + priv->post_irq = ems_pci_v3_post_irq; + priv->reg_base = card->base_addr + EMS_PCI_V3_CAN_BASE_OFFSET + + (i * EMS_PCI_V3_CAN_CTRL_SIZE); } /* Check if channel is present */ @@ -325,20 +376,28 @@ static int ems_pci_add_card(struct pci_dev *pdev, SET_NETDEV_DEV(dev, &pdev->dev); dev->dev_id = i; - if (card->version == 1) + if (card->version == 1) { /* reset int flag of pita */ writel(PITA2_ICR_INT0_EN | PITA2_ICR_INT0, card->conf_addr + PITA2_ICR); - else + } else if (card->version == 2) { /* enable IRQ in PLX 9030 */ writel(PLX_ICSR_ENA_CLR, card->conf_addr + PLX_ICSR); + } else { + /* Enable IRQ in AX99100 */ + writel(ASIX_LINTSR_INT0AC, card->conf_addr + ASIX_LINTSR); + /* Enable local INT0 input enable */ + writel(readl(card->conf_addr + ASIX_LIEMR) | ASIX_LIEMR_L0EINTEN, + card->conf_addr + ASIX_LIEMR); + } /* Register SJA1000 device */ err = 
register_sja1000dev(dev); if (err) { - dev_err(&pdev->dev, "Registering device failed " - "(err=%d)\n", err); + dev_err(&pdev->dev, + "Registering device failed: %pe\n", + ERR_PTR(err)); free_sja1000dev(dev); goto failure_cleanup; } @@ -346,7 +405,7 @@ static int ems_pci_add_card(struct pci_dev *pdev, card->channels++; dev_info(&pdev->dev, "Channel #%d at 0x%p, irq %d\n", - i + 1, priv->reg_base, dev->irq); + i + 1, priv->reg_base, dev->irq); } else { free_sja1000dev(dev); } diff --git a/drivers/net/can/sja1000/ems_pcmcia.c b/drivers/net/can/sja1000/ems_pcmcia.c index 381de998d2f1..4642b6d4aaf7 100644 --- a/drivers/net/can/sja1000/ems_pcmcia.c +++ b/drivers/net/can/sja1000/ems_pcmcia.c @@ -1,16 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2008 Sebastian Haas (initial chardev implementation) * Copyright (C) 2010 Markus Plessing <plessing@ems-wuensche.com> * Rework for mainline by Oliver Hartkopp <socketcan@hartkopp.net> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the version 2 of the GNU General Public License - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include <linux/kernel.h> @@ -29,7 +21,6 @@ MODULE_AUTHOR("Markus Plessing <plessing@ems-wuensche.com>"); MODULE_DESCRIPTION("Socket-CAN driver for EMS CPC-CARD cards"); -MODULE_SUPPORTED_DEVICE("EMS CPC-CARD CAN card"); MODULE_LICENSE("GPL v2"); #define EMS_PCMCIA_MAX_CHAN 2 @@ -243,7 +234,12 @@ static int ems_pcmcia_add_card(struct pcmcia_device *pdev, unsigned long base) free_sja1000dev(dev); } - err = request_irq(dev->irq, &ems_pcmcia_interrupt, IRQF_SHARED, + if (!card->channels) { + err = -ENODEV; + goto failure_cleanup; + } + + err = request_irq(pdev->irq, &ems_pcmcia_interrupt, IRQF_SHARED, DRV_NAME, card); if (!err) return 0; diff --git a/drivers/net/can/sja1000/f81601.c b/drivers/net/can/sja1000/f81601.c new file mode 100644 index 000000000000..8f25e95814ef --- /dev/null +++ b/drivers/net/can/sja1000/f81601.c @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Fintek F81601 PCIE to 2 CAN controller driver + * + * Copyright (C) 2019 Peter Hong <peter_hong@fintek.com.tw> + * Copyright (C) 2019 Linux Foundation + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/netdevice.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/pci.h> +#include <linux/can/dev.h> +#include <linux/io.h> + +#include "sja1000.h" + +#define F81601_PCI_MAX_CHAN 2 + +#define F81601_DECODE_REG 0x209 +#define F81601_IO_MODE BIT(7) +#define F81601_MEM_MODE BIT(6) +#define F81601_CFG_MODE BIT(5) +#define F81601_CAN2_INTERNAL_CLK BIT(3) +#define F81601_CAN1_INTERNAL_CLK BIT(2) +#define F81601_CAN2_EN BIT(1) +#define F81601_CAN1_EN BIT(0) + +#define F81601_TRAP_REG 0x20a +#define F81601_CAN2_HAS_EN BIT(4) + +struct f81601_pci_card { + void __iomem *addr; + spinlock_t lock; /* use this spin lock only for write access */ + struct pci_dev *dev; + struct net_device *net_dev[F81601_PCI_MAX_CHAN]; +}; + +static const struct pci_device_id f81601_pci_tbl[] = { + { PCI_DEVICE(0x1c29, 0x1703) }, + { /* sentinel */ }, +}; + +MODULE_DEVICE_TABLE(pci, f81601_pci_tbl); + +static bool internal_clk = true; +module_param(internal_clk, bool, 0444); 
+MODULE_PARM_DESC(internal_clk, "Use internal clock, default true (24MHz)"); + +static unsigned int external_clk; +module_param(external_clk, uint, 0444); +MODULE_PARM_DESC(external_clk, "External clock when internal_clk disabled"); + +static u8 f81601_pci_read_reg(const struct sja1000_priv *priv, int port) +{ + return readb(priv->reg_base + port); +} + +static void f81601_pci_write_reg(const struct sja1000_priv *priv, int port, + u8 val) +{ + struct f81601_pci_card *card = priv->priv; + unsigned long flags; + + spin_lock_irqsave(&card->lock, flags); + writeb(val, priv->reg_base + port); + readb(priv->reg_base); + spin_unlock_irqrestore(&card->lock, flags); +} + +static void f81601_pci_remove(struct pci_dev *pdev) +{ + struct f81601_pci_card *card = pci_get_drvdata(pdev); + struct net_device *dev; + int i; + + for (i = 0; i < ARRAY_SIZE(card->net_dev); i++) { + dev = card->net_dev[i]; + if (!dev) + continue; + + dev_info(&pdev->dev, "%s: Removing %s\n", __func__, dev->name); + + unregister_sja1000dev(dev); + free_sja1000dev(dev); + } +} + +/* Probe F81601 based device for the SJA1000 chips and register each + * available CAN channel to SJA1000 Socket-CAN subsystem. + */ +static int f81601_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct sja1000_priv *priv; + struct net_device *dev; + struct f81601_pci_card *card; + int err, i, count; + u8 tmp; + + if (pcim_enable_device(pdev) < 0) { + dev_err(&pdev->dev, "Failed to enable PCI device\n"); + return -ENODEV; + } + + dev_info(&pdev->dev, "Detected card at slot #%i\n", + PCI_SLOT(pdev->devfn)); + + card = devm_kzalloc(&pdev->dev, sizeof(*card), GFP_KERNEL); + if (!card) + return -ENOMEM; + + card->dev = pdev; + spin_lock_init(&card->lock); + + pci_set_drvdata(pdev, card); + + tmp = F81601_IO_MODE | F81601_MEM_MODE | F81601_CFG_MODE | + F81601_CAN2_EN | F81601_CAN1_EN; + + if (internal_clk) { + tmp |= F81601_CAN2_INTERNAL_CLK | F81601_CAN1_INTERNAL_CLK; + + dev_info(&pdev->dev, + "F81601 running with internal clock: 24Mhz\n"); + } else { + dev_info(&pdev->dev, + "F81601 running with external clock: %dMhz\n", + external_clk / 1000000); + } + + pci_write_config_byte(pdev, F81601_DECODE_REG, tmp); + + card->addr = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0)); + + if (!card->addr) { + err = -ENOMEM; + dev_err(&pdev->dev, "%s: Failed to remap BAR\n", __func__); + goto failure_cleanup; + } + + /* read CAN2_HW_EN strap pin to detect how many CANBUS do we have */ + count = ARRAY_SIZE(card->net_dev); + pci_read_config_byte(pdev, F81601_TRAP_REG, &tmp); + if (!(tmp & F81601_CAN2_HAS_EN)) + count = 1; + + for (i = 0; i < count; i++) { + dev = alloc_sja1000dev(0); + if (!dev) { + err = -ENOMEM; + goto failure_cleanup; + } + + priv = netdev_priv(dev); + priv->priv = card; + priv->irq_flags = IRQF_SHARED; + priv->reg_base = card->addr + 0x80 * i; + priv->read_reg = f81601_pci_read_reg; + priv->write_reg = f81601_pci_write_reg; + + if (internal_clk) + priv->can.clock.freq = 24000000 / 2; + else + priv->can.clock.freq = external_clk / 2; + + priv->ocr = OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL; + priv->cdr = CDR_CBP; + + SET_NETDEV_DEV(dev, &pdev->dev); + dev->dev_id = i; + dev->irq = pdev->irq; + + /* Register SJA1000 device */ + err = register_sja1000dev(dev); + if (err) { + dev_err(&pdev->dev, + "%s: Registering device failed: %x\n", __func__, + err); + free_sja1000dev(dev); + goto failure_cleanup; + } + + card->net_dev[i] = dev; + dev_info(&pdev->dev, "Channel #%d, %s at 0x%p, irq %d\n", i, + dev->name, priv->reg_base, dev->irq); 
+ } + + return 0; + + failure_cleanup: + dev_err(&pdev->dev, "%s: failed: %d. Cleaning Up.\n", __func__, err); + f81601_pci_remove(pdev); + + return err; +} + +static struct pci_driver f81601_pci_driver = { + .name = "f81601", + .id_table = f81601_pci_tbl, + .probe = f81601_pci_probe, + .remove = f81601_pci_remove, +}; + +MODULE_DESCRIPTION("Fintek F81601 PCIE to 2 CANBUS adaptor driver"); +MODULE_AUTHOR("Peter Hong <peter_hong@fintek.com.tw>"); +MODULE_LICENSE("GPL v2"); + +module_pci_driver(f81601_pci_driver); diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c index 15c00faeec61..95fe9ee1ce32 100644 --- a/drivers/net/can/sja1000/kvaser_pci.c +++ b/drivers/net/can/sja1000/kvaser_pci.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2008 Per Dalen <per.dalen@cnw.se> * @@ -15,18 +16,6 @@ * Copyright (c) 2002-2007 Volkswagen Group Electronic Research * Copyright (c) 2003 Matthias Brukner, Trajet Gmbh, Rebenring 33, * 38106 Braunschweig, GERMANY - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the version 2 of the GNU General Public License - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. */ #include <linux/kernel.h> @@ -44,7 +33,6 @@ MODULE_AUTHOR("Per Dalen <per.dalen@cnw.se>"); MODULE_DESCRIPTION("Socket-CAN driver for KVASER PCAN PCI cards"); -MODULE_SUPPORTED_DEVICE("KVASER PCAN PCI CAN card"); MODULE_LICENSE("GPL v2"); #define MAX_NO_OF_CHANNELS 4 /* max no of channels on a single card */ diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c index a97b81d1d0da..10d88cbda465 100644 --- a/drivers/net/can/sja1000/peak_pci.c +++ b/drivers/net/can/sja1000/peak_pci.c @@ -1,19 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com> - * Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com> * * Derived from the PCAN project file driver/src/pcan_pci.c: * - * Copyright (C) 2001-2006 PEAK System-Technik GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the version 2 of the GNU General Public License - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
+ * Copyright (C) 2001-2025 PEAK System-Technik GmbH + * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com> */ #include <linux/kernel.h> @@ -30,14 +22,16 @@ #include "sja1000.h" -MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>"); +MODULE_AUTHOR("Stéphane Grosjean <stephane.grosjean@hms-networks.com>"); MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCI family cards"); -MODULE_SUPPORTED_DEVICE("PEAK PCAN PCI/PCIe/PCIeC miniPCI CAN cards"); -MODULE_SUPPORTED_DEVICE("PEAK PCAN miniPCIe/cPCI PC/104+ PCI/104e CAN Cards"); MODULE_LICENSE("GPL v2"); #define DRV_NAME "peak_pci" +/* FPGA cards FW version registers */ +#define PEAK_VER_REG1 0x40 +#define PEAK_VER_REG2 0x44 + struct peak_pciec_card; struct peak_pci_chan { void __iomem *cfg_base; /* Common for all channels */ @@ -51,9 +45,7 @@ struct peak_pci_chan { #define PEAK_PCI_CDR (CDR_CBP | CDR_CLKOUT_MASK) #define PEAK_PCI_OCR OCR_TX0_PUSHPULL -/* - * Important PITA registers - */ +/* Important PITA registers */ #define PITA_ICR 0x00 /* Interrupt control register */ #define PITA_GPIOICR 0x18 /* GPIO interface control register */ #define PITA_MISC 0x1C /* Miscellaneous register */ @@ -80,32 +72,52 @@ static const u16 peak_pci_icr_masks[PEAK_PCI_CHAN_MAX] = { }; static const struct pci_device_id peak_pci_tbl[] = { - {PEAK_PCI_VENDOR_ID, PEAK_PCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, - {PEAK_PCI_VENDOR_ID, PEAK_PCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, - {PEAK_PCI_VENDOR_ID, PEAK_MPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, - {PEAK_PCI_VENDOR_ID, PEAK_MPCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, - {PEAK_PCI_VENDOR_ID, PEAK_PC_104P_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, - {PEAK_PCI_VENDOR_ID, PEAK_PCI_104E_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, - {PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, - {PEAK_PCI_VENDOR_ID, PEAK_PCIE_OEM_ID, PCI_ANY_ID, PCI_ANY_ID,}, + { + PEAK_PCI_VENDOR_ID, PEAK_PCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, + .driver_data = (kernel_ulong_t)"PCAN-PCI", + }, { + PEAK_PCI_VENDOR_ID, PEAK_PCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, + .driver_data = (kernel_ulong_t)"PCAN-PCI Express", + }, { + PEAK_PCI_VENDOR_ID, PEAK_MPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, + .driver_data = (kernel_ulong_t)"PCAN-miniPCI", + }, { + PEAK_PCI_VENDOR_ID, PEAK_MPCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, + .driver_data = (kernel_ulong_t)"PCAN-miniPCIe", + }, { + PEAK_PCI_VENDOR_ID, PEAK_PC_104P_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, + .driver_data = (kernel_ulong_t)"PCAN-PC/104-Plus Quad", + }, { + PEAK_PCI_VENDOR_ID, PEAK_PCI_104E_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, + .driver_data = (kernel_ulong_t)"PCAN-PCI/104-Express", + }, { + PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, + .driver_data = (kernel_ulong_t)"PCAN-cPCI", + }, { + PEAK_PCI_VENDOR_ID, PEAK_PCIE_OEM_ID, PCI_ANY_ID, PCI_ANY_ID, + .driver_data = (kernel_ulong_t)"PCAN-Chip PCIe", + }, #ifdef CONFIG_CAN_PEAK_PCIEC - {PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, - {PEAK_PCI_VENDOR_ID, PEAK_PCIEC34_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, + { + PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, + .driver_data = (kernel_ulong_t)"PCAN-ExpressCard", + }, { + PEAK_PCI_VENDOR_ID, PEAK_PCIEC34_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, + .driver_data = (kernel_ulong_t)"PCAN-ExpressCard 34", + }, #endif - {0,} + { /* sentinel */ } }; MODULE_DEVICE_TABLE(pci, peak_pci_tbl); #ifdef CONFIG_CAN_PEAK_PCIEC -/* - * PCAN-ExpressCard needs I2C bit-banging configuration option. 
- */ +/* PCAN-ExpressCard needs I2C bit-banging configuration option. */ /* GPIOICR byte access offsets */ #define PITA_GPOUT 0x18 /* GPx output value */ #define PITA_GPIN 0x19 /* GPx input value */ -#define PITA_GPOEN 0x1A /* configure GPx as ouput pin */ +#define PITA_GPOEN 0x1A /* configure GPx as output pin */ /* I2C GP bits */ #define PITA_GPIN_SCL 0x01 /* Serial Clock Line */ @@ -166,12 +178,14 @@ static void peak_pci_write_reg(const struct sja1000_priv *priv, static inline void pita_set_scl_highz(struct peak_pciec_card *card) { u8 gp_outen = readb(card->cfg_base + PITA_GPOEN) & ~PITA_GPIN_SCL; + writeb(gp_outen, card->cfg_base + PITA_GPOEN); } static inline void pita_set_sda_highz(struct peak_pciec_card *card) { u8 gp_outen = readb(card->cfg_base + PITA_GPOEN) & ~PITA_GPIN_SDA; + writeb(gp_outen, card->cfg_base + PITA_GPOEN); } @@ -240,9 +254,7 @@ static int pita_getscl(void *data) return (readb(card->cfg_base + PITA_GPIN) & PITA_GPIN_SCL) ? 1 : 0; } -/* - * write commands to the LED chip though the I2C-bus of the PCAN-PCIeC - */ +/* write commands to the LED chip though the I2C-bus of the PCAN-PCIeC */ static int peak_pciec_write_pca9553(struct peak_pciec_card *card, u8 offset, u8 data) { @@ -258,7 +270,7 @@ static int peak_pciec_write_pca9553(struct peak_pciec_card *card, int ret; /* cache led mask */ - if ((offset == 5) && (data == card->led_cache)) + if (offset == 5 && data == card->led_cache) return 0; ret = i2c_transfer(&card->led_chip, &msg, 1); @@ -271,9 +283,7 @@ static int peak_pciec_write_pca9553(struct peak_pciec_card *card, return 0; } -/* - * delayed work callback used to control the LEDs - */ +/* delayed work callback used to control the LEDs */ static void peak_pciec_led_work(struct work_struct *work) { struct peak_pciec_card *card = @@ -319,9 +329,7 @@ static void peak_pciec_led_work(struct work_struct *work) schedule_delayed_work(&card->led_work, HZ); } -/* - * set LEDs blinking state - */ +/* set LEDs blinking state */ static void peak_pciec_set_leds(struct peak_pciec_card *card, u8 led_mask, u8 s) { u8 new_led = card->led_cache; @@ -338,25 +346,19 @@ static void peak_pciec_set_leds(struct peak_pciec_card *card, u8 led_mask, u8 s) peak_pciec_write_pca9553(card, 5, new_led); } -/* - * start one second delayed work to control LEDs - */ +/* start one second delayed work to control LEDs */ static void peak_pciec_start_led_work(struct peak_pciec_card *card) { schedule_delayed_work(&card->led_work, HZ); } -/* - * stop LEDs delayed work - */ +/* stop LEDs delayed work */ static void peak_pciec_stop_led_work(struct peak_pciec_card *card) { cancel_delayed_work_sync(&card->led_work); } -/* - * initialize the PCA9553 4-bit I2C-bus LED chip - */ +/* initialize the PCA9553 4-bit I2C-bus LED chip */ static int peak_pciec_init_leds(struct peak_pciec_card *card) { int err; @@ -385,17 +387,14 @@ static int peak_pciec_init_leds(struct peak_pciec_card *card) return peak_pciec_write_pca9553(card, 5, PCA9553_LS0_INIT); } -/* - * restore LEDs state to off peak_pciec_leds_exit - */ +/* restore LEDs state to off peak_pciec_leds_exit */ static void peak_pciec_leds_exit(struct peak_pciec_card *card) { /* switch LEDs to off */ peak_pciec_write_pca9553(card, 5, PCA9553_LED_OFF_ALL); } -/* - * normal write sja1000 register method overloaded to catch when controller +/* normal write sja1000 register method overloaded to catch when controller * is started or stopped, to control leds */ static void peak_pciec_write_reg(const struct sja1000_priv *priv, @@ -425,7 +424,7 @@ static void 
peak_pciec_write_reg(const struct sja1000_priv *priv, peak_pci_write_reg(priv, port, val); } -static struct i2c_algo_bit_data peak_pciec_i2c_bit_ops = { +static const struct i2c_algo_bit_data peak_pciec_i2c_bit_ops = { .setsda = pita_setsda, .setscl = pita_setscl, .getsda = pita_getsda, @@ -453,7 +452,7 @@ static int peak_pciec_probe(struct pci_dev *pdev, struct net_device *dev) /* channel is the first one: do the init part */ } else { /* create the bit banging I2C adapter structure */ - card = kzalloc(sizeof(struct peak_pciec_card), GFP_KERNEL); + card = kzalloc(sizeof(*card), GFP_KERNEL); if (!card) return -ENOMEM; @@ -463,7 +462,7 @@ static int peak_pciec_probe(struct pci_dev *pdev, struct net_device *dev) card->led_chip.owner = THIS_MODULE; card->led_chip.dev.parent = &pdev->dev; card->led_chip.algo_data = &card->i2c_bit; - strncpy(card->led_chip.name, "peak_i2c", + strscpy(card->led_chip.name, "peak_i2c", sizeof(card->led_chip.name)); card->i2c_bit = peak_pciec_i2c_bit_ops; @@ -516,9 +515,7 @@ static void peak_pciec_remove(struct peak_pciec_card *card) #else /* CONFIG_CAN_PEAK_PCIEC */ -/* - * Placebo functions when PCAN-ExpressCard support is not selected - */ +/* Placebo functions when PCAN-ExpressCard support is not selected */ static inline int peak_pciec_probe(struct pci_dev *pdev, struct net_device *dev) { return -ENODEV; @@ -559,6 +556,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) void __iomem *cfg_base, *reg_base; u16 sub_sys_id, icr; int i, err, channels; + char fw_str[14] = ""; err = pci_enable_device(pdev); if (err) @@ -612,6 +610,21 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* Leave parport mux mode */ writeb(0x04, cfg_base + PITA_MISC + 3); + /* FPGA equipped card if not 0 */ + if (readl(cfg_base + PEAK_VER_REG1)) { + /* FPGA card: display version of the running firmware */ + u32 fw_ver = readl(cfg_base + PEAK_VER_REG2); + + snprintf(fw_str, sizeof(fw_str), " FW v%u.%u.%u", + (fw_ver >> 12) & 0xf, + (fw_ver >> 8) & 0xf, + (fw_ver >> 4) & 0xf); + } + + /* Display commercial name (and, eventually, FW version) of the card */ + dev_info(&pdev->dev, "%ux CAN %s%s\n", + channels, (const char *)ent->driver_data, fw_str); + icr = readw(cfg_base + PITA_ICR + 2); for (i = 0; i < channels; i++) { @@ -652,8 +665,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) chan->prev_dev = pci_get_drvdata(pdev); pci_set_drvdata(pdev, dev); - /* - * PCAN-ExpressCard needs some additional i2c init. + /* PCAN-ExpressCard needs some additional i2c init. 
* This must be done *before* register_sja1000dev() but * *after* devices linkage */ @@ -719,7 +731,8 @@ failure_disable_pci: /* pci_xxx_config_word() return positive PCIBIOS_xxx error codes while * the probe() function must return a negative errno in case of failure - * (err is unchanged if negative) */ + * (err is unchanged if negative) + */ return pcibios_err_to_errno(err); } @@ -739,16 +752,15 @@ static void peak_pci_remove(struct pci_dev *pdev) struct net_device *prev_dev = chan->prev_dev; dev_info(&pdev->dev, "removing device %s\n", dev->name); + /* do that only for first channel */ + if (!prev_dev && chan->pciec_card) + peak_pciec_remove(chan->pciec_card); unregister_sja1000dev(dev); free_sja1000dev(dev); dev = prev_dev; - if (!dev) { - /* do that only for first channel */ - if (chan->pciec_card) - peak_pciec_remove(chan->pciec_card); + if (!dev) break; - } priv = netdev_priv(dev); chan = priv->priv; } diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c index b8c39ede7cd5..e1610b527d13 100644 --- a/drivers/net/can/sja1000/peak_pcmcia.c +++ b/drivers/net/can/sja1000/peak_pcmcia.c @@ -1,18 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2010-2012 Stephane Grosjean <s.grosjean@peak-system.com> - * * CAN driver for PEAK-System PCAN-PC Card * Derived from the PCAN project file driver/src/pcan_pccard.c - * Copyright (C) 2006-2010 PEAK System-Technik GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the version 2 of the GNU General Public License - * as published by the Free Software Foundation * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
+ * Copyright (C) 2006-2025 PEAK System-Technik GmbH + * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com> */ #include <linux/kernel.h> #include <linux/module.h> @@ -27,10 +19,9 @@ #include <linux/can/dev.h> #include "sja1000.h" -MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>"); +MODULE_AUTHOR("Stéphane Grosjean <stephane.grosjean@hms-networks.com>"); MODULE_DESCRIPTION("CAN driver for PEAK-System PCAN-PC Cards"); MODULE_LICENSE("GPL v2"); -MODULE_SUPPORTED_DEVICE("PEAK PCAN-PC Card"); /* PEAK-System PCMCIA driver name */ #define PCC_NAME "peak_pcmcia" @@ -176,7 +167,7 @@ static void pcan_start_led_timer(struct pcan_pccard *card) */ static void pcan_stop_led_timer(struct pcan_pccard *card) { - del_timer_sync(&card->led_timer); + timer_delete_sync(&card->led_timer); } /* @@ -383,7 +374,7 @@ static inline void pcan_set_can_power(struct pcan_pccard *card, int onoff) */ static void pcan_led_timer(struct timer_list *t) { - struct pcan_pccard *card = from_timer(card, t, led_timer); + struct pcan_pccard *card = timer_container_of(card, t, led_timer); struct net_device *netdev; int i, up_count = 0; u8 ccr; @@ -487,7 +478,7 @@ static void pcan_free_channels(struct pcan_pccard *card) if (!netdev) continue; - strncpy(name, netdev->name, IFNAMSIZ); + strscpy(name, netdev->name, IFNAMSIZ); unregister_sja1000dev(netdev); @@ -679,7 +670,7 @@ static int pcan_probe(struct pcmcia_device *pdev) card->fw_major = pcan_read_reg(card, PCC_FW_MAJOR); card->fw_minor = pcan_read_reg(card, PCC_FW_MINOR); - /* display board name and firware version */ + /* display board name and firmware version */ dev_info(&pdev->dev, "PEAK-System pcmcia card %s fw %d.%d\n", pdev->prod_id[1] ? pdev->prod_id[1] : "PCAN-PC Card", card->fw_major, card->fw_minor); diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c index 9bcdefea138a..67e5316c6372 100644 --- a/drivers/net/can/sja1000/plx_pci.c +++ b/drivers/net/can/sja1000/plx_pci.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2008-2010 Pavel Cheblakov <P.B.Cheblakov@inp.nsk.su> * @@ -5,18 +6,6 @@ * Copyright (C) 2007 Wolfgang Grandegger <wg@grandegger.com> * Copyright (C) 2008 Markus Plessing <plessing@ems-wuensche.com> * Copyright (C) 2008 Sebastian Haas <haas@ems-wuensche.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the version 2 of the GNU General Public License - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. */ #include <linux/kernel.h> @@ -36,18 +25,6 @@ MODULE_AUTHOR("Pavel Cheblakov <P.B.Cheblakov@inp.nsk.su>"); MODULE_DESCRIPTION("Socket-CAN driver for PLX90xx PCI-bridge cards with " "the SJA1000 chips"); -MODULE_SUPPORTED_DEVICE("Adlink PCI-7841/cPCI-7841, " - "Adlink PCI-7841/cPCI-7841 SE, " - "Marathon CAN-bus-PCI, " - "Marathon CAN-bus-PCIe, " - "TEWS TECHNOLOGIES TPMC810, " - "esd CAN-PCI/CPCI/PCI104/200, " - "esd CAN-PCI/PMC/266, " - "esd CAN-PCIe/2000, " - "Connect Tech Inc. 
CANpro/104-Plus Opto (CRG001), " - "IXXAT PC-I 04/PCI, " - "ELCUS CAN-200-PCI, " - "ASEM DUAL CAN-RAW") MODULE_LICENSE("GPL v2"); #define PLX_PCI_MAX_CHAN 2 @@ -145,7 +122,6 @@ struct plx_pci_card { #define TEWS_PCI_VENDOR_ID 0x1498 #define TEWS_PCI_DEVICE_ID_TMPC810 0x032A -#define CTI_PCI_VENDOR_ID 0x12c4 #define CTI_PCI_DEVICE_ID_CRG001 0x0900 #define MOXA_PCI_VENDOR_ID 0x1393 @@ -381,7 +357,7 @@ static const struct pci_device_id plx_pci_tbl[] = { { /* Connect Tech Inc. CANpro/104-Plus Opto (CRG001) card */ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, - CTI_PCI_VENDOR_ID, CTI_PCI_DEVICE_ID_CRG001, + PCI_SUBVENDOR_ID_CONNECT_TECH, CTI_PCI_DEVICE_ID_CRG001, 0, 0, (kernel_ulong_t)&plx_pci_card_info_cti }, diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index 9f107798f904..a8fa0d6516b9 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -52,6 +52,7 @@ #include <linux/ptrace.h> #include <linux/string.h> #include <linux/errno.h> +#include <linux/ethtool.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/if_ether.h> @@ -60,7 +61,6 @@ #include <linux/can/dev.h> #include <linux/can/error.h> -#include <linux/can/led.h> #include "sja1000.h" @@ -184,8 +184,9 @@ static void chipset_init(struct net_device *dev) { struct sja1000_priv *priv = netdev_priv(dev); - /* set clock divider and output control register */ - priv->write_reg(priv, SJA1000_CDR, priv->cdr | CDR_PELICAN); + if (!(priv->flags & SJA1000_QUIRK_NO_CDR_REG)) + /* set clock divider and output control register */ + priv->write_reg(priv, SJA1000_CDR, priv->cdr | CDR_PELICAN); /* set acceptance filter (accept all) */ priv->write_reg(priv, SJA1000_ACCC0, 0x00); @@ -205,12 +206,13 @@ static void sja1000_start(struct net_device *dev) { struct sja1000_priv *priv = netdev_priv(dev); - /* leave reset mode */ + /* enter reset mode */ if (priv->can.state != CAN_STATE_STOPPED) set_reset_mode(dev); /* Initialize chip if uninitialized at this stage */ - if (!(priv->read_reg(priv, SJA1000_CDR) & CDR_PELICAN)) + if (!(priv->flags & SJA1000_QUIRK_NO_CDR_REG || + priv->read_reg(priv, SJA1000_CDR) & CDR_PELICAN)) chipset_init(dev); /* Clear error counters and error code capture */ @@ -284,18 +286,17 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb, struct sja1000_priv *priv = netdev_priv(dev); struct can_frame *cf = (struct can_frame *)skb->data; uint8_t fi; - uint8_t dlc; canid_t id; uint8_t dreg; u8 cmd_reg_val = 0x00; int i; - if (can_dropped_invalid_skb(dev, skb)) + if (can_dev_dropped_skb(dev, skb)) return NETDEV_TX_OK; netif_stop_queue(dev); - fi = dlc = cf->can_dlc; + fi = can_get_cc_dlc(cf, priv->can.ctrlmode); id = cf->can_id; if (id & CAN_RTR_FLAG) @@ -316,10 +317,10 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb, priv->write_reg(priv, SJA1000_ID2, (id & 0x00000007) << 5); } - for (i = 0; i < dlc; i++) + for (i = 0; i < cf->len; i++) priv->write_reg(priv, dreg++, cf->data[i]); - can_put_echo_skb(skb, dev, 0); + can_put_echo_skb(skb, dev, 0, 0); if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) cmd_reg_val |= CMD_AT; @@ -367,24 +368,39 @@ static void sja1000_rx(struct net_device *dev) | (priv->read_reg(priv, SJA1000_ID2) >> 5); } - cf->can_dlc = get_can_dlc(fi & 0x0F); + can_frame_set_cc_len(cf, fi & 0x0F, priv->can.ctrlmode); if (fi & SJA1000_FI_RTR) { id |= CAN_RTR_FLAG; } else { - for (i = 0; i < cf->can_dlc; i++) + for (i = 0; i < cf->len; i++) cf->data[i] = priv->read_reg(priv, dreg++); + + stats->rx_bytes += cf->len; } + 
stats->rx_packets++; cf->can_id = id; /* release receive buffer */ sja1000_write_cmdreg(priv, CMD_RRB); - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; netif_rx(skb); +} + +static irqreturn_t sja1000_reset_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + + netdev_dbg(dev, "performing a soft reset upon overrun\n"); + + netif_tx_lock(dev); + + can_free_echo_skb(dev, 0, NULL); + sja1000_set_mode(dev, CAN_MODE_START); + + netif_tx_unlock(dev); - can_led_event(dev, CAN_LED_EVENT_RX); + return IRQ_HANDLED; } static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) @@ -397,25 +413,33 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) enum can_state rx_state, tx_state; unsigned int rxerr, txerr; uint8_t ecc, alc; + int ret = 0; skb = alloc_can_err_skb(dev, &cf); - if (skb == NULL) - return -ENOMEM; txerr = priv->read_reg(priv, SJA1000_TXERR); rxerr = priv->read_reg(priv, SJA1000_RXERR); - cf->data[6] = txerr; - cf->data[7] = rxerr; - if (isrc & IRQ_DOI) { /* data overrun interrupt */ netdev_dbg(dev, "data overrun interrupt\n"); - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + if (skb) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + } + stats->rx_over_errors++; stats->rx_errors++; sja1000_write_cmdreg(priv, CMD_CDO); /* clear bit */ + + /* Some controllers needs additional handling upon overrun + * condition: the controller may sometimes be totally confused + * and refuse any new frame while its buffer is empty. The only + * way to re-sync the read vs. write buffer offsets is to + * stop any current handling and perform a reset. + */ + if (priv->flags & SJA1000_QUIRK_RESET_ON_OVERRUN) + ret = IRQ_WAKE_THREAD; } if (isrc & IRQ_EI) { @@ -429,36 +453,46 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) else state = CAN_STATE_ERROR_ACTIVE; } + if (state != CAN_STATE_BUS_OFF && skb) { + cf->can_id |= CAN_ERR_CNT; + cf->data[6] = txerr; + cf->data[7] = rxerr; + } if (isrc & IRQ_BEI) { /* bus error interrupt */ priv->can.can_stats.bus_error++; - stats->rx_errors++; ecc = priv->read_reg(priv, SJA1000_ECC); + if (skb) { + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; - cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; - - /* set error type */ - switch (ecc & ECC_MASK) { - case ECC_BIT: - cf->data[2] |= CAN_ERR_PROT_BIT; - break; - case ECC_FORM: - cf->data[2] |= CAN_ERR_PROT_FORM; - break; - case ECC_STUFF: - cf->data[2] |= CAN_ERR_PROT_STUFF; - break; - default: - break; - } + /* set error type */ + switch (ecc & ECC_MASK) { + case ECC_BIT: + cf->data[2] |= CAN_ERR_PROT_BIT; + break; + case ECC_FORM: + cf->data[2] |= CAN_ERR_PROT_FORM; + break; + case ECC_STUFF: + cf->data[2] |= CAN_ERR_PROT_STUFF; + break; + default: + break; + } - /* set error location */ - cf->data[3] = ecc & ECC_SEG; + /* set error location */ + cf->data[3] = ecc & ECC_SEG; + } /* Error occurred during transmission? 
*/ - if ((ecc & ECC_DIR) == 0) - cf->data[2] |= CAN_ERR_PROT_TX; + if ((ecc & ECC_DIR) == 0) { + stats->tx_errors++; + if (skb) + cf->data[2] |= CAN_ERR_PROT_TX; + } else { + stats->rx_errors++; + } } if (isrc & IRQ_EPI) { /* error passive interrupt */ @@ -474,9 +508,10 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) netdev_dbg(dev, "arbitration lost interrupt\n"); alc = priv->read_reg(priv, SJA1000_ALC); priv->can.can_stats.arbitration_lost++; - stats->tx_errors++; - cf->can_id |= CAN_ERR_LOSTARB; - cf->data[0] = alc & 0x1f; + if (skb) { + cf->can_id |= CAN_ERR_LOSTARB; + cf->data[0] = alc & 0x1f; + } } if (state != priv->can.state) { @@ -489,11 +524,12 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) can_bus_off(dev); } - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + if (!skb) + return -ENOMEM; + netif_rx(skb); - return 0; + return ret; } irqreturn_t sja1000_interrupt(int irq, void *dev_id) @@ -502,7 +538,8 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id) struct sja1000_priv *priv = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; uint8_t isrc, status; - int n = 0; + irqreturn_t ret = 0; + int n = 0, err; if (priv->pre_irq) priv->pre_irq(priv); @@ -511,8 +548,8 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id) if (priv->read_reg(priv, SJA1000_IER) == IRQ_OFF) goto out; - while ((isrc = priv->read_reg(priv, SJA1000_IR)) && - (n < SJA1000_MAX_IRQ)) { + while ((n < SJA1000_MAX_IRQ) && + (isrc = priv->read_reg(priv, SJA1000_IR))) { status = priv->read_reg(priv, SJA1000_SR); /* check for absent controller due to hw unplug */ @@ -527,16 +564,13 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id) if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT && !(status & SR_TCS)) { stats->tx_errors++; - can_free_echo_skb(dev, 0); + can_free_echo_skb(dev, 0, NULL); } else { /* transmission complete */ - stats->tx_bytes += - priv->read_reg(priv, SJA1000_FI) & 0xf; + stats->tx_bytes += can_get_echo_skb(dev, 0, NULL); stats->tx_packets++; - can_get_echo_skb(dev, 0); } netif_wake_queue(dev); - can_led_event(dev, CAN_LED_EVENT_TX); } if (isrc & IRQ_RI) { /* receive interrupt */ @@ -550,19 +584,25 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id) } if (isrc & (IRQ_DOI | IRQ_EI | IRQ_BEI | IRQ_EPI | IRQ_ALI)) { /* error interrupt */ - if (sja1000_err(dev, isrc, status)) + err = sja1000_err(dev, isrc, status); + if (err == IRQ_WAKE_THREAD) + ret = err; + if (err) break; } n++; } out: + if (!ret) + ret = (n) ? IRQ_HANDLED : IRQ_NONE; + if (priv->post_irq) priv->post_irq(priv); if (n >= SJA1000_MAX_IRQ) netdev_dbg(dev, "%d messages handled in ISR", n); - return (n) ? 
IRQ_HANDLED : IRQ_NONE; + return ret; } EXPORT_SYMBOL_GPL(sja1000_interrupt); @@ -581,8 +621,9 @@ static int sja1000_open(struct net_device *dev) /* register interrupt handler, if not done by the device driver */ if (!(priv->flags & SJA1000_CUSTOM_IRQ_HANDLER)) { - err = request_irq(dev->irq, sja1000_interrupt, priv->irq_flags, - dev->name, (void *)dev); + err = request_threaded_irq(dev->irq, sja1000_interrupt, + sja1000_reset_interrupt, + priv->irq_flags, dev->name, (void *)dev); if (err) { close_candev(dev); return -EAGAIN; @@ -592,8 +633,6 @@ static int sja1000_open(struct net_device *dev) /* init and start chi */ sja1000_start(dev); - can_led_event(dev, CAN_LED_EVENT_OPEN); - netif_start_queue(dev); return 0; @@ -611,8 +650,6 @@ static int sja1000_close(struct net_device *dev) close_candev(dev); - can_led_event(dev, CAN_LED_EVENT_STOP); - return 0; } @@ -638,7 +675,8 @@ struct net_device *alloc_sja1000dev(int sizeof_priv) CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_ONE_SHOT | CAN_CTRLMODE_BERR_REPORTING | - CAN_CTRLMODE_PRESUME_ACK; + CAN_CTRLMODE_PRESUME_ACK | + CAN_CTRLMODE_CC_LEN8_DLC; spin_lock_init(&priv->cmdreg_lock); @@ -659,28 +697,25 @@ static const struct net_device_ops sja1000_netdev_ops = { .ndo_open = sja1000_open, .ndo_stop = sja1000_close, .ndo_start_xmit = sja1000_start_xmit, - .ndo_change_mtu = can_change_mtu, +}; + +static const struct ethtool_ops sja1000_ethtool_ops = { + .get_ts_info = ethtool_op_get_ts_info, }; int register_sja1000dev(struct net_device *dev) { - int ret; - if (!sja1000_probe_chip(dev)) return -ENODEV; dev->flags |= IFF_ECHO; /* we support local echo */ dev->netdev_ops = &sja1000_netdev_ops; + dev->ethtool_ops = &sja1000_ethtool_ops; set_reset_mode(dev); chipset_init(dev); - ret = register_candev(dev); - - if (!ret) - devm_can_led_init(dev); - - return ret; + return register_candev(dev); } EXPORT_SYMBOL_GPL(register_sja1000dev); diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h index 9d46398f8154..f015e39e2224 100644 --- a/drivers/net/can/sja1000/sja1000.h +++ b/drivers/net/can/sja1000/sja1000.h @@ -145,7 +145,9 @@ /* * Flags for sja1000priv.flags */ -#define SJA1000_CUSTOM_IRQ_HANDLER 0x1 +#define SJA1000_CUSTOM_IRQ_HANDLER BIT(0) +#define SJA1000_QUIRK_NO_CDR_REG BIT(1) +#define SJA1000_QUIRK_RESET_ON_OVERRUN BIT(2) /* * SJA1000 private data structure diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c index 1a2ae6ce8d87..2d1f715459d7 100644 --- a/drivers/net/can/sja1000/sja1000_isa.c +++ b/drivers/net/can/sja1000/sja1000_isa.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2009 Wolfgang Grandegger <wg@grandegger.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the version 2 of the GNU General Public License - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. 
*/ #include <linux/kernel.h> @@ -141,7 +130,7 @@ static int sja1000_isa_probe(struct platform_device *pdev) err = -EBUSY; goto exit; } - base = ioremap_nocache(mem[idx], iosize); + base = ioremap(mem[idx], iosize); if (!base) { err = -ENOMEM; goto exit_release; @@ -213,26 +202,28 @@ static int sja1000_isa_probe(struct platform_device *pdev) if (err) { dev_err(&pdev->dev, "registering %s failed (err=%d)\n", DRV_NAME, err); - goto exit_unmap; + goto exit_free; } dev_info(&pdev->dev, "%s device registered (reg_base=0x%p, irq=%d)\n", DRV_NAME, priv->reg_base, dev->irq); return 0; - exit_unmap: +exit_free: + free_sja1000dev(dev); +exit_unmap: if (mem[idx]) iounmap(base); - exit_release: +exit_release: if (mem[idx]) release_mem_region(mem[idx], iosize); else release_region(port[idx], iosize); - exit: +exit: return err; } -static int sja1000_isa_remove(struct platform_device *pdev) +static void sja1000_isa_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct sja1000_priv *priv = netdev_priv(dev); @@ -250,8 +241,6 @@ static int sja1000_isa_remove(struct platform_device *pdev) release_region(port[idx], SJA1000_IOSIZE); } free_sja1000dev(dev); - - return 0; } static struct platform_driver sja1000_isa_driver = { diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c index dc9c6db96c3c..2d555f854008 100644 --- a/drivers/net/can/sja1000/sja1000_platform.c +++ b/drivers/net/can/sja1000/sja1000_platform.c @@ -1,18 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2005 Sascha Hauer, Pengutronix * Copyright (C) 2007 Wolfgang Grandegger <wg@grandegger.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the version 2 of the GNU General Public License - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. 
*/ #include <linux/kernel.h> @@ -25,10 +14,9 @@ #include <linux/irq.h> #include <linux/can/dev.h> #include <linux/can/platform/sja1000.h> +#include <linux/clk.h> #include <linux/io.h> #include <linux/of.h> -#include <linux/of_device.h> -#include <linux/of_irq.h> #include "sja1000.h" @@ -43,7 +31,7 @@ MODULE_LICENSE("GPL v2"); struct sja1000_of_data { size_t priv_sz; - int (*init)(struct sja1000_priv *priv, struct device_node *of); + void (*init)(struct sja1000_priv *priv, struct device_node *of); }; struct technologic_priv { @@ -106,15 +94,18 @@ static void sp_technologic_write_reg16(const struct sja1000_priv *priv, spin_unlock_irqrestore(&tp->io_lock, flags); } -static int sp_technologic_init(struct sja1000_priv *priv, struct device_node *of) +static void sp_technologic_init(struct sja1000_priv *priv, struct device_node *of) { struct technologic_priv *tp = priv->priv; priv->read_reg = sp_technologic_read_reg16; priv->write_reg = sp_technologic_write_reg16; spin_lock_init(&tp->io_lock); +} - return 0; +static void sp_rzn1_init(struct sja1000_priv *priv, struct device_node *of) +{ + priv->flags = SJA1000_QUIRK_NO_CDR_REG | SJA1000_QUIRK_RESET_ON_OVERRUN; } static void sp_populate(struct sja1000_priv *priv, @@ -161,17 +152,19 @@ static void sp_populate_of(struct sja1000_priv *priv, struct device_node *of) priv->read_reg = sp_read_reg16; priv->write_reg = sp_write_reg16; break; - case 1: /* fallthrough */ + case 1: default: priv->read_reg = sp_read_reg8; priv->write_reg = sp_write_reg8; } - err = of_property_read_u32(of, "nxp,external-clock-frequency", &prop); - if (!err) - priv->can.clock.freq = prop / 2; - else - priv->can.clock.freq = SP_CAN_CLOCK; /* default */ + if (!priv->can.clock.freq) { + err = of_property_read_u32(of, "nxp,external-clock-frequency", &prop); + if (!err) + priv->can.clock.freq = prop / 2; + else + priv->can.clock.freq = SP_CAN_CLOCK; /* default */ + } err = of_property_read_u32(of, "nxp,tx-output-mode", &prop); if (!err) @@ -206,8 +199,13 @@ static struct sja1000_of_data technologic_data = { .init = sp_technologic_init, }; +static struct sja1000_of_data renesas_data = { + .init = sp_rzn1_init, +}; + static const struct of_device_id sp_of_table[] = { { .compatible = "nxp,sja1000", .data = NULL, }, + { .compatible = "renesas,rzn1-sja1000", .data = &renesas_data, }, { .compatible = "technologic,sja1000", .data = &technologic_data, }, { /* sentinel */ }, }; @@ -222,9 +220,9 @@ static int sp_probe(struct platform_device *pdev) struct resource *res_mem, *res_irq = NULL; struct sja1000_platform_data *pdata; struct device_node *of = pdev->dev.of_node; - const struct of_device_id *of_id; const struct sja1000_of_data *of_data = NULL; size_t priv_sz = 0; + struct clk *clk; pdata = dev_get_platdata(&pdev->dev); if (!pdata && !of) { @@ -232,32 +230,28 @@ static int sp_probe(struct platform_device *pdev) return -ENODEV; } - res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res_mem) - return -ENODEV; - - if (!devm_request_mem_region(&pdev->dev, res_mem->start, - resource_size(res_mem), DRV_NAME)) - return -EBUSY; + addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res_mem); + if (IS_ERR(addr)) + return PTR_ERR(addr); - addr = devm_ioremap_nocache(&pdev->dev, res_mem->start, - resource_size(res_mem)); - if (!addr) - return -ENOMEM; - - if (of) - irq = irq_of_parse_and_map(of, 0); - else + if (of) { + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + clk = devm_clk_get_optional_enabled(&pdev->dev, NULL); + if (IS_ERR(clk)) + return 
dev_err_probe(&pdev->dev, PTR_ERR(clk), + "CAN clk operation failed"); + } else { res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res_irq) + return -ENODEV; + } - if (!irq && !res_irq) - return -ENODEV; - - of_id = of_match_device(sp_of_table, &pdev->dev); - if (of_id && of_id->data) { - of_data = of_id->data; + of_data = device_get_match_data(&pdev->dev); + if (of_data) priv_sz = of_data->priv_sz; - } dev = alloc_sja1000dev(priv_sz); if (!dev) @@ -273,17 +267,26 @@ static int sp_probe(struct platform_device *pdev) priv->irq_flags = IRQF_SHARED; } + if (priv->flags & SJA1000_QUIRK_RESET_ON_OVERRUN) + priv->irq_flags |= IRQF_ONESHOT; + dev->irq = irq; priv->reg_base = addr; if (of) { - sp_populate_of(priv, of); - - if (of_data && of_data->init) { - err = of_data->init(priv, of); - if (err) + if (clk) { + priv->can.clock.freq = clk_get_rate(clk) / 2; + if (!priv->can.clock.freq) { + err = -EINVAL; + dev_err(&pdev->dev, "Zero CAN clk rate"); goto exit_free; + } } + + sp_populate_of(priv, of); + + if (of_data && of_data->init) + of_data->init(priv, of); } else { sp_populate(priv, pdata, res_mem->flags); } @@ -307,14 +310,12 @@ static int sp_probe(struct platform_device *pdev) return err; } -static int sp_remove(struct platform_device *pdev) +static void sp_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); unregister_sja1000dev(dev); free_sja1000dev(dev); - - return 0; } static struct platform_driver sp_driver = { diff --git a/drivers/net/can/sja1000/tscan1.c b/drivers/net/can/sja1000/tscan1.c index 79572457a2d6..f3862bed3d40 100644 --- a/drivers/net/can/sja1000/tscan1.c +++ b/drivers/net/can/sja1000/tscan1.c @@ -1,26 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * tscan1.c: driver for Technologic Systems TS-CAN1 PC104 boards * * Copyright 2010 Andre B. Oliveira - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. 
*/ -/* - * References: - * - Getting started with TS-CAN1, Technologic Systems, Jun 2009 - * http://www.embeddedarm.com/documentation/ts-can1-manual.pdf +/* References: + * - Getting started with TS-CAN1, Technologic Systems, Feb 2022 + * https://docs.embeddedts.com/TS-CAN1 */ #include <linux/init.h> @@ -171,7 +158,7 @@ static int tscan1_probe(struct device *dev, unsigned id) return -ENXIO; } -static int tscan1_remove(struct device *dev, unsigned id /*unused*/) +static void tscan1_remove(struct device *dev, unsigned id /*unused*/) { struct net_device *netdev; struct sja1000_priv *priv; @@ -191,8 +178,6 @@ static int tscan1_remove(struct device *dev, unsigned id /*unused*/) release_region(pld_base, TSCAN1_PLD_SIZE); free_sja1000dev(netdev); - - return 0; } static struct isa_driver tscan1_isa_driver = { diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c deleted file mode 100644 index aa97dbc797b6..000000000000 --- a/drivers/net/can/slcan.c +++ /dev/null @@ -1,778 +0,0 @@ -/* - * slcan.c - serial line CAN interface driver (using tty line discipline) - * - * This file is derived from linux/drivers/net/slip/slip.c - * - * slip.c Authors : Laurence Culhane <loz@holmes.demon.co.uk> - * Fred N. van Kempen <waltje@uwalt.nl.mugnet.org> - * slcan.c Author : Oliver Hartkopp <socketcan@hartkopp.net> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, see http://www.gnu.org/licenses/gpl.html - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH - * DAMAGE. 
- * - */ - -#include <linux/module.h> -#include <linux/moduleparam.h> - -#include <linux/uaccess.h> -#include <linux/bitops.h> -#include <linux/string.h> -#include <linux/tty.h> -#include <linux/errno.h> -#include <linux/netdevice.h> -#include <linux/skbuff.h> -#include <linux/rtnetlink.h> -#include <linux/if_arp.h> -#include <linux/if_ether.h> -#include <linux/sched.h> -#include <linux/delay.h> -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/workqueue.h> -#include <linux/can.h> -#include <linux/can/skb.h> - -MODULE_ALIAS_LDISC(N_SLCAN); -MODULE_DESCRIPTION("serial line CAN interface"); -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>"); - -#define SLCAN_MAGIC 0x53CA - -static int maxdev = 10; /* MAX number of SLCAN channels; - This can be overridden with - insmod slcan.ko maxdev=nnn */ -module_param(maxdev, int, 0); -MODULE_PARM_DESC(maxdev, "Maximum number of slcan interfaces"); - -/* maximum rx buffer len: extended CAN frame with timestamp */ -#define SLC_MTU (sizeof("T1111222281122334455667788EA5F\r")+1) - -#define SLC_CMD_LEN 1 -#define SLC_SFF_ID_LEN 3 -#define SLC_EFF_ID_LEN 8 - -struct slcan { - int magic; - - /* Various fields. */ - struct tty_struct *tty; /* ptr to TTY structure */ - struct net_device *dev; /* easy for intr handling */ - spinlock_t lock; - struct work_struct tx_work; /* Flushes transmit buffer */ - - /* These are pointers to the malloc()ed frame buffers. */ - unsigned char rbuff[SLC_MTU]; /* receiver buffer */ - int rcount; /* received chars counter */ - unsigned char xbuff[SLC_MTU]; /* transmitter buffer */ - unsigned char *xhead; /* pointer to next XMIT byte */ - int xleft; /* bytes left in XMIT queue */ - - unsigned long flags; /* Flag values/ mode etc */ -#define SLF_INUSE 0 /* Channel in use */ -#define SLF_ERROR 1 /* Parity, etc. error */ -}; - -static struct net_device **slcan_devs; - - /************************************************************************ - * SLCAN ENCAPSULATION FORMAT * - ************************************************************************/ - -/* - * A CAN frame has a can_id (11 bit standard frame format OR 29 bit extended - * frame format) a data length code (can_dlc) which can be from 0 to 8 - * and up to <can_dlc> data bytes as payload. - * Additionally a CAN frame may become a remote transmission frame if the - * RTR-bit is set. This causes another ECU to send a CAN frame with the - * given can_id. - * - * The SLCAN ASCII representation of these different frame types is: - * <type> <id> <dlc> <data>* - * - * Extended frames (29 bit) are defined by capital characters in the type. - * RTR frames are defined as 'r' types - normal frames have 't' type: - * t => 11 bit data frame - * r => 11 bit RTR frame - * T => 29 bit data frame - * R => 29 bit RTR frame - * - * The <id> is 3 (standard) or 8 (extended) bytes in ASCII Hex (base64). 
- * The <dlc> is a one byte ASCII number ('0' - '8') - * The <data> section has at much ASCII Hex bytes as defined by the <dlc> - * - * Examples: - * - * t1230 : can_id 0x123, can_dlc 0, no data - * t4563112233 : can_id 0x456, can_dlc 3, data 0x11 0x22 0x33 - * T12ABCDEF2AA55 : extended can_id 0x12ABCDEF, can_dlc 2, data 0xAA 0x55 - * r1230 : can_id 0x123, can_dlc 0, no data, remote transmission request - * - */ - - /************************************************************************ - * STANDARD SLCAN DECAPSULATION * - ************************************************************************/ - -/* Send one completely decapsulated can_frame to the network layer */ -static void slc_bump(struct slcan *sl) -{ - struct sk_buff *skb; - struct can_frame cf; - int i, tmp; - u32 tmpid; - char *cmd = sl->rbuff; - - cf.can_id = 0; - - switch (*cmd) { - case 'r': - cf.can_id = CAN_RTR_FLAG; - /* fallthrough */ - case 't': - /* store dlc ASCII value and terminate SFF CAN ID string */ - cf.can_dlc = sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN]; - sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN] = 0; - /* point to payload data behind the dlc */ - cmd += SLC_CMD_LEN + SLC_SFF_ID_LEN + 1; - break; - case 'R': - cf.can_id = CAN_RTR_FLAG; - /* fallthrough */ - case 'T': - cf.can_id |= CAN_EFF_FLAG; - /* store dlc ASCII value and terminate EFF CAN ID string */ - cf.can_dlc = sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN]; - sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN] = 0; - /* point to payload data behind the dlc */ - cmd += SLC_CMD_LEN + SLC_EFF_ID_LEN + 1; - break; - default: - return; - } - - if (kstrtou32(sl->rbuff + SLC_CMD_LEN, 16, &tmpid)) - return; - - cf.can_id |= tmpid; - - /* get can_dlc from sanitized ASCII value */ - if (cf.can_dlc >= '0' && cf.can_dlc < '9') - cf.can_dlc -= '0'; - else - return; - - *(u64 *) (&cf.data) = 0; /* clear payload */ - - /* RTR frames may have a dlc > 0 but they never have any data bytes */ - if (!(cf.can_id & CAN_RTR_FLAG)) { - for (i = 0; i < cf.can_dlc; i++) { - tmp = hex_to_bin(*cmd++); - if (tmp < 0) - return; - cf.data[i] = (tmp << 4); - tmp = hex_to_bin(*cmd++); - if (tmp < 0) - return; - cf.data[i] |= tmp; - } - } - - skb = dev_alloc_skb(sizeof(struct can_frame) + - sizeof(struct can_skb_priv)); - if (!skb) - return; - - skb->dev = sl->dev; - skb->protocol = htons(ETH_P_CAN); - skb->pkt_type = PACKET_BROADCAST; - skb->ip_summed = CHECKSUM_UNNECESSARY; - - can_skb_reserve(skb); - can_skb_prv(skb)->ifindex = sl->dev->ifindex; - can_skb_prv(skb)->skbcnt = 0; - - skb_put_data(skb, &cf, sizeof(struct can_frame)); - - sl->dev->stats.rx_packets++; - sl->dev->stats.rx_bytes += cf.can_dlc; - netif_rx_ni(skb); -} - -/* parse tty input stream */ -static void slcan_unesc(struct slcan *sl, unsigned char s) -{ - if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */ - if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && - (sl->rcount > 4)) { - slc_bump(sl); - } - sl->rcount = 0; - } else { - if (!test_bit(SLF_ERROR, &sl->flags)) { - if (sl->rcount < SLC_MTU) { - sl->rbuff[sl->rcount++] = s; - return; - } else { - sl->dev->stats.rx_over_errors++; - set_bit(SLF_ERROR, &sl->flags); - } - } - } -} - - /************************************************************************ - * STANDARD SLCAN ENCAPSULATION * - ************************************************************************/ - -/* Encapsulate one can_frame and stuff into a TTY queue. 
*/ -static void slc_encaps(struct slcan *sl, struct can_frame *cf) -{ - int actual, i; - unsigned char *pos; - unsigned char *endpos; - canid_t id = cf->can_id; - - pos = sl->xbuff; - - if (cf->can_id & CAN_RTR_FLAG) - *pos = 'R'; /* becomes 'r' in standard frame format (SFF) */ - else - *pos = 'T'; /* becomes 't' in standard frame format (SSF) */ - - /* determine number of chars for the CAN-identifier */ - if (cf->can_id & CAN_EFF_FLAG) { - id &= CAN_EFF_MASK; - endpos = pos + SLC_EFF_ID_LEN; - } else { - *pos |= 0x20; /* convert R/T to lower case for SFF */ - id &= CAN_SFF_MASK; - endpos = pos + SLC_SFF_ID_LEN; - } - - /* build 3 (SFF) or 8 (EFF) digit CAN identifier */ - pos++; - while (endpos >= pos) { - *endpos-- = hex_asc_upper[id & 0xf]; - id >>= 4; - } - - pos += (cf->can_id & CAN_EFF_FLAG) ? SLC_EFF_ID_LEN : SLC_SFF_ID_LEN; - - *pos++ = cf->can_dlc + '0'; - - /* RTR frames may have a dlc > 0 but they never have any data bytes */ - if (!(cf->can_id & CAN_RTR_FLAG)) { - for (i = 0; i < cf->can_dlc; i++) - pos = hex_byte_pack_upper(pos, cf->data[i]); - } - - *pos++ = '\r'; - - /* Order of next two lines is *very* important. - * When we are sending a little amount of data, - * the transfer may be completed inside the ops->write() - * routine, because it's running with interrupts enabled. - * In this case we *never* got WRITE_WAKEUP event, - * if we did not request it before write operation. - * 14 Oct 1994 Dmitry Gorodchanin. - */ - set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); - actual = sl->tty->ops->write(sl->tty, sl->xbuff, pos - sl->xbuff); - sl->xleft = (pos - sl->xbuff) - actual; - sl->xhead = sl->xbuff + actual; - sl->dev->stats.tx_bytes += cf->can_dlc; -} - -/* Write out any remaining transmit buffer. Scheduled when tty is writable */ -static void slcan_transmit(struct work_struct *work) -{ - struct slcan *sl = container_of(work, struct slcan, tx_work); - int actual; - - spin_lock_bh(&sl->lock); - /* First make sure we're connected. */ - if (!sl->tty || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) { - spin_unlock_bh(&sl->lock); - return; - } - - if (sl->xleft <= 0) { - /* Now serial buffer is almost free & we can start - * transmission of another packet */ - sl->dev->stats.tx_packets++; - clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); - spin_unlock_bh(&sl->lock); - netif_wake_queue(sl->dev); - return; - } - - actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft); - sl->xleft -= actual; - sl->xhead += actual; - spin_unlock_bh(&sl->lock); -} - -/* - * Called by the driver when there's room for more data. - * Schedule the transmit. - */ -static void slcan_write_wakeup(struct tty_struct *tty) -{ - struct slcan *sl = tty->disc_data; - - schedule_work(&sl->tx_work); -} - -/* Send a can_frame to a TTY queue. */ -static netdev_tx_t slc_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct slcan *sl = netdev_priv(dev); - - if (skb->len != CAN_MTU) - goto out; - - spin_lock(&sl->lock); - if (!netif_running(dev)) { - spin_unlock(&sl->lock); - printk(KERN_WARNING "%s: xmit: iface is down\n", dev->name); - goto out; - } - if (sl->tty == NULL) { - spin_unlock(&sl->lock); - goto out; - } - - netif_stop_queue(sl->dev); - slc_encaps(sl, (struct can_frame *) skb->data); /* encaps & send */ - spin_unlock(&sl->lock); - -out: - kfree_skb(skb); - return NETDEV_TX_OK; -} - - -/****************************************** - * Routines looking at netdevice side. 
- ******************************************/ - -/* Netdevice UP -> DOWN routine */ -static int slc_close(struct net_device *dev) -{ - struct slcan *sl = netdev_priv(dev); - - spin_lock_bh(&sl->lock); - if (sl->tty) { - /* TTY discipline is running. */ - clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); - } - netif_stop_queue(dev); - sl->rcount = 0; - sl->xleft = 0; - spin_unlock_bh(&sl->lock); - - return 0; -} - -/* Netdevice DOWN -> UP routine */ -static int slc_open(struct net_device *dev) -{ - struct slcan *sl = netdev_priv(dev); - - if (sl->tty == NULL) - return -ENODEV; - - sl->flags &= (1 << SLF_INUSE); - netif_start_queue(dev); - return 0; -} - -/* Hook the destructor so we can free slcan devs at the right point in time */ -static void slc_free_netdev(struct net_device *dev) -{ - int i = dev->base_addr; - - slcan_devs[i] = NULL; -} - -static int slcan_change_mtu(struct net_device *dev, int new_mtu) -{ - return -EINVAL; -} - -static const struct net_device_ops slc_netdev_ops = { - .ndo_open = slc_open, - .ndo_stop = slc_close, - .ndo_start_xmit = slc_xmit, - .ndo_change_mtu = slcan_change_mtu, -}; - -static void slc_setup(struct net_device *dev) -{ - dev->netdev_ops = &slc_netdev_ops; - dev->needs_free_netdev = true; - dev->priv_destructor = slc_free_netdev; - - dev->hard_header_len = 0; - dev->addr_len = 0; - dev->tx_queue_len = 10; - - dev->mtu = CAN_MTU; - dev->type = ARPHRD_CAN; - - /* New-style flags. */ - dev->flags = IFF_NOARP; - dev->features = NETIF_F_HW_CSUM; -} - -/****************************************** - Routines looking at TTY side. - ******************************************/ - -/* - * Handle the 'receiver data ready' interrupt. - * This function is called by the 'tty_io' module in the kernel when - * a block of SLCAN data has been received, which can now be decapsulated - * and sent on to some IP layer for further processing. This will not - * be re-entered while running but other ldisc functions may be called - * in parallel - */ - -static void slcan_receive_buf(struct tty_struct *tty, - const unsigned char *cp, char *fp, int count) -{ - struct slcan *sl = (struct slcan *) tty->disc_data; - - if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) - return; - - /* Read the characters out of the buffer */ - while (count--) { - if (fp && *fp++) { - if (!test_and_set_bit(SLF_ERROR, &sl->flags)) - sl->dev->stats.rx_errors++; - cp++; - continue; - } - slcan_unesc(sl, *cp++); - } -} - -/************************************ - * slcan_open helper routines. - ************************************/ - -/* Collect hanged up channels */ -static void slc_sync(void) -{ - int i; - struct net_device *dev; - struct slcan *sl; - - for (i = 0; i < maxdev; i++) { - dev = slcan_devs[i]; - if (dev == NULL) - break; - - sl = netdev_priv(dev); - if (sl->tty) - continue; - if (dev->flags & IFF_UP) - dev_close(dev); - } -} - -/* Find a free SLCAN channel, and link in this `tty' line. 
*/ -static struct slcan *slc_alloc(void) -{ - int i; - char name[IFNAMSIZ]; - struct net_device *dev = NULL; - struct slcan *sl; - - for (i = 0; i < maxdev; i++) { - dev = slcan_devs[i]; - if (dev == NULL) - break; - - } - - /* Sorry, too many, all slots in use */ - if (i >= maxdev) - return NULL; - - sprintf(name, "slcan%d", i); - dev = alloc_netdev(sizeof(*sl), name, NET_NAME_UNKNOWN, slc_setup); - if (!dev) - return NULL; - - dev->base_addr = i; - sl = netdev_priv(dev); - - /* Initialize channel control data */ - sl->magic = SLCAN_MAGIC; - sl->dev = dev; - spin_lock_init(&sl->lock); - INIT_WORK(&sl->tx_work, slcan_transmit); - slcan_devs[i] = dev; - - return sl; -} - -/* - * Open the high-level part of the SLCAN channel. - * This function is called by the TTY module when the - * SLCAN line discipline is called for. Because we are - * sure the tty line exists, we only have to link it to - * a free SLCAN channel... - * - * Called in process context serialized from other ldisc calls. - */ - -static int slcan_open(struct tty_struct *tty) -{ - struct slcan *sl; - int err; - - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - - if (tty->ops->write == NULL) - return -EOPNOTSUPP; - - /* RTnetlink lock is misused here to serialize concurrent - opens of slcan channels. There are better ways, but it is - the simplest one. - */ - rtnl_lock(); - - /* Collect hanged up channels. */ - slc_sync(); - - sl = tty->disc_data; - - err = -EEXIST; - /* First make sure we're not already connected. */ - if (sl && sl->magic == SLCAN_MAGIC) - goto err_exit; - - /* OK. Find a free SLCAN channel to use. */ - err = -ENFILE; - sl = slc_alloc(); - if (sl == NULL) - goto err_exit; - - sl->tty = tty; - tty->disc_data = sl; - - if (!test_bit(SLF_INUSE, &sl->flags)) { - /* Perform the low-level SLCAN initialization. */ - sl->rcount = 0; - sl->xleft = 0; - - set_bit(SLF_INUSE, &sl->flags); - - err = register_netdevice(sl->dev); - if (err) - goto err_free_chan; - } - - /* Done. We have linked the TTY line to a channel. */ - rtnl_unlock(); - tty->receive_room = 65536; /* We don't flow control */ - - /* TTY layer expects 0 on success */ - return 0; - -err_free_chan: - sl->tty = NULL; - tty->disc_data = NULL; - clear_bit(SLF_INUSE, &sl->flags); - -err_exit: - rtnl_unlock(); - - /* Count references from TTY module */ - return err; -} - -/* - * Close down a SLCAN channel. - * This means flushing out any pending queues, and then returning. This - * call is serialized against other ldisc functions. - * - * We also use this method for a hangup event. - */ - -static void slcan_close(struct tty_struct *tty) -{ - struct slcan *sl = (struct slcan *) tty->disc_data; - - /* First make sure we're connected. */ - if (!sl || sl->magic != SLCAN_MAGIC || sl->tty != tty) - return; - - spin_lock_bh(&sl->lock); - tty->disc_data = NULL; - sl->tty = NULL; - spin_unlock_bh(&sl->lock); - - flush_work(&sl->tx_work); - - /* Flush network side */ - unregister_netdev(sl->dev); - /* This will complete via sl_free_netdev */ -} - -static int slcan_hangup(struct tty_struct *tty) -{ - slcan_close(tty); - return 0; -} - -/* Perform I/O control on an active SLCAN channel. */ -static int slcan_ioctl(struct tty_struct *tty, struct file *file, - unsigned int cmd, unsigned long arg) -{ - struct slcan *sl = (struct slcan *) tty->disc_data; - unsigned int tmp; - - /* First make sure we're connected. 
*/ - if (!sl || sl->magic != SLCAN_MAGIC) - return -EINVAL; - - switch (cmd) { - case SIOCGIFNAME: - tmp = strlen(sl->dev->name) + 1; - if (copy_to_user((void __user *)arg, sl->dev->name, tmp)) - return -EFAULT; - return 0; - - case SIOCSIFHWADDR: - return -EINVAL; - - default: - return tty_mode_ioctl(tty, file, cmd, arg); - } -} - -static struct tty_ldisc_ops slc_ldisc = { - .owner = THIS_MODULE, - .magic = TTY_LDISC_MAGIC, - .name = "slcan", - .open = slcan_open, - .close = slcan_close, - .hangup = slcan_hangup, - .ioctl = slcan_ioctl, - .receive_buf = slcan_receive_buf, - .write_wakeup = slcan_write_wakeup, -}; - -static int __init slcan_init(void) -{ - int status; - - if (maxdev < 4) - maxdev = 4; /* Sanity */ - - pr_info("slcan: serial line CAN interface driver\n"); - pr_info("slcan: %d dynamic interface channels.\n", maxdev); - - slcan_devs = kcalloc(maxdev, sizeof(struct net_device *), GFP_KERNEL); - if (!slcan_devs) - return -ENOMEM; - - /* Fill in our line protocol discipline, and register it */ - status = tty_register_ldisc(N_SLCAN, &slc_ldisc); - if (status) { - printk(KERN_ERR "slcan: can't register line discipline\n"); - kfree(slcan_devs); - } - return status; -} - -static void __exit slcan_exit(void) -{ - int i; - struct net_device *dev; - struct slcan *sl; - unsigned long timeout = jiffies + HZ; - int busy = 0; - - if (slcan_devs == NULL) - return; - - /* First of all: check for active disciplines and hangup them. - */ - do { - if (busy) - msleep_interruptible(100); - - busy = 0; - for (i = 0; i < maxdev; i++) { - dev = slcan_devs[i]; - if (!dev) - continue; - sl = netdev_priv(dev); - spin_lock_bh(&sl->lock); - if (sl->tty) { - busy++; - tty_hangup(sl->tty); - } - spin_unlock_bh(&sl->lock); - } - } while (busy && time_before(jiffies, timeout)); - - /* FIXME: hangup is async so we should wait when doing this second - phase */ - - for (i = 0; i < maxdev; i++) { - dev = slcan_devs[i]; - if (!dev) - continue; - slcan_devs[i] = NULL; - - sl = netdev_priv(dev); - if (sl->tty) { - printk(KERN_ERR "%s: tty discipline still running\n", - dev->name); - } - - unregister_netdev(dev); - } - - kfree(slcan_devs); - slcan_devs = NULL; - - i = tty_unregister_ldisc(N_SLCAN); - if (i) - printk(KERN_ERR "slcan: can't unregister ldisc (err %d)\n", i); -} - -module_init(slcan_init); -module_exit(slcan_exit); diff --git a/drivers/net/can/slcan/Makefile b/drivers/net/can/slcan/Makefile new file mode 100644 index 000000000000..8a88e484ee21 --- /dev/null +++ b/drivers/net/can/slcan/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_CAN_SLCAN) += slcan.o + +slcan-objs := +slcan-objs += slcan-core.o +slcan-objs += slcan-ethtool.o diff --git a/drivers/net/can/slcan/slcan-core.c b/drivers/net/can/slcan/slcan-core.c new file mode 100644 index 000000000000..cd789e178d34 --- /dev/null +++ b/drivers/net/can/slcan/slcan-core.c @@ -0,0 +1,953 @@ +/* + * slcan.c - serial line CAN interface driver (using tty line discipline) + * + * This file is derived from linux/drivers/net/slip/slip.c and got + * inspiration from linux/drivers/net/can/can327.c for the rework made + * on the line discipline code. + * + * slip.c Authors : Laurence Culhane <loz@holmes.demon.co.uk> + * Fred N. 
van Kempen <waltje@uwalt.nl.mugnet.org> + * slcan.c Author : Oliver Hartkopp <socketcan@hartkopp.net> + * can327.c Author : Max Staudt <max-linux@enpas.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see http://www.gnu.org/licenses/gpl.html + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> + +#include <linux/uaccess.h> +#include <linux/bitops.h> +#include <linux/string.h> +#include <linux/tty.h> +#include <linux/errno.h> +#include <linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/rtnetlink.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/workqueue.h> +#include <linux/can.h> +#include <linux/can/dev.h> +#include <linux/can/skb.h> + +#include "slcan.h" + +MODULE_ALIAS_LDISC(N_SLCAN); +MODULE_DESCRIPTION("serial line CAN interface"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>"); +MODULE_AUTHOR("Dario Binacchi <dario.binacchi@amarulasolutions.com>"); + +/* maximum rx buffer len: extended CAN frame with timestamp */ +#define SLCAN_MTU (sizeof("T1111222281122334455667788EA5F\r") + 1) + +#define SLCAN_CMD_LEN 1 +#define SLCAN_SFF_ID_LEN 3 +#define SLCAN_EFF_ID_LEN 8 +#define SLCAN_DATA_LENGTH_LEN 1 +#define SLCAN_ERROR_LEN 1 +#define SLCAN_STATE_LEN 1 +#define SLCAN_STATE_BE_RXCNT_LEN 3 +#define SLCAN_STATE_BE_TXCNT_LEN 3 +#define SLCAN_STATE_MSG_LEN (SLCAN_CMD_LEN + \ + SLCAN_STATE_LEN + \ + SLCAN_STATE_BE_RXCNT_LEN + \ + SLCAN_STATE_BE_TXCNT_LEN) +#define SLCAN_ERROR_MSG_LEN_MIN (SLCAN_CMD_LEN + \ + SLCAN_ERROR_LEN + \ + SLCAN_DATA_LENGTH_LEN) +#define SLCAN_FRAME_MSG_LEN_MIN (SLCAN_CMD_LEN + \ + SLCAN_SFF_ID_LEN + \ + SLCAN_DATA_LENGTH_LEN) +struct slcan { + struct can_priv can; + + /* Various fields. */ + struct tty_struct *tty; /* ptr to TTY structure */ + struct net_device *dev; /* easy for intr handling */ + spinlock_t lock; + struct work_struct tx_work; /* Flushes transmit buffer */ + + /* These are pointers to the malloc()ed frame buffers. 
*/ + unsigned char rbuff[SLCAN_MTU]; /* receiver buffer */ + int rcount; /* received chars counter */ + unsigned char xbuff[SLCAN_MTU]; /* transmitter buffer*/ + unsigned char *xhead; /* pointer to next XMIT byte */ + int xleft; /* bytes left in XMIT queue */ + + unsigned long flags; /* Flag values/ mode etc */ +#define SLF_ERROR 0 /* Parity, etc. error */ +#define SLF_XCMD 1 /* Command transmission */ + unsigned long cmd_flags; /* Command flags */ +#define CF_ERR_RST 0 /* Reset errors on open */ + wait_queue_head_t xcmd_wait; /* Wait queue for commands */ + /* transmission */ +}; + +static const u32 slcan_bitrate_const[] = { + 10000, 20000, 50000, 100000, 125000, + 250000, 500000, 800000, 1000000 +}; + +bool slcan_err_rst_on_open(struct net_device *ndev) +{ + struct slcan *sl = netdev_priv(ndev); + + return !!test_bit(CF_ERR_RST, &sl->cmd_flags); +} + +int slcan_enable_err_rst_on_open(struct net_device *ndev, bool on) +{ + struct slcan *sl = netdev_priv(ndev); + + if (netif_running(ndev)) + return -EBUSY; + + if (on) + set_bit(CF_ERR_RST, &sl->cmd_flags); + else + clear_bit(CF_ERR_RST, &sl->cmd_flags); + + return 0; +} + +/************************************************************************* + * SLCAN ENCAPSULATION FORMAT * + *************************************************************************/ + +/* A CAN frame has a can_id (11 bit standard frame format OR 29 bit extended + * frame format) a data length code (len) which can be from 0 to 8 + * and up to <len> data bytes as payload. + * Additionally a CAN frame may become a remote transmission frame if the + * RTR-bit is set. This causes another ECU to send a CAN frame with the + * given can_id. + * + * The SLCAN ASCII representation of these different frame types is: + * <type> <id> <dlc> <data>* + * + * Extended frames (29 bit) are defined by capital characters in the type. + * RTR frames are defined as 'r' types - normal frames have 't' type: + * t => 11 bit data frame + * r => 11 bit RTR frame + * T => 29 bit data frame + * R => 29 bit RTR frame + * + * The <id> is 3 (standard) or 8 (extended) bytes in ASCII Hex (base64). 
+ * The <dlc> is a one byte ASCII number ('0' - '8') + * The <data> section has at much ASCII Hex bytes as defined by the <dlc> + * + * Examples: + * + * t1230 : can_id 0x123, len 0, no data + * t4563112233 : can_id 0x456, len 3, data 0x11 0x22 0x33 + * T12ABCDEF2AA55 : extended can_id 0x12ABCDEF, len 2, data 0xAA 0x55 + * r1230 : can_id 0x123, len 0, no data, remote transmission request + * + */ + +/************************************************************************* + * STANDARD SLCAN DECAPSULATION * + *************************************************************************/ + +/* Send one completely decapsulated can_frame to the network layer */ +static void slcan_bump_frame(struct slcan *sl) +{ + struct sk_buff *skb; + struct can_frame *cf; + int i, tmp; + u32 tmpid; + char *cmd = sl->rbuff; + + if (sl->rcount < SLCAN_FRAME_MSG_LEN_MIN) + return; + + skb = alloc_can_skb(sl->dev, &cf); + if (unlikely(!skb)) { + sl->dev->stats.rx_dropped++; + return; + } + + switch (*cmd) { + case 'r': + cf->can_id = CAN_RTR_FLAG; + fallthrough; + case 't': + /* store dlc ASCII value and terminate SFF CAN ID string */ + cf->len = sl->rbuff[SLCAN_CMD_LEN + SLCAN_SFF_ID_LEN]; + sl->rbuff[SLCAN_CMD_LEN + SLCAN_SFF_ID_LEN] = 0; + /* point to payload data behind the dlc */ + cmd += SLCAN_CMD_LEN + SLCAN_SFF_ID_LEN + 1; + break; + case 'R': + cf->can_id = CAN_RTR_FLAG; + fallthrough; + case 'T': + cf->can_id |= CAN_EFF_FLAG; + /* store dlc ASCII value and terminate EFF CAN ID string */ + cf->len = sl->rbuff[SLCAN_CMD_LEN + SLCAN_EFF_ID_LEN]; + sl->rbuff[SLCAN_CMD_LEN + SLCAN_EFF_ID_LEN] = 0; + /* point to payload data behind the dlc */ + cmd += SLCAN_CMD_LEN + SLCAN_EFF_ID_LEN + 1; + break; + default: + goto decode_failed; + } + + if (kstrtou32(sl->rbuff + SLCAN_CMD_LEN, 16, &tmpid)) + goto decode_failed; + + cf->can_id |= tmpid; + + /* get len from sanitized ASCII value */ + if (cf->len >= '0' && cf->len < '9') + cf->len -= '0'; + else + goto decode_failed; + + /* RTR frames may have a dlc > 0 but they never have any data bytes */ + if (!(cf->can_id & CAN_RTR_FLAG)) { + for (i = 0; i < cf->len; i++) { + tmp = hex_to_bin(*cmd++); + if (tmp < 0) + goto decode_failed; + + cf->data[i] = (tmp << 4); + tmp = hex_to_bin(*cmd++); + if (tmp < 0) + goto decode_failed; + + cf->data[i] |= tmp; + } + } + + sl->dev->stats.rx_packets++; + if (!(cf->can_id & CAN_RTR_FLAG)) + sl->dev->stats.rx_bytes += cf->len; + + netif_rx(skb); + return; + +decode_failed: + sl->dev->stats.rx_errors++; + dev_kfree_skb(skb); +} + +/* A change state frame must contain state info and receive and transmit + * error counters. 
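As a cross-check of the encapsulation format described above, the sketch below is a stand-alone user-space encoder (plain C, simplified flag handling, helper name made up; it is not part of the driver) that reproduces the example strings "t4563112233" and "T12ABCDEF2AA55" the same way slcan_encaps() builds them:

#include <stdio.h>
#include <stdint.h>

#define EFF_FLAG 0x80000000U	/* 29 bit extended frame, like CAN_EFF_FLAG */
#define RTR_FLAG 0x40000000U	/* remote transmission request */

/* Encode one frame as "<type><id><dlc><data>*\r"; buf must hold >= 28 bytes. */
static int slcan_encode(char *buf, uint32_t id, uint8_t len, const uint8_t *data)
{
	int eff = !!(id & EFF_FLAG);
	int rtr = !!(id & RTR_FLAG);
	int n, i;

	n = sprintf(buf, "%c%0*X%u",
		    rtr ? (eff ? 'R' : 'r') : (eff ? 'T' : 't'),
		    eff ? 8 : 3,
		    id & (eff ? 0x1FFFFFFFU : 0x7FFU),
		    (unsigned int)len);
	/* RTR frames carry a dlc but no data bytes */
	if (!rtr)
		for (i = 0; i < len; i++)
			n += sprintf(buf + n, "%02X", data[i]);
	buf[n++] = '\r';
	buf[n] = '\0';
	return n;
}

int main(void)
{
	uint8_t d1[] = { 0x11, 0x22, 0x33 };
	uint8_t d2[] = { 0xAA, 0x55 };
	char buf[32];

	slcan_encode(buf, 0x456, 3, d1);			/* "t4563112233\r" */
	printf("%s\n", buf);
	slcan_encode(buf, EFF_FLAG | 0x12ABCDEF, 2, d2);	/* "T12ABCDEF2AA55\r" */
	printf("%s\n", buf);
	return 0;
}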
+ * + * Examples: + * + * sb256256 : state bus-off: rx counter 256, tx counter 256 + * sa057033 : state active, rx counter 57, tx counter 33 + */ +static void slcan_bump_state(struct slcan *sl) +{ + struct net_device *dev = sl->dev; + struct sk_buff *skb; + struct can_frame *cf; + char *cmd = sl->rbuff; + u32 rxerr, txerr; + enum can_state state, rx_state, tx_state; + + switch (cmd[1]) { + case 'a': + state = CAN_STATE_ERROR_ACTIVE; + break; + case 'w': + state = CAN_STATE_ERROR_WARNING; + break; + case 'p': + state = CAN_STATE_ERROR_PASSIVE; + break; + case 'b': + state = CAN_STATE_BUS_OFF; + break; + default: + return; + } + + if (state == sl->can.state || sl->rcount != SLCAN_STATE_MSG_LEN) + return; + + cmd += SLCAN_STATE_BE_RXCNT_LEN + SLCAN_CMD_LEN + 1; + cmd[SLCAN_STATE_BE_TXCNT_LEN] = 0; + if (kstrtou32(cmd, 10, &txerr)) + return; + + *cmd = 0; + cmd -= SLCAN_STATE_BE_RXCNT_LEN; + if (kstrtou32(cmd, 10, &rxerr)) + return; + + skb = alloc_can_err_skb(dev, &cf); + + tx_state = txerr >= rxerr ? state : 0; + rx_state = txerr <= rxerr ? state : 0; + can_change_state(dev, cf, tx_state, rx_state); + + if (state == CAN_STATE_BUS_OFF) { + can_bus_off(dev); + } else if (skb) { + cf->can_id |= CAN_ERR_CNT; + cf->data[6] = txerr; + cf->data[7] = rxerr; + } + + if (skb) + netif_rx(skb); +} + +/* An error frame can contain more than one type of error. + * + * Examples: + * + * e1a : len 1, errors: ACK error + * e3bcO: len 3, errors: Bit0 error, CRC error, Tx overrun error + */ +static void slcan_bump_err(struct slcan *sl) +{ + struct net_device *dev = sl->dev; + struct sk_buff *skb; + struct can_frame *cf; + char *cmd = sl->rbuff; + bool rx_errors = false, tx_errors = false, rx_over_errors = false; + int i, len; + + if (sl->rcount < SLCAN_ERROR_MSG_LEN_MIN) + return; + + /* get len from sanitized ASCII value */ + len = cmd[1]; + if (len >= '0' && len < '9') + len -= '0'; + else + return; + + if ((len + SLCAN_CMD_LEN + 1) > sl->rcount) + return; + + skb = alloc_can_err_skb(dev, &cf); + + if (skb) + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + + cmd += SLCAN_CMD_LEN + 1; + for (i = 0; i < len; i++, cmd++) { + switch (*cmd) { + case 'a': + netdev_dbg(dev, "ACK error\n"); + tx_errors = true; + if (skb) { + cf->can_id |= CAN_ERR_ACK; + cf->data[3] = CAN_ERR_PROT_LOC_ACK; + } + + break; + case 'b': + netdev_dbg(dev, "Bit0 error\n"); + tx_errors = true; + if (skb) + cf->data[2] |= CAN_ERR_PROT_BIT0; + + break; + case 'B': + netdev_dbg(dev, "Bit1 error\n"); + tx_errors = true; + if (skb) + cf->data[2] |= CAN_ERR_PROT_BIT1; + + break; + case 'c': + netdev_dbg(dev, "CRC error\n"); + rx_errors = true; + if (skb) { + cf->data[2] |= CAN_ERR_PROT_BIT; + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; + } + + break; + case 'f': + netdev_dbg(dev, "Form Error\n"); + rx_errors = true; + if (skb) + cf->data[2] |= CAN_ERR_PROT_FORM; + + break; + case 'o': + netdev_dbg(dev, "Rx overrun error\n"); + rx_over_errors = true; + rx_errors = true; + if (skb) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + } + + break; + case 'O': + netdev_dbg(dev, "Tx overrun error\n"); + tx_errors = true; + if (skb) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = CAN_ERR_CRTL_TX_OVERFLOW; + } + + break; + case 's': + netdev_dbg(dev, "Stuff error\n"); + rx_errors = true; + if (skb) + cf->data[2] |= CAN_ERR_PROT_STUFF; + + break; + default: + if (skb) + dev_kfree_skb(skb); + + return; + } + } + + if (rx_errors) + dev->stats.rx_errors++; + + if (rx_over_errors) + dev->stats.rx_over_errors++; + + if (tx_errors) + 
dev->stats.tx_errors++; + + if (skb) + netif_rx(skb); +} + +static void slcan_bump(struct slcan *sl) +{ + switch (sl->rbuff[0]) { + case 'r': + fallthrough; + case 't': + fallthrough; + case 'R': + fallthrough; + case 'T': + return slcan_bump_frame(sl); + case 'e': + return slcan_bump_err(sl); + case 's': + return slcan_bump_state(sl); + default: + return; + } +} + +/* parse tty input stream */ +static void slcan_unesc(struct slcan *sl, unsigned char s) +{ + if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */ + if (!test_and_clear_bit(SLF_ERROR, &sl->flags)) + slcan_bump(sl); + + sl->rcount = 0; + } else { + if (!test_bit(SLF_ERROR, &sl->flags)) { + if (sl->rcount < SLCAN_MTU) { + sl->rbuff[sl->rcount++] = s; + return; + } + + sl->dev->stats.rx_over_errors++; + set_bit(SLF_ERROR, &sl->flags); + } + } +} + +/************************************************************************* + * STANDARD SLCAN ENCAPSULATION * + *************************************************************************/ + +/* Encapsulate one can_frame and stuff into a TTY queue. */ +static void slcan_encaps(struct slcan *sl, struct can_frame *cf) +{ + int actual, i; + unsigned char *pos; + unsigned char *endpos; + canid_t id = cf->can_id; + + pos = sl->xbuff; + + if (cf->can_id & CAN_RTR_FLAG) + *pos = 'R'; /* becomes 'r' in standard frame format (SFF) */ + else + *pos = 'T'; /* becomes 't' in standard frame format (SSF) */ + + /* determine number of chars for the CAN-identifier */ + if (cf->can_id & CAN_EFF_FLAG) { + id &= CAN_EFF_MASK; + endpos = pos + SLCAN_EFF_ID_LEN; + } else { + *pos |= 0x20; /* convert R/T to lower case for SFF */ + id &= CAN_SFF_MASK; + endpos = pos + SLCAN_SFF_ID_LEN; + } + + /* build 3 (SFF) or 8 (EFF) digit CAN identifier */ + pos++; + while (endpos >= pos) { + *endpos-- = hex_asc_upper[id & 0xf]; + id >>= 4; + } + + pos += (cf->can_id & CAN_EFF_FLAG) ? + SLCAN_EFF_ID_LEN : SLCAN_SFF_ID_LEN; + + *pos++ = cf->len + '0'; + + /* RTR frames may have a dlc > 0 but they never have any data bytes */ + if (!(cf->can_id & CAN_RTR_FLAG)) { + for (i = 0; i < cf->len; i++) + pos = hex_byte_pack_upper(pos, cf->data[i]); + + sl->dev->stats.tx_bytes += cf->len; + } + + *pos++ = '\r'; + + /* Order of next two lines is *very* important. + * When we are sending a little amount of data, + * the transfer may be completed inside the ops->write() + * routine, because it's running with interrupts enabled. + * In this case we *never* got WRITE_WAKEUP event, + * if we did not request it before write operation. + * 14 Oct 1994 Dmitry Gorodchanin. + */ + set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); + actual = sl->tty->ops->write(sl->tty, sl->xbuff, pos - sl->xbuff); + sl->xleft = (pos - sl->xbuff) - actual; + sl->xhead = sl->xbuff + actual; +} + +/* Write out any remaining transmit buffer. Scheduled when tty is writable */ +static void slcan_transmit(struct work_struct *work) +{ + struct slcan *sl = container_of(work, struct slcan, tx_work); + int actual; + + spin_lock_bh(&sl->lock); + /* First make sure we're connected. 
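The state ('s') and error ('e') messages handled above are just as easy to decode by hand: "sb256256" means bus-off with receive and transmit error counters of 256, "sa057033" means error-active with counters 57 and 33, and an error string such as "e3bcO" lists one character per error after the count. A minimal stand-alone parser for the state message layout (plain C, not driver code):

#include <stdio.h>

/* Decode "s<state><rxerr><txerr>" the way slcan_bump_state() does:
 * one state character, then two 3-digit decimal counters.
 */
int main(void)
{
	const char *msg = "sa057033";
	unsigned int rxerr, txerr;
	const char *state;

	switch (msg[1]) {
	case 'a': state = "error-active"; break;
	case 'w': state = "error-warning"; break;
	case 'p': state = "error-passive"; break;
	case 'b': state = "bus-off"; break;
	default:  state = "unknown"; break;
	}

	if (sscanf(msg + 2, "%3u%3u", &rxerr, &txerr) == 2)
		printf("%s, rx counter %u, tx counter %u\n", state, rxerr, txerr);

	return 0;
}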
*/ + if (unlikely(!netif_running(sl->dev)) && + likely(!test_bit(SLF_XCMD, &sl->flags))) { + spin_unlock_bh(&sl->lock); + return; + } + + if (sl->xleft <= 0) { + if (unlikely(test_bit(SLF_XCMD, &sl->flags))) { + clear_bit(SLF_XCMD, &sl->flags); + clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); + spin_unlock_bh(&sl->lock); + wake_up(&sl->xcmd_wait); + return; + } + + /* Now serial buffer is almost free & we can start + * transmission of another packet + */ + sl->dev->stats.tx_packets++; + clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); + spin_unlock_bh(&sl->lock); + netif_wake_queue(sl->dev); + return; + } + + actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft); + sl->xleft -= actual; + sl->xhead += actual; + spin_unlock_bh(&sl->lock); +} + +/* Called by the driver when there's room for more data. + * Schedule the transmit. + */ +static void slcan_write_wakeup(struct tty_struct *tty) +{ + struct slcan *sl = tty->disc_data; + + schedule_work(&sl->tx_work); +} + +/* Send a can_frame to a TTY queue. */ +static netdev_tx_t slcan_netdev_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct slcan *sl = netdev_priv(dev); + + if (can_dev_dropped_skb(dev, skb)) + return NETDEV_TX_OK; + + spin_lock(&sl->lock); + if (!netif_running(dev)) { + spin_unlock(&sl->lock); + netdev_warn(dev, "xmit: iface is down\n"); + goto out; + } + if (!sl->tty) { + spin_unlock(&sl->lock); + goto out; + } + + netif_stop_queue(sl->dev); + slcan_encaps(sl, (struct can_frame *)skb->data); /* encaps & send */ + spin_unlock(&sl->lock); + + skb_tx_timestamp(skb); + +out: + kfree_skb(skb); + return NETDEV_TX_OK; +} + +/****************************************** + * Routines looking at netdevice side. + ******************************************/ + +static int slcan_transmit_cmd(struct slcan *sl, const unsigned char *cmd) +{ + int ret, actual, n; + + spin_lock(&sl->lock); + if (!sl->tty) { + spin_unlock(&sl->lock); + return -ENODEV; + } + + n = scnprintf(sl->xbuff, sizeof(sl->xbuff), "%s", cmd); + set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); + actual = sl->tty->ops->write(sl->tty, sl->xbuff, n); + sl->xleft = n - actual; + sl->xhead = sl->xbuff + actual; + set_bit(SLF_XCMD, &sl->flags); + spin_unlock(&sl->lock); + ret = wait_event_interruptible_timeout(sl->xcmd_wait, + !test_bit(SLF_XCMD, &sl->flags), + HZ); + clear_bit(SLF_XCMD, &sl->flags); + if (ret == -ERESTARTSYS) + return ret; + + if (ret == 0) + return -ETIMEDOUT; + + return 0; +} + +/* Netdevice UP -> DOWN routine */ +static int slcan_netdev_close(struct net_device *dev) +{ + struct slcan *sl = netdev_priv(dev); + int err; + + if (sl->can.bittiming.bitrate && + sl->can.bittiming.bitrate != CAN_BITRATE_UNKNOWN) { + err = slcan_transmit_cmd(sl, "C\r"); + if (err) + netdev_warn(dev, + "failed to send close command 'C\\r'\n"); + } + + /* TTY discipline is running. */ + clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); + flush_work(&sl->tx_work); + + netif_stop_queue(dev); + sl->rcount = 0; + sl->xleft = 0; + close_candev(dev); + sl->can.state = CAN_STATE_STOPPED; + if (sl->can.bittiming.bitrate == CAN_BITRATE_UNKNOWN) + sl->can.bittiming.bitrate = CAN_BITRATE_UNSET; + + return 0; +} + +/* Netdevice DOWN -> UP routine */ +static int slcan_netdev_open(struct net_device *dev) +{ + struct slcan *sl = netdev_priv(dev); + unsigned char cmd[SLCAN_MTU]; + int err, s; + + /* The baud rate is not set with the command + * `ip link set <iface> type can bitrate <baud>' and therefore + * can.bittiming.bitrate is CAN_BITRATE_UNSET (0), causing + * open_candev() to fail. 
So let's set to a fake value. + */ + if (sl->can.bittiming.bitrate == CAN_BITRATE_UNSET) + sl->can.bittiming.bitrate = CAN_BITRATE_UNKNOWN; + + err = open_candev(dev); + if (err) { + netdev_err(dev, "failed to open can device\n"); + return err; + } + + if (sl->can.bittiming.bitrate != CAN_BITRATE_UNKNOWN) { + for (s = 0; s < ARRAY_SIZE(slcan_bitrate_const); s++) { + if (sl->can.bittiming.bitrate == slcan_bitrate_const[s]) + break; + } + + /* The CAN framework has already validate the bitrate value, + * so we can avoid to check if `s' has been properly set. + */ + snprintf(cmd, sizeof(cmd), "C\rS%d\r", s); + err = slcan_transmit_cmd(sl, cmd); + if (err) { + netdev_err(dev, + "failed to send bitrate command 'C\\rS%d\\r'\n", + s); + goto cmd_transmit_failed; + } + + if (test_bit(CF_ERR_RST, &sl->cmd_flags)) { + err = slcan_transmit_cmd(sl, "F\r"); + if (err) { + netdev_err(dev, + "failed to send error command 'F\\r'\n"); + goto cmd_transmit_failed; + } + } + + if (sl->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) { + err = slcan_transmit_cmd(sl, "L\r"); + if (err) { + netdev_err(dev, + "failed to send listen-only command 'L\\r'\n"); + goto cmd_transmit_failed; + } + } else { + err = slcan_transmit_cmd(sl, "O\r"); + if (err) { + netdev_err(dev, + "failed to send open command 'O\\r'\n"); + goto cmd_transmit_failed; + } + } + } + + sl->can.state = CAN_STATE_ERROR_ACTIVE; + netif_start_queue(dev); + return 0; + +cmd_transmit_failed: + close_candev(dev); + return err; +} + +static const struct net_device_ops slcan_netdev_ops = { + .ndo_open = slcan_netdev_open, + .ndo_stop = slcan_netdev_close, + .ndo_start_xmit = slcan_netdev_xmit, +}; + +/****************************************** + * Routines looking at TTY side. + ******************************************/ + +/* Handle the 'receiver data ready' interrupt. + * This function is called by the 'tty_io' module in the kernel when + * a block of SLCAN data has been received, which can now be decapsulated + * and sent on to some IP layer for further processing. This will not + * be re-entered while running but other ldisc functions may be called + * in parallel + */ +static void slcan_receive_buf(struct tty_struct *tty, const u8 *cp, + const u8 *fp, size_t count) +{ + struct slcan *sl = tty->disc_data; + + if (!netif_running(sl->dev)) + return; + + /* Read the characters out of the buffer */ + while (count--) { + if (fp && *fp++) { + if (!test_and_set_bit(SLF_ERROR, &sl->flags)) + sl->dev->stats.rx_errors++; + cp++; + continue; + } + slcan_unesc(sl, *cp++); + } +} + +/* Open the high-level part of the SLCAN channel. + * This function is called by the TTY module when the + * SLCAN line discipline is called for. + * + * Called in process context serialized from other ldisc calls. 
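To make the command sequence in slcan_netdev_open() above concrete: bringing the interface up at a fixed bitrate first closes the channel ("C\r"), then selects the bitrate with "S<n>\r" where <n> is the index into slcan_bitrate_const, optionally resets the error counters ("F\r") when err-rst-on-open is set, and finally opens the channel with "O\r" (or "L\r" in listen-only mode). A small stand-alone sketch of the index lookup (user-space C, illustrative only):

#include <stdio.h>

static const unsigned int slcan_bitrate_const[] = {
	10000, 20000, 50000, 100000, 125000,
	250000, 500000, 800000, 1000000
};

/* Return the 'S' command index for a bitrate, or -1 if unsupported. */
static int slcan_bitrate_index(unsigned int bitrate)
{
	unsigned int i;

	for (i = 0; i < sizeof(slcan_bitrate_const) / sizeof(slcan_bitrate_const[0]); i++)
		if (slcan_bitrate_const[i] == bitrate)
			return i;
	return -1;
}

int main(void)
{
	/* 500 kbit/s maps to "S6", so the open sequence is "C\r" "S6\r" "O\r" */
	printf("500000 bit/s -> S%d\n", slcan_bitrate_index(500000));
	return 0;
}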
+ */ +static int slcan_open(struct tty_struct *tty) +{ + struct net_device *dev; + struct slcan *sl; + int err; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (!tty->ops->write) + return -EOPNOTSUPP; + + dev = alloc_candev(sizeof(*sl), 1); + if (!dev) + return -ENFILE; + + sl = netdev_priv(dev); + + /* Configure TTY interface */ + tty->receive_room = 65536; /* We don't flow control */ + sl->rcount = 0; + sl->xleft = 0; + spin_lock_init(&sl->lock); + INIT_WORK(&sl->tx_work, slcan_transmit); + init_waitqueue_head(&sl->xcmd_wait); + + /* Configure CAN metadata */ + sl->can.bitrate_const = slcan_bitrate_const; + sl->can.bitrate_const_cnt = ARRAY_SIZE(slcan_bitrate_const); + sl->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY; + + /* Configure netdev interface */ + sl->dev = dev; + dev->netdev_ops = &slcan_netdev_ops; + dev->ethtool_ops = &slcan_ethtool_ops; + + /* Mark ldisc channel as alive */ + sl->tty = tty; + tty->disc_data = sl; + + err = register_candev(dev); + if (err) { + free_candev(dev); + pr_err("can't register candev\n"); + return err; + } + + netdev_info(dev, "slcan on %s.\n", tty->name); + /* TTY layer expects 0 on success */ + return 0; +} + +/* Close down a SLCAN channel. + * This means flushing out any pending queues, and then returning. This + * call is serialized against other ldisc functions. + * Once this is called, no other ldisc function of ours is entered. + * + * We also use this method for a hangup event. + */ +static void slcan_close(struct tty_struct *tty) +{ + struct slcan *sl = tty->disc_data; + + unregister_candev(sl->dev); + + /* + * The netdev needn't be UP (so .ndo_stop() is not called). Hence make + * sure this is not running before freeing it up. + */ + flush_work(&sl->tx_work); + + /* Mark channel as dead */ + spin_lock_bh(&sl->lock); + tty->disc_data = NULL; + sl->tty = NULL; + spin_unlock_bh(&sl->lock); + + netdev_info(sl->dev, "slcan off %s.\n", tty->name); + free_candev(sl->dev); +} + +/* Perform I/O control on an active SLCAN channel. */ +static int slcan_ioctl(struct tty_struct *tty, unsigned int cmd, + unsigned long arg) +{ + struct slcan *sl = tty->disc_data; + unsigned int tmp; + + switch (cmd) { + case SIOCGIFNAME: + tmp = strlen(sl->dev->name) + 1; + if (copy_to_user((void __user *)arg, sl->dev->name, tmp)) + return -EFAULT; + return 0; + + case SIOCSIFHWADDR: + return -EINVAL; + + default: + return tty_mode_ioctl(tty, cmd, arg); + } +} + +static struct tty_ldisc_ops slcan_ldisc = { + .owner = THIS_MODULE, + .num = N_SLCAN, + .name = KBUILD_MODNAME, + .open = slcan_open, + .close = slcan_close, + .ioctl = slcan_ioctl, + .receive_buf = slcan_receive_buf, + .write_wakeup = slcan_write_wakeup, +}; + +static int __init slcan_init(void) +{ + int status; + + pr_info("serial line CAN interface driver\n"); + + /* Fill in our line protocol discipline, and register it */ + status = tty_register_ldisc(&slcan_ldisc); + if (status) + pr_err("can't register line discipline\n"); + + return status; +} + +static void __exit slcan_exit(void) +{ + /* This will only be called when all channels have been closed by + * userspace - tty_ldisc.c takes care of the module's refcount. 
+ */ + tty_unregister_ldisc(&slcan_ldisc); +} + +module_init(slcan_init); +module_exit(slcan_exit); diff --git a/drivers/net/can/slcan/slcan-ethtool.c b/drivers/net/can/slcan/slcan-ethtool.c new file mode 100644 index 000000000000..f598c653fbfa --- /dev/null +++ b/drivers/net/can/slcan/slcan-ethtool.c @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2022 Amarula Solutions, Dario Binacchi <dario.binacchi@amarulasolutions.com> + * + */ + +#include <linux/can/dev.h> +#include <linux/ethtool.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/platform_device.h> + +#include "slcan.h" + +static const char slcan_priv_flags_strings[][ETH_GSTRING_LEN] = { +#define SLCAN_PRIV_FLAGS_ERR_RST_ON_OPEN BIT(0) + "err-rst-on-open", +}; + +static void slcan_get_strings(struct net_device *ndev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_PRIV_FLAGS: + memcpy(data, slcan_priv_flags_strings, + sizeof(slcan_priv_flags_strings)); + } +} + +static u32 slcan_get_priv_flags(struct net_device *ndev) +{ + u32 flags = 0; + + if (slcan_err_rst_on_open(ndev)) + flags |= SLCAN_PRIV_FLAGS_ERR_RST_ON_OPEN; + + return flags; +} + +static int slcan_set_priv_flags(struct net_device *ndev, u32 flags) +{ + bool err_rst_op_open = !!(flags & SLCAN_PRIV_FLAGS_ERR_RST_ON_OPEN); + + return slcan_enable_err_rst_on_open(ndev, err_rst_op_open); +} + +static int slcan_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_PRIV_FLAGS: + return ARRAY_SIZE(slcan_priv_flags_strings); + default: + return -EOPNOTSUPP; + } +} + +const struct ethtool_ops slcan_ethtool_ops = { + .get_strings = slcan_get_strings, + .get_priv_flags = slcan_get_priv_flags, + .set_priv_flags = slcan_set_priv_flags, + .get_sset_count = slcan_get_sset_count, + .get_ts_info = ethtool_op_get_ts_info, +}; diff --git a/drivers/net/can/slcan/slcan.h b/drivers/net/can/slcan/slcan.h new file mode 100644 index 000000000000..85cedf856db3 --- /dev/null +++ b/drivers/net/can/slcan/slcan.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 + * slcan.h - serial line CAN interface driver + * + * Copyright (C) Laurence Culhane <loz@holmes.demon.co.uk> + * Copyright (C) Fred N. van Kempen <waltje@uwalt.nl.mugnet.org> + * Copyright (C) Oliver Hartkopp <socketcan@hartkopp.net> + * Copyright (C) 2022 Amarula Solutions, Dario Binacchi <dario.binacchi@amarulasolutions.com> + * + */ + +#ifndef _SLCAN_H +#define _SLCAN_H + +bool slcan_err_rst_on_open(struct net_device *ndev); +int slcan_enable_err_rst_on_open(struct net_device *ndev, bool on); + +extern const struct ethtool_ops slcan_ethtool_ops; + +#endif /* _SLCAN_H */ diff --git a/drivers/net/can/softing/Kconfig b/drivers/net/can/softing/Kconfig index 96b6fe158b5b..8afd7d0a1000 100644 --- a/drivers/net/can/softing/Kconfig +++ b/drivers/net/can/softing/Kconfig @@ -1,17 +1,18 @@ +# SPDX-License-Identifier: GPL-2.0-only config CAN_SOFTING tristate "Softing Gmbh CAN generic support" depends on HAS_IOMEM - ---help--- + help Support for CAN cards from Softing Gmbh & some cards from Vector Gmbh. - Softing Gmbh CAN cards come with 1 or 2 physical busses. + Softing Gmbh CAN cards come with 1 or 2 physical buses. Those cards typically use Dual Port RAM to communicate with the host CPU. The interface is then identical for PCI and PCMCIA cards. This driver operates on a platform device, which has been created by softing_cs or softing_pci driver. 
Warning: The API of the card does not allow fine control per bus, but - controls the 2 busses on the card together. + controls the 2 buses on the card together. As such, some actions (start/stop/busoff recovery) on 1 bus must bring down the other bus too temporarily. @@ -19,11 +20,11 @@ config CAN_SOFTING_CS tristate "Softing Gmbh CAN pcmcia cards" depends on PCMCIA depends on CAN_SOFTING - ---help--- + help Support for PCMCIA cards from Softing Gmbh & some cards from Vector Gmbh. You need firmware for these, which you can get at - http://developer.berlios.de/projects/socketcan/ + https://github.com/linux-can/can-firmware This version of the driver is written against firmware version 4.6 (softing-fw-4.6-binaries.tar.gz) In order to use the card as CAN device, you need the Softing generic diff --git a/drivers/net/can/softing/Makefile b/drivers/net/can/softing/Makefile index a23da492dad5..c51154000377 100644 --- a/drivers/net/can/softing/Makefile +++ b/drivers/net/can/softing/Makefile @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only softing-y := softing_main.o softing_fw.o obj-$(CONFIG_CAN_SOFTING) += softing.o diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c index 4d4492884e0b..e5c939b63fa6 100644 --- a/drivers/net/can/softing/softing_cs.c +++ b/drivers/net/can/softing/softing_cs.c @@ -1,19 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2008-2010 * * - Kurt Van Dijck, EIA Electronics - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the version 2 of the GNU General Public License - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. */ #include <linux/module.h> @@ -304,7 +293,7 @@ static int softingcs_probe(struct pcmcia_device *pcmcia) return 0; platform_failed: - kfree(dev); + platform_device_put(pdev); mem_failed: pcmcia_bad: pcmcia_failed: diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c index aac58ce6e371..721df91cdbfb 100644 --- a/drivers/net/can/softing/softing_fw.c +++ b/drivers/net/can/softing/softing_fw.c @@ -1,19 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2008-2010 * * - Kurt Van Dijck, EIA Electronics - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the version 2 of the GNU General Public License - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. 
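The softing_cs fix above (kfree() replaced by platform_device_put() on the error path) follows the general rule that a platform device returned by platform_device_alloc() holds a reference on an embedded kobject, so it must be released through its refcount rather than freed directly. A schematic sketch of the pattern (kernel-style C, device name made up, not a buildable module on its own):

#include <linux/platform_device.h>

static struct platform_device *example_register(void)
{
	struct platform_device *pdev;
	int ret;

	pdev = platform_device_alloc("example-dev", 0);
	if (!pdev)
		return NULL;

	ret = platform_device_add(pdev);
	if (ret) {
		/* not kfree(pdev): drop the reference taken by _alloc() */
		platform_device_put(pdev);
		return NULL;
	}

	return pdev;
}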
*/ #include <linux/firmware.h> @@ -284,7 +273,7 @@ int softing_load_app_fw(const char *file, struct softing *card) goto failed; } - /* regualar data */ + /* regular data */ for (sum = 0, j = 0; j < len; ++j) sum += dat[j]; /* work in 16bit (target) */ @@ -447,7 +436,7 @@ int softing_startstop(struct net_device *dev, int up) return ret; bus_bitmask_start = 0; - if (dev && up) + if (up) /* prepare to start this bus as well */ bus_bitmask_start |= (1 << priv->index); /* bring netdevs down */ @@ -485,14 +474,14 @@ int softing_startstop(struct net_device *dev, int up) if (ret) goto failed; if (!bus_bitmask_start) - /* no busses to be brought up */ + /* no buses to be brought up */ goto card_done; if ((bus_bitmask_start & 1) && (bus_bitmask_start & 2) && (softing_error_reporting(card->net[0]) != softing_error_reporting(card->net[1]))) { dev_alert(&card->pdev->dev, - "err_reporting flag differs for busses\n"); + "err_reporting flag differs for buses\n"); goto invalid; } error_reporting = 0; @@ -576,18 +565,19 @@ int softing_startstop(struct net_device *dev, int up) if (ret < 0) goto failed; } - /* enable_error_frame */ - /* + + /* enable_error_frame + * * Error reporting is switched off at the moment since * the receiving of them is not yet 100% verified * This should be enabled sooner or later - * - if (error_reporting) { + */ + if (0 && error_reporting) { ret = softing_fct_cmd(card, 51, "enable_error_frame"); if (ret < 0) goto failed; } - */ + /* initialize interface */ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 2]); iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 4]); @@ -635,7 +625,7 @@ int softing_startstop(struct net_device *dev, int up) */ memset(&msg, 0, sizeof(msg)); msg.can_id = CAN_ERR_FLAG | CAN_ERR_RESTARTED; - msg.can_dlc = CAN_ERR_DLC; + msg.len = CAN_ERR_DLC; for (j = 0; j < ARRAY_SIZE(card->net); ++j) { if (!(bus_bitmask_start & (1 << j))) continue; @@ -646,7 +636,7 @@ int softing_startstop(struct net_device *dev, int up) priv->can.state = CAN_STATE_ERROR_ACTIVE; open_candev(netdev); if (dev != netdev) { - /* notify other busses on the restart */ + /* notify other buses on the restart */ softing_netdev_rx(netdev, &msg, 0); ++priv->can.can_stats.restarts; } diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c index e22696190583..79bc64395ac4 100644 --- a/drivers/net/can/softing/softing_main.c +++ b/drivers/net/can/softing/softing_main.c @@ -1,21 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2008-2010 * * - Kurt Van Dijck, EIA Electronics - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the version 2 of the GNU General Public License - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. 
*/ +#include <linux/ethtool.h> #include <linux/module.h> #include <linux/interrupt.h> #include <asm/io.h> @@ -70,7 +60,7 @@ static netdev_tx_t softing_netdev_start_xmit(struct sk_buff *skb, struct can_frame *cf = (struct can_frame *)skb->data; uint8_t buf[DPRAM_TX_SIZE]; - if (can_dropped_invalid_skb(dev, skb)) + if (can_dev_dropped_skb(dev, skb)) return NETDEV_TX_OK; spin_lock(&card->spin); @@ -95,7 +85,7 @@ static netdev_tx_t softing_netdev_start_xmit(struct sk_buff *skb, if (priv->index) *ptr |= CMD_BUS2; ++ptr; - *ptr++ = cf->can_dlc; + *ptr++ = cf->len; *ptr++ = (cf->can_id >> 0); *ptr++ = (cf->can_id >> 8); if (cf->can_id & CAN_EFF_FLAG) { @@ -106,7 +96,7 @@ static netdev_tx_t softing_netdev_start_xmit(struct sk_buff *skb, ptr += 1; } if (!(cf->can_id & CAN_RTR_FLAG)) - memcpy(ptr, &cf->data[0], cf->can_dlc); + memcpy(ptr, &cf->data[0], cf->len); memcpy_toio(&card->dpram[DPRAM_TX + DPRAM_TX_SIZE * fifo_wr], buf, DPRAM_TX_SIZE); if (++fifo_wr >= DPRAM_TX_CNT) @@ -115,7 +105,7 @@ static netdev_tx_t softing_netdev_start_xmit(struct sk_buff *skb, card->tx.last_bus = priv->index; ++card->tx.pending; ++priv->tx.pending; - can_put_echo_skb(skb, dev, priv->tx.echo_put); + can_put_echo_skb(skb, dev, priv->tx.echo_put, 0); ++priv->tx.echo_put; if (priv->tx.echo_put >= TX_ECHO_SKB_MAX) priv->tx.echo_put = 0; @@ -178,11 +168,11 @@ static int softing_handle_1(struct softing *card) iowrite8(0, &card->dpram[DPRAM_RX_LOST]); /* prepare msg */ msg.can_id = CAN_ERR_FLAG | CAN_ERR_CRTL; - msg.can_dlc = CAN_ERR_DLC; + msg.len = CAN_ERR_DLC; msg.data[1] = CAN_ERR_CRTL_RX_OVERFLOW; /* - * service to all busses, we don't know which it was applicable - * but only service busses that are online + * service to all buses, we don't know which it was applicable + * but only service buses that are online */ for (j = 0; j < ARRAY_SIZE(card->net); ++j) { netdev = card->net[j]; @@ -229,7 +219,7 @@ static int softing_handle_1(struct softing *card) state = *ptr++; msg.can_id = CAN_ERR_FLAG; - msg.can_dlc = CAN_ERR_DLC; + msg.len = CAN_ERR_DLC; if (state & SF_MASK_BUSOFF) { can_state = CAN_STATE_BUS_OFF; @@ -250,7 +240,6 @@ static int softing_handle_1(struct softing *card) DPRAM_INFO_BUSSTATE2 : DPRAM_INFO_BUSSTATE]); /* timestamp */ tmp_u32 = le32_to_cpup((void *)ptr); - ptr += 4; ktime = softing_raw2ktime(card, tmp_u32); ++netdev->stats.rx_errors; @@ -272,7 +261,7 @@ static int softing_handle_1(struct softing *card) } else { if (cmd & CMD_RTR) msg.can_id |= CAN_RTR_FLAG; - msg.can_dlc = get_can_dlc(*ptr++); + msg.len = can_cc_dlc2len(*ptr++); if (cmd & CMD_XTD) { msg.can_id |= CAN_EFF_FLAG; msg.can_id |= le32_to_cpup((void *)ptr); @@ -287,7 +276,6 @@ static int softing_handle_1(struct softing *card) ktime = softing_raw2ktime(card, tmp_u32); if (!(msg.can_id & CAN_RTR_FLAG)) memcpy(&msg.data[0], ptr, 8); - ptr += 8; /* update socket */ if (cmd & CMD_ACK) { /* acknowledge, was tx msg */ @@ -295,7 +283,10 @@ static int softing_handle_1(struct softing *card) skb = priv->can.echo_skb[priv->tx.echo_get]; if (skb) skb->tstamp = ktime; - can_get_echo_skb(netdev, priv->tx.echo_get); + ++netdev->stats.tx_packets; + netdev->stats.tx_bytes += + can_get_echo_skb(netdev, priv->tx.echo_get, + NULL); ++priv->tx.echo_get; if (priv->tx.echo_get >= TX_ECHO_SKB_MAX) priv->tx.echo_get = 0; @@ -303,9 +294,6 @@ static int softing_handle_1(struct softing *card) --priv->tx.pending; if (card->tx.pending) --card->tx.pending; - ++netdev->stats.tx_packets; - if (!(msg.can_id & CAN_RTR_FLAG)) - netdev->stats.tx_bytes += msg.can_dlc; } else { int 
ret; @@ -313,7 +301,7 @@ static int softing_handle_1(struct softing *card) if (ret == NET_RX_SUCCESS) { ++netdev->stats.rx_packets; if (!(msg.can_id & CAN_RTR_FLAG)) - netdev->stats.rx_bytes += msg.can_dlc; + netdev->stats.rx_bytes += msg.len; } else { ++netdev->stats.rx_dropped; } @@ -350,7 +338,7 @@ static irqreturn_t softing_irq_thread(int irq, void *dev_id) continue; priv = netdev_priv(netdev); if (!canif_is_active(netdev)) - /* it makes no sense to wake dead busses */ + /* it makes no sense to wake dead buses */ continue; if (priv->tx.pending >= TX_ECHO_SKB_MAX) continue; @@ -385,7 +373,7 @@ static irqreturn_t softing_irq_v1(int irq, void *dev_id) } /* - * netdev/candev inter-operability + * netdev/candev interoperability */ static int softing_netdev_open(struct net_device *ndev) { @@ -393,20 +381,22 @@ static int softing_netdev_open(struct net_device *ndev) /* check or determine and set bittime */ ret = open_candev(ndev); - if (!ret) - ret = softing_startstop(ndev, 1); + if (ret) + return ret; + + ret = softing_startstop(ndev, 1); + if (ret < 0) + close_candev(ndev); + return ret; } static int softing_netdev_stop(struct net_device *ndev) { - int ret; - netif_stop_queue(ndev); /* softing cycle does close_candev() */ - ret = softing_startstop(ndev, 0); - return ret; + return softing_startstop(ndev, 0); } static int softing_candev_set_mode(struct net_device *ndev, enum can_mode mode) @@ -458,8 +448,9 @@ static void softing_card_shutdown(struct softing *card) { int fw_up = 0; - if (mutex_lock_interruptible(&card->fw.lock)) + if (mutex_lock_interruptible(&card->fw.lock)) { /* return -ERESTARTSYS */; + } fw_up = card->fw.up; card->fw.up = 0; @@ -618,11 +609,14 @@ static const struct net_device_ops softing_netdev_ops = { .ndo_open = softing_netdev_open, .ndo_stop = softing_netdev_stop, .ndo_start_xmit = softing_netdev_start_xmit, - .ndo_change_mtu = can_change_mtu, +}; + +static const struct ethtool_ops softing_ethtool_ops = { + .get_ts_info = ethtool_op_get_ts_info, }; static const struct can_bittiming_const softing_btr_const = { - .name = "softing", + .name = KBUILD_MODNAME, .tseg1_min = 1, .tseg1_max = 16, .tseg2_min = 1, @@ -659,6 +653,7 @@ static struct net_device *softing_netdev_create(struct softing *card, netdev->flags |= IFF_ECHO; netdev->netdev_ops = &softing_netdev_ops; + netdev->ethtool_ops = &softing_ethtool_ops; priv->can.do_set_mode = softing_candev_set_mode; priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; @@ -694,7 +689,7 @@ static void softing_netdev_cleanup(struct net_device *netdev) static ssize_t show_##name(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ - struct softing *card = platform_get_drvdata(to_platform_device(dev)); \ + struct softing *card = dev_get_drvdata(dev); \ return sprintf(buf, "%u\n", card->member); \ } \ static DEVICE_ATTR(name, 0444, show_##name, NULL) @@ -703,7 +698,7 @@ static DEVICE_ATTR(name, 0444, show_##name, NULL) static ssize_t show_##name(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ - struct softing *card = platform_get_drvdata(to_platform_device(dev)); \ + struct softing *card = dev_get_drvdata(dev); \ return sprintf(buf, "%s\n", card->member); \ } \ static DEVICE_ATTR(name, 0444, show_##name, NULL) @@ -733,7 +728,7 @@ static const struct attribute_group softing_pdev_group = { /* * platform driver */ -static int softing_pdev_remove(struct platform_device *pdev) +static void softing_pdev_remove(struct platform_device *pdev) { struct softing *card = platform_get_drvdata(pdev); int j; @@ 
-751,7 +746,6 @@ static int softing_pdev_remove(struct platform_device *pdev) iounmap(card->dpram); kfree(card); - return 0; } static int softing_pdev_probe(struct platform_device *pdev) @@ -788,7 +782,7 @@ static int softing_pdev_probe(struct platform_device *pdev) goto platform_resource_failed; card->dpram_phys = pres->start; card->dpram_size = resource_size(pres); - card->dpram = ioremap_nocache(card->dpram_phys, card->dpram_size); + card->dpram = ioremap(card->dpram_phys, card->dpram_size); if (!card->dpram) { dev_alert(&card->pdev->dev, "dpram ioremap failed\n"); goto ioremap_failed; @@ -856,7 +850,7 @@ platform_resource_failed: static struct platform_driver softing_driver = { .driver = { - .name = "softing", + .name = KBUILD_MODNAME, }, .probe = softing_pdev_probe, .remove = softing_pdev_remove, diff --git a/drivers/net/can/softing/softing_platform.h b/drivers/net/can/softing/softing_platform.h index 68a161547644..cd8d7904c5f0 100644 --- a/drivers/net/can/softing/softing_platform.h +++ b/drivers/net/can/softing/softing_platform.h @@ -19,7 +19,7 @@ struct softing_platform_data { * 16bit, shared interrupt */ int generation; - int nbus; /* # busses on device */ + int nbus; /* # buses on device */ unsigned int freq; /* operating frequency in Hz */ unsigned int max_brp; unsigned int max_sjw; diff --git a/drivers/net/can/spi/Kconfig b/drivers/net/can/spi/Kconfig index 8f2e0dd7b756..f45449210047 100644 --- a/drivers/net/can/spi/Kconfig +++ b/drivers/net/can/spi/Kconfig @@ -1,16 +1,18 @@ +# SPDX-License-Identifier: GPL-2.0-only menu "CAN SPI interfaces" depends on SPI config CAN_HI311X tristate "Holt HI311x SPI CAN controllers" - depends on CAN_DEV && SPI && HAS_DMA - ---help--- + help Driver for the Holt HI311x SPI CAN controllers. config CAN_MCP251X - tristate "Microchip MCP251x SPI CAN controllers" - depends on HAS_DMA - ---help--- - Driver for the Microchip MCP251x SPI CAN controllers. + tristate "Microchip MCP251x and MCP25625 SPI CAN controllers" + help + Driver for the Microchip MCP251x and MCP25625 SPI CAN + controllers. + +source "drivers/net/can/spi/mcp251xfd/Kconfig" endmenu diff --git a/drivers/net/can/spi/Makefile b/drivers/net/can/spi/Makefile index f59fa3731073..33e3f60bbc10 100644 --- a/drivers/net/can/spi/Makefile +++ b/drivers/net/can/spi/Makefile @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only # # Makefile for the Linux Controller Area Network SPI drivers. # @@ -5,3 +6,4 @@ obj-$(CONFIG_CAN_HI311X) += hi311x.o obj-$(CONFIG_CAN_MCP251X) += mcp251x.o +obj-y += mcp251xfd/ diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c index ddaf46239e39..e00d3dbc4cf4 100644 --- a/drivers/net/can/spi/hi311x.c +++ b/drivers/net/can/spi/hi311x.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* CAN bus driver for Holt HI3110 CAN Controller with SPI Interface * * Copyright(C) Timesys Corporation 2016 @@ -11,29 +12,24 @@ * - Sascha Hauer, Marc Kleine-Budde, Pengutronix * - Simon Kallweit, intefo AG * Copyright 2007 - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
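
The softing hunks above follow the current can-dev TX bookkeeping that this series applies across drivers: invalid frames are rejected with can_dev_dropped_skb(), the skb is parked with the four-argument can_put_echo_skb() when it is handed to the controller, and the TX statistics are credited from the length returned by can_get_echo_skb() on completion. A minimal sketch of that pattern, assuming a hypothetical driver with a single echo slot (the demo_* names are illustrative, not part of the patch):

#include <linux/can/dev.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* queue one frame for transmission using echo slot 0 */
static netdev_tx_t demo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (can_dev_dropped_skb(dev, skb))	/* drops invalid/unsupported skbs */
		return NETDEV_TX_OK;

	netif_stop_queue(dev);
	/* keep the skb until the controller confirms transmission */
	can_put_echo_skb(skb, dev, 0, 0);
	/* ... hand the frame to the hardware here ... */
	return NETDEV_TX_OK;
}

/* TX-done completion path */
static void demo_tx_done(struct net_device *dev)
{
	/* loop the echoed skb back and account the payload length */
	dev->stats.tx_bytes += can_get_echo_skb(dev, 0, NULL);
	dev->stats.tx_packets++;
	netif_wake_queue(dev);
}
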
*/ #include <linux/can/core.h> #include <linux/can/dev.h> -#include <linux/can/led.h> #include <linux/clk.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/device.h> -#include <linux/dma-mapping.h> +#include <linux/ethtool.h> #include <linux/freezer.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/netdevice.h> -#include <linux/of.h> -#include <linux/of_device.h> #include <linux/platform_device.h> +#include <linux/property.h> #include <linux/regulator/consumer.h> #include <linux/slab.h> #include <linux/spi/spi.h> @@ -129,10 +125,6 @@ #define DEVICE_NAME "hi3110" -static int hi3110_enable_dma = 1; /* Enable SPI DMA. Default: 1 (On) */ -module_param(hi3110_enable_dma, int, 0444); -MODULE_PARM_DESC(hi3110_enable_dma, "Enable SPI DMA. Default: 1 (On)"); - static const struct can_bittiming_const hi3110_bittiming_const = { .name = DEVICE_NAME, .tseg1_min = 2, @@ -159,11 +151,8 @@ struct hi3110_priv { u8 *spi_tx_buf; u8 *spi_rx_buf; - dma_addr_t spi_tx_dma; - dma_addr_t spi_rx_dma; struct sk_buff *tx_skb; - int tx_len; struct workqueue_struct *wq; struct work_struct tx_work; @@ -176,6 +165,8 @@ struct hi3110_priv { #define HI3110_AFTER_SUSPEND_POWER 4 #define HI3110_AFTER_SUSPEND_RESTART 8 int restart_tx; + bool tx_busy; + struct regulator *power; struct regulator *transceiver; struct clk *clk; @@ -185,14 +176,13 @@ static void hi3110_clean(struct net_device *net) { struct hi3110_priv *priv = netdev_priv(net); - if (priv->tx_skb || priv->tx_len) + if (priv->tx_skb || priv->tx_busy) net->stats.tx_errors++; - if (priv->tx_skb) - dev_kfree_skb(priv->tx_skb); - if (priv->tx_len) - can_free_echo_skb(priv->net, 0); + dev_kfree_skb(priv->tx_skb); + if (priv->tx_busy) + can_free_echo_skb(priv->net, 0, NULL); priv->tx_skb = NULL; - priv->tx_len = 0; + priv->tx_busy = false; } /* Note about handling of error return of hi3110_spi_trans: accessing @@ -220,13 +210,6 @@ static int hi3110_spi_trans(struct spi_device *spi, int len) int ret; spi_message_init(&m); - - if (hi3110_enable_dma) { - t.tx_dma = priv->spi_tx_dma; - t.rx_dma = priv->spi_rx_dma; - m.is_dma_mapped = 1; - } - spi_message_add_tail(&t, &m); ret = spi_sync(spi, &m); @@ -236,7 +219,7 @@ static int hi3110_spi_trans(struct spi_device *spi, int len) return ret; } -static u8 hi3110_cmd(struct spi_device *spi, u8 command) +static int hi3110_cmd(struct spi_device *spi, u8 command) { struct hi3110_priv *priv = spi_get_drvdata(spi); @@ -295,13 +278,13 @@ static void hi3110_hw_tx(struct spi_device *spi, struct can_frame *frame) ((frame->can_id & CAN_EFF_MASK) << 1) | ((frame->can_id & CAN_RTR_FLAG) ? 1 : 0); - buf[HI3110_FIFO_EXT_DLC_OFF] = frame->can_dlc; + buf[HI3110_FIFO_EXT_DLC_OFF] = frame->len; memcpy(buf + HI3110_FIFO_EXT_DATA_OFF, - frame->data, frame->can_dlc); + frame->data, frame->len); hi3110_hw_tx_frame(spi, buf, HI3110_TX_EXT_BUF_LEN - - (HI3110_CAN_MAX_DATA_LEN - frame->can_dlc)); + (HI3110_CAN_MAX_DATA_LEN - frame->len)); } else { /* Standard frame */ buf[HI3110_FIFO_ID_OFF] = (frame->can_id & CAN_SFF_MASK) >> 3; @@ -309,13 +292,13 @@ static void hi3110_hw_tx(struct spi_device *spi, struct can_frame *frame) ((frame->can_id & CAN_SFF_MASK) << 5) | ((frame->can_id & CAN_RTR_FLAG) ? 
(1 << 4) : 0); - buf[HI3110_FIFO_STD_DLC_OFF] = frame->can_dlc; + buf[HI3110_FIFO_STD_DLC_OFF] = frame->len; memcpy(buf + HI3110_FIFO_STD_DATA_OFF, - frame->data, frame->can_dlc); + frame->data, frame->len); hi3110_hw_tx_frame(spi, buf, HI3110_TX_STD_BUF_LEN - - (HI3110_CAN_MAX_DATA_LEN - frame->can_dlc)); + (HI3110_CAN_MAX_DATA_LEN - frame->len)); } } @@ -359,20 +342,19 @@ static void hi3110_hw_rx(struct spi_device *spi) } /* Data length */ - frame->can_dlc = get_can_dlc(buf[HI3110_FIFO_WOTIME_DLC_OFF] & 0x0F); + frame->len = can_cc_dlc2len(buf[HI3110_FIFO_WOTIME_DLC_OFF] & 0x0F); - if (buf[HI3110_FIFO_WOTIME_ID_OFF + 3] & HI3110_FIFO_WOTIME_ID_RTR) + if (buf[HI3110_FIFO_WOTIME_ID_OFF + 3] & HI3110_FIFO_WOTIME_ID_RTR) { frame->can_id |= CAN_RTR_FLAG; - else + } else { memcpy(frame->data, buf + HI3110_FIFO_WOTIME_DAT_OFF, - frame->can_dlc); + frame->len); + priv->net->stats.rx_bytes += frame->len; + } priv->net->stats.rx_packets++; - priv->net->stats.rx_bytes += frame->can_dlc; - can_led_event(priv->net, CAN_LED_EVENT_RX); - - netif_rx_ni(skb); + netif_rx(skb); } static void hi3110_hw_sleep(struct spi_device *spi) @@ -386,12 +368,12 @@ static netdev_tx_t hi3110_hard_start_xmit(struct sk_buff *skb, struct hi3110_priv *priv = netdev_priv(net); struct spi_device *spi = priv->spi; - if (priv->tx_skb || priv->tx_len) { + if (priv->tx_skb || priv->tx_busy) { dev_err(&spi->dev, "hard_xmit called while tx busy\n"); return NETDEV_TX_BUSY; } - if (can_dropped_invalid_skb(net, skb)) + if (can_dev_dropped_skb(net, skb)) return NETDEV_TX_OK; netif_stop_queue(net); @@ -563,8 +545,6 @@ static int hi3110_stop(struct net_device *net) priv->force_quit = 1; free_irq(spi->irq, priv); - destroy_workqueue(priv->wq); - priv->wq = NULL; mutex_lock(&priv->hi3110_lock); @@ -583,8 +563,6 @@ static int hi3110_stop(struct net_device *net) mutex_unlock(&priv->hi3110_lock); - can_led_event(net, CAN_LED_EVENT_STOP); - return 0; } @@ -603,8 +581,8 @@ static void hi3110_tx_work_handler(struct work_struct *ws) } else { frame = (struct can_frame *)priv->tx_skb->data; hi3110_hw_tx(spi, frame); - priv->tx_len = 1 + frame->can_dlc; - can_put_echo_skb(priv->tx_skb, net, 0); + priv->tx_busy = true; + can_put_echo_skb(priv->tx_skb, net, 0, 0); priv->tx_skb = NULL; } } @@ -683,25 +661,27 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id) u8 rxerr, txerr; skb = alloc_can_err_skb(net, &cf); - if (!skb) - break; txerr = hi3110_read(spi, HI3110_READ_TEC); rxerr = hi3110_read(spi, HI3110_READ_REC); - cf->data[6] = txerr; - cf->data[7] = rxerr; tx_state = txerr >= rxerr ? new_state : 0; rx_state = txerr <= rxerr ? 
new_state : 0; can_change_state(net, cf, tx_state, rx_state); - netif_rx_ni(skb); if (new_state == CAN_STATE_BUS_OFF) { + if (skb) + netif_rx(skb); can_bus_off(net); if (priv->can.restart_ms == 0) { priv->force_quit = 1; hi3110_hw_sleep(spi); break; } + } else if (skb) { + cf->can_id |= CAN_ERR_CNT; + cf->data[6] = txerr; + cf->data[7] = rxerr; + netif_rx(skb); } } @@ -714,38 +694,45 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id) /* Check for protocol errors */ if (eflag & HI3110_ERR_PROTOCOL_MASK) { skb = alloc_can_err_skb(net, &cf); - if (!skb) - break; + if (skb) + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; - cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; priv->can.can_stats.bus_error++; - priv->net->stats.rx_errors++; - if (eflag & HI3110_ERR_BITERR) - cf->data[2] |= CAN_ERR_PROT_BIT; - else if (eflag & HI3110_ERR_FRMERR) - cf->data[2] |= CAN_ERR_PROT_FORM; - else if (eflag & HI3110_ERR_STUFERR) - cf->data[2] |= CAN_ERR_PROT_STUFF; - else if (eflag & HI3110_ERR_CRCERR) - cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; - else if (eflag & HI3110_ERR_ACKERR) - cf->data[3] |= CAN_ERR_PROT_LOC_ACK; - - cf->data[6] = hi3110_read(spi, HI3110_READ_TEC); - cf->data[7] = hi3110_read(spi, HI3110_READ_REC); + if (eflag & HI3110_ERR_BITERR) { + priv->net->stats.tx_errors++; + if (skb) + cf->data[2] |= CAN_ERR_PROT_BIT; + } else if (eflag & HI3110_ERR_FRMERR) { + priv->net->stats.rx_errors++; + if (skb) + cf->data[2] |= CAN_ERR_PROT_FORM; + } else if (eflag & HI3110_ERR_STUFERR) { + priv->net->stats.rx_errors++; + if (skb) + cf->data[2] |= CAN_ERR_PROT_STUFF; + } else if (eflag & HI3110_ERR_CRCERR) { + priv->net->stats.rx_errors++; + if (skb) + cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; + } else if (eflag & HI3110_ERR_ACKERR) { + priv->net->stats.tx_errors++; + if (skb) + cf->data[3] |= CAN_ERR_PROT_LOC_ACK; + } + netdev_dbg(priv->net, "Bus Error\n"); - netif_rx_ni(skb); + if (skb) { + cf->data[6] = hi3110_read(spi, HI3110_READ_TEC); + cf->data[7] = hi3110_read(spi, HI3110_READ_REC); + netif_rx(skb); + } } } - if (priv->tx_len && statf & HI3110_STAT_TXMTY) { + if (priv->tx_busy && statf & HI3110_STAT_TXMTY) { net->stats.tx_packets++; - net->stats.tx_bytes += priv->tx_len - 1; - can_led_event(net, CAN_LED_EVENT_TX); - if (priv->tx_len) { - can_get_echo_skb(net, 0); - priv->tx_len = 0; - } + net->stats.tx_bytes += can_get_echo_skb(net, 0, NULL); + priv->tx_busy = false; netif_wake_queue(net); } @@ -772,7 +759,7 @@ static int hi3110_open(struct net_device *net) priv->force_quit = 0; priv->tx_skb = NULL; - priv->tx_len = 0; + priv->tx_busy = false; ret = request_threaded_irq(spi->irq, NULL, hi3110_can_ist, flags, DEVICE_NAME, priv); @@ -781,35 +768,23 @@ static int hi3110_open(struct net_device *net) goto out_close; } - priv->wq = alloc_workqueue("hi3110_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM, - 0); - if (!priv->wq) { - ret = -ENOMEM; - goto out_free_irq; - } - INIT_WORK(&priv->tx_work, hi3110_tx_work_handler); - INIT_WORK(&priv->restart_work, hi3110_restart_work_handler); - ret = hi3110_hw_reset(spi); if (ret) - goto out_free_wq; + goto out_free_irq; ret = hi3110_setup(net); if (ret) - goto out_free_wq; + goto out_free_irq; ret = hi3110_set_normal_mode(spi); if (ret) - goto out_free_wq; + goto out_free_irq; - can_led_event(net, CAN_LED_EVENT_OPEN); netif_wake_queue(net); mutex_unlock(&priv->hi3110_lock); return 0; - out_free_wq: - destroy_workqueue(priv->wq); out_free_irq: free_irq(spi->irq, priv); hi3110_hw_sleep(spi); @@ -826,6 +801,10 @@ static const struct net_device_ops hi3110_netdev_ops = { 
.ndo_start_xmit = hi3110_hard_start_xmit, }; +static const struct ethtool_ops hi3110_ethtool_ops = { + .get_ts_info = ethtool_op_get_ts_info, +}; + static const struct of_device_id hi3110_of_match[] = { { .compatible = "holt,hi3110", @@ -846,19 +825,24 @@ MODULE_DEVICE_TABLE(spi, hi3110_id_table); static int hi3110_can_probe(struct spi_device *spi) { - const struct of_device_id *of_id = of_match_device(hi3110_of_match, - &spi->dev); + struct device *dev = &spi->dev; struct net_device *net; struct hi3110_priv *priv; struct clk *clk; - int freq, ret; + u32 freq; + int ret; - clk = devm_clk_get(&spi->dev, NULL); - if (IS_ERR(clk)) { - dev_err(&spi->dev, "no CAN clock source defined\n"); - return PTR_ERR(clk); + clk = devm_clk_get_optional(&spi->dev, NULL); + if (IS_ERR(clk)) + return dev_err_probe(dev, PTR_ERR(clk), "no CAN clock source defined\n"); + + if (clk) { + freq = clk_get_rate(clk); + } else { + ret = device_property_read_u32(dev, "clock-frequency", &freq); + if (ret) + return dev_err_probe(dev, ret, "Failed to get clock-frequency!\n"); } - freq = clk_get_rate(clk); /* Sanity check */ if (freq > 40000000) @@ -869,13 +853,12 @@ static int hi3110_can_probe(struct spi_device *spi) if (!net) return -ENOMEM; - if (!IS_ERR(clk)) { - ret = clk_prepare_enable(clk); - if (ret) - goto out_free; - } + ret = clk_prepare_enable(clk); + if (ret) + goto out_free; net->netdev_ops = &hi3110_netdev_ops; + net->ethtool_ops = &hi3110_ethtool_ops; net->flags |= IFF_ECHO; priv = netdev_priv(net); @@ -888,10 +871,7 @@ static int hi3110_can_probe(struct spi_device *spi) CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING; - if (of_id) - priv->model = (enum hi3110_model)of_id->data; - else - priv->model = spi_get_device_id(spi)->driver_data; + priv->model = (enum hi3110_model)(uintptr_t)spi_get_device_match_data(spi); priv->net = net; priv->clk = clk; @@ -915,55 +895,38 @@ static int hi3110_can_probe(struct spi_device *spi) if (ret) goto out_clk; + priv->wq = alloc_workqueue("hi3110_wq", + WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU, + 0); + if (!priv->wq) { + ret = -ENOMEM; + goto out_clk; + } + INIT_WORK(&priv->tx_work, hi3110_tx_work_handler); + INIT_WORK(&priv->restart_work, hi3110_restart_work_handler); + priv->spi = spi; mutex_init(&priv->hi3110_lock); - /* If requested, allocate DMA buffers */ - if (hi3110_enable_dma) { - spi->dev.coherent_dma_mask = ~0; - - /* Minimum coherent DMA allocation is PAGE_SIZE, so allocate - * that much and share it between Tx and Rx DMA buffers. 
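
The reworked hi3110_can_probe() above takes the oscillator rate either from an optional clock or, failing that, from a "clock-frequency" firmware property. A condensed sketch of that fallback for a generic SPI CAN device (everything except the kernel APIs shown in the hunk is illustrative):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/property.h>
#include <linux/spi/spi.h>

static int demo_get_osc_freq(struct spi_device *spi, u32 *freq)
{
	struct device *dev = &spi->dev;
	struct clk *clk;
	int ret;

	/* NULL (no clock provided) is a valid result for the _optional variant */
	clk = devm_clk_get_optional(dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk), "no CAN clock source defined\n");

	if (clk) {
		*freq = clk_get_rate(clk);
		return 0;
	}

	/* no clock node: fall back to a plain "clock-frequency" property */
	ret = device_property_read_u32(dev, "clock-frequency", freq);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to get clock-frequency!\n");

	return 0;
}
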
- */ - priv->spi_tx_buf = dmam_alloc_coherent(&spi->dev, - PAGE_SIZE, - &priv->spi_tx_dma, - GFP_DMA); - - if (priv->spi_tx_buf) { - priv->spi_rx_buf = (priv->spi_tx_buf + (PAGE_SIZE / 2)); - priv->spi_rx_dma = (dma_addr_t)(priv->spi_tx_dma + - (PAGE_SIZE / 2)); - } else { - /* Fall back to non-DMA */ - hi3110_enable_dma = 0; - } + priv->spi_tx_buf = devm_kzalloc(&spi->dev, HI3110_RX_BUF_LEN, + GFP_KERNEL); + if (!priv->spi_tx_buf) { + ret = -ENOMEM; + goto error_probe; } + priv->spi_rx_buf = devm_kzalloc(&spi->dev, HI3110_RX_BUF_LEN, + GFP_KERNEL); - /* Allocate non-DMA buffers */ - if (!hi3110_enable_dma) { - priv->spi_tx_buf = devm_kzalloc(&spi->dev, HI3110_RX_BUF_LEN, - GFP_KERNEL); - if (!priv->spi_tx_buf) { - ret = -ENOMEM; - goto error_probe; - } - priv->spi_rx_buf = devm_kzalloc(&spi->dev, HI3110_RX_BUF_LEN, - GFP_KERNEL); - - if (!priv->spi_rx_buf) { - ret = -ENOMEM; - goto error_probe; - } + if (!priv->spi_rx_buf) { + ret = -ENOMEM; + goto error_probe; } SET_NETDEV_DEV(net, &spi->dev); ret = hi3110_hw_probe(spi); if (ret) { - if (ret == -ENODEV) - dev_err(&spi->dev, "Cannot initialize %x. Wrong wiring?\n", - priv->model); + dev_err_probe(dev, ret, "Cannot initialize %x. Wrong wiring?\n", priv->model); goto error_probe; } hi3110_hw_sleep(spi); @@ -972,26 +935,25 @@ static int hi3110_can_probe(struct spi_device *spi) if (ret) goto error_probe; - devm_can_led_init(net); netdev_info(net, "%x successfully initialized.\n", priv->model); return 0; error_probe: + destroy_workqueue(priv->wq); + priv->wq = NULL; hi3110_power_enable(priv->power, 0); out_clk: - if (!IS_ERR(clk)) - clk_disable_unprepare(clk); + clk_disable_unprepare(clk); out_free: free_candev(net); - dev_err(&spi->dev, "Probe failed, err=%d\n", -ret); - return ret; + return dev_err_probe(dev, ret, "Probe failed\n"); } -static int hi3110_can_remove(struct spi_device *spi) +static void hi3110_can_remove(struct spi_device *spi) { struct hi3110_priv *priv = spi_get_drvdata(spi); struct net_device *net = priv->net; @@ -1000,12 +962,12 @@ static int hi3110_can_remove(struct spi_device *spi) hi3110_power_enable(priv->power, 0); - if (!IS_ERR(priv->clk)) - clk_disable_unprepare(priv->clk); + destroy_workqueue(priv->wq); + priv->wq = NULL; + + clk_disable_unprepare(priv->clk); free_candev(net); - - return 0; } static int __maybe_unused hi3110_can_suspend(struct device *dev) diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index e90817608645..fa97adf25b73 100644 --- a/drivers/net/can/spi/mcp251x.c +++ b/drivers/net/can/spi/mcp251x.c @@ -1,5 +1,5 @@ -/* - * CAN bus driver for Microchip 251x CAN Controller with SPI Interface +// SPDX-License-Identifier: GPL-2.0-only +/* CAN bus driver for Microchip 251x/25625 CAN Controller with SPI Interface * * MCP2510 support and bug fixes by Christian Pellegrin * <chripell@evolware.org> @@ -17,65 +17,30 @@ * - Sascha Hauer, Marc Kleine-Budde, Pengutronix * - Simon Kallweit, intefo AG * Copyright 2007 - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the version 2 of the GNU General Public License - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
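
hi3110_can_remove() above (and mcp251x_can_remove() later in this diff) is also converted to the void-returning ->remove() callback of struct spi_driver: errors can no longer be reported from remove, so teardown must not fail. A minimal sketch of the resulting driver shape (driver name and callbacks are placeholders):

#include <linux/module.h>
#include <linux/spi/spi.h>

static int demo_can_probe(struct spi_device *spi)
{
	/* allocate and register the CAN netdev here */
	return 0;
}

static void demo_can_remove(struct spi_device *spi)
{
	/* unregister and release resources; no error can be returned */
}

static struct spi_driver demo_can_driver = {
	.driver = {
		.name = "demo-can",
	},
	.probe = demo_can_probe,
	.remove = demo_can_remove,
};
module_spi_driver(demo_can_driver);

MODULE_LICENSE("GPL");
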
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. - * - * - * - * Your platform definition file should specify something like: - * - * static struct mcp251x_platform_data mcp251x_info = { - * .oscillator_frequency = 8000000, - * }; - * - * static struct spi_board_info spi_board_info[] = { - * { - * .modalias = "mcp2510", - * // or "mcp2515" depending on your controller - * .platform_data = &mcp251x_info, - * .irq = IRQ_EINT13, - * .max_speed_hz = 2*1000*1000, - * .chip_select = 2, - * }, - * }; - * - * Please see mcp251x.h for a description of the fields in - * struct mcp251x_platform_data. - * */ +#include <linux/bitfield.h> #include <linux/can/core.h> #include <linux/can/dev.h> -#include <linux/can/led.h> -#include <linux/can/platform/mcp251x.h> #include <linux/clk.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/device.h> -#include <linux/dma-mapping.h> +#include <linux/ethtool.h> #include <linux/freezer.h> +#include <linux/gpio/driver.h> #include <linux/interrupt.h> #include <linux/io.h> +#include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> -#include <linux/of.h> -#include <linux/of_device.h> #include <linux/platform_device.h> +#include <linux/property.h> +#include <linux/regulator/consumer.h> #include <linux/slab.h> #include <linux/spi/spi.h> #include <linux/uaccess.h> -#include <linux/regulator/consumer.h> /* SPI interface instruction set */ #define INSTRUCTION_WRITE 0x02 @@ -89,8 +54,31 @@ #define RTS_TXB2 0x04 #define INSTRUCTION_RTS(n) (0x80 | ((n) & 0x07)) - /* MPC251x registers */ +#define BFPCTRL 0x0c +# define BFPCTRL_B0BFM BIT(0) +# define BFPCTRL_B1BFM BIT(1) +# define BFPCTRL_BFM(n) (BFPCTRL_B0BFM << (n)) +# define BFPCTRL_BFM_MASK GENMASK(1, 0) +# define BFPCTRL_B0BFE BIT(2) +# define BFPCTRL_B1BFE BIT(3) +# define BFPCTRL_BFE(n) (BFPCTRL_B0BFE << (n)) +# define BFPCTRL_BFE_MASK GENMASK(3, 2) +# define BFPCTRL_B0BFS BIT(4) +# define BFPCTRL_B1BFS BIT(5) +# define BFPCTRL_BFS(n) (BFPCTRL_B0BFS << (n)) +# define BFPCTRL_BFS_MASK GENMASK(5, 4) +#define TXRTSCTRL 0x0d +# define TXRTSCTRL_B0RTSM BIT(0) +# define TXRTSCTRL_B1RTSM BIT(1) +# define TXRTSCTRL_B2RTSM BIT(2) +# define TXRTSCTRL_RTSM(n) (TXRTSCTRL_B0RTSM << (n)) +# define TXRTSCTRL_RTSM_MASK GENMASK(2, 0) +# define TXRTSCTRL_B0RTS BIT(3) +# define TXRTSCTRL_B1RTS BIT(4) +# define TXRTSCTRL_B2RTS BIT(5) +# define TXRTSCTRL_RTS(n) (TXRTSCTRL_B0RTS << (n)) +# define TXRTSCTRL_RTS_MASK GENMASK(5, 3) #define CANSTAT 0x0e #define CANCTRL 0x0f # define CANCTRL_REQOP_MASK 0xe0 @@ -205,8 +193,7 @@ #define SET_BYTE(val, byte) \ (((val) & 0xff) << ((byte) * 8)) -/* - * Buffer size required for the largest SPI transfer (i.e., reading a +/* Buffer size required for the largest SPI transfer (i.e., reading a * frame) */ #define CAN_FRAME_MAX_DATA_LEN 8 @@ -219,10 +206,6 @@ #define DEVICE_NAME "mcp251x" -static int mcp251x_enable_dma; /* Enable SPI DMA. Default: 0 (Off) */ -module_param(mcp251x_enable_dma, int, 0444); -MODULE_PARM_DESC(mcp251x_enable_dma, "Enable SPI DMA. 
Default: 0 (Off)"); - static const struct can_bittiming_const mcp251x_bittiming_const = { .name = DEVICE_NAME, .tseg1_min = 3, @@ -238,6 +221,7 @@ static const struct can_bittiming_const mcp251x_bittiming_const = { enum mcp251x_model { CAN_MCP251X_MCP2510 = 0x2510, CAN_MCP251X_MCP2515 = 0x2515, + CAN_MCP251X_MCP25625 = 0x25625, }; struct mcp251x_priv { @@ -250,11 +234,8 @@ struct mcp251x_priv { u8 *spi_tx_buf; u8 *spi_rx_buf; - dma_addr_t spi_tx_dma; - dma_addr_t spi_rx_dma; struct sk_buff *tx_skb; - int tx_len; struct workqueue_struct *wq; struct work_struct tx_work; @@ -267,9 +248,15 @@ struct mcp251x_priv { #define AFTER_SUSPEND_POWER 4 #define AFTER_SUSPEND_RESTART 8 int restart_tx; + bool tx_busy; + struct regulator *power; struct regulator *transceiver; struct clk *clk; +#ifdef CONFIG_GPIOLIB + struct gpio_chip gpio; + u8 reg_bfpctrl; +#endif }; #define MCP251X_IS(_model) \ @@ -280,24 +267,21 @@ static inline int mcp251x_is_##_model(struct spi_device *spi) \ } MCP251X_IS(2510); -MCP251X_IS(2515); static void mcp251x_clean(struct net_device *net) { struct mcp251x_priv *priv = netdev_priv(net); - if (priv->tx_skb || priv->tx_len) + if (priv->tx_skb || priv->tx_busy) net->stats.tx_errors++; - if (priv->tx_skb) - dev_kfree_skb(priv->tx_skb); - if (priv->tx_len) - can_free_echo_skb(priv->net, 0); + dev_kfree_skb(priv->tx_skb); + if (priv->tx_busy) + can_free_echo_skb(priv->net, 0, NULL); priv->tx_skb = NULL; - priv->tx_len = 0; + priv->tx_busy = false; } -/* - * Note about handling of error return of mcp251x_spi_trans: accessing +/* Note about handling of error return of mcp251x_spi_trans: accessing * registers via SPI is not really different conceptually than using * normal I/O assembler instructions, although it's much more * complicated from a practical POV. 
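
The new BFPCTRL/TXRTSCTRL definitions earlier in this file are written with BIT()/GENMASK() so that the GPIO helpers further down can use FIELD_PREP()/FIELD_GET() instead of open-coded shifts. A small illustration of that idiom (the register layout below is made up, only the helpers are real):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_BFS_MASK	GENMASK(5, 4)	/* a two-bit output-state field, as in BFPCTRL */

static u8 demo_pack_outputs(u8 reg, u8 outputs)
{
	/* clear the field, then insert the new two-bit value at bits 5:4 */
	reg &= ~DEMO_BFS_MASK;
	reg |= FIELD_PREP(DEMO_BFS_MASK, outputs);
	return reg;
}

static u8 demo_unpack_outputs(u8 reg)
{
	/* extract the two-bit field back out, right-aligned */
	return FIELD_GET(DEMO_BFS_MASK, reg);
}
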
So it's not advisable to always @@ -322,13 +306,6 @@ static int mcp251x_spi_trans(struct spi_device *spi, int len) int ret; spi_message_init(&m); - - if (mcp251x_enable_dma) { - t.tx_dma = priv->spi_tx_dma; - t.rx_dma = priv->spi_rx_dma; - m.is_dma_mapped = 1; - } - spi_message_add_tail(&t, &m); ret = spi_sync(spi, &m); @@ -337,7 +314,19 @@ static int mcp251x_spi_trans(struct spi_device *spi, int len) return ret; } -static u8 mcp251x_read_reg(struct spi_device *spi, uint8_t reg) +static int mcp251x_spi_write(struct spi_device *spi, int len) +{ + struct mcp251x_priv *priv = spi_get_drvdata(spi); + int ret; + + ret = spi_write(spi, priv->spi_tx_buf, len); + if (ret) + dev_err(&spi->dev, "spi write failed: ret = %d\n", ret); + + return ret; +} + +static u8 mcp251x_read_reg(struct spi_device *spi, u8 reg) { struct mcp251x_priv *priv = spi_get_drvdata(spi); u8 val = 0; @@ -345,27 +334,38 @@ static u8 mcp251x_read_reg(struct spi_device *spi, uint8_t reg) priv->spi_tx_buf[0] = INSTRUCTION_READ; priv->spi_tx_buf[1] = reg; - mcp251x_spi_trans(spi, 3); - val = priv->spi_rx_buf[2]; + if (spi->controller->flags & SPI_CONTROLLER_HALF_DUPLEX) { + spi_write_then_read(spi, priv->spi_tx_buf, 2, &val, 1); + } else { + mcp251x_spi_trans(spi, 3); + val = priv->spi_rx_buf[2]; + } return val; } -static void mcp251x_read_2regs(struct spi_device *spi, uint8_t reg, - uint8_t *v1, uint8_t *v2) +static void mcp251x_read_2regs(struct spi_device *spi, u8 reg, u8 *v1, u8 *v2) { struct mcp251x_priv *priv = spi_get_drvdata(spi); priv->spi_tx_buf[0] = INSTRUCTION_READ; priv->spi_tx_buf[1] = reg; - mcp251x_spi_trans(spi, 4); + if (spi->controller->flags & SPI_CONTROLLER_HALF_DUPLEX) { + u8 val[2] = { 0 }; + + spi_write_then_read(spi, priv->spi_tx_buf, 2, val, 2); + *v1 = val[0]; + *v2 = val[1]; + } else { + mcp251x_spi_trans(spi, 4); - *v1 = priv->spi_rx_buf[2]; - *v2 = priv->spi_rx_buf[3]; + *v1 = priv->spi_rx_buf[2]; + *v2 = priv->spi_rx_buf[3]; + } } -static void mcp251x_write_reg(struct spi_device *spi, u8 reg, uint8_t val) +static void mcp251x_write_reg(struct spi_device *spi, u8 reg, u8 val) { struct mcp251x_priv *priv = spi_get_drvdata(spi); @@ -373,11 +373,23 @@ static void mcp251x_write_reg(struct spi_device *spi, u8 reg, uint8_t val) priv->spi_tx_buf[1] = reg; priv->spi_tx_buf[2] = val; - mcp251x_spi_trans(spi, 3); + mcp251x_spi_write(spi, 3); } -static void mcp251x_write_bits(struct spi_device *spi, u8 reg, - u8 mask, uint8_t val) +static void mcp251x_write_2regs(struct spi_device *spi, u8 reg, u8 v1, u8 v2) +{ + struct mcp251x_priv *priv = spi_get_drvdata(spi); + + priv->spi_tx_buf[0] = INSTRUCTION_WRITE; + priv->spi_tx_buf[1] = reg; + priv->spi_tx_buf[2] = v1; + priv->spi_tx_buf[3] = v2; + + mcp251x_spi_write(spi, 4); +} + +static int mcp251x_write_bits(struct spi_device *spi, u8 reg, + u8 mask, u8 val) { struct mcp251x_priv *priv = spi_get_drvdata(spi); @@ -386,9 +398,235 @@ static void mcp251x_write_bits(struct spi_device *spi, u8 reg, priv->spi_tx_buf[2] = mask; priv->spi_tx_buf[3] = val; - mcp251x_spi_trans(spi, 4); + return mcp251x_spi_write(spi, 4); +} + +static u8 mcp251x_read_stat(struct spi_device *spi) +{ + return mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK; +} + +#define mcp251x_read_stat_poll_timeout(addr, val, cond, delay_us, timeout_us) \ + readx_poll_timeout(mcp251x_read_stat, addr, val, cond, \ + delay_us, timeout_us) + +#ifdef CONFIG_GPIOLIB +enum { + MCP251X_GPIO_TX0RTS = 0, /* inputs */ + MCP251X_GPIO_TX1RTS, + MCP251X_GPIO_TX2RTS, + MCP251X_GPIO_RX0BF, /* outputs */ + 
MCP251X_GPIO_RX1BF, +}; + +#define MCP251X_GPIO_INPUT_MASK \ + GENMASK(MCP251X_GPIO_TX2RTS, MCP251X_GPIO_TX0RTS) +#define MCP251X_GPIO_OUTPUT_MASK \ + GENMASK(MCP251X_GPIO_RX1BF, MCP251X_GPIO_RX0BF) + +static const char * const mcp251x_gpio_names[] = { + [MCP251X_GPIO_TX0RTS] = "TX0RTS", /* inputs */ + [MCP251X_GPIO_TX1RTS] = "TX1RTS", + [MCP251X_GPIO_TX2RTS] = "TX2RTS", + [MCP251X_GPIO_RX0BF] = "RX0BF", /* outputs */ + [MCP251X_GPIO_RX1BF] = "RX1BF", +}; + +static inline bool mcp251x_gpio_is_input(unsigned int offset) +{ + return offset <= MCP251X_GPIO_TX2RTS; +} + +static int mcp251x_gpio_request(struct gpio_chip *chip, + unsigned int offset) +{ + struct mcp251x_priv *priv = gpiochip_get_data(chip); + int ret; + u8 val; + + /* nothing to be done for inputs */ + if (mcp251x_gpio_is_input(offset)) + return 0; + + val = BFPCTRL_BFE(offset - MCP251X_GPIO_RX0BF); + + mutex_lock(&priv->mcp_lock); + ret = mcp251x_write_bits(priv->spi, BFPCTRL, val, val); + mutex_unlock(&priv->mcp_lock); + if (ret) + return ret; + + priv->reg_bfpctrl |= val; + + return 0; +} + +static void mcp251x_gpio_free(struct gpio_chip *chip, + unsigned int offset) +{ + struct mcp251x_priv *priv = gpiochip_get_data(chip); + u8 val; + + /* nothing to be done for inputs */ + if (mcp251x_gpio_is_input(offset)) + return; + + val = BFPCTRL_BFE(offset - MCP251X_GPIO_RX0BF); + + mutex_lock(&priv->mcp_lock); + mcp251x_write_bits(priv->spi, BFPCTRL, val, 0); + mutex_unlock(&priv->mcp_lock); + + priv->reg_bfpctrl &= ~val; +} + +static int mcp251x_gpio_get_direction(struct gpio_chip *chip, + unsigned int offset) +{ + if (mcp251x_gpio_is_input(offset)) + return GPIO_LINE_DIRECTION_IN; + + return GPIO_LINE_DIRECTION_OUT; +} + +static int mcp251x_gpio_get(struct gpio_chip *chip, unsigned int offset) +{ + struct mcp251x_priv *priv = gpiochip_get_data(chip); + u8 reg, mask, val; + + if (mcp251x_gpio_is_input(offset)) { + reg = TXRTSCTRL; + mask = TXRTSCTRL_RTS(offset); + } else { + reg = BFPCTRL; + mask = BFPCTRL_BFS(offset - MCP251X_GPIO_RX0BF); + } + + mutex_lock(&priv->mcp_lock); + val = mcp251x_read_reg(priv->spi, reg); + mutex_unlock(&priv->mcp_lock); + + return !!(val & mask); +} + +static int mcp251x_gpio_get_multiple(struct gpio_chip *chip, + unsigned long *maskp, unsigned long *bitsp) +{ + struct mcp251x_priv *priv = gpiochip_get_data(chip); + unsigned long bits = 0; + u8 val; + + mutex_lock(&priv->mcp_lock); + if (maskp[0] & MCP251X_GPIO_INPUT_MASK) { + val = mcp251x_read_reg(priv->spi, TXRTSCTRL); + val = FIELD_GET(TXRTSCTRL_RTS_MASK, val); + bits |= FIELD_PREP(MCP251X_GPIO_INPUT_MASK, val); + } + if (maskp[0] & MCP251X_GPIO_OUTPUT_MASK) { + val = mcp251x_read_reg(priv->spi, BFPCTRL); + val = FIELD_GET(BFPCTRL_BFS_MASK, val); + bits |= FIELD_PREP(MCP251X_GPIO_OUTPUT_MASK, val); + } + mutex_unlock(&priv->mcp_lock); + + bitsp[0] = bits; + return 0; +} + +static int mcp251x_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) +{ + struct mcp251x_priv *priv = gpiochip_get_data(chip); + u8 mask, val; + int ret; + + mask = BFPCTRL_BFS(offset - MCP251X_GPIO_RX0BF); + val = value ? 
mask : 0; + + mutex_lock(&priv->mcp_lock); + ret = mcp251x_write_bits(priv->spi, BFPCTRL, mask, val); + mutex_unlock(&priv->mcp_lock); + if (ret) + return ret; + + priv->reg_bfpctrl &= ~mask; + priv->reg_bfpctrl |= val; + + return 0; +} + +static int +mcp251x_gpio_set_multiple(struct gpio_chip *chip, + unsigned long *maskp, unsigned long *bitsp) +{ + struct mcp251x_priv *priv = gpiochip_get_data(chip); + u8 mask, val; + int ret; + + mask = FIELD_GET(MCP251X_GPIO_OUTPUT_MASK, maskp[0]); + mask = FIELD_PREP(BFPCTRL_BFS_MASK, mask); + + val = FIELD_GET(MCP251X_GPIO_OUTPUT_MASK, bitsp[0]); + val = FIELD_PREP(BFPCTRL_BFS_MASK, val); + + if (!mask) + return 0; + + mutex_lock(&priv->mcp_lock); + ret = mcp251x_write_bits(priv->spi, BFPCTRL, mask, val); + mutex_unlock(&priv->mcp_lock); + if (ret) + return ret; + + priv->reg_bfpctrl &= ~mask; + priv->reg_bfpctrl |= val; + + return 0; +} + +static void mcp251x_gpio_restore(struct spi_device *spi) +{ + struct mcp251x_priv *priv = spi_get_drvdata(spi); + + mcp251x_write_reg(spi, BFPCTRL, priv->reg_bfpctrl); +} + +static int mcp251x_gpio_setup(struct mcp251x_priv *priv) +{ + struct gpio_chip *gpio = &priv->gpio; + + if (!device_property_present(&priv->spi->dev, "gpio-controller")) + return 0; + + /* gpiochip handles TX[0..2]RTS and RX[0..1]BF */ + gpio->label = priv->spi->modalias; + gpio->parent = &priv->spi->dev; + gpio->owner = THIS_MODULE; + gpio->request = mcp251x_gpio_request; + gpio->free = mcp251x_gpio_free; + gpio->get_direction = mcp251x_gpio_get_direction; + gpio->get = mcp251x_gpio_get; + gpio->get_multiple = mcp251x_gpio_get_multiple; + gpio->set = mcp251x_gpio_set; + gpio->set_multiple = mcp251x_gpio_set_multiple; + gpio->base = -1; + gpio->ngpio = ARRAY_SIZE(mcp251x_gpio_names); + gpio->names = mcp251x_gpio_names; + gpio->can_sleep = true; + + return devm_gpiochip_add_data(&priv->spi->dev, gpio, priv); +} +#else +static inline void mcp251x_gpio_restore(struct spi_device *spi) +{ } +static inline int mcp251x_gpio_setup(struct mcp251x_priv *priv) +{ + return 0; +} +#endif + static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf, int len, int tx_buf_idx) { @@ -402,7 +640,7 @@ static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf, buf[i]); } else { memcpy(priv->spi_tx_buf, buf, TXBDAT_OFF + len); - mcp251x_spi_trans(spi, TXBDAT_OFF + len); + mcp251x_spi_write(spi, TXBDAT_OFF + len); } } @@ -428,13 +666,13 @@ static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame, ((eid >> SIDL_EID_SHIFT) & SIDL_EID_MASK); buf[TXBEID8_OFF] = GET_BYTE(eid, 1); buf[TXBEID0_OFF] = GET_BYTE(eid, 0); - buf[TXBDLC_OFF] = (rtr << DLC_RTR_SHIFT) | frame->can_dlc; - memcpy(buf + TXBDAT_OFF, frame->data, frame->can_dlc); - mcp251x_hw_tx_frame(spi, buf, frame->can_dlc, tx_buf_idx); + buf[TXBDLC_OFF] = (rtr << DLC_RTR_SHIFT) | frame->len; + memcpy(buf + TXBDAT_OFF, frame->data, frame->len); + mcp251x_hw_tx_frame(spi, buf, frame->len, tx_buf_idx); /* use INSTRUCTION_RTS, to avoid "repeated frame problem" */ priv->spi_tx_buf[0] = INSTRUCTION_RTS(1 << tx_buf_idx); - mcp251x_spi_trans(priv->spi, 1); + mcp251x_spi_write(priv->spi, 1); } static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf, @@ -448,13 +686,21 @@ static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf, for (i = 1; i < RXBDAT_OFF; i++) buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i); - len = get_can_dlc(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK); + len = can_cc_dlc2len(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK); for (; i < (RXBDAT_OFF + len); i++) buf[i] = 
mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i); } else { priv->spi_tx_buf[RXBCTRL_OFF] = INSTRUCTION_READ_RXB(buf_idx); - mcp251x_spi_trans(spi, SPI_TRANSFER_BUF_LEN); - memcpy(buf, priv->spi_rx_buf, SPI_TRANSFER_BUF_LEN); + if (spi->controller->flags & SPI_CONTROLLER_HALF_DUPLEX) { + spi_write_then_read(spi, priv->spi_tx_buf, 1, + priv->spi_rx_buf, + SPI_TRANSFER_BUF_LEN); + memcpy(buf + 1, priv->spi_rx_buf, + SPI_TRANSFER_BUF_LEN - 1); + } else { + mcp251x_spi_trans(spi, SPI_TRANSFER_BUF_LEN); + memcpy(buf, priv->spi_rx_buf, SPI_TRANSFER_BUF_LEN); + } } } @@ -496,15 +742,15 @@ static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx) frame->can_id |= CAN_RTR_FLAG; } /* Data length */ - frame->can_dlc = get_can_dlc(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK); - memcpy(frame->data, buf + RXBDAT_OFF, frame->can_dlc); + frame->len = can_cc_dlc2len(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK); + if (!(frame->can_id & CAN_RTR_FLAG)) { + memcpy(frame->data, buf + RXBDAT_OFF, frame->len); + priv->net->stats.rx_bytes += frame->len; + } priv->net->stats.rx_packets++; - priv->net->stats.rx_bytes += frame->can_dlc; - - can_led_event(priv->net, CAN_LED_EVENT_RX); - netif_rx_ni(skb); + netif_rx(skb); } static void mcp251x_hw_sleep(struct spi_device *spi) @@ -512,18 +758,50 @@ static void mcp251x_hw_sleep(struct spi_device *spi) mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_SLEEP); } +/* May only be called when device is sleeping! */ +static int mcp251x_hw_wake(struct spi_device *spi) +{ + u8 value; + int ret; + + /* Force wakeup interrupt to wake device, but don't execute IST */ + disable_irq_nosync(spi->irq); + mcp251x_write_2regs(spi, CANINTE, CANINTE_WAKIE, CANINTF_WAKIF); + + /* Wait for oscillator startup timer after wake up */ + mdelay(MCP251X_OST_DELAY_MS); + + /* Put device into config mode */ + mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_CONF); + + /* Wait for the device to enter config mode */ + ret = mcp251x_read_stat_poll_timeout(spi, value, value == CANCTRL_REQOP_CONF, + MCP251X_OST_DELAY_MS * 1000, + USEC_PER_SEC); + if (ret) { + dev_err(&spi->dev, "MCP251x didn't enter in config mode\n"); + return ret; + } + + /* Disable and clear pending interrupts */ + mcp251x_write_2regs(spi, CANINTE, 0x00, 0x00); + enable_irq(spi->irq); + + return 0; +} + static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb, struct net_device *net) { struct mcp251x_priv *priv = netdev_priv(net); struct spi_device *spi = priv->spi; - if (priv->tx_skb || priv->tx_len) { + if (priv->tx_skb || priv->tx_busy) { dev_warn(&spi->dev, "hard_xmit called while tx busy\n"); return NETDEV_TX_BUSY; } - if (can_dropped_invalid_skb(net, skb)) + if (can_dev_dropped_skb(net, skb)) return NETDEV_TX_OK; netif_stop_queue(net); @@ -557,7 +835,8 @@ static int mcp251x_do_set_mode(struct net_device *net, enum can_mode mode) static int mcp251x_set_normal_mode(struct spi_device *spi) { struct mcp251x_priv *priv = spi_get_drvdata(spi); - unsigned long timeout; + u8 value; + int ret; /* Enable interrupts */ mcp251x_write_reg(spi, CANINTE, @@ -575,14 +854,12 @@ static int mcp251x_set_normal_mode(struct spi_device *spi) mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_NORMAL); /* Wait for the device to enter normal mode */ - timeout = jiffies + HZ; - while (mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) { - schedule(); - if (time_after(jiffies, timeout)) { - dev_err(&spi->dev, "MCP251x didn't" - " enter in normal mode\n"); - return -EBUSY; - } + ret = mcp251x_read_stat_poll_timeout(spi, value, value == 0, + MCP251X_OST_DELAY_MS * 1000, + 
USEC_PER_SEC); + if (ret) { + dev_err(&spi->dev, "MCP251x didn't enter in normal mode\n"); + return ret; } } priv->can.state = CAN_STATE_ERROR_ACTIVE; @@ -626,25 +903,27 @@ static int mcp251x_setup(struct net_device *net, struct spi_device *spi) static int mcp251x_hw_reset(struct spi_device *spi) { struct mcp251x_priv *priv = spi_get_drvdata(spi); - u8 reg; + u8 value; int ret; /* Wait for oscillator startup timer after power up */ mdelay(MCP251X_OST_DELAY_MS); priv->spi_tx_buf[0] = INSTRUCTION_RESET; - ret = mcp251x_spi_trans(spi, 1); + ret = mcp251x_spi_write(spi, 1); if (ret) return ret; /* Wait for oscillator startup timer after reset */ mdelay(MCP251X_OST_DELAY_MS); - - reg = mcp251x_read_reg(spi, CANSTAT); - if ((reg & CANCTRL_REQOP_MASK) != CANCTRL_REQOP_CONF) - return -ENODEV; - return 0; + /* Wait for reset to finish */ + ret = mcp251x_read_stat_poll_timeout(spi, value, value == CANCTRL_REQOP_CONF, + MCP251X_OST_DELAY_MS * 1000, + USEC_PER_SEC); + if (ret) + dev_err(&spi->dev, "MCP251x didn't enter in conf mode after reset\n"); + return ret; } static int mcp251x_hw_probe(struct spi_device *spi) @@ -678,17 +957,6 @@ static int mcp251x_power_enable(struct regulator *reg, int enable) return regulator_disable(reg); } -static void mcp251x_open_clean(struct net_device *net) -{ - struct mcp251x_priv *priv = netdev_priv(net); - struct spi_device *spi = priv->spi; - - free_irq(spi->irq, priv); - mcp251x_hw_sleep(spi); - mcp251x_power_enable(priv->transceiver, 0); - close_candev(net); -} - static int mcp251x_stop(struct net_device *net) { struct mcp251x_priv *priv = netdev_priv(net); @@ -698,14 +966,11 @@ static int mcp251x_stop(struct net_device *net) priv->force_quit = 1; free_irq(spi->irq, priv); - destroy_workqueue(priv->wq); - priv->wq = NULL; mutex_lock(&priv->mcp_lock); /* Disable and clear pending interrupts */ - mcp251x_write_reg(spi, CANINTE, 0x00); - mcp251x_write_reg(spi, CANINTF, 0x00); + mcp251x_write_2regs(spi, CANINTE, 0x00, 0x00); mcp251x_write_reg(spi, TXBCTRL(0), 0); mcp251x_clean(net); @@ -718,8 +983,6 @@ static int mcp251x_stop(struct net_device *net) mutex_unlock(&priv->mcp_lock); - can_led_event(net, CAN_LED_EVENT_STOP); - return 0; } @@ -732,7 +995,7 @@ static void mcp251x_error_skb(struct net_device *net, int can_id, int data1) if (skb) { frame->can_id |= can_id; frame->data[1] = data1; - netif_rx_ni(skb); + netif_rx(skb); } else { netdev_err(net, "cannot allocate error skb\n"); } @@ -753,11 +1016,11 @@ static void mcp251x_tx_work_handler(struct work_struct *ws) } else { frame = (struct can_frame *)priv->tx_skb->data; - if (frame->can_dlc > CAN_FRAME_MAX_DATA_LEN) - frame->can_dlc = CAN_FRAME_MAX_DATA_LEN; + if (frame->len > CAN_FRAME_MAX_DATA_LEN) + frame->len = CAN_FRAME_MAX_DATA_LEN; mcp251x_hw_tx(spi, frame, 0); - priv->tx_len = 1 + frame->can_dlc; - can_put_echo_skb(priv->tx_skb, net, 0); + priv->tx_busy = true; + can_put_echo_skb(priv->tx_skb, net, 0, 0); priv->tx_skb = NULL; } } @@ -773,8 +1036,14 @@ static void mcp251x_restart_work_handler(struct work_struct *ws) mutex_lock(&priv->mcp_lock); if (priv->after_suspend) { - mcp251x_hw_reset(spi); - mcp251x_setup(net, spi); + if (priv->after_suspend & AFTER_SUSPEND_POWER) { + mcp251x_hw_reset(spi); + mcp251x_setup(net, spi); + mcp251x_gpio_restore(spi); + } else { + mcp251x_hw_wake(spi); + } + priv->force_quit = 0; if (priv->after_suspend & AFTER_SUSPEND_RESTART) { mcp251x_set_normal_mode(spi); } else if (priv->after_suspend & AFTER_SUSPEND_UP) { @@ -786,7 +1055,6 @@ static void 
mcp251x_restart_work_handler(struct work_struct *ws) mcp251x_hw_sleep(spi); } priv->after_suspend = 0; - priv->force_quit = 0; } if (priv->restart_tx) { @@ -814,28 +1082,40 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id) mcp251x_read_2regs(spi, CANINTF, &intf, &eflag); - /* mask out flags we don't care about */ - intf &= CANINTF_RX | CANINTF_TX | CANINTF_ERR; - /* receive buffer 0 */ if (intf & CANINTF_RX0IF) { mcp251x_hw_rx(spi, 0); - /* - * Free one buffer ASAP - * (The MCP2515 does this automatically.) + /* Free one buffer ASAP + * (The MCP2515/25625 does this automatically.) */ if (mcp251x_is_2510(spi)) - mcp251x_write_bits(spi, CANINTF, CANINTF_RX0IF, 0x00); + mcp251x_write_bits(spi, CANINTF, + CANINTF_RX0IF, 0x00); + + /* check if buffer 1 is already known to be full, no need to re-read */ + if (!(intf & CANINTF_RX1IF)) { + u8 intf1, eflag1; + + /* intf needs to be read again to avoid a race condition */ + mcp251x_read_2regs(spi, CANINTF, &intf1, &eflag1); + + /* combine flags from both operations for error handling */ + intf |= intf1; + eflag |= eflag1; + } } /* receive buffer 1 */ if (intf & CANINTF_RX1IF) { mcp251x_hw_rx(spi, 1); - /* the MCP2515 does this automatically */ + /* The MCP2515/25625 does this automatically. */ if (mcp251x_is_2510(spi)) clear_intf |= CANINTF_RX1IF; } + /* mask out flags we don't care about */ + intf &= CANINTF_RX | CANINTF_TX | CANINTF_ERR; + /* any error or tx interrupt we need to clear? */ if (intf & (CANINTF_ERR | CANINTF_TX)) clear_intf |= intf & (CANINTF_ERR | CANINTF_TX); @@ -875,7 +1155,8 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id) if (new_state >= CAN_STATE_ERROR_WARNING && new_state <= CAN_STATE_BUS_OFF) priv->can.can_stats.error_warning++; - case CAN_STATE_ERROR_WARNING: /* fallthrough */ + fallthrough; + case CAN_STATE_ERROR_WARNING: if (new_state >= CAN_STATE_ERROR_PASSIVE && new_state <= CAN_STATE_BUS_OFF) priv->can.can_stats.error_passive++; @@ -916,16 +1197,14 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id) break; if (intf & CANINTF_TX) { - net->stats.tx_packets++; - net->stats.tx_bytes += priv->tx_len - 1; - can_led_event(net, CAN_LED_EVENT_TX); - if (priv->tx_len) { - can_get_echo_skb(net, 0); - priv->tx_len = 0; + if (priv->tx_busy) { + net->stats.tx_packets++; + net->stats.tx_bytes += can_get_echo_skb(net, 0, + NULL); + priv->tx_busy = false; } netif_wake_queue(net); } - } mutex_unlock(&priv->mcp_lock); return IRQ_HANDLED; @@ -935,7 +1214,7 @@ static int mcp251x_open(struct net_device *net) { struct mcp251x_priv *priv = netdev_priv(net); struct spi_device *spi = priv->spi; - unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_FALLING; + unsigned long flags = 0; int ret; ret = open_candev(net); @@ -949,43 +1228,40 @@ static int mcp251x_open(struct net_device *net) priv->force_quit = 0; priv->tx_skb = NULL; - priv->tx_len = 0; + priv->tx_busy = false; + + if (!dev_fwnode(&spi->dev)) + flags = IRQF_TRIGGER_FALLING; ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist, - flags | IRQF_ONESHOT, DEVICE_NAME, priv); + flags | IRQF_ONESHOT, dev_name(&spi->dev), + priv); if (ret) { dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq); - mcp251x_power_enable(priv->transceiver, 0); - close_candev(net); - goto open_unlock; + goto out_close; } - priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM, - 0); - INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler); - INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler); - - ret = mcp251x_hw_reset(spi); - if (ret) { - 
mcp251x_open_clean(net); - goto open_unlock; - } + ret = mcp251x_hw_wake(spi); + if (ret) + goto out_free_irq; ret = mcp251x_setup(net, spi); - if (ret) { - mcp251x_open_clean(net); - goto open_unlock; - } + if (ret) + goto out_free_irq; ret = mcp251x_set_normal_mode(spi); - if (ret) { - mcp251x_open_clean(net); - goto open_unlock; - } - - can_led_event(net, CAN_LED_EVENT_OPEN); + if (ret) + goto out_free_irq; netif_wake_queue(net); + mutex_unlock(&priv->mcp_lock); -open_unlock: + return 0; + +out_free_irq: + free_irq(spi->irq, priv); + mcp251x_hw_sleep(spi); +out_close: + mcp251x_power_enable(priv->transceiver, 0); + close_candev(net); mutex_unlock(&priv->mcp_lock); return ret; } @@ -994,7 +1270,10 @@ static const struct net_device_ops mcp251x_netdev_ops = { .ndo_open = mcp251x_open, .ndo_stop = mcp251x_stop, .ndo_start_xmit = mcp251x_hard_start_xmit, - .ndo_change_mtu = can_change_mtu, +}; + +static const struct ethtool_ops mcp251x_ethtool_ops = { + .get_ts_info = ethtool_op_get_ts_info, }; static const struct of_device_id mcp251x_of_match[] = { @@ -1006,6 +1285,10 @@ static const struct of_device_id mcp251x_of_match[] = { .compatible = "microchip,mcp2515", .data = (void *)CAN_MCP251X_MCP2515, }, + { + .compatible = "microchip,mcp25625", + .data = (void *)CAN_MCP251X_MCP25625, + }, { } }; MODULE_DEVICE_TABLE(of, mcp251x_of_match); @@ -1019,46 +1302,47 @@ static const struct spi_device_id mcp251x_id_table[] = { .name = "mcp2515", .driver_data = (kernel_ulong_t)CAN_MCP251X_MCP2515, }, + { + .name = "mcp25625", + .driver_data = (kernel_ulong_t)CAN_MCP251X_MCP25625, + }, { } }; MODULE_DEVICE_TABLE(spi, mcp251x_id_table); static int mcp251x_can_probe(struct spi_device *spi) { - const struct of_device_id *of_id = of_match_device(mcp251x_of_match, - &spi->dev); - struct mcp251x_platform_data *pdata = dev_get_platdata(&spi->dev); struct net_device *net; struct mcp251x_priv *priv; struct clk *clk; - int freq, ret; - - clk = devm_clk_get(&spi->dev, NULL); - if (IS_ERR(clk)) { - if (pdata) - freq = pdata->oscillator_frequency; - else - return PTR_ERR(clk); - } else { - freq = clk_get_rate(clk); - } + u32 freq; + int ret; + + clk = devm_clk_get_optional(&spi->dev, NULL); + if (IS_ERR(clk)) + return dev_err_probe(&spi->dev, PTR_ERR(clk), "Cannot get clock\n"); + + freq = clk_get_rate(clk); + if (freq == 0) + device_property_read_u32(&spi->dev, "clock-frequency", &freq); /* Sanity check */ if (freq < 1000000 || freq > 25000000) - return -ERANGE; + return dev_err_probe(&spi->dev, -ERANGE, "clock frequency out of range\n"); /* Allocate can/net device */ net = alloc_candev(sizeof(struct mcp251x_priv), TX_ECHO_SKB_MAX); if (!net) return -ENOMEM; - if (!IS_ERR(clk)) { - ret = clk_prepare_enable(clk); - if (ret) - goto out_free; + ret = clk_prepare_enable(clk); + if (ret) { + dev_err_probe(&spi->dev, ret, "Cannot enable clock\n"); + goto out_free; } net->netdev_ops = &mcp251x_netdev_ops; + net->ethtool_ops = &mcp251x_ethtool_ops; net->flags |= IFF_ECHO; priv = netdev_priv(net); @@ -1067,10 +1351,7 @@ static int mcp251x_can_probe(struct spi_device *spi) priv->can.clock.freq = freq / 2; priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY; - if (of_id) - priv->model = (enum mcp251x_model)of_id->data; - else - priv->model = spi_get_device_id(spi)->driver_data; + priv->model = (enum mcp251x_model)(uintptr_t)spi_get_device_match_data(spi); priv->net = net; priv->clk = clk; @@ -1083,61 +1364,51 @@ static int mcp251x_can_probe(struct spi_device *spi) else 
spi->max_speed_hz = spi->max_speed_hz ? : 10 * 1000 * 1000; ret = spi_setup(spi); - if (ret) + if (ret) { + dev_err_probe(&spi->dev, ret, "Cannot set up spi\n"); goto out_clk; + } priv->power = devm_regulator_get_optional(&spi->dev, "vdd"); priv->transceiver = devm_regulator_get_optional(&spi->dev, "xceiver"); if ((PTR_ERR(priv->power) == -EPROBE_DEFER) || (PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) { ret = -EPROBE_DEFER; + dev_err_probe(&spi->dev, ret, "supply deferred\n"); goto out_clk; } ret = mcp251x_power_enable(priv->power, 1); - if (ret) + if (ret) { + dev_err_probe(&spi->dev, ret, "Cannot enable power\n"); goto out_clk; + } + + priv->wq = alloc_workqueue("mcp251x_wq", + WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU, + 0); + if (!priv->wq) { + ret = -ENOMEM; + goto out_clk; + } + INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler); + INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler); priv->spi = spi; mutex_init(&priv->mcp_lock); - /* If requested, allocate DMA buffers */ - if (mcp251x_enable_dma) { - spi->dev.coherent_dma_mask = ~0; - - /* - * Minimum coherent DMA allocation is PAGE_SIZE, so allocate - * that much and share it between Tx and Rx DMA buffers. - */ - priv->spi_tx_buf = dmam_alloc_coherent(&spi->dev, - PAGE_SIZE, - &priv->spi_tx_dma, - GFP_DMA); - - if (priv->spi_tx_buf) { - priv->spi_rx_buf = (priv->spi_tx_buf + (PAGE_SIZE / 2)); - priv->spi_rx_dma = (dma_addr_t)(priv->spi_tx_dma + - (PAGE_SIZE / 2)); - } else { - /* Fall back to non-DMA */ - mcp251x_enable_dma = 0; - } + priv->spi_tx_buf = devm_kzalloc(&spi->dev, SPI_TRANSFER_BUF_LEN, + GFP_KERNEL); + if (!priv->spi_tx_buf) { + ret = -ENOMEM; + goto error_probe; } - /* Allocate non-DMA buffers */ - if (!mcp251x_enable_dma) { - priv->spi_tx_buf = devm_kzalloc(&spi->dev, SPI_TRANSFER_BUF_LEN, - GFP_KERNEL); - if (!priv->spi_tx_buf) { - ret = -ENOMEM; - goto error_probe; - } - priv->spi_rx_buf = devm_kzalloc(&spi->dev, SPI_TRANSFER_BUF_LEN, - GFP_KERNEL); - if (!priv->spi_rx_buf) { - ret = -ENOMEM; - goto error_probe; - } + priv->spi_rx_buf = devm_kzalloc(&spi->dev, SPI_TRANSFER_BUF_LEN, + GFP_KERNEL); + if (!priv->spi_rx_buf) { + ret = -ENOMEM; + goto error_probe; } SET_NETDEV_DEV(net, &spi->dev); @@ -1145,37 +1416,46 @@ static int mcp251x_can_probe(struct spi_device *spi) /* Here is OK to not lock the MCP, no one knows about it yet */ ret = mcp251x_hw_probe(spi); if (ret) { - if (ret == -ENODEV) - dev_err(&spi->dev, "Cannot initialize MCP%x. Wrong wiring?\n", priv->model); + dev_err_probe(&spi->dev, ret, "Cannot initialize MCP%x. 
Wrong wiring?\n", + priv->model); goto error_probe; } mcp251x_hw_sleep(spi); ret = register_candev(net); - if (ret) + if (ret) { + dev_err_probe(&spi->dev, ret, "Cannot register CAN device\n"); goto error_probe; + } - devm_can_led_init(net); + ret = mcp251x_gpio_setup(priv); + if (ret) { + dev_err_probe(&spi->dev, ret, "Cannot set up gpios\n"); + goto out_unregister_candev; + } netdev_info(net, "MCP%x successfully initialized.\n", priv->model); return 0; +out_unregister_candev: + unregister_candev(net); + error_probe: + destroy_workqueue(priv->wq); + priv->wq = NULL; mcp251x_power_enable(priv->power, 0); out_clk: - if (!IS_ERR(clk)) - clk_disable_unprepare(clk); + clk_disable_unprepare(clk); out_free: free_candev(net); - dev_err(&spi->dev, "Probe failed, err=%d\n", -ret); return ret; } -static int mcp251x_can_remove(struct spi_device *spi) +static void mcp251x_can_remove(struct spi_device *spi) { struct mcp251x_priv *priv = spi_get_drvdata(spi); struct net_device *net = priv->net; @@ -1184,12 +1464,12 @@ static int mcp251x_can_remove(struct spi_device *spi) mcp251x_power_enable(priv->power, 0); - if (!IS_ERR(priv->clk)) - clk_disable_unprepare(priv->clk); + destroy_workqueue(priv->wq); + priv->wq = NULL; - free_candev(net); + clk_disable_unprepare(priv->clk); - return 0; + free_candev(net); } static int __maybe_unused mcp251x_can_suspend(struct device *dev) @@ -1200,8 +1480,7 @@ static int __maybe_unused mcp251x_can_suspend(struct device *dev) priv->force_quit = 1; disable_irq(spi->irq); - /* - * Note: at this point neither IST nor workqueues are running. + /* Note: at this point neither IST nor workqueues are running. * open/stop cannot be called anyway so locking is not needed */ if (netif_running(net)) { @@ -1214,10 +1493,8 @@ static int __maybe_unused mcp251x_can_suspend(struct device *dev) priv->after_suspend = AFTER_SUSPEND_DOWN; } - if (!IS_ERR_OR_NULL(priv->power)) { - regulator_disable(priv->power); - priv->after_suspend |= AFTER_SUSPEND_POWER; - } + mcp251x_power_enable(priv->power, 0); + priv->after_suspend |= AFTER_SUSPEND_POWER; return 0; } @@ -1229,13 +1506,13 @@ static int __maybe_unused mcp251x_can_resume(struct device *dev) if (priv->after_suspend & AFTER_SUSPEND_POWER) mcp251x_power_enable(priv->power, 1); - - if (priv->after_suspend & AFTER_SUSPEND_UP) { + if (priv->after_suspend & AFTER_SUSPEND_UP) mcp251x_power_enable(priv->transceiver, 1); + + if (priv->after_suspend & (AFTER_SUSPEND_POWER | AFTER_SUSPEND_UP)) queue_work(priv->wq, &priv->restart_work); - } else { + else priv->after_suspend = 0; - } priv->force_quit = 0; enable_irq(spi->irq); @@ -1259,5 +1536,5 @@ module_spi_driver(mcp251x_can_driver); MODULE_AUTHOR("Chris Elston <celston@katalix.com>, " "Christian Pellegrin <chripell@evolware.org>"); -MODULE_DESCRIPTION("Microchip 251x CAN driver"); +MODULE_DESCRIPTION("Microchip 251x/25625 CAN driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/can/spi/mcp251xfd/Kconfig b/drivers/net/can/spi/mcp251xfd/Kconfig new file mode 100644 index 000000000000..7c29846e6051 --- /dev/null +++ b/drivers/net/can/spi/mcp251xfd/Kconfig @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0-only + +config CAN_MCP251XFD + tristate "Microchip MCP251xFD SPI CAN controllers" + select CAN_RX_OFFLOAD + select REGMAP + select WANT_DEV_COREDUMP + select GPIOLIB + help + Driver for the Microchip MCP251XFD SPI FD-CAN controller + family. 
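
The reworked mcp251x remove path above also settles on a fixed teardown order: unregister the netdev first, then cut power, destroy the workqueue, disable the clock, and only then free the candev that owns the private data. A compressed sketch of that ordering, assuming a similar priv layout (identifiers are illustrative):

#include <linux/can/dev.h>
#include <linux/clk.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct demo_priv {
	struct net_device *net;
	struct workqueue_struct *wq;
	struct clk *clk;
};

static void demo_teardown(struct demo_priv *priv)
{
	/* stop traffic and netlink access before touching any resources */
	unregister_candev(priv->net);

	/* the work items dereference priv, so destroy the queue before freeing it */
	destroy_workqueue(priv->wq);
	priv->wq = NULL;

	clk_disable_unprepare(priv->clk);

	/* frees the net_device together with the embedded priv */
	free_candev(priv->net);
}
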
+ +config CAN_MCP251XFD_SANITY + depends on CAN_MCP251XFD + bool "Additional Sanity Checks" + help + This option enables additional sanity checks in the driver, + that compares various internal counters with the in chip + variants. This comes with a runtime overhead. + Disable if unsure. diff --git a/drivers/net/can/spi/mcp251xfd/Makefile b/drivers/net/can/spi/mcp251xfd/Makefile new file mode 100644 index 000000000000..94d7de954294 --- /dev/null +++ b/drivers/net/can/spi/mcp251xfd/Makefile @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-$(CONFIG_CAN_MCP251XFD) += mcp251xfd.o + +mcp251xfd-objs := +mcp251xfd-objs += mcp251xfd-chip-fifo.o +mcp251xfd-objs += mcp251xfd-core.o +mcp251xfd-objs += mcp251xfd-crc16.o +mcp251xfd-objs += mcp251xfd-ethtool.o +mcp251xfd-objs += mcp251xfd-ram.o +mcp251xfd-objs += mcp251xfd-regmap.o +mcp251xfd-objs += mcp251xfd-ring.o +mcp251xfd-objs += mcp251xfd-rx.o +mcp251xfd-objs += mcp251xfd-tef.o +mcp251xfd-objs += mcp251xfd-timestamp.o +mcp251xfd-objs += mcp251xfd-tx.o + +mcp251xfd-$(CONFIG_DEV_COREDUMP) += mcp251xfd-dump.o diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c new file mode 100644 index 000000000000..0d96097a2547 --- /dev/null +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// mcp251xfd - Microchip MCP251xFD Family CAN controller driver +// +// Copyright (c) 2019, 2020, 2021 Pengutronix, +// Marc Kleine-Budde <kernel@pengutronix.de> +// +// Based on: +// +// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface +// +// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org> +// + +#include <linux/bitfield.h> + +#include "mcp251xfd.h" + +static int +mcp251xfd_chip_rx_fifo_init_one(const struct mcp251xfd_priv *priv, + const struct mcp251xfd_rx_ring *ring) +{ + u32 fifo_con; + + /* Enable RXOVIE on _all_ RX FIFOs, not just the last one. + * + * FIFOs hit by a RX MAB overflow and RXOVIE enabled will + * generate a RXOVIF, use this to properly detect RX MAB + * overflows. 
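+	 * (See mcp251xfd_handle_rxovif() and mcp251xfd_handle_serrif(),
+	 * which evaluate these flags.)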
+ */ + fifo_con = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK, + ring->obj_num - 1) | + MCP251XFD_REG_FIFOCON_RXTSEN | + MCP251XFD_REG_FIFOCON_RXOVIE | + MCP251XFD_REG_FIFOCON_TFNRFNIE; + + if (mcp251xfd_is_fd_mode(priv)) + fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK, + MCP251XFD_REG_FIFOCON_PLSIZE_64); + else + fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK, + MCP251XFD_REG_FIFOCON_PLSIZE_8); + + return regmap_write(priv->map_reg, + MCP251XFD_REG_FIFOCON(ring->fifo_nr), fifo_con); +} + +static int +mcp251xfd_chip_rx_filter_init_one(const struct mcp251xfd_priv *priv, + const struct mcp251xfd_rx_ring *ring) +{ + u32 fltcon; + + fltcon = MCP251XFD_REG_FLTCON_FLTEN(ring->nr) | + MCP251XFD_REG_FLTCON_FBP(ring->nr, ring->fifo_nr); + + return regmap_update_bits(priv->map_reg, + MCP251XFD_REG_FLTCON(ring->nr >> 2), + MCP251XFD_REG_FLTCON_FLT_MASK(ring->nr), + fltcon); +} + +int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv) +{ + const struct mcp251xfd_tx_ring *tx_ring = priv->tx; + const struct mcp251xfd_rx_ring *rx_ring; + u32 val; + int err, n; + + /* TEF */ + val = FIELD_PREP(MCP251XFD_REG_TEFCON_FSIZE_MASK, + tx_ring->obj_num - 1) | + MCP251XFD_REG_TEFCON_TEFTSEN | + MCP251XFD_REG_TEFCON_TEFOVIE | + MCP251XFD_REG_TEFCON_TEFNEIE; + + err = regmap_write(priv->map_reg, MCP251XFD_REG_TEFCON, val); + if (err) + return err; + + /* TX FIFO */ + val = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK, + tx_ring->obj_num - 1) | + MCP251XFD_REG_FIFOCON_TXEN | + MCP251XFD_REG_FIFOCON_TXATIE; + + if (mcp251xfd_is_fd_mode(priv)) + val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK, + MCP251XFD_REG_FIFOCON_PLSIZE_64); + else + val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK, + MCP251XFD_REG_FIFOCON_PLSIZE_8); + + if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) + val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK, + MCP251XFD_REG_FIFOCON_TXAT_ONE_SHOT); + else + val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK, + MCP251XFD_REG_FIFOCON_TXAT_UNLIMITED); + + err = regmap_write(priv->map_reg, + MCP251XFD_REG_FIFOCON(priv->tx->fifo_nr), + val); + if (err) + return err; + + /* RX FIFOs */ + mcp251xfd_for_each_rx_ring(priv, rx_ring, n) { + err = mcp251xfd_chip_rx_fifo_init_one(priv, rx_ring); + if (err) + return err; + + err = mcp251xfd_chip_rx_filter_init_one(priv, rx_ring); + if (err) + return err; + } + + return 0; +} diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c new file mode 100644 index 000000000000..5134ebb85880 --- /dev/null +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c @@ -0,0 +1,2417 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// mcp251xfd - Microchip MCP251xFD Family CAN controller driver +// +// Copyright (c) 2019, 2020, 2021, 2023 Pengutronix, +// Marc Kleine-Budde <kernel@pengutronix.de> +// +// Based on: +// +// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface +// +// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org> +// + +#include <linux/unaligned.h> +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/pm_runtime.h> +#include <linux/property.h> + +#include "mcp251xfd.h" + +#define DEVICE_NAME "mcp251xfd" + +static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2517fd = { + .quirks = MCP251XFD_QUIRK_MAB_NO_WARN | MCP251XFD_QUIRK_CRC_REG | + MCP251XFD_QUIRK_CRC_RX | MCP251XFD_QUIRK_CRC_TX | + MCP251XFD_QUIRK_ECC, + .model = 
MCP251XFD_MODEL_MCP2517FD, +}; + +static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2518fd = { + .quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX | + MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC, + .model = MCP251XFD_MODEL_MCP2518FD, +}; + +static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp251863 = { + .quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX | + MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC, + .model = MCP251XFD_MODEL_MCP251863, +}; + +/* Autodetect model, start with CRC enabled. */ +static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp251xfd = { + .quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX | + MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC, + .model = MCP251XFD_MODEL_MCP251XFD, +}; + +static const struct can_bittiming_const mcp251xfd_bittiming_const = { + .name = DEVICE_NAME, + .tseg1_min = 2, + .tseg1_max = 256, + .tseg2_min = 1, + .tseg2_max = 128, + .sjw_max = 128, + .brp_min = 1, + .brp_max = 256, + .brp_inc = 1, +}; + +static const struct can_bittiming_const mcp251xfd_data_bittiming_const = { + .name = DEVICE_NAME, + .tseg1_min = 1, + .tseg1_max = 32, + .tseg2_min = 1, + .tseg2_max = 16, + .sjw_max = 16, + .brp_min = 1, + .brp_max = 256, + .brp_inc = 1, +}; + +/* The datasheet of the mcp2518fd (DS20006027B) specifies a range of + * [-64,63] for TDCO, indicating a relative TDCO. + * + * Manual tests have shown, that using a relative TDCO configuration + * results in bus off, while an absolute configuration works. + * + * For TDCO use the max value (63) from the data sheet, but 0 as the + * minimum. + */ +static const struct can_tdc_const mcp251xfd_tdc_const = { + .tdcv_min = 0, + .tdcv_max = 63, + .tdco_min = 0, + .tdco_max = 63, + .tdcf_min = 0, + .tdcf_max = 0, +}; + +static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model) +{ + switch (model) { + case MCP251XFD_MODEL_MCP2517FD: + return "MCP2517FD"; + case MCP251XFD_MODEL_MCP2518FD: + return "MCP2518FD"; + case MCP251XFD_MODEL_MCP251863: + return "MCP251863"; + case MCP251XFD_MODEL_MCP251XFD: + return "MCP251xFD"; + } + + return "<unknown>"; +} + +static inline const char * +mcp251xfd_get_model_str(const struct mcp251xfd_priv *priv) +{ + return __mcp251xfd_get_model_str(priv->devtype_data.model); +} + +static const char *mcp251xfd_get_mode_str(const u8 mode) +{ + switch (mode) { + case MCP251XFD_REG_CON_MODE_MIXED: + return "Mixed (CAN FD/CAN 2.0)"; + case MCP251XFD_REG_CON_MODE_SLEEP: + return "Sleep"; + case MCP251XFD_REG_CON_MODE_INT_LOOPBACK: + return "Internal Loopback"; + case MCP251XFD_REG_CON_MODE_LISTENONLY: + return "Listen Only"; + case MCP251XFD_REG_CON_MODE_CONFIG: + return "Configuration"; + case MCP251XFD_REG_CON_MODE_EXT_LOOPBACK: + return "External Loopback"; + case MCP251XFD_REG_CON_MODE_CAN2_0: + return "CAN 2.0"; + case MCP251XFD_REG_CON_MODE_RESTRICTED: + return "Restricted Operation"; + } + + return "<unknown>"; +} + +static const char * +mcp251xfd_get_osc_str(const u32 osc, const u32 osc_reference) +{ + switch (~osc & osc_reference & + (MCP251XFD_REG_OSC_OSCRDY | MCP251XFD_REG_OSC_PLLRDY)) { + case MCP251XFD_REG_OSC_PLLRDY: + return "PLL"; + case MCP251XFD_REG_OSC_OSCRDY: + return "Oscillator"; + case MCP251XFD_REG_OSC_PLLRDY | MCP251XFD_REG_OSC_OSCRDY: + return "Oscillator/PLL"; + } + + return "<unknown>"; +} + +static inline int mcp251xfd_vdd_enable(const struct mcp251xfd_priv *priv) +{ + if (!priv->reg_vdd) + return 0; + + return regulator_enable(priv->reg_vdd); +} + +static inline int 
mcp251xfd_vdd_disable(const struct mcp251xfd_priv *priv) +{ + if (!priv->reg_vdd) + return 0; + + return regulator_disable(priv->reg_vdd); +} + +static inline int +mcp251xfd_transceiver_enable(const struct mcp251xfd_priv *priv) +{ + if (!priv->reg_xceiver) + return 0; + + return regulator_enable(priv->reg_xceiver); +} + +static inline int +mcp251xfd_transceiver_disable(const struct mcp251xfd_priv *priv) +{ + if (!priv->reg_xceiver) + return 0; + + return regulator_disable(priv->reg_xceiver); +} + +static int mcp251xfd_clks_and_vdd_enable(const struct mcp251xfd_priv *priv) +{ + int err; + + err = clk_prepare_enable(priv->clk); + if (err) + return err; + + err = mcp251xfd_vdd_enable(priv); + if (err) + clk_disable_unprepare(priv->clk); + + /* Wait for oscillator stabilisation time after power up */ + usleep_range(MCP251XFD_OSC_STAB_SLEEP_US, + 2 * MCP251XFD_OSC_STAB_SLEEP_US); + + return err; +} + +static int mcp251xfd_clks_and_vdd_disable(const struct mcp251xfd_priv *priv) +{ + int err; + + err = mcp251xfd_vdd_disable(priv); + if (err) + return err; + + clk_disable_unprepare(priv->clk); + + return 0; +} + +static inline bool mcp251xfd_reg_invalid(u32 reg) +{ + return reg == 0x0 || reg == 0xffffffff; +} + +static inline int +mcp251xfd_chip_get_mode(const struct mcp251xfd_priv *priv, u8 *mode) +{ + u32 val; + int err; + + err = regmap_read(priv->map_reg, MCP251XFD_REG_CON, &val); + if (err) + return err; + + *mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, val); + + return 0; +} + +static int +__mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv, + const u8 mode_req, bool nowait) +{ + const struct can_bittiming *bt = &priv->can.bittiming; + unsigned long timeout_us = MCP251XFD_POLL_TIMEOUT_US; + u32 con = 0, con_reqop, osc = 0; + u8 mode; + int err; + + con_reqop = FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK, mode_req); + err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_CON, + MCP251XFD_REG_CON_REQOP_MASK, con_reqop); + if (err == -EBADMSG) { + netdev_err(priv->ndev, + "Failed to set Requested Operation Mode.\n"); + + return -ENODEV; + } else if (err) { + return err; + } + + if (mode_req == MCP251XFD_REG_CON_MODE_SLEEP || nowait) + return 0; + + if (bt->bitrate) + timeout_us = max_t(unsigned long, timeout_us, + MCP251XFD_FRAME_LEN_MAX_BITS * USEC_PER_SEC / + bt->bitrate); + + err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_CON, con, + !mcp251xfd_reg_invalid(con) && + FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, + con) == mode_req, + MCP251XFD_POLL_SLEEP_US, timeout_us); + if (err != -ETIMEDOUT && err != -EBADMSG) + return err; + + /* Ignore return value. + * Print below error messages, even if this fails. 
+ */ + regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc); + + if (mcp251xfd_reg_invalid(con)) { + netdev_err(priv->ndev, + "Failed to read CAN Control Register (con=0x%08x, osc=0x%08x).\n", + con, osc); + + return -ENODEV; + } + + mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, con); + netdev_err(priv->ndev, + "Controller failed to enter mode %s Mode (%u) and stays in %s Mode (%u) (con=0x%08x, osc=0x%08x).\n", + mcp251xfd_get_mode_str(mode_req), mode_req, + mcp251xfd_get_mode_str(mode), mode, + con, osc); + + return -ETIMEDOUT; +} + +static inline int +mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv, + const u8 mode_req) +{ + return __mcp251xfd_chip_set_mode(priv, mode_req, false); +} + +static inline int __maybe_unused +mcp251xfd_chip_set_mode_nowait(const struct mcp251xfd_priv *priv, + const u8 mode_req) +{ + return __mcp251xfd_chip_set_mode(priv, mode_req, true); +} + +static int +mcp251xfd_chip_wait_for_osc_ready(const struct mcp251xfd_priv *priv, + u32 osc_reference, u32 osc_mask) +{ + u32 osc; + int err; + + err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_OSC, osc, + !mcp251xfd_reg_invalid(osc) && + (osc & osc_mask) == osc_reference, + MCP251XFD_OSC_STAB_SLEEP_US, + MCP251XFD_OSC_STAB_TIMEOUT_US); + if (err != -ETIMEDOUT) + return err; + + if (mcp251xfd_reg_invalid(osc)) { + netdev_err(priv->ndev, + "Failed to read Oscillator Configuration Register (osc=0x%08x).\n", + osc); + return -ENODEV; + } + + netdev_err(priv->ndev, + "Timeout waiting for %s ready (osc=0x%08x, osc_reference=0x%08x, osc_mask=0x%08x).\n", + mcp251xfd_get_osc_str(osc, osc_reference), + osc, osc_reference, osc_mask); + + return -ETIMEDOUT; +} + +static int mcp251xfd_chip_wake(const struct mcp251xfd_priv *priv) +{ + u32 osc, osc_reference, osc_mask; + int err; + + /* For normal sleep on MCP2517FD and MCP2518FD, clearing + * "Oscillator Disable" will wake the chip. For low power mode + * on MCP2518FD, asserting the chip select will wake the + * chip. Writing to the Oscillator register will wake it in + * both cases. + */ + osc = FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK, + MCP251XFD_REG_OSC_CLKODIV_10); + + /* We cannot check for the PLL ready bit (either set or + * unset), as the PLL might be enabled. This can happen if the + * system reboots, while the mcp251xfd stays powered. + */ + osc_reference = MCP251XFD_REG_OSC_OSCRDY; + osc_mask = MCP251XFD_REG_OSC_OSCRDY; + + /* If the controller is in Sleep Mode the following write only + * removes the "Oscillator Disable" bit and powers it up. All + * other bits are unaffected. + */ + err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc); + if (err) + return err; + + /* Sometimes the PLL is stuck enabled, the controller never + * sets the OSC Ready bit, and we get an -ETIMEDOUT. Our + * caller takes care of retry. 
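+	 * (e.g. mcp251xfd_chip_softreset() retries the whole soft reset
+	 * sequence when this returns -ETIMEDOUT).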
+ */ + return mcp251xfd_chip_wait_for_osc_ready(priv, osc_reference, osc_mask); +} + +static inline int mcp251xfd_chip_sleep(const struct mcp251xfd_priv *priv) +{ + if (priv->pll_enable) { + u32 osc; + int err; + + /* Turn off PLL */ + osc = FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK, + MCP251XFD_REG_OSC_CLKODIV_10); + err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc); + if (err) + netdev_err(priv->ndev, + "Failed to disable PLL.\n"); + + priv->spi->max_speed_hz = priv->spi_max_speed_hz_slow; + } + + return mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP); +} + +static int mcp251xfd_chip_softreset_do(const struct mcp251xfd_priv *priv) +{ + const __be16 cmd = mcp251xfd_cmd_reset(); + int err; + + /* The Set Mode and SPI Reset command only works if the + * controller is not in Sleep Mode. + */ + err = mcp251xfd_chip_wake(priv); + if (err) + return err; + + err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_CONFIG); + if (err) + return err; + + /* spi_write_then_read() works with non DMA-safe buffers */ + return spi_write_then_read(priv->spi, &cmd, sizeof(cmd), NULL, 0); +} + +static int mcp251xfd_chip_softreset_check(const struct mcp251xfd_priv *priv) +{ + u32 osc_reference, osc_mask; + u8 mode; + int err; + + /* Check for reset defaults of OSC reg. + * This will take care of stabilization period. + */ + osc_reference = MCP251XFD_REG_OSC_OSCRDY | + FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK, + MCP251XFD_REG_OSC_CLKODIV_10); + osc_mask = osc_reference | MCP251XFD_REG_OSC_PLLRDY; + err = mcp251xfd_chip_wait_for_osc_ready(priv, osc_reference, osc_mask); + if (err) + return err; + + err = mcp251xfd_chip_get_mode(priv, &mode); + if (err) + return err; + + if (mode != MCP251XFD_REG_CON_MODE_CONFIG) { + netdev_info(priv->ndev, + "Controller not in Config Mode after reset, but in %s Mode (%u).\n", + mcp251xfd_get_mode_str(mode), mode); + return -ETIMEDOUT; + } + + return 0; +} + +static int mcp251xfd_chip_softreset(const struct mcp251xfd_priv *priv) +{ + int err, i; + + for (i = 0; i < MCP251XFD_SOFTRESET_RETRIES_MAX; i++) { + if (i) + netdev_info(priv->ndev, + "Retrying to reset controller.\n"); + + err = mcp251xfd_chip_softreset_do(priv); + if (err == -ETIMEDOUT) + continue; + if (err) + return err; + + err = mcp251xfd_chip_softreset_check(priv); + if (err == -ETIMEDOUT) + continue; + if (err) + return err; + + return 0; + } + + return err; +} + +static int mcp251xfd_chip_clock_init(const struct mcp251xfd_priv *priv) +{ + u32 osc, osc_reference, osc_mask; + int err; + + /* Activate Low Power Mode on Oscillator Disable. This only + * works on the MCP2518FD. The MCP2517FD will go into normal + * Sleep Mode instead. + */ + osc = MCP251XFD_REG_OSC_LPMEN | + FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK, + MCP251XFD_REG_OSC_CLKODIV_10); + osc_reference = MCP251XFD_REG_OSC_OSCRDY; + osc_mask = MCP251XFD_REG_OSC_OSCRDY | MCP251XFD_REG_OSC_PLLRDY; + + if (priv->pll_enable) { + osc |= MCP251XFD_REG_OSC_PLLEN; + osc_reference |= MCP251XFD_REG_OSC_PLLRDY; + } + + err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc); + if (err) + return err; + + err = mcp251xfd_chip_wait_for_osc_ready(priv, osc_reference, osc_mask); + if (err) + return err; + + priv->spi->max_speed_hz = priv->spi_max_speed_hz_fast; + + return 0; +} + +static int mcp251xfd_chip_timestamp_init(const struct mcp251xfd_priv *priv) +{ + /* Set Time Base Counter Prescaler to 1. + * + * This means an overflow of the 32 bit Time Base Counter + * register at 40 MHz every 107 seconds. 
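+	 * (2^32 / 40 MHz ~= 107.4 seconds).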
+ */ + return regmap_write(priv->map_reg, MCP251XFD_REG_TSCON, + MCP251XFD_REG_TSCON_TBCEN); +} + +static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv) +{ + const struct can_bittiming *bt = &priv->can.bittiming; + const struct can_bittiming *dbt = &priv->can.fd.data_bittiming; + u32 tdcmod, val = 0; + int err; + + /* CAN Control Register + * + * - no transmit bandwidth sharing + * - config mode + * - disable transmit queue + * - store in transmit FIFO event + * - transition to restricted operation mode on system error + * - ESI is transmitted recessive when ESI of message is high or + * CAN controller error passive + * - restricted retransmission attempts, + * use TQXCON_TXAT and FIFOCON_TXAT + * - wake-up filter bits T11FILTER + * - use CAN bus line filter for wakeup + * - protocol exception is treated as a form error + * - Do not compare data bytes + */ + val = FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK, + MCP251XFD_REG_CON_MODE_CONFIG) | + MCP251XFD_REG_CON_STEF | + MCP251XFD_REG_CON_ESIGM | + MCP251XFD_REG_CON_RTXAT | + FIELD_PREP(MCP251XFD_REG_CON_WFT_MASK, + MCP251XFD_REG_CON_WFT_T11FILTER) | + MCP251XFD_REG_CON_WAKFIL | + MCP251XFD_REG_CON_PXEDIS; + + if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)) + val |= MCP251XFD_REG_CON_ISOCRCEN; + + err = regmap_write(priv->map_reg, MCP251XFD_REG_CON, val); + if (err) + return err; + + /* Nominal Bit Time */ + val = FIELD_PREP(MCP251XFD_REG_NBTCFG_BRP_MASK, bt->brp - 1) | + FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG1_MASK, + bt->prop_seg + bt->phase_seg1 - 1) | + FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG2_MASK, + bt->phase_seg2 - 1) | + FIELD_PREP(MCP251XFD_REG_NBTCFG_SJW_MASK, bt->sjw - 1); + + err = regmap_write(priv->map_reg, MCP251XFD_REG_NBTCFG, val); + if (err) + return err; + + if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD)) + return 0; + + /* Data Bit Time */ + val = FIELD_PREP(MCP251XFD_REG_DBTCFG_BRP_MASK, dbt->brp - 1) | + FIELD_PREP(MCP251XFD_REG_DBTCFG_TSEG1_MASK, + dbt->prop_seg + dbt->phase_seg1 - 1) | + FIELD_PREP(MCP251XFD_REG_DBTCFG_TSEG2_MASK, + dbt->phase_seg2 - 1) | + FIELD_PREP(MCP251XFD_REG_DBTCFG_SJW_MASK, dbt->sjw - 1); + + err = regmap_write(priv->map_reg, MCP251XFD_REG_DBTCFG, val); + if (err) + return err; + + /* Transmitter Delay Compensation */ + if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_AUTO) + tdcmod = MCP251XFD_REG_TDC_TDCMOD_AUTO; + else if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_MANUAL) + tdcmod = MCP251XFD_REG_TDC_TDCMOD_MANUAL; + else + tdcmod = MCP251XFD_REG_TDC_TDCMOD_DISABLED; + + val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK, tdcmod) | + FIELD_PREP(MCP251XFD_REG_TDC_TDCV_MASK, priv->can.fd.tdc.tdcv) | + FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, priv->can.fd.tdc.tdco); + + return regmap_write(priv->map_reg, MCP251XFD_REG_TDC, val); +} + +static int mcp251xfd_chip_rx_int_enable(const struct mcp251xfd_priv *priv) +{ + u32 val, mask; + + if (!priv->rx_int) + return 0; + + /* Configure PIN1 as RX Interrupt: + * + * PIN1 must be Input, otherwise there is a glitch on the + * rx-INT line. It happens between setting the PIN as output + * (in the first byte of the SPI transfer) and configuring the + * PIN as interrupt (in the last byte of the SPI transfer). 
+ */ + val = MCP251XFD_REG_IOCON_TRIS(1); + mask = MCP251XFD_REG_IOCON_TRIS(1) | MCP251XFD_REG_IOCON_PM(1); + return regmap_update_bits(priv->map_reg, MCP251XFD_REG_IOCON, mask, val); +} + +static int mcp251xfd_chip_rx_int_disable(const struct mcp251xfd_priv *priv) +{ + u32 val; + + if (!priv->rx_int) + return 0; + + /* Configure PIN1 as GPIO Input */ + val = MCP251XFD_REG_IOCON_PM(1) | MCP251XFD_REG_IOCON_TRIS(1); + return regmap_update_bits(priv->map_reg, MCP251XFD_REG_IOCON, val, val); +} + +static int mcp251xfd_chip_ecc_init(struct mcp251xfd_priv *priv) +{ + struct mcp251xfd_ecc *ecc = &priv->ecc; + void *ram; + u32 val = 0; + int err; + + ecc->ecc_stat = 0; + + if (priv->devtype_data.quirks & MCP251XFD_QUIRK_ECC) + val = MCP251XFD_REG_ECCCON_ECCEN; + + err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON, + MCP251XFD_REG_ECCCON_ECCEN, val); + if (err) + return err; + + ram = kzalloc(MCP251XFD_RAM_SIZE, GFP_KERNEL); + if (!ram) + return -ENOMEM; + + err = regmap_raw_write(priv->map_reg, MCP251XFD_RAM_START, ram, + MCP251XFD_RAM_SIZE); + kfree(ram); + + return err; +} + +static u8 mcp251xfd_get_normal_mode(const struct mcp251xfd_priv *priv) +{ + u8 mode; + + if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) + mode = MCP251XFD_REG_CON_MODE_INT_LOOPBACK; + else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) + mode = MCP251XFD_REG_CON_MODE_LISTENONLY; + else if (priv->can.ctrlmode & CAN_CTRLMODE_FD) + mode = MCP251XFD_REG_CON_MODE_MIXED; + else + mode = MCP251XFD_REG_CON_MODE_CAN2_0; + + return mode; +} + +static int +__mcp251xfd_chip_set_normal_mode(const struct mcp251xfd_priv *priv, + bool nowait) +{ + u8 mode; + + mode = mcp251xfd_get_normal_mode(priv); + + return __mcp251xfd_chip_set_mode(priv, mode, nowait); +} + +static inline int +mcp251xfd_chip_set_normal_mode(const struct mcp251xfd_priv *priv) +{ + return __mcp251xfd_chip_set_normal_mode(priv, false); +} + +static inline int +mcp251xfd_chip_set_normal_mode_nowait(const struct mcp251xfd_priv *priv) +{ + return __mcp251xfd_chip_set_normal_mode(priv, true); +} + +static int mcp251xfd_chip_interrupts_enable(const struct mcp251xfd_priv *priv) +{ + u32 val; + int err; + + val = MCP251XFD_REG_CRC_FERRIE | MCP251XFD_REG_CRC_CRCERRIE; + err = regmap_write(priv->map_reg, MCP251XFD_REG_CRC, val); + if (err) + return err; + + val = MCP251XFD_REG_ECCCON_DEDIE | MCP251XFD_REG_ECCCON_SECIE; + err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON, val, val); + if (err) + return err; + + val = MCP251XFD_REG_INT_CERRIE | + MCP251XFD_REG_INT_SERRIE | + MCP251XFD_REG_INT_RXOVIE | + MCP251XFD_REG_INT_TXATIE | + MCP251XFD_REG_INT_SPICRCIE | + MCP251XFD_REG_INT_ECCIE | + MCP251XFD_REG_INT_TEFIE | + MCP251XFD_REG_INT_MODIE | + MCP251XFD_REG_INT_RXIE; + + if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) + val |= MCP251XFD_REG_INT_IVMIE; + + return regmap_write(priv->map_reg, MCP251XFD_REG_INT, val); +} + +static int mcp251xfd_chip_interrupts_disable(const struct mcp251xfd_priv *priv) +{ + int err; + u32 mask; + + err = regmap_write(priv->map_reg, MCP251XFD_REG_INT, 0); + if (err) + return err; + + mask = MCP251XFD_REG_ECCCON_DEDIE | MCP251XFD_REG_ECCCON_SECIE; + err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON, + mask, 0x0); + if (err) + return err; + + return regmap_write(priv->map_reg, MCP251XFD_REG_CRC, 0); +} + +static void mcp251xfd_chip_stop(struct mcp251xfd_priv *priv, + const enum can_state state) +{ + priv->can.state = state; + + mcp251xfd_chip_interrupts_disable(priv); + mcp251xfd_chip_rx_int_disable(priv); + 
mcp251xfd_timestamp_stop(priv); + mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_CONFIG); +} + +static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv) +{ + int err; + + err = mcp251xfd_chip_timestamp_init(priv); + if (err) + goto out_chip_stop; + + mcp251xfd_timestamp_start(priv); + + err = mcp251xfd_set_bittiming(priv); + if (err) + goto out_chip_stop; + + err = mcp251xfd_chip_rx_int_enable(priv); + if (err) + goto out_chip_stop; + + err = mcp251xfd_chip_ecc_init(priv); + if (err) + goto out_chip_stop; + + err = mcp251xfd_ring_init(priv); + if (err) + goto out_chip_stop; + + err = mcp251xfd_chip_fifo_init(priv); + if (err) + goto out_chip_stop; + + priv->can.state = CAN_STATE_ERROR_ACTIVE; + + err = mcp251xfd_chip_set_normal_mode(priv); + if (err) + goto out_chip_stop; + + return 0; + +out_chip_stop: + mcp251xfd_dump(priv); + mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED); + + return err; +} + +static int mcp251xfd_set_mode(struct net_device *ndev, enum can_mode mode) +{ + struct mcp251xfd_priv *priv = netdev_priv(ndev); + int err; + + switch (mode) { + case CAN_MODE_START: + err = mcp251xfd_chip_start(priv); + if (err) + return err; + + err = mcp251xfd_chip_interrupts_enable(priv); + if (err) { + mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED); + return err; + } + + netif_wake_queue(ndev); + break; + + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int __mcp251xfd_get_berr_counter(const struct net_device *ndev, + struct can_berr_counter *bec) +{ + const struct mcp251xfd_priv *priv = netdev_priv(ndev); + u32 trec; + int err; + + err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec); + if (err) + return err; + + if (trec & MCP251XFD_REG_TREC_TXBO) + bec->txerr = CAN_BUS_OFF_THRESHOLD; + else + bec->txerr = FIELD_GET(MCP251XFD_REG_TREC_TEC_MASK, trec); + bec->rxerr = FIELD_GET(MCP251XFD_REG_TREC_REC_MASK, trec); + + return 0; +} + +static int mcp251xfd_get_berr_counter(const struct net_device *ndev, + struct can_berr_counter *bec) +{ + const struct mcp251xfd_priv *priv = netdev_priv(ndev); + + /* Avoid waking up the controller if the interface is down */ + if (!(ndev->flags & IFF_UP)) + return 0; + + /* The controller is powered down during Bus Off, use saved + * bec values. + */ + if (priv->can.state == CAN_STATE_BUS_OFF) { + *bec = priv->bec; + return 0; + } + + return __mcp251xfd_get_berr_counter(ndev, bec); +} + +static struct sk_buff * +mcp251xfd_alloc_can_err_skb(struct mcp251xfd_priv *priv, + struct can_frame **cf, u32 *ts_raw) +{ + struct sk_buff *skb; + int err; + + err = mcp251xfd_get_timestamp_raw(priv, ts_raw); + if (err) + return NULL; + + skb = alloc_can_err_skb(priv->ndev, cf); + if (skb) + mcp251xfd_skb_set_timestamp_raw(priv, skb, *ts_raw); + + return skb; +} + +static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv) +{ + struct net_device_stats *stats = &priv->ndev->stats; + struct mcp251xfd_rx_ring *ring; + struct sk_buff *skb; + struct can_frame *cf; + u32 ts_raw, rxovif; + int err, i; + + stats->rx_over_errors++; + stats->rx_errors++; + + err = regmap_read(priv->map_reg, MCP251XFD_REG_RXOVIF, &rxovif); + if (err) + return err; + + mcp251xfd_for_each_rx_ring(priv, ring, i) { + if (!(rxovif & BIT(ring->fifo_nr))) + continue; + + /* If SERRIF is active, there was a RX MAB overflow. 
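+		 * Otherwise it is a regular RX FIFO overflow.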
*/ + if (priv->regs_status.intf & MCP251XFD_REG_INT_SERRIF) { + if (net_ratelimit()) + netdev_dbg(priv->ndev, + "RX-%d: MAB overflow detected.\n", + ring->nr); + } else { + if (net_ratelimit()) + netdev_dbg(priv->ndev, + "RX-%d: FIFO overflow.\n", + ring->nr); + } + + err = regmap_update_bits(priv->map_reg, + MCP251XFD_REG_FIFOSTA(ring->fifo_nr), + MCP251XFD_REG_FIFOSTA_RXOVIF, + 0x0); + if (err) + return err; + } + + skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &ts_raw); + if (!skb) + return 0; + + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + + err = can_rx_offload_queue_timestamp(&priv->offload, skb, ts_raw); + if (err) + stats->rx_fifo_errors++; + + return 0; +} + +static int mcp251xfd_handle_txatif(struct mcp251xfd_priv *priv) +{ + netdev_info(priv->ndev, "%s\n", __func__); + + return 0; +} + +static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv) +{ + struct net_device_stats *stats = &priv->ndev->stats; + u32 bdiag1, ts_raw; + struct sk_buff *skb; + struct can_frame *cf = NULL; + int err; + + err = mcp251xfd_get_timestamp_raw(priv, &ts_raw); + if (err) + return err; + + err = regmap_read(priv->map_reg, MCP251XFD_REG_BDIAG1, &bdiag1); + if (err) + return err; + + /* Write 0s to clear error bits, don't write 1s to non active + * bits, as they will be set. + */ + err = regmap_write(priv->map_reg, MCP251XFD_REG_BDIAG1, 0x0); + if (err) + return err; + + priv->can.can_stats.bus_error++; + + skb = alloc_can_err_skb(priv->ndev, &cf); + if (cf) + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + + /* Controller misconfiguration */ + if (WARN_ON(bdiag1 & MCP251XFD_REG_BDIAG1_DLCMM)) + netdev_err(priv->ndev, + "recv'd DLC is larger than PLSIZE of FIFO element."); + + /* RX errors */ + if (bdiag1 & (MCP251XFD_REG_BDIAG1_DCRCERR | + MCP251XFD_REG_BDIAG1_NCRCERR)) { + netdev_dbg(priv->ndev, "CRC error\n"); + + stats->rx_errors++; + if (cf) + cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; + } + if (bdiag1 & (MCP251XFD_REG_BDIAG1_DSTUFERR | + MCP251XFD_REG_BDIAG1_NSTUFERR)) { + netdev_dbg(priv->ndev, "Stuff error\n"); + + stats->rx_errors++; + if (cf) + cf->data[2] |= CAN_ERR_PROT_STUFF; + } + if (bdiag1 & (MCP251XFD_REG_BDIAG1_DFORMERR | + MCP251XFD_REG_BDIAG1_NFORMERR)) { + netdev_dbg(priv->ndev, "Format error\n"); + + stats->rx_errors++; + if (cf) + cf->data[2] |= CAN_ERR_PROT_FORM; + } + + /* TX errors */ + if (bdiag1 & MCP251XFD_REG_BDIAG1_NACKERR) { + netdev_dbg(priv->ndev, "NACK error\n"); + + stats->tx_errors++; + if (cf) { + cf->can_id |= CAN_ERR_ACK; + cf->data[2] |= CAN_ERR_PROT_TX; + } + } + if (bdiag1 & (MCP251XFD_REG_BDIAG1_DBIT1ERR | + MCP251XFD_REG_BDIAG1_NBIT1ERR)) { + netdev_dbg(priv->ndev, "Bit1 error\n"); + + stats->tx_errors++; + if (cf) + cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT1; + } + if (bdiag1 & (MCP251XFD_REG_BDIAG1_DBIT0ERR | + MCP251XFD_REG_BDIAG1_NBIT0ERR)) { + netdev_dbg(priv->ndev, "Bit0 error\n"); + + stats->tx_errors++; + if (cf) + cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT0; + } + + if (!cf) + return 0; + + mcp251xfd_skb_set_timestamp_raw(priv, skb, ts_raw); + err = can_rx_offload_queue_timestamp(&priv->offload, skb, ts_raw); + if (err) + stats->rx_fifo_errors++; + + return 0; +} + +static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv) +{ + struct net_device_stats *stats = &priv->ndev->stats; + struct sk_buff *skb; + struct can_frame *cf = NULL; + enum can_state new_state, rx_state, tx_state; + u32 trec, ts_raw; + int err; + + err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec); + if (err) + return err; 
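+
+	/* Map the TREC error flags to CAN error states, checking the
+	 * most severe state first.
+	 */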
+ + if (trec & MCP251XFD_REG_TREC_TXBO) + tx_state = CAN_STATE_BUS_OFF; + else if (trec & MCP251XFD_REG_TREC_TXBP) + tx_state = CAN_STATE_ERROR_PASSIVE; + else if (trec & MCP251XFD_REG_TREC_TXWARN) + tx_state = CAN_STATE_ERROR_WARNING; + else + tx_state = CAN_STATE_ERROR_ACTIVE; + + if (trec & MCP251XFD_REG_TREC_RXBP) + rx_state = CAN_STATE_ERROR_PASSIVE; + else if (trec & MCP251XFD_REG_TREC_RXWARN) + rx_state = CAN_STATE_ERROR_WARNING; + else + rx_state = CAN_STATE_ERROR_ACTIVE; + + new_state = max(tx_state, rx_state); + if (new_state == priv->can.state) + return 0; + + /* The skb allocation might fail, but can_change_state() + * handles cf == NULL. + */ + skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &ts_raw); + can_change_state(priv->ndev, cf, tx_state, rx_state); + + if (new_state == CAN_STATE_BUS_OFF) { + /* As we're going to switch off the chip now, let's + * save the error counters and return them to + * userspace, if do_get_berr_counter() is called while + * the chip is in Bus Off. + */ + err = __mcp251xfd_get_berr_counter(priv->ndev, &priv->bec); + if (err) + return err; + + mcp251xfd_chip_stop(priv, CAN_STATE_BUS_OFF); + can_bus_off(priv->ndev); + } + + if (!skb) + return 0; + + if (new_state != CAN_STATE_BUS_OFF) { + struct can_berr_counter bec; + + err = mcp251xfd_get_berr_counter(priv->ndev, &bec); + if (err) + return err; + cf->can_id |= CAN_ERR_CNT; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + } + + err = can_rx_offload_queue_timestamp(&priv->offload, skb, ts_raw); + if (err) + stats->rx_fifo_errors++; + + return 0; +} + +static int +mcp251xfd_handle_modif(const struct mcp251xfd_priv *priv, bool *set_normal_mode) +{ + const u8 mode_reference = mcp251xfd_get_normal_mode(priv); + u8 mode; + int err; + + err = mcp251xfd_chip_get_mode(priv, &mode); + if (err) + return err; + + if (mode == mode_reference) { + netdev_dbg(priv->ndev, + "Controller changed into %s Mode (%u).\n", + mcp251xfd_get_mode_str(mode), mode); + return 0; + } + + /* According to MCP2517FD errata DS80000792C 1., during a TX + * MAB underflow, the controller will transition to Restricted + * Operation Mode or Listen Only Mode (depending on SERR2LOM). + * + * However this is not always the case. If SERR2LOM is + * configured for Restricted Operation Mode (SERR2LOM not set) + * the MCP2517FD will sometimes transition to Listen Only Mode + * first. When polling this bit we see that it will transition + * to Restricted Operation Mode shortly after. + */ + if ((priv->devtype_data.quirks & MCP251XFD_QUIRK_MAB_NO_WARN) && + (mode == MCP251XFD_REG_CON_MODE_RESTRICTED || + mode == MCP251XFD_REG_CON_MODE_LISTENONLY)) + netdev_dbg(priv->ndev, + "Controller changed into %s Mode (%u).\n", + mcp251xfd_get_mode_str(mode), mode); + else + netdev_err(priv->ndev, + "Controller changed into %s Mode (%u).\n", + mcp251xfd_get_mode_str(mode), mode); + + /* After the application requests Normal mode, the controller + * will automatically attempt to retransmit the message that + * caused the TX MAB underflow. + * + * However, if there is an ECC error in the TX-RAM, we first + * have to reload the tx-object before requesting Normal + * mode. This is done later in mcp251xfd_handle_eccif(). 
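+	 * In that case only the set_normal_mode flag is set below; the
+	 * mode switch itself is deferred to the ECCIF handler.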
+ */ + if (priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF) { + *set_normal_mode = true; + return 0; + } + + return mcp251xfd_chip_set_normal_mode_nowait(priv); +} + +static int mcp251xfd_handle_serrif(struct mcp251xfd_priv *priv) +{ + struct mcp251xfd_ecc *ecc = &priv->ecc; + struct net_device_stats *stats = &priv->ndev->stats; + bool handled = false; + + /* TX MAB underflow + * + * According to MCP2517FD Errata DS80000792C 1. a TX MAB + * underflow is indicated by SERRIF and MODIF. + * + * In addition to the effects mentioned in the Errata, there + * are Bus Errors due to the aborted CAN frame, so a IVMIF + * will be seen as well. + * + * Sometimes there is an ECC error in the TX-RAM, which leads + * to a TX MAB underflow. + * + * However, probably due to a race condition, there is no + * associated MODIF pending. + * + * Further, there are situations, where the SERRIF is caused + * by an ECC error in the TX-RAM, but not even the ECCIF is + * set. This only seems to happen _after_ the first occurrence + * of a ECCIF (which is tracked in ecc->cnt). + * + * Treat all as a known system errors.. + */ + if ((priv->regs_status.intf & MCP251XFD_REG_INT_MODIF && + priv->regs_status.intf & MCP251XFD_REG_INT_IVMIF) || + priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF || + ecc->cnt) { + const char *msg; + + if (priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF || + ecc->cnt) + msg = "TX MAB underflow due to ECC error detected."; + else + msg = "TX MAB underflow detected."; + + if (priv->devtype_data.quirks & MCP251XFD_QUIRK_MAB_NO_WARN) + netdev_dbg(priv->ndev, "%s\n", msg); + else + netdev_info(priv->ndev, "%s\n", msg); + + stats->tx_aborted_errors++; + stats->tx_errors++; + handled = true; + } + + /* RX MAB overflow + * + * According to MCP2517FD Errata DS80000792C 1. a RX MAB + * overflow is indicated by SERRIF. + * + * In addition to the effects mentioned in the Errata, (most + * of the times) a RXOVIF is raised, if the FIFO that is being + * received into has the RXOVIE activated (and we have enabled + * RXOVIE on all FIFOs). + * + * Sometimes there is no RXOVIF just a RXIF is pending. + * + * Treat all as a known system errors.. 
+ */ + if (priv->regs_status.intf & MCP251XFD_REG_INT_RXOVIF || + priv->regs_status.intf & MCP251XFD_REG_INT_RXIF) { + stats->rx_dropped++; + handled = true; + } + + if (!handled) + netdev_err(priv->ndev, + "Unhandled System Error Interrupt (intf=0x%08x)!\n", + priv->regs_status.intf); + + return 0; +} + +static int +mcp251xfd_handle_eccif_recover(struct mcp251xfd_priv *priv, u8 nr) +{ + struct mcp251xfd_tx_ring *tx_ring = priv->tx; + struct mcp251xfd_ecc *ecc = &priv->ecc; + struct mcp251xfd_tx_obj *tx_obj; + u8 chip_tx_tail, tx_tail, offset; + u16 addr; + int err; + + addr = FIELD_GET(MCP251XFD_REG_ECCSTAT_ERRADDR_MASK, ecc->ecc_stat); + + err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail); + if (err) + return err; + + tx_tail = mcp251xfd_get_tx_tail(tx_ring); + offset = (nr - chip_tx_tail) & (tx_ring->obj_num - 1); + + /* Bail out if one of the following is met: + * - tx_tail information is inconsistent + * - for mcp2517fd: offset not 0 + * - for mcp2518fd: offset not 0 or 1 + */ + if (chip_tx_tail != tx_tail || + !(offset == 0 || (offset == 1 && (mcp251xfd_is_2518FD(priv) || + mcp251xfd_is_251863(priv))))) { + netdev_err(priv->ndev, + "ECC Error information inconsistent (addr=0x%04x, nr=%d, tx_tail=0x%08x(%d), chip_tx_tail=%d, offset=%d).\n", + addr, nr, tx_ring->tail, tx_tail, chip_tx_tail, + offset); + return -EINVAL; + } + + netdev_info(priv->ndev, + "Recovering %s ECC Error at address 0x%04x (in TX-RAM, tx_obj=%d, tx_tail=0x%08x(%d), offset=%d).\n", + ecc->ecc_stat & MCP251XFD_REG_ECCSTAT_SECIF ? + "Single" : "Double", + addr, nr, tx_ring->tail, tx_tail, offset); + + /* reload tx_obj into controller RAM ... */ + tx_obj = &tx_ring->obj[nr]; + err = spi_sync_transfer(priv->spi, tx_obj->xfer, 1); + if (err) + return err; + + /* ... and trigger retransmit */ + return mcp251xfd_chip_set_normal_mode(priv); +} + +static int +mcp251xfd_handle_eccif(struct mcp251xfd_priv *priv, bool set_normal_mode) +{ + struct mcp251xfd_ecc *ecc = &priv->ecc; + const char *msg; + bool in_tx_ram; + u32 ecc_stat; + u16 addr; + u8 nr; + int err; + + err = regmap_read(priv->map_reg, MCP251XFD_REG_ECCSTAT, &ecc_stat); + if (err) + return err; + + err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCSTAT, + MCP251XFD_REG_ECCSTAT_IF_MASK, ~ecc_stat); + if (err) + return err; + + /* Check if ECC error occurred in TX-RAM */ + addr = FIELD_GET(MCP251XFD_REG_ECCSTAT_ERRADDR_MASK, ecc_stat); + err = mcp251xfd_get_tx_nr_by_addr(priv->tx, &nr, addr); + if (!err) + in_tx_ram = true; + else if (err == -ENOENT) + in_tx_ram = false; + else + return err; + + /* Errata Reference: + * mcp2517fd: DS80000789C 3., mcp2518fd: DS80000792E 2., + * mcp251863: DS80000984A 2. + * + * ECC single error correction does not work in all cases: + * + * Fix/Work Around: + * Enable single error correction and double error detection + * interrupts by setting SECIE and DEDIE. Handle SECIF as a + * detection interrupt and do not rely on the error + * correction. Instead, handle both interrupts as a + * notification that the RAM word at ERRADDR was corrupted. + */ + if (ecc_stat & MCP251XFD_REG_ECCSTAT_SECIF) + msg = "Single ECC Error detected at address"; + else if (ecc_stat & MCP251XFD_REG_ECCSTAT_DEDIF) + msg = "Double ECC Error detected at address"; + else + return -EINVAL; + + if (!in_tx_ram) { + ecc->ecc_stat = 0; + + netdev_notice(priv->ndev, "%s 0x%04x.\n", msg, addr); + } else { + /* Re-occurring error? 
*/ + if (ecc->ecc_stat == ecc_stat) { + ecc->cnt++; + } else { + ecc->ecc_stat = ecc_stat; + ecc->cnt = 1; + } + + netdev_info(priv->ndev, + "%s 0x%04x (in TX-RAM, tx_obj=%d), occurred %d time%s.\n", + msg, addr, nr, ecc->cnt, ecc->cnt > 1 ? "s" : ""); + + if (ecc->cnt >= MCP251XFD_ECC_CNT_MAX) + return mcp251xfd_handle_eccif_recover(priv, nr); + } + + if (set_normal_mode) + return mcp251xfd_chip_set_normal_mode_nowait(priv); + + return 0; +} + +static int mcp251xfd_handle_spicrcif(struct mcp251xfd_priv *priv) +{ + int err; + u32 crc; + + err = regmap_read(priv->map_reg, MCP251XFD_REG_CRC, &crc); + if (err) + return err; + + err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_CRC, + MCP251XFD_REG_CRC_IF_MASK, + ~crc); + if (err) + return err; + + if (crc & MCP251XFD_REG_CRC_FERRIF) + netdev_notice(priv->ndev, "CRC write command format error.\n"); + else if (crc & MCP251XFD_REG_CRC_CRCERRIF) + netdev_notice(priv->ndev, + "CRC write error detected. CRC=0x%04lx.\n", + FIELD_GET(MCP251XFD_REG_CRC_MASK, crc)); + + return 0; +} + +static int mcp251xfd_read_regs_status(struct mcp251xfd_priv *priv) +{ + const int val_bytes = regmap_get_val_bytes(priv->map_reg); + size_t len; + + if (priv->rx_ring_num == 1) + len = sizeof(priv->regs_status.intf); + else + len = sizeof(priv->regs_status); + + return regmap_bulk_read(priv->map_reg, MCP251XFD_REG_INT, + &priv->regs_status, len / val_bytes); +} + +#define mcp251xfd_handle(priv, irq, ...) \ +({ \ + struct mcp251xfd_priv *_priv = (priv); \ + int err; \ +\ + err = mcp251xfd_handle_##irq(_priv, ## __VA_ARGS__); \ + if (err) \ + netdev_err(_priv->ndev, \ + "IRQ handler mcp251xfd_handle_%s() returned %d.\n", \ + __stringify(irq), err); \ + err; \ +}) + +static irqreturn_t mcp251xfd_irq(int irq, void *dev_id) +{ + struct mcp251xfd_priv *priv = dev_id; + irqreturn_t handled = IRQ_NONE; + int err; + + if (priv->rx_int) + do { + int rx_pending; + + rx_pending = gpiod_get_value_cansleep(priv->rx_int); + if (!rx_pending) + break; + + /* Assume 1st RX-FIFO pending, if other FIFOs + * are pending the main IRQ handler will take + * care. + */ + priv->regs_status.rxif = BIT(priv->rx[0]->fifo_nr); + err = mcp251xfd_handle(priv, rxif); + if (err) + goto out_fail; + + handled = IRQ_HANDLED; + + /* We don't know which RX-FIFO is pending, but only + * handle the 1st RX-FIFO. Leave loop here if we have + * more than 1 RX-FIFO to avoid starvation. + */ + } while (priv->rx_ring_num == 1); + + do { + u32 intf_pending, intf_pending_clearable; + bool set_normal_mode = false; + + err = mcp251xfd_read_regs_status(priv); + if (err) + goto out_fail; + + intf_pending = FIELD_GET(MCP251XFD_REG_INT_IF_MASK, + priv->regs_status.intf) & + FIELD_GET(MCP251XFD_REG_INT_IE_MASK, + priv->regs_status.intf); + + if (!(intf_pending)) { + can_rx_offload_threaded_irq_finish(&priv->offload); + return handled; + } + + /* Some interrupts must be ACKed in the + * MCP251XFD_REG_INT register. + * - First ACK then handle, to avoid lost-IRQ race + * condition on fast re-occurring interrupts. + * - Write "0" to clear active IRQs, "1" to all other, + * to avoid r/m/w race condition on the + * MCP251XFD_REG_INT register. 
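+		 *   Writing "~intf_pending_clearable" restricted to the
+		 *   IF mask below implements exactly that.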
+ */ + intf_pending_clearable = intf_pending & + MCP251XFD_REG_INT_IF_CLEARABLE_MASK; + if (intf_pending_clearable) { + err = regmap_update_bits(priv->map_reg, + MCP251XFD_REG_INT, + MCP251XFD_REG_INT_IF_MASK, + ~intf_pending_clearable); + if (err) + goto out_fail; + } + + if (intf_pending & MCP251XFD_REG_INT_MODIF) { + err = mcp251xfd_handle(priv, modif, &set_normal_mode); + if (err) + goto out_fail; + } + + if (intf_pending & MCP251XFD_REG_INT_RXIF) { + err = mcp251xfd_handle(priv, rxif); + if (err) + goto out_fail; + } + + if (intf_pending & MCP251XFD_REG_INT_TEFIF) { + err = mcp251xfd_handle(priv, tefif); + if (err) + goto out_fail; + } + + if (intf_pending & MCP251XFD_REG_INT_RXOVIF) { + err = mcp251xfd_handle(priv, rxovif); + if (err) + goto out_fail; + } + + if (intf_pending & MCP251XFD_REG_INT_TXATIF) { + err = mcp251xfd_handle(priv, txatif); + if (err) + goto out_fail; + } + + if (intf_pending & MCP251XFD_REG_INT_IVMIF) { + err = mcp251xfd_handle(priv, ivmif); + if (err) + goto out_fail; + } + + if (intf_pending & MCP251XFD_REG_INT_SERRIF) { + err = mcp251xfd_handle(priv, serrif); + if (err) + goto out_fail; + } + + if (intf_pending & MCP251XFD_REG_INT_ECCIF) { + err = mcp251xfd_handle(priv, eccif, set_normal_mode); + if (err) + goto out_fail; + } + + if (intf_pending & MCP251XFD_REG_INT_SPICRCIF) { + err = mcp251xfd_handle(priv, spicrcif); + if (err) + goto out_fail; + } + + /* On the MCP2527FD and MCP2518FD, we don't get a + * CERRIF IRQ on the transition TX ERROR_WARNING -> TX + * ERROR_ACTIVE. + */ + if (intf_pending & MCP251XFD_REG_INT_CERRIF || + priv->can.state > CAN_STATE_ERROR_ACTIVE) { + err = mcp251xfd_handle(priv, cerrif); + if (err) + goto out_fail; + + /* In Bus Off we completely shut down the + * controller. Every subsequent register read + * will read bogus data, and if + * MCP251XFD_QUIRK_CRC_REG is enabled the CRC + * check will fail, too. So leave IRQ handler + * directly. 
+ */ + if (priv->can.state == CAN_STATE_BUS_OFF) { + can_rx_offload_threaded_irq_finish(&priv->offload); + return IRQ_HANDLED; + } + } + + handled = IRQ_HANDLED; + } while (1); + +out_fail: + can_rx_offload_threaded_irq_finish(&priv->offload); + + netdev_err(priv->ndev, "IRQ handler returned %d (intf=0x%08x).\n", + err, priv->regs_status.intf); + mcp251xfd_dump(priv); + mcp251xfd_chip_interrupts_disable(priv); + mcp251xfd_timestamp_stop(priv); + + return handled; +} + +static int mcp251xfd_open(struct net_device *ndev) +{ + struct mcp251xfd_priv *priv = netdev_priv(ndev); + const struct spi_device *spi = priv->spi; + int err; + + err = open_candev(ndev); + if (err) + return err; + + err = pm_runtime_resume_and_get(ndev->dev.parent); + if (err) { + if (err == -ETIMEDOUT || err == -ENODEV) + pm_runtime_set_suspended(ndev->dev.parent); + goto out_close_candev; + } + + err = mcp251xfd_ring_alloc(priv); + if (err) + goto out_pm_runtime_put; + + err = mcp251xfd_transceiver_enable(priv); + if (err) + goto out_mcp251xfd_ring_free; + + mcp251xfd_timestamp_init(priv); + + err = mcp251xfd_chip_start(priv); + if (err) + goto out_transceiver_disable; + + clear_bit(MCP251XFD_FLAGS_DOWN, priv->flags); + can_rx_offload_enable(&priv->offload); + + priv->wq = alloc_ordered_workqueue("%s-mcp251xfd_wq", + WQ_FREEZABLE | WQ_MEM_RECLAIM, + dev_name(&spi->dev)); + if (!priv->wq) { + err = -ENOMEM; + goto out_can_rx_offload_disable; + } + INIT_WORK(&priv->tx_work, mcp251xfd_tx_obj_write_sync); + + err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq, + IRQF_SHARED | IRQF_ONESHOT, + dev_name(&spi->dev), priv); + if (err) + goto out_destroy_workqueue; + + err = mcp251xfd_chip_interrupts_enable(priv); + if (err) + goto out_free_irq; + + netif_start_queue(ndev); + + return 0; + +out_free_irq: + free_irq(spi->irq, priv); +out_destroy_workqueue: + destroy_workqueue(priv->wq); +out_can_rx_offload_disable: + can_rx_offload_disable(&priv->offload); + set_bit(MCP251XFD_FLAGS_DOWN, priv->flags); +out_transceiver_disable: + mcp251xfd_transceiver_disable(priv); +out_mcp251xfd_ring_free: + mcp251xfd_ring_free(priv); +out_pm_runtime_put: + mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED); + pm_runtime_put(ndev->dev.parent); +out_close_candev: + close_candev(ndev); + + return err; +} + +static int mcp251xfd_stop(struct net_device *ndev) +{ + struct mcp251xfd_priv *priv = netdev_priv(ndev); + + netif_stop_queue(ndev); + set_bit(MCP251XFD_FLAGS_DOWN, priv->flags); + hrtimer_cancel(&priv->rx_irq_timer); + hrtimer_cancel(&priv->tx_irq_timer); + mcp251xfd_chip_interrupts_disable(priv); + free_irq(ndev->irq, priv); + destroy_workqueue(priv->wq); + can_rx_offload_disable(&priv->offload); + mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED); + mcp251xfd_transceiver_disable(priv); + mcp251xfd_ring_free(priv); + close_candev(ndev); + + pm_runtime_put(ndev->dev.parent); + + return 0; +} + +static const struct net_device_ops mcp251xfd_netdev_ops = { + .ndo_open = mcp251xfd_open, + .ndo_stop = mcp251xfd_stop, + .ndo_start_xmit = mcp251xfd_start_xmit, + .ndo_hwtstamp_get = can_hwtstamp_get, + .ndo_hwtstamp_set = can_hwtstamp_set, +}; + +static void +mcp251xfd_register_quirks(struct mcp251xfd_priv *priv) +{ + const struct spi_device *spi = priv->spi; + const struct spi_controller *ctlr = spi->controller; + + if (ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) + priv->devtype_data.quirks |= MCP251XFD_QUIRK_HALF_DUPLEX; +} + +static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv) +{ + const struct net_device *ndev = priv->ndev; + const 
struct mcp251xfd_devtype_data *devtype_data; + u32 osc; + int err; + + /* The OSC_LPMEN is only supported on MCP2518FD and MCP251863, + * so use it to autodetect the model. + */ + err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_OSC, + MCP251XFD_REG_OSC_LPMEN, + MCP251XFD_REG_OSC_LPMEN); + if (err) + return err; + + err = regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc); + if (err) + return err; + + if (osc & MCP251XFD_REG_OSC_LPMEN) { + /* We cannot distinguish between MCP2518FD and + * MCP251863. If firmware specifies MCP251863, keep + * it, otherwise set to MCP2518FD. + */ + if (mcp251xfd_is_251863(priv)) + devtype_data = &mcp251xfd_devtype_data_mcp251863; + else + devtype_data = &mcp251xfd_devtype_data_mcp2518fd; + } else { + devtype_data = &mcp251xfd_devtype_data_mcp2517fd; + } + + if (!mcp251xfd_is_251XFD(priv) && + priv->devtype_data.model != devtype_data->model) { + netdev_info(ndev, + "Detected %s, but firmware specifies a %s. Fixing up.\n", + __mcp251xfd_get_model_str(devtype_data->model), + mcp251xfd_get_model_str(priv)); + } + priv->devtype_data = *devtype_data; + + /* We need to preserve the Half Duplex Quirk. */ + mcp251xfd_register_quirks(priv); + + /* Re-init regmap with quirks of detected model. */ + return mcp251xfd_regmap_init(priv); +} + +static int mcp251xfd_register_check_rx_int(struct mcp251xfd_priv *priv) +{ + int err, rx_pending; + + if (!priv->rx_int) + return 0; + + err = mcp251xfd_chip_rx_int_enable(priv); + if (err) + return err; + + /* Check if RX_INT is properly working. The RX_INT should not + * be active after a softreset. + */ + rx_pending = gpiod_get_value_cansleep(priv->rx_int); + + err = mcp251xfd_chip_rx_int_disable(priv); + if (err) + return err; + + if (!rx_pending) + return 0; + + netdev_info(priv->ndev, + "RX_INT active after softreset, disabling RX_INT support.\n"); + devm_gpiod_put(&priv->spi->dev, priv->rx_int); + priv->rx_int = NULL; + + return 0; +} + +static const char * const mcp251xfd_gpio_names[] = { "GPIO0", "GPIO1" }; + +static int mcp251xfd_gpio_request(struct gpio_chip *chip, unsigned int offset) +{ + struct mcp251xfd_priv *priv = gpiochip_get_data(chip); + u32 pin_mask = MCP251XFD_REG_IOCON_PM(offset); + int ret; + + if (priv->rx_int && offset == 1) { + netdev_err(priv->ndev, "Can't use GPIO 1 with RX-INT!\n"); + return -EINVAL; + } + + ret = pm_runtime_resume_and_get(priv->ndev->dev.parent); + if (ret) + return ret; + + return regmap_update_bits(priv->map_reg, MCP251XFD_REG_IOCON, pin_mask, pin_mask); +} + +static void mcp251xfd_gpio_free(struct gpio_chip *chip, unsigned int offset) +{ + struct mcp251xfd_priv *priv = gpiochip_get_data(chip); + + pm_runtime_put(priv->ndev->dev.parent); +} + +static int mcp251xfd_gpio_get_direction(struct gpio_chip *chip, + unsigned int offset) +{ + struct mcp251xfd_priv *priv = gpiochip_get_data(chip); + u32 mask = MCP251XFD_REG_IOCON_TRIS(offset); + u32 val; + int ret; + + ret = regmap_read(priv->map_reg, MCP251XFD_REG_IOCON, &val); + if (ret) + return ret; + + if (mask & val) + return GPIO_LINE_DIRECTION_IN; + + return GPIO_LINE_DIRECTION_OUT; +} + +static int mcp251xfd_gpio_get(struct gpio_chip *chip, unsigned int offset) +{ + struct mcp251xfd_priv *priv = gpiochip_get_data(chip); + u32 mask = MCP251XFD_REG_IOCON_GPIO(offset); + u32 val; + int ret; + + ret = regmap_read(priv->map_reg, MCP251XFD_REG_IOCON, &val); + if (ret) + return ret; + + return !!(mask & val); +} + +static int mcp251xfd_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask, + unsigned long *bit) +{ + struct 
mcp251xfd_priv *priv = gpiochip_get_data(chip); + u32 val; + int ret; + + ret = regmap_read(priv->map_reg, MCP251XFD_REG_IOCON, &val); + if (ret) + return ret; + + *bit = FIELD_GET(MCP251XFD_REG_IOCON_GPIO_MASK, val) & *mask; + + return 0; +} + +static int mcp251xfd_gpio_direction_output(struct gpio_chip *chip, + unsigned int offset, int value) +{ + struct mcp251xfd_priv *priv = gpiochip_get_data(chip); + u32 dir_mask = MCP251XFD_REG_IOCON_TRIS(offset); + u32 val_mask = MCP251XFD_REG_IOCON_LAT(offset); + u32 val; + + if (value) + val = val_mask; + else + val = 0; + + return regmap_update_bits(priv->map_reg, MCP251XFD_REG_IOCON, + dir_mask | val_mask, val); +} + +static int mcp251xfd_gpio_direction_input(struct gpio_chip *chip, + unsigned int offset) +{ + struct mcp251xfd_priv *priv = gpiochip_get_data(chip); + u32 dir_mask = MCP251XFD_REG_IOCON_TRIS(offset); + + return regmap_update_bits(priv->map_reg, MCP251XFD_REG_IOCON, dir_mask, dir_mask); +} + +static int mcp251xfd_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) +{ + struct mcp251xfd_priv *priv = gpiochip_get_data(chip); + u32 val_mask = MCP251XFD_REG_IOCON_LAT(offset); + u32 val; + + if (value) + val = val_mask; + else + val = 0; + + return regmap_update_bits(priv->map_reg, MCP251XFD_REG_IOCON, val_mask, val); +} + +static int mcp251xfd_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask, + unsigned long *bits) +{ + struct mcp251xfd_priv *priv = gpiochip_get_data(chip); + u32 val; + + val = FIELD_PREP(MCP251XFD_REG_IOCON_LAT_MASK, *bits); + + return regmap_update_bits(priv->map_reg, MCP251XFD_REG_IOCON, + MCP251XFD_REG_IOCON_LAT_MASK, val); +} + +static int mcp251fdx_gpio_setup(struct mcp251xfd_priv *priv) +{ + struct gpio_chip *gc = &priv->gc; + + if (!device_property_present(&priv->spi->dev, "gpio-controller")) + return 0; + + gc->label = dev_name(&priv->spi->dev); + gc->parent = &priv->spi->dev; + gc->owner = THIS_MODULE; + gc->request = mcp251xfd_gpio_request; + gc->free = mcp251xfd_gpio_free; + gc->get_direction = mcp251xfd_gpio_get_direction; + gc->direction_output = mcp251xfd_gpio_direction_output; + gc->direction_input = mcp251xfd_gpio_direction_input; + gc->get = mcp251xfd_gpio_get; + gc->get_multiple = mcp251xfd_gpio_get_multiple; + gc->set = mcp251xfd_gpio_set; + gc->set_multiple = mcp251xfd_gpio_set_multiple; + gc->base = -1; + gc->can_sleep = true; + gc->ngpio = ARRAY_SIZE(mcp251xfd_gpio_names); + gc->names = mcp251xfd_gpio_names; + + return devm_gpiochip_add_data(&priv->spi->dev, gc, priv); +} + +static int +mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv, u32 *dev_id, + u32 *effective_speed_hz_slow, + u32 *effective_speed_hz_fast) +{ + struct mcp251xfd_map_buf_nocrc *buf_rx; + struct mcp251xfd_map_buf_nocrc *buf_tx; + struct spi_transfer xfer[2] = { }; + int err; + + buf_rx = kzalloc(sizeof(*buf_rx), GFP_KERNEL); + if (!buf_rx) + return -ENOMEM; + + buf_tx = kzalloc(sizeof(*buf_tx), GFP_KERNEL); + if (!buf_tx) { + err = -ENOMEM; + goto out_kfree_buf_rx; + } + + xfer[0].tx_buf = buf_tx; + xfer[0].len = sizeof(buf_tx->cmd); + xfer[0].speed_hz = priv->spi_max_speed_hz_slow; + xfer[1].rx_buf = buf_rx->data; + xfer[1].len = sizeof(*dev_id); + xfer[1].speed_hz = priv->spi_max_speed_hz_fast; + + mcp251xfd_spi_cmd_read_nocrc(&buf_tx->cmd, MCP251XFD_REG_DEVID); + + err = spi_sync_transfer(priv->spi, xfer, ARRAY_SIZE(xfer)); + if (err) + goto out_kfree_buf_tx; + + *dev_id = get_unaligned_le32(buf_rx->data); + *effective_speed_hz_slow = xfer[0].effective_speed_hz; + 
*effective_speed_hz_fast = xfer[1].effective_speed_hz; + +out_kfree_buf_tx: + kfree(buf_tx); +out_kfree_buf_rx: + kfree(buf_rx); + + return err; +} + +#define MCP251XFD_QUIRK_ACTIVE(quirk) \ + (priv->devtype_data.quirks & MCP251XFD_QUIRK_##quirk ? '+' : '-') + +static int +mcp251xfd_register_done(const struct mcp251xfd_priv *priv) +{ + u32 dev_id, effective_speed_hz_slow, effective_speed_hz_fast; + unsigned long clk_rate; + int err; + + err = mcp251xfd_register_get_dev_id(priv, &dev_id, + &effective_speed_hz_slow, + &effective_speed_hz_fast); + if (err) + return err; + + clk_rate = clk_get_rate(priv->clk); + + netdev_info(priv->ndev, + "%s rev%lu.%lu (%cRX_INT %cPLL %cMAB_NO_WARN %cCRC_REG %cCRC_RX %cCRC_TX %cECC %cHD o:%lu.%02luMHz c:%u.%02uMHz m:%u.%02uMHz rs:%u.%02uMHz es:%u.%02uMHz rf:%u.%02uMHz ef:%u.%02uMHz) successfully initialized.\n", + mcp251xfd_get_model_str(priv), + FIELD_GET(MCP251XFD_REG_DEVID_ID_MASK, dev_id), + FIELD_GET(MCP251XFD_REG_DEVID_REV_MASK, dev_id), + priv->rx_int ? '+' : '-', + priv->pll_enable ? '+' : '-', + MCP251XFD_QUIRK_ACTIVE(MAB_NO_WARN), + MCP251XFD_QUIRK_ACTIVE(CRC_REG), + MCP251XFD_QUIRK_ACTIVE(CRC_RX), + MCP251XFD_QUIRK_ACTIVE(CRC_TX), + MCP251XFD_QUIRK_ACTIVE(ECC), + MCP251XFD_QUIRK_ACTIVE(HALF_DUPLEX), + clk_rate / 1000000, + clk_rate % 1000000 / 1000 / 10, + priv->can.clock.freq / 1000000, + priv->can.clock.freq % 1000000 / 1000 / 10, + priv->spi_max_speed_hz_orig / 1000000, + priv->spi_max_speed_hz_orig % 1000000 / 1000 / 10, + priv->spi_max_speed_hz_slow / 1000000, + priv->spi_max_speed_hz_slow % 1000000 / 1000 / 10, + effective_speed_hz_slow / 1000000, + effective_speed_hz_slow % 1000000 / 1000 / 10, + priv->spi_max_speed_hz_fast / 1000000, + priv->spi_max_speed_hz_fast % 1000000 / 1000 / 10, + effective_speed_hz_fast / 1000000, + effective_speed_hz_fast % 1000000 / 1000 / 10); + + return 0; +} + +static int mcp251xfd_register(struct mcp251xfd_priv *priv) +{ + struct net_device *ndev = priv->ndev; + int err; + + mcp251xfd_register_quirks(priv); + + err = mcp251xfd_clks_and_vdd_enable(priv); + if (err) + return err; + + err = mcp251xfd_chip_softreset(priv); + if (err == -ENODEV) + goto out_clks_and_vdd_disable; + if (err) + goto out_chip_sleep; + + err = mcp251xfd_chip_clock_init(priv); + if (err == -ENODEV) + goto out_clks_and_vdd_disable; + if (err) + goto out_chip_sleep; + + pm_runtime_get_noresume(ndev->dev.parent); + err = pm_runtime_set_active(ndev->dev.parent); + if (err) + goto out_runtime_put_noidle; + pm_runtime_enable(ndev->dev.parent); + + err = mcp251xfd_register_chip_detect(priv); + if (err) + goto out_runtime_disable; + + err = mcp251xfd_register_check_rx_int(priv); + if (err) + goto out_runtime_disable; + + mcp251xfd_ethtool_init(priv); + + err = mcp251fdx_gpio_setup(priv); + if (err) { + dev_err_probe(&priv->spi->dev, err, "Failed to register gpio-controller.\n"); + goto out_runtime_disable; + } + + err = register_candev(ndev); + if (err) + goto out_runtime_disable; + + err = mcp251xfd_register_done(priv); + if (err) + goto out_unregister_candev; + + /* Put controller into Config mode and let pm_runtime_put() + * put in sleep mode, disable the clocks and vdd. If CONFIG_PM + * is not enabled, the clocks and vdd will stay powered. 
+ */ + err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_CONFIG); + if (err) + goto out_unregister_candev; + + pm_runtime_put(ndev->dev.parent); + + return 0; + +out_unregister_candev: + unregister_candev(ndev); +out_runtime_disable: + pm_runtime_disable(ndev->dev.parent); +out_runtime_put_noidle: + pm_runtime_put_noidle(ndev->dev.parent); +out_chip_sleep: + mcp251xfd_chip_sleep(priv); +out_clks_and_vdd_disable: + mcp251xfd_clks_and_vdd_disable(priv); + + return err; +} + +static inline void mcp251xfd_unregister(struct mcp251xfd_priv *priv) +{ + struct net_device *ndev = priv->ndev; + + unregister_candev(ndev); + + if (pm_runtime_enabled(ndev->dev.parent)) { + pm_runtime_disable(ndev->dev.parent); + } else { + mcp251xfd_chip_sleep(priv); + mcp251xfd_clks_and_vdd_disable(priv); + } +} + +static const struct of_device_id mcp251xfd_of_match[] = { + { + .compatible = "microchip,mcp2517fd", + .data = &mcp251xfd_devtype_data_mcp2517fd, + }, { + .compatible = "microchip,mcp2518fd", + .data = &mcp251xfd_devtype_data_mcp2518fd, + }, { + .compatible = "microchip,mcp251863", + .data = &mcp251xfd_devtype_data_mcp251863, + }, { + .compatible = "microchip,mcp251xfd", + .data = &mcp251xfd_devtype_data_mcp251xfd, + }, { + /* sentinel */ + }, +}; +MODULE_DEVICE_TABLE(of, mcp251xfd_of_match); + +static const struct spi_device_id mcp251xfd_id_table[] = { + { + .name = "mcp2517fd", + .driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2517fd, + }, { + .name = "mcp2518fd", + .driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2518fd, + }, { + .name = "mcp251863", + .driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp251863, + }, { + .name = "mcp251xfd", + .driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp251xfd, + }, { + /* sentinel */ + }, +}; +MODULE_DEVICE_TABLE(spi, mcp251xfd_id_table); + +static int mcp251xfd_probe(struct spi_device *spi) +{ + struct net_device *ndev; + struct mcp251xfd_priv *priv; + struct gpio_desc *rx_int; + struct regulator *reg_vdd, *reg_xceiver; + struct clk *clk; + bool pll_enable = false; + u32 freq = 0; + int err; + + if (!spi->irq) + return dev_err_probe(&spi->dev, -ENXIO, + "No IRQ specified (maybe node \"interrupts-extended\" in DT missing)!\n"); + + rx_int = devm_gpiod_get_optional(&spi->dev, "microchip,rx-int", + GPIOD_IN); + if (IS_ERR(rx_int)) + return dev_err_probe(&spi->dev, PTR_ERR(rx_int), + "Failed to get RX-INT!\n"); + + reg_vdd = devm_regulator_get_optional(&spi->dev, "vdd"); + if (PTR_ERR(reg_vdd) == -ENODEV) + reg_vdd = NULL; + else if (IS_ERR(reg_vdd)) + return dev_err_probe(&spi->dev, PTR_ERR(reg_vdd), + "Failed to get VDD regulator!\n"); + + reg_xceiver = devm_regulator_get_optional(&spi->dev, "xceiver"); + if (PTR_ERR(reg_xceiver) == -ENODEV) + reg_xceiver = NULL; + else if (IS_ERR(reg_xceiver)) + return dev_err_probe(&spi->dev, PTR_ERR(reg_xceiver), + "Failed to get Transceiver regulator!\n"); + + clk = devm_clk_get_optional(&spi->dev, NULL); + if (IS_ERR(clk)) + return dev_err_probe(&spi->dev, PTR_ERR(clk), + "Failed to get Oscillator (clock)!\n"); + if (clk) { + freq = clk_get_rate(clk); + } else { + err = device_property_read_u32(&spi->dev, "clock-frequency", + &freq); + if (err) + return dev_err_probe(&spi->dev, err, + "Failed to get clock-frequency!\n"); + } + + /* Sanity check */ + if (freq < MCP251XFD_SYSCLOCK_HZ_MIN || + freq > MCP251XFD_SYSCLOCK_HZ_MAX) { + dev_err(&spi->dev, + "Oscillator frequency (%u Hz) is too low or high.\n", + freq); + return -ERANGE; + } + + if (freq <= MCP251XFD_SYSCLOCK_HZ_MAX / 
MCP251XFD_OSC_PLL_MULTIPLIER) + pll_enable = true; + + ndev = alloc_candev(sizeof(struct mcp251xfd_priv), + MCP251XFD_TX_OBJ_NUM_MAX); + if (!ndev) + return -ENOMEM; + + SET_NETDEV_DEV(ndev, &spi->dev); + + ndev->netdev_ops = &mcp251xfd_netdev_ops; + ndev->irq = spi->irq; + ndev->flags |= IFF_ECHO; + + priv = netdev_priv(ndev); + spi_set_drvdata(spi, priv); + priv->can.clock.freq = freq; + if (pll_enable) + priv->can.clock.freq *= MCP251XFD_OSC_PLL_MULTIPLIER; + priv->can.do_set_mode = mcp251xfd_set_mode; + priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter; + priv->can.bittiming_const = &mcp251xfd_bittiming_const; + priv->can.fd.data_bittiming_const = &mcp251xfd_data_bittiming_const; + priv->can.fd.tdc_const = &mcp251xfd_tdc_const; + priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | + CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING | + CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO | + CAN_CTRLMODE_CC_LEN8_DLC | CAN_CTRLMODE_TDC_AUTO | + CAN_CTRLMODE_TDC_MANUAL; + set_bit(MCP251XFD_FLAGS_DOWN, priv->flags); + priv->ndev = ndev; + priv->spi = spi; + priv->rx_int = rx_int; + priv->clk = clk; + priv->pll_enable = pll_enable; + priv->reg_vdd = reg_vdd; + priv->reg_xceiver = reg_xceiver; + priv->devtype_data = *(struct mcp251xfd_devtype_data *)spi_get_device_match_data(spi); + + /* Errata Reference: + * mcp2517fd: DS80000792C 5., mcp2518fd: DS80000789E 4., + * mcp251863: DS80000984A 4. + * + * The SPI can write corrupted data to the RAM at fast SPI + * speeds: + * + * Simultaneous activity on the CAN bus while writing data to + * RAM via the SPI interface, with high SCK frequency, can + * lead to corrupted data being written to RAM. + * + * Fix/Work Around: + * Ensure that FSCK is less than or equal to 0.85 * + * (FSYSCLK/2). + * + * Known good combinations are: + * + * MCP ext-clk SoC SPI SPI-clk max-clk parent-clk config + * + * 2518 20 MHz allwinner,sun8i-h3 allwinner,sun8i-h3-spi 8333333 Hz 83.33% 600000000 Hz assigned-clocks = <&ccu CLK_SPIx> + * 2518 40 MHz allwinner,sun8i-h3 allwinner,sun8i-h3-spi 16666667 Hz 83.33% 600000000 Hz assigned-clocks = <&ccu CLK_SPIx> + * 2517 40 MHz atmel,sama5d27 atmel,at91rm9200-spi 16400000 Hz 82.00% 82000000 Hz default + * 2518 40 MHz atmel,sama5d27 atmel,at91rm9200-spi 16400000 Hz 82.00% 82000000 Hz default + * 2518 40 MHz fsl,imx6dl fsl,imx51-ecspi 15000000 Hz 75.00% 30000000 Hz default + * 2517 20 MHz fsl,imx8mm fsl,imx51-ecspi 8333333 Hz 83.33% 16666667 Hz assigned-clocks = <&clk IMX8MM_CLK_ECSPIx_ROOT> + * + */ + priv->spi_max_speed_hz_orig = spi->max_speed_hz; + priv->spi_max_speed_hz_slow = min(spi->max_speed_hz, + freq / 2 / 1000 * 850); + if (priv->pll_enable) + priv->spi_max_speed_hz_fast = min(spi->max_speed_hz, + freq * + MCP251XFD_OSC_PLL_MULTIPLIER / + 2 / 1000 * 850); + else + priv->spi_max_speed_hz_fast = priv->spi_max_speed_hz_slow; + spi->max_speed_hz = priv->spi_max_speed_hz_slow; + spi->bits_per_word = 8; + spi->rt = true; + err = spi_setup(spi); + if (err) + goto out_free_candev; + + err = mcp251xfd_regmap_init(priv); + if (err) + goto out_free_candev; + + err = can_rx_offload_add_manual(ndev, &priv->offload, + MCP251XFD_NAPI_WEIGHT); + if (err) + goto out_free_candev; + + err = mcp251xfd_register(priv); + if (err) { + dev_err_probe(&spi->dev, err, "Failed to detect %s.\n", + mcp251xfd_get_model_str(priv)); + goto out_can_rx_offload_del; + } + + return 0; + +out_can_rx_offload_del: + can_rx_offload_del(&priv->offload); +out_free_candev: + spi->max_speed_hz = priv->spi_max_speed_hz_orig; + + free_candev(ndev); + + return 
err; +} + +static void mcp251xfd_remove(struct spi_device *spi) +{ + struct mcp251xfd_priv *priv = spi_get_drvdata(spi); + struct net_device *ndev = priv->ndev; + + mcp251xfd_unregister(priv); + can_rx_offload_del(&priv->offload); + spi->max_speed_hz = priv->spi_max_speed_hz_orig; + free_candev(ndev); +} + +static int __maybe_unused mcp251xfd_runtime_suspend(struct device *device) +{ + struct mcp251xfd_priv *priv = dev_get_drvdata(device); + + mcp251xfd_chip_sleep(priv); + return mcp251xfd_clks_and_vdd_disable(priv); +} + +static int __maybe_unused mcp251xfd_runtime_resume(struct device *device) +{ + struct mcp251xfd_priv *priv = dev_get_drvdata(device); + int err; + + err = mcp251xfd_clks_and_vdd_enable(priv); + if (err) + return err; + + err = mcp251xfd_chip_softreset(priv); + if (err == -ENODEV) + goto out_clks_and_vdd_disable; + if (err) + goto out_chip_sleep; + + err = mcp251xfd_chip_clock_init(priv); + if (err == -ENODEV) + goto out_clks_and_vdd_disable; + if (err) + goto out_chip_sleep; + + return 0; + +out_chip_sleep: + mcp251xfd_chip_sleep(priv); +out_clks_and_vdd_disable: + mcp251xfd_clks_and_vdd_disable(priv); + + return err; +} + +static const struct dev_pm_ops mcp251xfd_pm_ops = { + SET_RUNTIME_PM_OPS(mcp251xfd_runtime_suspend, + mcp251xfd_runtime_resume, NULL) +}; + +static struct spi_driver mcp251xfd_driver = { + .driver = { + .name = DEVICE_NAME, + .pm = &mcp251xfd_pm_ops, + .of_match_table = mcp251xfd_of_match, + }, + .probe = mcp251xfd_probe, + .remove = mcp251xfd_remove, + .id_table = mcp251xfd_id_table, +}; +module_spi_driver(mcp251xfd_driver); + +MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>"); +MODULE_DESCRIPTION("Microchip MCP251xFD Family CAN controller driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-crc16.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-crc16.c new file mode 100644 index 000000000000..a02ca76ac239 --- /dev/null +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-crc16.c @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// mcp251xfd - Microchip MCP251xFD Family CAN controller driver +// +// Copyright (c) 2020 Pengutronix, +// Marc Kleine-Budde <kernel@pengutronix.de> +// +// Based on: +// +// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface +// +// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org> +// + +#include "mcp251xfd.h" + +/* The standard crc16 in linux/crc16.h is unfortunately not computing + * the correct results (left shift vs. right shift). 
So here an + * implementation with a table generated with the help of: + * + * http://lkml.iu.edu/hypermail/linux/kernel/0508.1/1085.html + */ +static const u16 mcp251xfd_crc16_table[] = { + 0x0000, 0x8005, 0x800f, 0x000a, 0x801b, 0x001e, 0x0014, 0x8011, + 0x8033, 0x0036, 0x003c, 0x8039, 0x0028, 0x802d, 0x8027, 0x0022, + 0x8063, 0x0066, 0x006c, 0x8069, 0x0078, 0x807d, 0x8077, 0x0072, + 0x0050, 0x8055, 0x805f, 0x005a, 0x804b, 0x004e, 0x0044, 0x8041, + 0x80c3, 0x00c6, 0x00cc, 0x80c9, 0x00d8, 0x80dd, 0x80d7, 0x00d2, + 0x00f0, 0x80f5, 0x80ff, 0x00fa, 0x80eb, 0x00ee, 0x00e4, 0x80e1, + 0x00a0, 0x80a5, 0x80af, 0x00aa, 0x80bb, 0x00be, 0x00b4, 0x80b1, + 0x8093, 0x0096, 0x009c, 0x8099, 0x0088, 0x808d, 0x8087, 0x0082, + 0x8183, 0x0186, 0x018c, 0x8189, 0x0198, 0x819d, 0x8197, 0x0192, + 0x01b0, 0x81b5, 0x81bf, 0x01ba, 0x81ab, 0x01ae, 0x01a4, 0x81a1, + 0x01e0, 0x81e5, 0x81ef, 0x01ea, 0x81fb, 0x01fe, 0x01f4, 0x81f1, + 0x81d3, 0x01d6, 0x01dc, 0x81d9, 0x01c8, 0x81cd, 0x81c7, 0x01c2, + 0x0140, 0x8145, 0x814f, 0x014a, 0x815b, 0x015e, 0x0154, 0x8151, + 0x8173, 0x0176, 0x017c, 0x8179, 0x0168, 0x816d, 0x8167, 0x0162, + 0x8123, 0x0126, 0x012c, 0x8129, 0x0138, 0x813d, 0x8137, 0x0132, + 0x0110, 0x8115, 0x811f, 0x011a, 0x810b, 0x010e, 0x0104, 0x8101, + 0x8303, 0x0306, 0x030c, 0x8309, 0x0318, 0x831d, 0x8317, 0x0312, + 0x0330, 0x8335, 0x833f, 0x033a, 0x832b, 0x032e, 0x0324, 0x8321, + 0x0360, 0x8365, 0x836f, 0x036a, 0x837b, 0x037e, 0x0374, 0x8371, + 0x8353, 0x0356, 0x035c, 0x8359, 0x0348, 0x834d, 0x8347, 0x0342, + 0x03c0, 0x83c5, 0x83cf, 0x03ca, 0x83db, 0x03de, 0x03d4, 0x83d1, + 0x83f3, 0x03f6, 0x03fc, 0x83f9, 0x03e8, 0x83ed, 0x83e7, 0x03e2, + 0x83a3, 0x03a6, 0x03ac, 0x83a9, 0x03b8, 0x83bd, 0x83b7, 0x03b2, + 0x0390, 0x8395, 0x839f, 0x039a, 0x838b, 0x038e, 0x0384, 0x8381, + 0x0280, 0x8285, 0x828f, 0x028a, 0x829b, 0x029e, 0x0294, 0x8291, + 0x82b3, 0x02b6, 0x02bc, 0x82b9, 0x02a8, 0x82ad, 0x82a7, 0x02a2, + 0x82e3, 0x02e6, 0x02ec, 0x82e9, 0x02f8, 0x82fd, 0x82f7, 0x02f2, + 0x02d0, 0x82d5, 0x82df, 0x02da, 0x82cb, 0x02ce, 0x02c4, 0x82c1, + 0x8243, 0x0246, 0x024c, 0x8249, 0x0258, 0x825d, 0x8257, 0x0252, + 0x0270, 0x8275, 0x827f, 0x027a, 0x826b, 0x026e, 0x0264, 0x8261, + 0x0220, 0x8225, 0x822f, 0x022a, 0x823b, 0x023e, 0x0234, 0x8231, + 0x8213, 0x0216, 0x021c, 0x8219, 0x0208, 0x820d, 0x8207, 0x0202 +}; + +static inline u16 mcp251xfd_crc16_byte(u16 crc, const u8 data) +{ + u8 index = (crc >> 8) ^ data; + + return (crc << 8) ^ mcp251xfd_crc16_table[index]; +} + +static u16 mcp251xfd_crc16(u16 crc, u8 const *buffer, size_t len) +{ + while (len--) + crc = mcp251xfd_crc16_byte(crc, *buffer++); + + return crc; +} + +u16 mcp251xfd_crc16_compute(const void *data, size_t data_size) +{ + u16 crc = 0xffff; + + return mcp251xfd_crc16(crc, data, data_size); +} + +u16 mcp251xfd_crc16_compute2(const void *cmd, size_t cmd_size, + const void *data, size_t data_size) +{ + u16 crc; + + crc = mcp251xfd_crc16_compute(cmd, cmd_size); + crc = mcp251xfd_crc16(crc, data, data_size); + + return crc; +} diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c new file mode 100644 index 000000000000..050321345304 --- /dev/null +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c @@ -0,0 +1,285 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// mcp251xfd - Microchip MCP251xFD Family CAN controller driver +// +// Copyright (c) 2020, 2021 Pengutronix, +// Marc Kleine-Budde <kernel@pengutronix.de> +// Copyright (C) 2015-2018 Etnaviv Project +// + +#include <linux/devcoredump.h> + +#include "mcp251xfd.h" 
+#include "mcp251xfd-dump.h" + +struct mcp251xfd_dump_iter { + void *start; + struct mcp251xfd_dump_object_header *hdr; + void *data; +}; + +struct mcp251xfd_dump_reg_space { + u16 base; + u16 size; +}; + +struct mcp251xfd_dump_ring { + enum mcp251xfd_dump_object_ring_key key; + u32 val; +}; + +static const struct mcp251xfd_dump_reg_space mcp251xfd_dump_reg_space[] = { + { + .base = MCP251XFD_REG_CON, + .size = MCP251XFD_REG_FLTOBJ(32) - MCP251XFD_REG_CON, + }, { + .base = MCP251XFD_RAM_START, + .size = MCP251XFD_RAM_SIZE, + }, { + .base = MCP251XFD_REG_OSC, + .size = MCP251XFD_REG_DEVID - MCP251XFD_REG_OSC, + }, +}; + +static void mcp251xfd_dump_header(struct mcp251xfd_dump_iter *iter, + enum mcp251xfd_dump_object_type object_type, + const void *data_end) +{ + struct mcp251xfd_dump_object_header *hdr = iter->hdr; + unsigned int len; + + len = data_end - iter->data; + if (!len) + return; + + hdr->magic = cpu_to_le32(MCP251XFD_DUMP_MAGIC); + hdr->type = cpu_to_le32(object_type); + hdr->offset = cpu_to_le32(iter->data - iter->start); + hdr->len = cpu_to_le32(len); + + iter->hdr++; + iter->data += len; +} + +static void mcp251xfd_dump_registers(const struct mcp251xfd_priv *priv, + struct mcp251xfd_dump_iter *iter) +{ + const int val_bytes = regmap_get_val_bytes(priv->map_rx); + struct mcp251xfd_dump_object_reg *reg = iter->data; + unsigned int i, j; + int err; + + for (i = 0; i < ARRAY_SIZE(mcp251xfd_dump_reg_space); i++) { + const struct mcp251xfd_dump_reg_space *reg_space; + void *buf; + + reg_space = &mcp251xfd_dump_reg_space[i]; + + buf = kmalloc(reg_space->size, GFP_KERNEL); + if (!buf) + goto out; + + err = regmap_bulk_read(priv->map_reg, reg_space->base, + buf, reg_space->size / val_bytes); + if (err) { + kfree(buf); + continue; + } + + for (j = 0; j < reg_space->size; j += sizeof(u32), reg++) { + reg->reg = cpu_to_le32(reg_space->base + j); + reg->val = cpu_to_le32p(buf + j); + } + + kfree(buf); + } + +out: + mcp251xfd_dump_header(iter, MCP251XFD_DUMP_OBJECT_TYPE_REG, reg); +} + +static void mcp251xfd_dump_ring(struct mcp251xfd_dump_iter *iter, + enum mcp251xfd_dump_object_type object_type, + const struct mcp251xfd_dump_ring *dump_ring, + unsigned int len) +{ + struct mcp251xfd_dump_object_reg *reg = iter->data; + unsigned int i; + + for (i = 0; i < len; i++, reg++) { + reg->reg = cpu_to_le32(dump_ring[i].key); + reg->val = cpu_to_le32(dump_ring[i].val); + } + + mcp251xfd_dump_header(iter, object_type, reg); +} + +static void mcp251xfd_dump_tef_ring(const struct mcp251xfd_priv *priv, + struct mcp251xfd_dump_iter *iter) +{ + const struct mcp251xfd_tef_ring *tef = priv->tef; + const struct mcp251xfd_tx_ring *tx = priv->tx; + const struct mcp251xfd_dump_ring dump_ring[] = { + { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_HEAD, + .val = tef->head, + }, { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_TAIL, + .val = tef->tail, + }, { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_BASE, + .val = 0, + }, { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_NR, + .val = 0, + }, { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_FIFO_NR, + .val = 0, + }, { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_NUM, + .val = tx->obj_num, + }, { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_SIZE, + .val = sizeof(struct mcp251xfd_hw_tef_obj), + }, + }; + + mcp251xfd_dump_ring(iter, MCP251XFD_DUMP_OBJECT_TYPE_TEF, + dump_ring, ARRAY_SIZE(dump_ring)); +} + +static void mcp251xfd_dump_rx_ring_one(const struct mcp251xfd_priv *priv, + struct mcp251xfd_dump_iter *iter, + const struct mcp251xfd_rx_ring *rx) +{ + const struct mcp251xfd_dump_ring 
dump_ring[] = { + { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_HEAD, + .val = rx->head, + }, { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_TAIL, + .val = rx->tail, + }, { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_BASE, + .val = rx->base, + }, { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_NR, + .val = rx->nr, + }, { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_FIFO_NR, + .val = rx->fifo_nr, + }, { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_NUM, + .val = rx->obj_num, + }, { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_SIZE, + .val = rx->obj_size, + }, + }; + + mcp251xfd_dump_ring(iter, MCP251XFD_DUMP_OBJECT_TYPE_RX, + dump_ring, ARRAY_SIZE(dump_ring)); +} + +static void mcp251xfd_dump_rx_ring(const struct mcp251xfd_priv *priv, + struct mcp251xfd_dump_iter *iter) +{ + struct mcp251xfd_rx_ring *rx_ring; + unsigned int i; + + mcp251xfd_for_each_rx_ring(priv, rx_ring, i) + mcp251xfd_dump_rx_ring_one(priv, iter, rx_ring); +} + +static void mcp251xfd_dump_tx_ring(const struct mcp251xfd_priv *priv, + struct mcp251xfd_dump_iter *iter) +{ + const struct mcp251xfd_tx_ring *tx = priv->tx; + const struct mcp251xfd_dump_ring dump_ring[] = { + { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_HEAD, + .val = tx->head, + }, { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_TAIL, + .val = tx->tail, + }, { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_BASE, + .val = tx->base, + }, { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_NR, + .val = tx->nr, + }, { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_FIFO_NR, + .val = tx->fifo_nr, + }, { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_NUM, + .val = tx->obj_num, + }, { + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_SIZE, + .val = tx->obj_size, + }, + }; + + mcp251xfd_dump_ring(iter, MCP251XFD_DUMP_OBJECT_TYPE_TX, + dump_ring, ARRAY_SIZE(dump_ring)); +} + +static void mcp251xfd_dump_end(const struct mcp251xfd_priv *priv, + struct mcp251xfd_dump_iter *iter) +{ + struct mcp251xfd_dump_object_header *hdr = iter->hdr; + + hdr->magic = cpu_to_le32(MCP251XFD_DUMP_MAGIC); + hdr->type = cpu_to_le32(MCP251XFD_DUMP_OBJECT_TYPE_END); + hdr->offset = cpu_to_le32(0); + hdr->len = cpu_to_le32(0); + + /* provoke NULL pointer access, if used after END object */ + iter->hdr = NULL; +} + +void mcp251xfd_dump(const struct mcp251xfd_priv *priv) +{ + struct mcp251xfd_dump_iter iter; + unsigned int rings_num, obj_num; + unsigned int file_size = 0; + unsigned int i; + + /* register space + end marker */ + obj_num = 2; + + /* register space */ + for (i = 0; i < ARRAY_SIZE(mcp251xfd_dump_reg_space); i++) + file_size += mcp251xfd_dump_reg_space[i].size / sizeof(u32) * + sizeof(struct mcp251xfd_dump_object_reg); + + /* TEF ring, RX rings, TX ring */ + rings_num = 1 + priv->rx_ring_num + 1; + obj_num += rings_num; + file_size += rings_num * __MCP251XFD_DUMP_OBJECT_RING_KEY_MAX * + sizeof(struct mcp251xfd_dump_object_reg); + + /* size of the headers */ + file_size += sizeof(*iter.hdr) * obj_num; + + /* allocate the file in vmalloc memory, it's likely to be big */ + iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | + __GFP_ZERO | __GFP_NORETRY); + if (!iter.start) { + netdev_warn(priv->ndev, "Failed to allocate devcoredump file.\n"); + return; + } + + /* point the data member after the headers */ + iter.hdr = iter.start; + iter.data = &iter.hdr[obj_num]; + + mcp251xfd_dump_registers(priv, &iter); + mcp251xfd_dump_tef_ring(priv, &iter); + mcp251xfd_dump_rx_ring(priv, &iter); + mcp251xfd_dump_tx_ring(priv, &iter); + mcp251xfd_dump_end(priv, &iter); + + dev_coredumpv(&priv->spi->dev, iter.start, + iter.data - iter.start, GFP_KERNEL); +} 
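For reference, the devcoredump blob written by mcp251xfd_dump() above can be decoded in userspace by walking the object headers defined in mcp251xfd-dump.h (next hunk): consecutive little-endian header records at the start of the file, each pointing via "offset"/"len" at an array of reg/val pairs, terminated by an END record. The following standalone sketch is not part of the patch; the input file name, the fixed buffer size and the little-endian host are illustrative assumptions.

/* decode-mcp251xfd-coredump.c - hypothetical userspace helper (not in tree) */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MCP251XFD_DUMP_MAGIC 0x1825434du

struct dump_hdr {	/* mirrors struct mcp251xfd_dump_object_header */
	uint32_t magic, type, offset, len;	/* little-endian on disk */
};

struct dump_reg {	/* mirrors struct mcp251xfd_dump_object_reg */
	uint32_t reg, val;
};

int main(int argc, char **argv)
{
	/* e.g. a blob saved from /sys/class/devcoredump/devcd0/data (assumption) */
	FILE *f = fopen(argc > 1 ? argv[1] : "devcoredump.bin", "rb");
	static uint8_t buf[1 << 20];
	size_t size, i, n;

	if (!f)
		return EXIT_FAILURE;
	size = fread(buf, 1, sizeof(buf), f);
	fclose(f);

	/* Header records sit back to back at the start of the blob; an END
	 * record (type == (u32)-1, len == 0) terminates the list. For brevity
	 * this assumes a little-endian host and does no byte swapping or
	 * offset validation.
	 */
	for (i = 0; (i + 1) * sizeof(struct dump_hdr) <= size; i++) {
		const struct dump_hdr *hdr = (const void *)(buf + i * sizeof(*hdr));
		const struct dump_reg *reg = (const void *)(buf + hdr->offset);

		if (hdr->magic != MCP251XFD_DUMP_MAGIC || hdr->type == UINT32_MAX)
			break;

		printf("object type %u: %u bytes at offset 0x%x\n",
		       (unsigned)hdr->type, (unsigned)hdr->len, (unsigned)hdr->offset);
		for (n = 0; n < hdr->len / sizeof(*reg); n++)
			printf("  0x%08x: 0x%08x\n",
			       (unsigned)reg[n].reg, (unsigned)reg[n].val);
	}

	return EXIT_SUCCESS;
}

Compile with e.g. "gcc -o decode decode-mcp251xfd-coredump.c" and pass the saved blob as the first argument.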
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.h new file mode 100644 index 000000000000..e7560b0712eb --- /dev/null +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * mcp251xfd - Microchip MCP251xFD Family CAN controller driver + * + * Copyright (c) 2019, 2020, 2021 Pengutronix, + * Marc Kleine-Budde <kernel@pengutronix.de> + */ + +#ifndef _MCP251XFD_DUMP_H +#define _MCP251XFD_DUMP_H + +#define MCP251XFD_DUMP_MAGIC 0x1825434d + +enum mcp251xfd_dump_object_type { + MCP251XFD_DUMP_OBJECT_TYPE_REG, + MCP251XFD_DUMP_OBJECT_TYPE_TEF, + MCP251XFD_DUMP_OBJECT_TYPE_RX, + MCP251XFD_DUMP_OBJECT_TYPE_TX, + MCP251XFD_DUMP_OBJECT_TYPE_END = -1, +}; + +enum mcp251xfd_dump_object_ring_key { + MCP251XFD_DUMP_OBJECT_RING_KEY_HEAD, + MCP251XFD_DUMP_OBJECT_RING_KEY_TAIL, + MCP251XFD_DUMP_OBJECT_RING_KEY_BASE, + MCP251XFD_DUMP_OBJECT_RING_KEY_NR, + MCP251XFD_DUMP_OBJECT_RING_KEY_FIFO_NR, + MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_NUM, + MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_SIZE, + __MCP251XFD_DUMP_OBJECT_RING_KEY_MAX, +}; + +struct mcp251xfd_dump_object_header { + __le32 magic; + __le32 type; + __le32 offset; + __le32 len; +}; + +struct mcp251xfd_dump_object_reg { + __le32 reg; + __le32 val; +}; + +#endif diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c new file mode 100644 index 000000000000..57eeb066a945 --- /dev/null +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c @@ -0,0 +1,145 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// mcp251xfd - Microchip MCP251xFD Family CAN controller driver +// +// Copyright (c) 2021, 2022 Pengutronix, +// Marc Kleine-Budde <kernel@pengutronix.de> +// + +#include <linux/ethtool.h> + +#include "mcp251xfd.h" +#include "mcp251xfd-ram.h" + +static void +mcp251xfd_ring_get_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + const struct mcp251xfd_priv *priv = netdev_priv(ndev); + const bool fd_mode = mcp251xfd_is_fd_mode(priv); + struct can_ram_layout layout; + + can_ram_get_layout(&layout, &mcp251xfd_ram_config, NULL, NULL, fd_mode); + ring->rx_max_pending = layout.max_rx; + ring->tx_max_pending = layout.max_tx; + + ring->rx_pending = priv->rx_obj_num; + ring->tx_pending = priv->tx->obj_num; +} + +static int +mcp251xfd_ring_set_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct mcp251xfd_priv *priv = netdev_priv(ndev); + const bool fd_mode = mcp251xfd_is_fd_mode(priv); + struct can_ram_layout layout; + + can_ram_get_layout(&layout, &mcp251xfd_ram_config, ring, NULL, fd_mode); + if ((layout.cur_rx != priv->rx_obj_num || + layout.cur_tx != priv->tx->obj_num) && + netif_running(ndev)) + return -EBUSY; + + priv->rx_obj_num = layout.cur_rx; + priv->rx_obj_num_coalesce_irq = layout.rx_coalesce; + priv->tx->obj_num = layout.cur_tx; + priv->tx_obj_num_coalesce_irq = layout.tx_coalesce; + + return 0; +} + +static int mcp251xfd_ring_get_coalesce(struct net_device *ndev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kec, + struct netlink_ext_ack *ext_ack) +{ + struct mcp251xfd_priv *priv = netdev_priv(ndev); + u32 rx_max_frames, tx_max_frames; + + /* The ethtool doc says: + * To disable coalescing, set usecs = 0 and max_frames = 1. 
+ */ + if (priv->rx_obj_num_coalesce_irq == 0) + rx_max_frames = 1; + else + rx_max_frames = priv->rx_obj_num_coalesce_irq; + + ec->rx_max_coalesced_frames_irq = rx_max_frames; + ec->rx_coalesce_usecs_irq = priv->rx_coalesce_usecs_irq; + + if (priv->tx_obj_num_coalesce_irq == 0) + tx_max_frames = 1; + else + tx_max_frames = priv->tx_obj_num_coalesce_irq; + + ec->tx_max_coalesced_frames_irq = tx_max_frames; + ec->tx_coalesce_usecs_irq = priv->tx_coalesce_usecs_irq; + + return 0; +} + +static int mcp251xfd_ring_set_coalesce(struct net_device *ndev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kec, + struct netlink_ext_ack *ext_ack) +{ + struct mcp251xfd_priv *priv = netdev_priv(ndev); + const bool fd_mode = mcp251xfd_is_fd_mode(priv); + const struct ethtool_ringparam ring = { + .rx_pending = priv->rx_obj_num, + .tx_pending = priv->tx->obj_num, + }; + struct can_ram_layout layout; + + can_ram_get_layout(&layout, &mcp251xfd_ram_config, &ring, ec, fd_mode); + + if ((layout.rx_coalesce != priv->rx_obj_num_coalesce_irq || + ec->rx_coalesce_usecs_irq != priv->rx_coalesce_usecs_irq || + layout.tx_coalesce != priv->tx_obj_num_coalesce_irq || + ec->tx_coalesce_usecs_irq != priv->tx_coalesce_usecs_irq) && + netif_running(ndev)) + return -EBUSY; + + priv->rx_obj_num = layout.cur_rx; + priv->rx_obj_num_coalesce_irq = layout.rx_coalesce; + priv->rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq; + + priv->tx->obj_num = layout.cur_tx; + priv->tx_obj_num_coalesce_irq = layout.tx_coalesce; + priv->tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq; + + return 0; +} + +static const struct ethtool_ops mcp251xfd_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS_IRQ | + ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ | + ETHTOOL_COALESCE_TX_USECS_IRQ | + ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ, + .get_ringparam = mcp251xfd_ring_get_ringparam, + .set_ringparam = mcp251xfd_ring_set_ringparam, + .get_coalesce = mcp251xfd_ring_get_coalesce, + .set_coalesce = mcp251xfd_ring_set_coalesce, + .get_ts_info = can_ethtool_op_get_ts_info_hwts, +}; + +void mcp251xfd_ethtool_init(struct mcp251xfd_priv *priv) +{ + struct can_ram_layout layout; + + priv->ndev->ethtool_ops = &mcp251xfd_ethtool_ops; + + can_ram_get_layout(&layout, &mcp251xfd_ram_config, NULL, NULL, false); + priv->rx_obj_num = layout.default_rx; + priv->tx->obj_num = layout.default_tx; + + priv->rx_obj_num_coalesce_irq = 0; + priv->tx_obj_num_coalesce_irq = 0; + priv->rx_coalesce_usecs_irq = 0; + priv->tx_coalesce_usecs_irq = 0; +} diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c new file mode 100644 index 000000000000..61b0d6fa52dd --- /dev/null +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c @@ -0,0 +1,162 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// mcp251xfd - Microchip MCP251xFD Family CAN controller driver +// +// Copyright (c) 2021, 2022 Pengutronix, +// Marc Kleine-Budde <kernel@pengutronix.de> +// + +#include "mcp251xfd-ram.h" + +static inline u8 can_ram_clamp(const struct can_ram_config *config, + const struct can_ram_obj_config *obj, + u8 val) +{ + u8 max; + + max = min_t(u8, obj->max, obj->fifo_num * config->fifo_depth); + return clamp(val, obj->min, max); +} + +static u8 +can_ram_rounddown_pow_of_two(const struct can_ram_config *config, + const struct can_ram_obj_config *obj, + const u8 coalesce, u8 val) +{ + u8 fifo_num = obj->fifo_num; + u8 ret = 0, i; + + val = can_ram_clamp(config, obj, val); + + if (coalesce) { + /* Use 1st FIFO for coalescing, if 
requested. + * + * Either use complete FIFO (and FIFO Full IRQ) for + * coalescing or only half of FIFO (FIFO Half Full + * IRQ) and use remaining half for normal objects. + */ + ret = min_t(u8, coalesce * 2, config->fifo_depth); + val -= ret; + fifo_num--; + } + + for (i = 0; i < fifo_num && val; i++) { + u8 n; + + n = min_t(u8, rounddown_pow_of_two(val), + config->fifo_depth); + + /* skip small FIFOs */ + if (n < obj->fifo_depth_min) + return ret; + + ret += n; + val -= n; + } + + return ret; +} + +void can_ram_get_layout(struct can_ram_layout *layout, + const struct can_ram_config *config, + const struct ethtool_ringparam *ring, + const struct ethtool_coalesce *ec, + const bool fd_mode) +{ + u8 num_rx, num_tx; + u16 ram_free; + + /* default CAN */ + + num_tx = config->tx.def[fd_mode]; + num_tx = can_ram_rounddown_pow_of_two(config, &config->tx, 0, num_tx); + + ram_free = config->size; + ram_free -= config->tx.size[fd_mode] * num_tx; + + num_rx = ram_free / config->rx.size[fd_mode]; + + layout->default_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, num_rx); + layout->default_tx = num_tx; + + /* MAX CAN */ + + ram_free = config->size; + ram_free -= config->tx.size[fd_mode] * config->tx.min; + num_rx = ram_free / config->rx.size[fd_mode]; + + ram_free = config->size; + ram_free -= config->rx.size[fd_mode] * config->rx.min; + num_tx = ram_free / config->tx.size[fd_mode]; + + layout->max_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, num_rx); + layout->max_tx = can_ram_rounddown_pow_of_two(config, &config->tx, 0, num_tx); + + /* cur CAN */ + + if (ring) { + u8 num_rx_coalesce = 0, num_tx_coalesce = 0; + + /* If the ring parameters have been configured in + * CAN-CC mode, but we are in CAN-FD mode now, + * they might be too big. Use the default CAN-FD values + * in this case. + */ + num_rx = ring->rx_pending; + if (num_rx > layout->max_rx) + num_rx = layout->default_rx; + + num_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, num_rx); + + /* The ethtool doc says: + * To disable coalescing, set usecs = 0 and max_frames = 1. + */ + if (ec && !(ec->rx_coalesce_usecs_irq == 0 && + ec->rx_max_coalesced_frames_irq == 1)) { + u8 max; + + /* use only max half of available objects for coalescing */ + max = min_t(u8, num_rx / 2, config->fifo_depth); + num_rx_coalesce = clamp(ec->rx_max_coalesced_frames_irq, + (u32)config->rx.fifo_depth_coalesce_min, + (u32)max); + num_rx_coalesce = rounddown_pow_of_two(num_rx_coalesce); + + num_rx = can_ram_rounddown_pow_of_two(config, &config->rx, + num_rx_coalesce, num_rx); + } + + ram_free = config->size - config->rx.size[fd_mode] * num_rx; + num_tx = ram_free / config->tx.size[fd_mode]; + num_tx = min_t(u8, ring->tx_pending, num_tx); + num_tx = can_ram_rounddown_pow_of_two(config, &config->tx, 0, num_tx); + + /* The ethtool doc says: + * To disable coalescing, set usecs = 0 and max_frames = 1. 
+ */ + if (ec && !(ec->tx_coalesce_usecs_irq == 0 && + ec->tx_max_coalesced_frames_irq == 1)) { + u8 max; + + /* use only max half of available objects for coalescing */ + max = min_t(u8, num_tx / 2, config->fifo_depth); + num_tx_coalesce = clamp(ec->tx_max_coalesced_frames_irq, + (u32)config->tx.fifo_depth_coalesce_min, + (u32)max); + num_tx_coalesce = rounddown_pow_of_two(num_tx_coalesce); + + num_tx = can_ram_rounddown_pow_of_two(config, &config->tx, + num_tx_coalesce, num_tx); + } + + layout->cur_rx = num_rx; + layout->cur_tx = num_tx; + layout->rx_coalesce = num_rx_coalesce; + layout->tx_coalesce = num_tx_coalesce; + } else { + layout->cur_rx = layout->default_rx; + layout->cur_tx = layout->default_tx; + layout->rx_coalesce = 0; + layout->tx_coalesce = 0; + } +} diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.h new file mode 100644 index 000000000000..7558c1510cbf --- /dev/null +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * mcp251xfd - Microchip MCP251xFD Family CAN controller driver + * + * Copyright (c) 2021, 2022 Pengutronix, + * Marc Kleine-Budde <kernel@pengutronix.de> + */ + +#ifndef _MCP251XFD_RAM_H +#define _MCP251XFD_RAM_H + +#include <linux/ethtool.h> + +#define CAN_RAM_NUM_MAX (-1) + +enum can_ram_mode { + CAN_RAM_MODE_CAN, + CAN_RAM_MODE_CANFD, + __CAN_RAM_MODE_MAX +}; + +struct can_ram_obj_config { + u8 size[__CAN_RAM_MODE_MAX]; + + u8 def[__CAN_RAM_MODE_MAX]; + u8 min; + u8 max; + + u8 fifo_num; + u8 fifo_depth_min; + u8 fifo_depth_coalesce_min; +}; + +struct can_ram_config { + const struct can_ram_obj_config rx; + const struct can_ram_obj_config tx; + + u16 size; + u8 fifo_depth; +}; + +struct can_ram_layout { + u8 default_rx; + u8 default_tx; + + u8 max_rx; + u8 max_tx; + + u8 cur_rx; + u8 cur_tx; + + u8 rx_coalesce; + u8 tx_coalesce; +}; + +void can_ram_get_layout(struct can_ram_layout *layout, + const struct can_ram_config *config, + const struct ethtool_ringparam *ring, + const struct ethtool_coalesce *ec, + const bool fd_mode); + +#endif diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c new file mode 100644 index 000000000000..70d5ff0ae7ac --- /dev/null +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c @@ -0,0 +1,687 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// mcp251xfd - Microchip MCP251xFD Family CAN controller driver +// +// Copyright (c) 2019, 2020, 2021 Pengutronix, +// Marc Kleine-Budde <kernel@pengutronix.de> +// + +#include "mcp251xfd.h" + +#include <linux/unaligned.h> + +static const struct regmap_config mcp251xfd_regmap_crc; + +static int +_mcp251xfd_regmap_nocrc_gather_write(void *context, + const void *reg, size_t reg_len, + const void *val, size_t val_len) +{ + struct spi_device *spi = context; + struct mcp251xfd_priv *priv = spi_get_drvdata(spi); + struct mcp251xfd_map_buf_nocrc *buf_tx = priv->map_buf_nocrc_tx; + struct spi_transfer xfer[] = { + { + .tx_buf = buf_tx, + .len = sizeof(buf_tx->cmd) + val_len, + }, + }; + + BUILD_BUG_ON(sizeof(buf_tx->cmd) != sizeof(__be16)); + + if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) && + reg_len != sizeof(buf_tx->cmd.cmd)) + return -EINVAL; + + memcpy(&buf_tx->cmd, reg, sizeof(buf_tx->cmd)); + memcpy(buf_tx->data, val, val_len); + + return spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer)); +} + +static int +mcp251xfd_regmap_nocrc_gather_write(void *context, + const void *reg_p, size_t reg_len, + const void *val, size_t 
val_len) +{ + const u16 byte_exclude = MCP251XFD_REG_IOCON + + mcp251xfd_first_byte_set(MCP251XFD_REG_IOCON_GPIO_MASK); + u16 reg = be16_to_cpu(*(__be16 *)reg_p) & MCP251XFD_SPI_ADDRESS_MASK; + int ret; + + /* Never write to bits 16..23 of IOCON register to avoid clearing of LAT0/LAT1 + * + * According to MCP2518FD Errata DS80000789E 5 writing IOCON register using one + * SPI write command clears LAT0/LAT1. + * + * Errata Fix/Work Around suggests to write registers with single byte + * write instructions. However, it seems that the byte at 0xe06(IOCON[23:16]) + * is for read-only access and writing to it causes the clearing of LAT0/LAT1. + */ + if (reg <= byte_exclude && reg + val_len > byte_exclude) { + size_t len = byte_exclude - reg; + + /* Write up to 0xe05 */ + ret = _mcp251xfd_regmap_nocrc_gather_write(context, reg_p, reg_len, val, len); + if (ret) + return ret; + + /* Write from 0xe07 on */ + reg += len + 1; + reg = (__force unsigned short)cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_WRITE | reg); + return _mcp251xfd_regmap_nocrc_gather_write(context, ®, reg_len, + val + len + 1, + val_len - len - 1); + } + + return _mcp251xfd_regmap_nocrc_gather_write(context, reg_p, reg_len, + val, val_len); +} + +static int +mcp251xfd_regmap_nocrc_write(void *context, const void *data, size_t count) +{ + const size_t data_offset = sizeof(__be16); + + return mcp251xfd_regmap_nocrc_gather_write(context, data, data_offset, + data + data_offset, count - data_offset); +} + +static inline bool +mcp251xfd_update_bits_read_reg(const struct mcp251xfd_priv *priv, + unsigned int reg) +{ + struct mcp251xfd_rx_ring *ring; + int n; + + switch (reg) { + case MCP251XFD_REG_INT: + case MCP251XFD_REG_TEFCON: + case MCP251XFD_REG_FLTCON(0): + case MCP251XFD_REG_ECCSTAT: + case MCP251XFD_REG_CRC: + return false; + case MCP251XFD_REG_CON: + case MCP251XFD_REG_OSC: + case MCP251XFD_REG_ECCCON: + case MCP251XFD_REG_IOCON: + return true; + default: + mcp251xfd_for_each_rx_ring(priv, ring, n) { + if (reg == MCP251XFD_REG_FIFOCON(ring->fifo_nr)) + return false; + if (reg == MCP251XFD_REG_FIFOSTA(ring->fifo_nr)) + return true; + } + + WARN(1, "Status of reg 0x%04x unknown.\n", reg); + } + + return true; +} + +static int +mcp251xfd_regmap_nocrc_update_bits(void *context, unsigned int reg, + unsigned int mask, unsigned int val) +{ + struct spi_device *spi = context; + struct mcp251xfd_priv *priv = spi_get_drvdata(spi); + struct mcp251xfd_map_buf_nocrc *buf_rx = priv->map_buf_nocrc_rx; + struct mcp251xfd_map_buf_nocrc *buf_tx = priv->map_buf_nocrc_tx; + __le32 orig_le32 = 0, mask_le32, val_le32, tmp_le32; + u8 first_byte, last_byte, len; + int err; + + BUILD_BUG_ON(sizeof(buf_rx->cmd) != sizeof(__be16)); + BUILD_BUG_ON(sizeof(buf_tx->cmd) != sizeof(__be16)); + + if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) && + mask == 0) + return -EINVAL; + + first_byte = mcp251xfd_first_byte_set(mask); + last_byte = mcp251xfd_last_byte_set(mask); + len = last_byte - first_byte + 1; + + if (mcp251xfd_update_bits_read_reg(priv, reg)) { + struct spi_transfer xfer[2] = { }; + struct spi_message msg; + + spi_message_init(&msg); + spi_message_add_tail(&xfer[0], &msg); + + if (priv->devtype_data.quirks & MCP251XFD_QUIRK_HALF_DUPLEX) { + xfer[0].tx_buf = buf_tx; + xfer[0].len = sizeof(buf_tx->cmd); + + xfer[1].rx_buf = buf_rx->data; + xfer[1].len = len; + spi_message_add_tail(&xfer[1], &msg); + } else { + xfer[0].tx_buf = buf_tx; + xfer[0].rx_buf = buf_rx; + xfer[0].len = sizeof(buf_tx->cmd) + len; + + if (MCP251XFD_SANITIZE_SPI) + memset(buf_tx->data, 
0x0, len); + } + + mcp251xfd_spi_cmd_read_nocrc(&buf_tx->cmd, reg + first_byte); + err = spi_sync(spi, &msg); + if (err) + return err; + + memcpy(&orig_le32, buf_rx->data, len); + } + + mask_le32 = cpu_to_le32(mask >> BITS_PER_BYTE * first_byte); + val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte); + + tmp_le32 = orig_le32 & ~mask_le32; + tmp_le32 |= val_le32 & mask_le32; + + reg += first_byte; + mcp251xfd_spi_cmd_write_nocrc(&buf_tx->cmd, reg); + return mcp251xfd_regmap_nocrc_gather_write(context, &buf_tx->cmd, 2, &tmp_le32, len); +} + +static int +mcp251xfd_regmap_nocrc_read(void *context, + const void *reg, size_t reg_len, + void *val_buf, size_t val_len) +{ + struct spi_device *spi = context; + struct mcp251xfd_priv *priv = spi_get_drvdata(spi); + struct mcp251xfd_map_buf_nocrc *buf_rx = priv->map_buf_nocrc_rx; + struct mcp251xfd_map_buf_nocrc *buf_tx = priv->map_buf_nocrc_tx; + struct spi_transfer xfer[2] = { }; + struct spi_message msg; + int err; + + BUILD_BUG_ON(sizeof(buf_rx->cmd) != sizeof(__be16)); + BUILD_BUG_ON(sizeof(buf_tx->cmd) != sizeof(__be16)); + + if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) && + reg_len != sizeof(buf_tx->cmd.cmd)) + return -EINVAL; + + spi_message_init(&msg); + spi_message_add_tail(&xfer[0], &msg); + + if (priv->devtype_data.quirks & MCP251XFD_QUIRK_HALF_DUPLEX) { + xfer[0].tx_buf = reg; + xfer[0].len = sizeof(buf_tx->cmd); + + xfer[1].rx_buf = val_buf; + xfer[1].len = val_len; + spi_message_add_tail(&xfer[1], &msg); + } else { + xfer[0].tx_buf = buf_tx; + xfer[0].rx_buf = buf_rx; + xfer[0].len = sizeof(buf_tx->cmd) + val_len; + + memcpy(&buf_tx->cmd, reg, sizeof(buf_tx->cmd)); + if (MCP251XFD_SANITIZE_SPI) + memset(buf_tx->data, 0x0, val_len); + } + + err = spi_sync(spi, &msg); + if (err) + return err; + + if (!(priv->devtype_data.quirks & MCP251XFD_QUIRK_HALF_DUPLEX)) + memcpy(val_buf, buf_rx->data, val_len); + + return 0; +} + +static int +_mcp251xfd_regmap_crc_gather_write(void *context, + const void *reg_p, size_t reg_len, + const void *val, size_t val_len) +{ + struct spi_device *spi = context; + struct mcp251xfd_priv *priv = spi_get_drvdata(spi); + struct mcp251xfd_map_buf_crc *buf_tx = priv->map_buf_crc_tx; + struct spi_transfer xfer[] = { + { + .tx_buf = buf_tx, + .len = sizeof(buf_tx->cmd) + val_len + + sizeof(buf_tx->crc), + }, + }; + u16 reg = *(u16 *)reg_p; + u16 crc; + + BUILD_BUG_ON(sizeof(buf_tx->cmd) != sizeof(__be16) + sizeof(u8)); + + if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) && + reg_len != sizeof(buf_tx->cmd.cmd) + + mcp251xfd_regmap_crc.pad_bits / BITS_PER_BYTE) + return -EINVAL; + + mcp251xfd_spi_cmd_write_crc(&buf_tx->cmd, reg, val_len); + memcpy(buf_tx->data, val, val_len); + + crc = mcp251xfd_crc16_compute(buf_tx, sizeof(buf_tx->cmd) + val_len); + put_unaligned_be16(crc, buf_tx->data + val_len); + + return spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer)); +} + +static int +mcp251xfd_regmap_crc_gather_write(void *context, + const void *reg_p, size_t reg_len, + const void *val, size_t val_len) +{ + const u16 byte_exclude = MCP251XFD_REG_IOCON + + mcp251xfd_first_byte_set(MCP251XFD_REG_IOCON_GPIO_MASK); + u16 reg = *(u16 *)reg_p; + int ret; + + /* Never write to bits 16..23 of IOCON register to avoid clearing of LAT0/LAT1 + * + * According to MCP2518FD Errata DS80000789E 5 writing IOCON register using one + * SPI write command clears LAT0/LAT1. + * + * Errata Fix/Work Around suggests to write registers with single byte + * write instructions. 
However, it seems that the byte at 0xe06(IOCON[23:16]) + * is for read-only access and writing to it causes the clearing of LAT0/LAT1. + */ + if (reg <= byte_exclude && reg + val_len > byte_exclude) { + size_t len = byte_exclude - reg; + + /* Write up to 0xe05 */ + ret = _mcp251xfd_regmap_crc_gather_write(context, ®, reg_len, val, len); + if (ret) + return ret; + + /* Write from 0xe07 on */ + reg += len + 1; + return _mcp251xfd_regmap_crc_gather_write(context, ®, reg_len, + val + len + 1, + val_len - len - 1); + } + + return _mcp251xfd_regmap_crc_gather_write(context, reg_p, reg_len, + val, val_len); +} + +static int +mcp251xfd_regmap_crc_write(void *context, + const void *data, size_t count) +{ + const size_t data_offset = sizeof(__be16) + + mcp251xfd_regmap_crc.pad_bits / BITS_PER_BYTE; + + return mcp251xfd_regmap_crc_gather_write(context, + data, data_offset, + data + data_offset, + count - data_offset); +} + +static int +mcp251xfd_regmap_crc_read_check_crc(const struct mcp251xfd_map_buf_crc * const buf_rx, + const struct mcp251xfd_map_buf_crc * const buf_tx, + unsigned int data_len) +{ + u16 crc_received, crc_calculated; + + crc_received = get_unaligned_be16(buf_rx->data + data_len); + crc_calculated = mcp251xfd_crc16_compute2(&buf_tx->cmd, + sizeof(buf_tx->cmd), + buf_rx->data, + data_len); + if (crc_received != crc_calculated) + return -EBADMSG; + + return 0; +} + +static int +mcp251xfd_regmap_crc_read_one(struct mcp251xfd_priv *priv, + struct spi_message *msg, unsigned int data_len) +{ + const struct mcp251xfd_map_buf_crc *buf_rx = priv->map_buf_crc_rx; + const struct mcp251xfd_map_buf_crc *buf_tx = priv->map_buf_crc_tx; + int err; + + BUILD_BUG_ON(sizeof(buf_rx->cmd) != sizeof(__be16) + sizeof(u8)); + BUILD_BUG_ON(sizeof(buf_tx->cmd) != sizeof(__be16) + sizeof(u8)); + + err = spi_sync(priv->spi, msg); + if (err) + return err; + + return mcp251xfd_regmap_crc_read_check_crc(buf_rx, buf_tx, data_len); +} + +static int +mcp251xfd_regmap_crc_read(void *context, + const void *reg_p, size_t reg_len, + void *val_buf, size_t val_len) +{ + struct spi_device *spi = context; + struct mcp251xfd_priv *priv = spi_get_drvdata(spi); + struct mcp251xfd_map_buf_crc *buf_rx = priv->map_buf_crc_rx; + struct mcp251xfd_map_buf_crc *buf_tx = priv->map_buf_crc_tx; + struct spi_transfer xfer[2] = { }; + struct spi_message msg; + u16 reg = *(u16 *)reg_p; + int i, err; + + BUILD_BUG_ON(sizeof(buf_rx->cmd) != sizeof(__be16) + sizeof(u8)); + BUILD_BUG_ON(sizeof(buf_tx->cmd) != sizeof(__be16) + sizeof(u8)); + + if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) && + reg_len != sizeof(buf_tx->cmd.cmd) + + mcp251xfd_regmap_crc.pad_bits / BITS_PER_BYTE) + return -EINVAL; + + spi_message_init(&msg); + spi_message_add_tail(&xfer[0], &msg); + + if (priv->devtype_data.quirks & MCP251XFD_QUIRK_HALF_DUPLEX) { + xfer[0].tx_buf = buf_tx; + xfer[0].len = sizeof(buf_tx->cmd); + + xfer[1].rx_buf = buf_rx->data; + xfer[1].len = val_len + sizeof(buf_tx->crc); + spi_message_add_tail(&xfer[1], &msg); + } else { + xfer[0].tx_buf = buf_tx; + xfer[0].rx_buf = buf_rx; + xfer[0].len = sizeof(buf_tx->cmd) + val_len + + sizeof(buf_tx->crc); + + if (MCP251XFD_SANITIZE_SPI) + memset(buf_tx->data, 0x0, val_len + + sizeof(buf_tx->crc)); + } + + mcp251xfd_spi_cmd_read_crc(&buf_tx->cmd, reg, val_len); + + for (i = 0; i < MCP251XFD_READ_CRC_RETRIES_MAX; i++) { + err = mcp251xfd_regmap_crc_read_one(priv, &msg, val_len); + if (!err) + goto out; + if (err != -EBADMSG) + return err; + + /* MCP251XFD_REG_TBC is the time base counter + * register. 
It increments once per SYS clock tick, + * which is 20 or 40 MHz. + * + * Observation on the mcp2518fd shows that if the + * lowest byte (which is transferred first on the SPI + * bus) of that register is 0x00 or 0x80 the + * calculated CRC doesn't always match the transferred + * one. On the mcp2517fd this problem is not limited + * to the first byte being 0x00 or 0x80. + * + * If the highest bit in the lowest byte is flipped + * the transferred CRC matches the calculated one. We + * assume for now the CRC operates on the correct + * data. + */ + if (reg == MCP251XFD_REG_TBC && + ((buf_rx->data[0] & 0xf8) == 0x0 || + (buf_rx->data[0] & 0xf8) == 0x80)) { + /* Flip highest bit in lowest byte of le32 */ + buf_rx->data[0] ^= 0x80; + + /* re-check CRC */ + err = mcp251xfd_regmap_crc_read_check_crc(buf_rx, + buf_tx, + val_len); + if (!err) { + /* If CRC is now correct, assume + * flipped data is OK. + */ + goto out; + } + } + + /* MCP251XFD_REG_OSC is the first ever reg we read from. + * + * The chip may be in deep sleep and this SPI transfer + * (i.e. the assertion of the CS) will wake the chip + * up. This takes about 3ms. The CRC of this transfer + * is wrong. + * + * Or there isn't a chip at all, in this case the CRC + * will be wrong, too. + * + * In both cases ignore the CRC and copy the read data + * to the caller. It will take care of both cases. + * + */ + if (reg == MCP251XFD_REG_OSC && val_len == sizeof(__le32)) { + err = 0; + goto out; + } + + netdev_info(priv->ndev, + "CRC read error at address 0x%04x (length=%zd, data=%*ph, CRC=0x%04x) retrying.\n", + reg, val_len, (int)val_len, buf_rx->data, + get_unaligned_be16(buf_rx->data + val_len)); + } + + if (err) { + netdev_err(priv->ndev, + "CRC read error at address 0x%04x (length=%zd, data=%*ph, CRC=0x%04x).\n", + reg, val_len, (int)val_len, buf_rx->data, + get_unaligned_be16(buf_rx->data + val_len)); + + return err; + } +out: + memcpy(val_buf, buf_rx->data, val_len); + + return 0; +} + +static const struct regmap_range mcp251xfd_reg_table_yes_range[] = { + regmap_reg_range(0x000, 0x2ec), /* CAN FD Controller Module SFR */ + regmap_reg_range(0x400, 0xbfc), /* RAM */ + regmap_reg_range(0xe00, 0xe14), /* MCP2517/18FD SFR */ +}; + +static const struct regmap_access_table mcp251xfd_reg_table = { + .yes_ranges = mcp251xfd_reg_table_yes_range, + .n_yes_ranges = ARRAY_SIZE(mcp251xfd_reg_table_yes_range), +}; + +static const struct regmap_config mcp251xfd_regmap_nocrc = { + .name = "nocrc", + .reg_bits = 16, + .reg_stride = 4, + .pad_bits = 0, + .val_bits = 32, + .max_register = 0xffc, + .wr_table = &mcp251xfd_reg_table, + .rd_table = &mcp251xfd_reg_table, + .cache_type = REGCACHE_NONE, + .read_flag_mask = (__force unsigned long) + cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_READ), + .write_flag_mask = (__force unsigned long) + cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_WRITE), +}; + +static const struct regmap_bus mcp251xfd_bus_nocrc = { + .write = mcp251xfd_regmap_nocrc_write, + .gather_write = mcp251xfd_regmap_nocrc_gather_write, + .reg_update_bits = mcp251xfd_regmap_nocrc_update_bits, + .read = mcp251xfd_regmap_nocrc_read, + .reg_format_endian_default = REGMAP_ENDIAN_BIG, + .val_format_endian_default = REGMAP_ENDIAN_LITTLE, + .max_raw_read = sizeof_field(struct mcp251xfd_map_buf_nocrc, data), + .max_raw_write = sizeof_field(struct mcp251xfd_map_buf_nocrc, data), +}; + +static const struct regmap_config mcp251xfd_regmap_crc = { + .name = "crc", + .reg_bits = 16, + .reg_stride = 4, + .pad_bits = 16, /* keep data bits aligned */ + .val_bits = 32, + 
.max_register = 0xffc, + .wr_table = &mcp251xfd_reg_table, + .rd_table = &mcp251xfd_reg_table, + .cache_type = REGCACHE_NONE, +}; + +static const struct regmap_bus mcp251xfd_bus_crc = { + .write = mcp251xfd_regmap_crc_write, + .gather_write = mcp251xfd_regmap_crc_gather_write, + .read = mcp251xfd_regmap_crc_read, + .reg_format_endian_default = REGMAP_ENDIAN_NATIVE, + .val_format_endian_default = REGMAP_ENDIAN_LITTLE, + .max_raw_read = sizeof_field(struct mcp251xfd_map_buf_crc, data), + .max_raw_write = sizeof_field(struct mcp251xfd_map_buf_crc, data), +}; + +static inline bool +mcp251xfd_regmap_use_nocrc(struct mcp251xfd_priv *priv) +{ + return (!(priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG)) || + (!(priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_RX)); +} + +static inline bool +mcp251xfd_regmap_use_crc(struct mcp251xfd_priv *priv) +{ + return (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) || + (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_RX); +} + +static int +mcp251xfd_regmap_init_nocrc(struct mcp251xfd_priv *priv) +{ + if (!priv->map_nocrc) { + struct regmap *map; + + map = devm_regmap_init(&priv->spi->dev, &mcp251xfd_bus_nocrc, + priv->spi, &mcp251xfd_regmap_nocrc); + if (IS_ERR(map)) + return PTR_ERR(map); + + priv->map_nocrc = map; + } + + if (!priv->map_buf_nocrc_rx) { + priv->map_buf_nocrc_rx = + devm_kzalloc(&priv->spi->dev, + sizeof(*priv->map_buf_nocrc_rx), + GFP_KERNEL); + if (!priv->map_buf_nocrc_rx) + return -ENOMEM; + } + + if (!priv->map_buf_nocrc_tx) { + priv->map_buf_nocrc_tx = + devm_kzalloc(&priv->spi->dev, + sizeof(*priv->map_buf_nocrc_tx), + GFP_KERNEL); + if (!priv->map_buf_nocrc_tx) + return -ENOMEM; + } + + if (!(priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG)) + priv->map_reg = priv->map_nocrc; + + if (!(priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_RX)) + priv->map_rx = priv->map_nocrc; + + return 0; +} + +static void mcp251xfd_regmap_destroy_nocrc(struct mcp251xfd_priv *priv) +{ + if (priv->map_buf_nocrc_rx) { + devm_kfree(&priv->spi->dev, priv->map_buf_nocrc_rx); + priv->map_buf_nocrc_rx = NULL; + } + if (priv->map_buf_nocrc_tx) { + devm_kfree(&priv->spi->dev, priv->map_buf_nocrc_tx); + priv->map_buf_nocrc_tx = NULL; + } +} + +static int +mcp251xfd_regmap_init_crc(struct mcp251xfd_priv *priv) +{ + if (!priv->map_crc) { + struct regmap *map; + + map = devm_regmap_init(&priv->spi->dev, &mcp251xfd_bus_crc, + priv->spi, &mcp251xfd_regmap_crc); + if (IS_ERR(map)) + return PTR_ERR(map); + + priv->map_crc = map; + } + + if (!priv->map_buf_crc_rx) { + priv->map_buf_crc_rx = + devm_kzalloc(&priv->spi->dev, + sizeof(*priv->map_buf_crc_rx), + GFP_KERNEL); + if (!priv->map_buf_crc_rx) + return -ENOMEM; + } + + if (!priv->map_buf_crc_tx) { + priv->map_buf_crc_tx = + devm_kzalloc(&priv->spi->dev, + sizeof(*priv->map_buf_crc_tx), + GFP_KERNEL); + if (!priv->map_buf_crc_tx) + return -ENOMEM; + } + + if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) + priv->map_reg = priv->map_crc; + + if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_RX) + priv->map_rx = priv->map_crc; + + return 0; +} + +static void mcp251xfd_regmap_destroy_crc(struct mcp251xfd_priv *priv) +{ + if (priv->map_buf_crc_rx) { + devm_kfree(&priv->spi->dev, priv->map_buf_crc_rx); + priv->map_buf_crc_rx = NULL; + } + if (priv->map_buf_crc_tx) { + devm_kfree(&priv->spi->dev, priv->map_buf_crc_tx); + priv->map_buf_crc_tx = NULL; + } +} + +int mcp251xfd_regmap_init(struct mcp251xfd_priv *priv) +{ + int err; + + if (mcp251xfd_regmap_use_nocrc(priv)) { + err = 
mcp251xfd_regmap_init_nocrc(priv); + + if (err) + return err; + } else { + mcp251xfd_regmap_destroy_nocrc(priv); + } + + if (mcp251xfd_regmap_use_crc(priv)) { + err = mcp251xfd_regmap_init_crc(priv); + + if (err) + return err; + } else { + mcp251xfd_regmap_destroy_crc(priv); + } + + return 0; +} diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c new file mode 100644 index 000000000000..c34f2067a989 --- /dev/null +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c @@ -0,0 +1,551 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// mcp251xfd - Microchip MCP251xFD Family CAN controller driver +// +// Copyright (c) 2019, 2020, 2021, 2024 Pengutronix, +// Marc Kleine-Budde <kernel@pengutronix.de> +// +// Based on: +// +// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface +// +// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org> +// + +#include <linux/unaligned.h> + +#include "mcp251xfd.h" +#include "mcp251xfd-ram.h" + +static inline u8 +mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv, + union mcp251xfd_write_reg_buf *write_reg_buf, + const u16 reg, const u32 mask, const u32 val) +{ + u8 first_byte, last_byte, len; + u8 *data; + __le32 val_le32; + + first_byte = mcp251xfd_first_byte_set(mask); + last_byte = mcp251xfd_last_byte_set(mask); + len = last_byte - first_byte + 1; + + data = mcp251xfd_spi_cmd_write(priv, write_reg_buf, reg + first_byte, len); + val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte); + memcpy(data, &val_le32, len); + + if (!(priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG)) { + len += sizeof(write_reg_buf->nocrc.cmd); + } else if (len == 1) { + u16 crc; + + /* CRC */ + len += sizeof(write_reg_buf->safe.cmd); + crc = mcp251xfd_crc16_compute(&write_reg_buf->safe, len); + put_unaligned_be16(crc, (void *)write_reg_buf + len); + + /* Total length */ + len += sizeof(write_reg_buf->safe.crc); + } else { + u16 crc; + + mcp251xfd_spi_cmd_crc_set_len_in_reg(&write_reg_buf->crc.cmd, + len); + /* CRC */ + len += sizeof(write_reg_buf->crc.cmd); + crc = mcp251xfd_crc16_compute(&write_reg_buf->crc, len); + put_unaligned_be16(crc, (void *)write_reg_buf + len); + + /* Total length */ + len += sizeof(write_reg_buf->crc.crc); + } + + return len; +} + +static void +mcp251xfd_ring_init_tef(struct mcp251xfd_priv *priv, u16 *base) +{ + struct mcp251xfd_tef_ring *tef_ring; + struct spi_transfer *xfer; + u32 val; + u16 addr; + u8 len; + int i; + + /* TEF */ + tef_ring = priv->tef; + tef_ring->head = 0; + tef_ring->tail = 0; + + /* TEF- and TX-FIFO have same number of objects */ + *base = mcp251xfd_get_tef_obj_addr(priv->tx->obj_num); + + /* FIFO IRQ enable */ + addr = MCP251XFD_REG_TEFCON; + val = MCP251XFD_REG_TEFCON_TEFOVIE | MCP251XFD_REG_TEFCON_TEFNEIE; + + len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->irq_enable_buf, + addr, val, val); + tef_ring->irq_enable_xfer.tx_buf = &tef_ring->irq_enable_buf; + tef_ring->irq_enable_xfer.len = len; + spi_message_init_with_transfers(&tef_ring->irq_enable_msg, + &tef_ring->irq_enable_xfer, 1); + + /* FIFO increment TEF tail pointer */ + addr = MCP251XFD_REG_TEFCON; + val = MCP251XFD_REG_TEFCON_UINC; + len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->uinc_buf, + addr, val, val); + + for (i = 0; i < ARRAY_SIZE(tef_ring->uinc_xfer); i++) { + xfer = &tef_ring->uinc_xfer[i]; + xfer->tx_buf = &tef_ring->uinc_buf; + xfer->len = len; + xfer->cs_change = 1; + xfer->cs_change_delay.value = 0; + xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS; 
+ } + + /* "cs_change == 1" on the last transfer results in an active + * chip select after the complete SPI message. This causes the + * controller to interpret the next register access as + * data. Set "cs_change" of the last transfer to "0" to + * properly deactivate the chip select at the end of the + * message. + */ + xfer->cs_change = 0; + + if (priv->tx_coalesce_usecs_irq || priv->tx_obj_num_coalesce_irq) { + val = MCP251XFD_REG_TEFCON_UINC | + MCP251XFD_REG_TEFCON_TEFOVIE | + MCP251XFD_REG_TEFCON_TEFHIE; + + len = mcp251xfd_cmd_prepare_write_reg(priv, + &tef_ring->uinc_irq_disable_buf, + addr, val, val); + xfer->tx_buf = &tef_ring->uinc_irq_disable_buf; + xfer->len = len; + } +} + +static void +mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv, + const struct mcp251xfd_tx_ring *ring, + struct mcp251xfd_tx_obj *tx_obj, + const u8 rts_buf_len, + const u8 n) +{ + struct spi_transfer *xfer; + u16 addr; + + /* FIFO load */ + addr = mcp251xfd_get_tx_obj_addr(ring, n); + if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX) + mcp251xfd_spi_cmd_write_crc_set_addr(&tx_obj->buf.crc.cmd, + addr); + else + mcp251xfd_spi_cmd_write_nocrc(&tx_obj->buf.nocrc.cmd, + addr); + + xfer = &tx_obj->xfer[0]; + xfer->tx_buf = &tx_obj->buf; + xfer->len = 0; /* actual len is assigned on the fly */ + xfer->cs_change = 1; + xfer->cs_change_delay.value = 0; + xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS; + + /* FIFO request to send */ + xfer = &tx_obj->xfer[1]; + xfer->tx_buf = &ring->rts_buf; + xfer->len = rts_buf_len; + + /* SPI message */ + spi_message_init_with_transfers(&tx_obj->msg, tx_obj->xfer, + ARRAY_SIZE(tx_obj->xfer)); +} + +static void +mcp251xfd_ring_init_tx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr) +{ + struct mcp251xfd_tx_ring *tx_ring; + struct mcp251xfd_tx_obj *tx_obj; + u32 val; + u16 addr; + u8 len; + int i; + + tx_ring = priv->tx; + tx_ring->head = 0; + tx_ring->tail = 0; + tx_ring->base = *base; + tx_ring->nr = 0; + tx_ring->fifo_nr = *fifo_nr; + + *base = mcp251xfd_get_tx_obj_addr(tx_ring, tx_ring->obj_num); + *fifo_nr += 1; + + /* FIFO request to send */ + addr = MCP251XFD_REG_FIFOCON(tx_ring->fifo_nr); + val = MCP251XFD_REG_FIFOCON_TXREQ | MCP251XFD_REG_FIFOCON_UINC; + len = mcp251xfd_cmd_prepare_write_reg(priv, &tx_ring->rts_buf, + addr, val, val); + + mcp251xfd_for_each_tx_obj(tx_ring, tx_obj, i) + mcp251xfd_tx_ring_init_tx_obj(priv, tx_ring, tx_obj, len, i); +} + +static void +mcp251xfd_ring_init_rx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr) +{ + struct mcp251xfd_rx_ring *rx_ring; + struct spi_transfer *xfer; + u32 val; + u16 addr; + u8 len; + int i, j; + + mcp251xfd_for_each_rx_ring(priv, rx_ring, i) { + rx_ring->last_valid = timecounter_read(&priv->tc); + rx_ring->head = 0; + rx_ring->tail = 0; + rx_ring->base = *base; + rx_ring->nr = i; + rx_ring->fifo_nr = *fifo_nr; + + *base = mcp251xfd_get_rx_obj_addr(rx_ring, rx_ring->obj_num); + *fifo_nr += 1; + + /* FIFO IRQ enable */ + addr = MCP251XFD_REG_FIFOCON(rx_ring->fifo_nr); + val = MCP251XFD_REG_FIFOCON_RXOVIE | + MCP251XFD_REG_FIFOCON_TFNRFNIE; + len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->irq_enable_buf, + addr, val, val); + rx_ring->irq_enable_xfer.tx_buf = &rx_ring->irq_enable_buf; + rx_ring->irq_enable_xfer.len = len; + spi_message_init_with_transfers(&rx_ring->irq_enable_msg, + &rx_ring->irq_enable_xfer, 1); + + /* FIFO increment RX tail pointer */ + val = MCP251XFD_REG_FIFOCON_UINC; + len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->uinc_buf, + addr, val, val); + + for 
(j = 0; j < ARRAY_SIZE(rx_ring->uinc_xfer); j++) { + xfer = &rx_ring->uinc_xfer[j]; + xfer->tx_buf = &rx_ring->uinc_buf; + xfer->len = len; + xfer->cs_change = 1; + xfer->cs_change_delay.value = 0; + xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS; + } + + /* "cs_change == 1" on the last transfer results in an + * active chip select after the complete SPI + * message. This causes the controller to interpret + * the next register access as data. Set "cs_change" + * of the last transfer to "0" to properly deactivate + * the chip select at the end of the message. + */ + xfer->cs_change = 0; + + /* Use 1st RX-FIFO for IRQ coalescing. If enabled + * (rx_coalesce_usecs_irq or rx_max_coalesce_frames_irq + * is activated), use the last transfer to disable: + * + * - TFNRFNIE (Receive FIFO Not Empty Interrupt) + * + * and enable: + * + * - TFHRFHIE (Receive FIFO Half Full Interrupt) + * - or - + * - TFERFFIE (Receive FIFO Full Interrupt) + * + * depending on rx_max_coalesce_frames_irq. + * + * The RXOVIE (Overflow Interrupt) is always enabled. + */ + if (rx_ring->nr == 0 && (priv->rx_coalesce_usecs_irq || + priv->rx_obj_num_coalesce_irq)) { + val = MCP251XFD_REG_FIFOCON_UINC | + MCP251XFD_REG_FIFOCON_RXOVIE; + + if (priv->rx_obj_num_coalesce_irq == rx_ring->obj_num) + val |= MCP251XFD_REG_FIFOCON_TFERFFIE; + else if (priv->rx_obj_num_coalesce_irq) + val |= MCP251XFD_REG_FIFOCON_TFHRFHIE; + + len = mcp251xfd_cmd_prepare_write_reg(priv, + &rx_ring->uinc_irq_disable_buf, + addr, val, val); + xfer->tx_buf = &rx_ring->uinc_irq_disable_buf; + xfer->len = len; + } + } +} + +int mcp251xfd_ring_init(struct mcp251xfd_priv *priv) +{ + const struct mcp251xfd_rx_ring *rx_ring; + u16 base = 0, ram_used; + u8 fifo_nr = 1; + int err = 0, i; + + netdev_reset_queue(priv->ndev); + + mcp251xfd_ring_init_tef(priv, &base); + mcp251xfd_ring_init_rx(priv, &base, &fifo_nr); + mcp251xfd_ring_init_tx(priv, &base, &fifo_nr); + + /* mcp251xfd_handle_rxif() will iterate over all RX rings. + * Rings with their corresponding bit set in + * priv->regs_status.rxif are read out. + * + * If the chip is configured for only 1 RX-FIFO, and if there + * is an RX interrupt pending (RXIF in INT register is set), + * it must be the 1st RX-FIFO. + * + * We mark the RXIF of the 1st FIFO as pending here, so that + * we can skip the read of the RXIF register in + * mcp251xfd_read_regs_status() for the 1 RX-FIFO only case. + * + * If we use more than 1 RX-FIFO, this value gets overwritten + * in mcp251xfd_read_regs_status(), so set it unconditionally + * here. 
+ */ + priv->regs_status.rxif = BIT(priv->rx[0]->fifo_nr); + + if (priv->tx_obj_num_coalesce_irq) { + netdev_dbg(priv->ndev, + "FIFO setup: TEF: 0x%03x: %2d*%zu bytes = %4zu bytes (coalesce)\n", + mcp251xfd_get_tef_obj_addr(0), + priv->tx_obj_num_coalesce_irq, + sizeof(struct mcp251xfd_hw_tef_obj), + priv->tx_obj_num_coalesce_irq * + sizeof(struct mcp251xfd_hw_tef_obj)); + + netdev_dbg(priv->ndev, + " 0x%03x: %2d*%zu bytes = %4zu bytes\n", + mcp251xfd_get_tef_obj_addr(priv->tx_obj_num_coalesce_irq), + priv->tx->obj_num - priv->tx_obj_num_coalesce_irq, + sizeof(struct mcp251xfd_hw_tef_obj), + (priv->tx->obj_num - priv->tx_obj_num_coalesce_irq) * + sizeof(struct mcp251xfd_hw_tef_obj)); + } else { + netdev_dbg(priv->ndev, + "FIFO setup: TEF: 0x%03x: %2d*%zu bytes = %4zu bytes\n", + mcp251xfd_get_tef_obj_addr(0), + priv->tx->obj_num, sizeof(struct mcp251xfd_hw_tef_obj), + priv->tx->obj_num * sizeof(struct mcp251xfd_hw_tef_obj)); + } + + mcp251xfd_for_each_rx_ring(priv, rx_ring, i) { + if (rx_ring->nr == 0 && priv->rx_obj_num_coalesce_irq) { + netdev_dbg(priv->ndev, + "FIFO setup: RX-%u: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes (coalesce)\n", + rx_ring->nr, rx_ring->fifo_nr, + mcp251xfd_get_rx_obj_addr(rx_ring, 0), + priv->rx_obj_num_coalesce_irq, rx_ring->obj_size, + priv->rx_obj_num_coalesce_irq * rx_ring->obj_size); + + if (priv->rx_obj_num_coalesce_irq == MCP251XFD_FIFO_DEPTH) + continue; + + netdev_dbg(priv->ndev, + " 0x%03x: %2u*%u bytes = %4u bytes\n", + mcp251xfd_get_rx_obj_addr(rx_ring, + priv->rx_obj_num_coalesce_irq), + rx_ring->obj_num - priv->rx_obj_num_coalesce_irq, + rx_ring->obj_size, + (rx_ring->obj_num - priv->rx_obj_num_coalesce_irq) * + rx_ring->obj_size); + } else { + netdev_dbg(priv->ndev, + "FIFO setup: RX-%u: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes\n", + rx_ring->nr, rx_ring->fifo_nr, + mcp251xfd_get_rx_obj_addr(rx_ring, 0), + rx_ring->obj_num, rx_ring->obj_size, + rx_ring->obj_num * rx_ring->obj_size); + } + } + + netdev_dbg(priv->ndev, + "FIFO setup: TX: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes\n", + priv->tx->fifo_nr, + mcp251xfd_get_tx_obj_addr(priv->tx, 0), + priv->tx->obj_num, priv->tx->obj_size, + priv->tx->obj_num * priv->tx->obj_size); + + netdev_dbg(priv->ndev, + "FIFO setup: free: %4d bytes\n", + MCP251XFD_RAM_SIZE - (base - MCP251XFD_RAM_START)); + + ram_used = base - MCP251XFD_RAM_START; + if (ram_used > MCP251XFD_RAM_SIZE) { + netdev_err(priv->ndev, + "Error during ring configuration, using more RAM (%u bytes) than available (%u bytes).\n", + ram_used, MCP251XFD_RAM_SIZE); + err = -ENOMEM; + } + + if (priv->tx_obj_num_coalesce_irq && + priv->tx_obj_num_coalesce_irq * 2 != priv->tx->obj_num) { + netdev_err(priv->ndev, + "Error during ring configuration, number of TEF coalescing buffers (%u) must be half of TEF buffers (%u).\n", + priv->tx_obj_num_coalesce_irq, priv->tx->obj_num); + err = -EINVAL; + } + + return err; +} + +void mcp251xfd_ring_free(struct mcp251xfd_priv *priv) +{ + int i; + + for (i = ARRAY_SIZE(priv->rx) - 1; i >= 0; i--) { + kfree(priv->rx[i]); + priv->rx[i] = NULL; + } +} + +static enum hrtimer_restart mcp251xfd_rx_irq_timer(struct hrtimer *t) +{ + struct mcp251xfd_priv *priv = container_of(t, struct mcp251xfd_priv, + rx_irq_timer); + struct mcp251xfd_rx_ring *ring = priv->rx[0]; + + if (test_bit(MCP251XFD_FLAGS_DOWN, priv->flags)) + return HRTIMER_NORESTART; + + spi_async(priv->spi, &ring->irq_enable_msg); + + return HRTIMER_NORESTART; +} + +static enum hrtimer_restart mcp251xfd_tx_irq_timer(struct hrtimer *t) +{ + struct mcp251xfd_priv 
*priv = container_of(t, struct mcp251xfd_priv, + tx_irq_timer); + struct mcp251xfd_tef_ring *ring = priv->tef; + + if (test_bit(MCP251XFD_FLAGS_DOWN, priv->flags)) + return HRTIMER_NORESTART; + + spi_async(priv->spi, &ring->irq_enable_msg); + + return HRTIMER_NORESTART; +} + +const struct can_ram_config mcp251xfd_ram_config = { + .rx = { + .size[CAN_RAM_MODE_CAN] = sizeof(struct mcp251xfd_hw_rx_obj_can), + .size[CAN_RAM_MODE_CANFD] = sizeof(struct mcp251xfd_hw_rx_obj_canfd), + .min = MCP251XFD_RX_OBJ_NUM_MIN, + .max = MCP251XFD_RX_OBJ_NUM_MAX, + .def[CAN_RAM_MODE_CAN] = CAN_RAM_NUM_MAX, + .def[CAN_RAM_MODE_CANFD] = CAN_RAM_NUM_MAX, + .fifo_num = MCP251XFD_FIFO_RX_NUM, + .fifo_depth_min = MCP251XFD_RX_FIFO_DEPTH_MIN, + .fifo_depth_coalesce_min = MCP251XFD_RX_FIFO_DEPTH_COALESCE_MIN, + }, + .tx = { + .size[CAN_RAM_MODE_CAN] = sizeof(struct mcp251xfd_hw_tef_obj) + + sizeof(struct mcp251xfd_hw_tx_obj_can), + .size[CAN_RAM_MODE_CANFD] = sizeof(struct mcp251xfd_hw_tef_obj) + + sizeof(struct mcp251xfd_hw_tx_obj_canfd), + .min = MCP251XFD_TX_OBJ_NUM_MIN, + .max = MCP251XFD_TX_OBJ_NUM_MAX, + .def[CAN_RAM_MODE_CAN] = MCP251XFD_TX_OBJ_NUM_CAN_DEFAULT, + .def[CAN_RAM_MODE_CANFD] = MCP251XFD_TX_OBJ_NUM_CANFD_DEFAULT, + .fifo_num = MCP251XFD_FIFO_TX_NUM, + .fifo_depth_min = MCP251XFD_TX_FIFO_DEPTH_MIN, + .fifo_depth_coalesce_min = MCP251XFD_TX_FIFO_DEPTH_COALESCE_MIN, + }, + .size = MCP251XFD_RAM_SIZE, + .fifo_depth = MCP251XFD_FIFO_DEPTH, +}; + +int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv) +{ + const bool fd_mode = mcp251xfd_is_fd_mode(priv); + struct mcp251xfd_tx_ring *tx_ring = priv->tx; + struct mcp251xfd_rx_ring *rx_ring; + u8 tx_obj_size, rx_obj_size; + u8 rem, i; + + /* switching from CAN-2.0 to CAN-FD mode or vice versa */ + if (fd_mode != test_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags)) { + const struct ethtool_ringparam ring = { + .rx_pending = priv->rx_obj_num, + .tx_pending = priv->tx->obj_num, + }; + const struct ethtool_coalesce ec = { + .rx_coalesce_usecs_irq = priv->rx_coalesce_usecs_irq, + .rx_max_coalesced_frames_irq = priv->rx_obj_num_coalesce_irq == 0 ? + 1 : priv->rx_obj_num_coalesce_irq, + .tx_coalesce_usecs_irq = priv->tx_coalesce_usecs_irq, + .tx_max_coalesced_frames_irq = priv->tx_obj_num_coalesce_irq == 0 ? 
+ 1 : priv->tx_obj_num_coalesce_irq, + }; + struct can_ram_layout layout; + + can_ram_get_layout(&layout, &mcp251xfd_ram_config, &ring, &ec, fd_mode); + + priv->rx_obj_num = layout.cur_rx; + priv->rx_obj_num_coalesce_irq = layout.rx_coalesce; + + tx_ring->obj_num = layout.cur_tx; + priv->tx_obj_num_coalesce_irq = layout.tx_coalesce; + } + + if (fd_mode) { + tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_canfd); + rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_canfd); + set_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags); + } else { + tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_can); + rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_can); + clear_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags); + } + + tx_ring->obj_num_shift_to_u8 = BITS_PER_TYPE(tx_ring->obj_num) - + ilog2(tx_ring->obj_num); + tx_ring->obj_size = tx_obj_size; + + rem = priv->rx_obj_num; + for (i = 0; i < ARRAY_SIZE(priv->rx) && rem; i++) { + u8 rx_obj_num; + + if (i == 0 && priv->rx_obj_num_coalesce_irq) + rx_obj_num = min_t(u8, priv->rx_obj_num_coalesce_irq * 2, + MCP251XFD_FIFO_DEPTH); + else + rx_obj_num = min_t(u8, rounddown_pow_of_two(rem), + MCP251XFD_FIFO_DEPTH); + rem -= rx_obj_num; + + rx_ring = kzalloc(sizeof(*rx_ring) + rx_obj_size * rx_obj_num, + GFP_KERNEL); + if (!rx_ring) { + mcp251xfd_ring_free(priv); + return -ENOMEM; + } + + rx_ring->obj_num = rx_obj_num; + rx_ring->obj_num_shift_to_u8 = BITS_PER_TYPE(rx_ring->obj_num_shift_to_u8) - + ilog2(rx_obj_num); + rx_ring->obj_size = rx_obj_size; + priv->rx[i] = rx_ring; + } + priv->rx_ring_num = i; + + hrtimer_setup(&priv->rx_irq_timer, mcp251xfd_rx_irq_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL); + + hrtimer_setup(&priv->tx_irq_timer, mcp251xfd_tx_irq_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL); + + return 0; +} diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c new file mode 100644 index 000000000000..fe897f3e4c12 --- /dev/null +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c @@ -0,0 +1,333 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// mcp251xfd - Microchip MCP251xFD Family CAN controller driver +// +// Copyright (c) 2019, 2020, 2021, 2023 Pengutronix, +// Marc Kleine-Budde <kernel@pengutronix.de> +// +// Based on: +// +// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface +// +// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org> +// + +#include <linux/bitfield.h> + +#include "mcp251xfd.h" + +static inline bool mcp251xfd_rx_fifo_sta_empty(const u32 fifo_sta) +{ + return !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF); +} + +static inline bool mcp251xfd_rx_fifo_sta_full(const u32 fifo_sta) +{ + return fifo_sta & MCP251XFD_REG_FIFOSTA_TFERFFIF; +} + +static inline int +mcp251xfd_rx_tail_get_from_chip(const struct mcp251xfd_priv *priv, + const struct mcp251xfd_rx_ring *ring, + u8 *rx_tail) +{ + u32 fifo_ua; + int err; + + err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOUA(ring->fifo_nr), + &fifo_ua); + if (err) + return err; + + fifo_ua -= ring->base - MCP251XFD_RAM_START; + *rx_tail = fifo_ua / ring->obj_size; + + return 0; +} + +static int +mcp251xfd_check_rx_tail(const struct mcp251xfd_priv *priv, + const struct mcp251xfd_rx_ring *ring) +{ + u8 rx_tail_chip, rx_tail; + int err; + + if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY)) + return 0; + + err = mcp251xfd_rx_tail_get_from_chip(priv, ring, &rx_tail_chip); + if (err) + return err; + + rx_tail = mcp251xfd_get_rx_tail(ring); + if (rx_tail_chip != rx_tail) { + netdev_err(priv->ndev, + "RX tail of chip (%d) and ours (%d) 
inconsistent.\n", + rx_tail_chip, rx_tail); + return -EILSEQ; + } + + return 0; +} + +static int +mcp251xfd_get_rx_len(const struct mcp251xfd_priv *priv, + const struct mcp251xfd_rx_ring *ring, + u8 *len_p) +{ + const u8 shift = ring->obj_num_shift_to_u8; + u8 chip_head, tail, len; + u32 fifo_sta; + int err; + + err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr), + &fifo_sta); + if (err) + return err; + + if (mcp251xfd_rx_fifo_sta_empty(fifo_sta)) { + *len_p = 0; + return 0; + } + + if (mcp251xfd_rx_fifo_sta_full(fifo_sta)) { + *len_p = ring->obj_num; + return 0; + } + + chip_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta); + + err = mcp251xfd_check_rx_tail(priv, ring); + if (err) + return err; + tail = mcp251xfd_get_rx_tail(ring); + + /* First shift to full u8. The subtraction works on signed + * values, that keeps the difference steady around the u8 + * overflow. The right shift acts on len, which is an u8. + */ + BUILD_BUG_ON(sizeof(ring->obj_num) != sizeof(chip_head)); + BUILD_BUG_ON(sizeof(ring->obj_num) != sizeof(tail)); + BUILD_BUG_ON(sizeof(ring->obj_num) != sizeof(len)); + + len = (chip_head << shift) - (tail << shift); + *len_p = len >> shift; + + return 0; +} + +static void +mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv, + const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj, + struct sk_buff *skb) +{ + struct canfd_frame *cfd = (struct canfd_frame *)skb->data; + u8 dlc; + + if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_IDE) { + u32 sid, eid; + + eid = FIELD_GET(MCP251XFD_OBJ_ID_EID_MASK, hw_rx_obj->id); + sid = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK, hw_rx_obj->id); + + cfd->can_id = CAN_EFF_FLAG | + FIELD_PREP(MCP251XFD_REG_FRAME_EFF_EID_MASK, eid) | + FIELD_PREP(MCP251XFD_REG_FRAME_EFF_SID_MASK, sid); + } else { + cfd->can_id = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK, + hw_rx_obj->id); + } + + dlc = FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC_MASK, hw_rx_obj->flags); + + /* CANFD */ + if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF) { + if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_ESI) + cfd->flags |= CANFD_ESI; + + if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_BRS) + cfd->flags |= CANFD_BRS; + + cfd->len = can_fd_dlc2len(dlc); + } else { + if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR) + cfd->can_id |= CAN_RTR_FLAG; + + can_frame_set_cc_len((struct can_frame *)cfd, dlc, + priv->can.ctrlmode); + } + + if (!(hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR)) + memcpy(cfd->data, hw_rx_obj->data, cfd->len); +} + +static int +mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv, + struct mcp251xfd_rx_ring *ring, + const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj) +{ + struct net_device_stats *stats = &priv->ndev->stats; + struct sk_buff *skb; + struct canfd_frame *cfd; + u64 timestamp; + int err; + + /* According to mcp2518fd erratum DS80000789E 6. the FIFOCI + * bits of a FIFOSTA register, here the RX FIFO head index + * might be corrupted and we might process past the RX FIFO's + * head into old CAN frames. + * + * Compare the timestamp of currently processed CAN frame with + * last valid frame received. Abort with -EBADMSG if an old + * CAN frame is detected. 
+ */ + timestamp = timecounter_cyc2time(&priv->tc, hw_rx_obj->ts); + if (timestamp <= ring->last_valid) { + stats->rx_fifo_errors++; + + return -EBADMSG; + } + ring->last_valid = timestamp; + + if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF) + skb = alloc_canfd_skb(priv->ndev, &cfd); + else + skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cfd); + + if (!skb) { + stats->rx_dropped++; + return 0; + } + + mcp251xfd_skb_set_timestamp(skb, timestamp); + mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb); + err = can_rx_offload_queue_timestamp(&priv->offload, skb, hw_rx_obj->ts); + if (err) + stats->rx_fifo_errors++; + + return 0; +} + +static inline int +mcp251xfd_rx_obj_read(const struct mcp251xfd_priv *priv, + const struct mcp251xfd_rx_ring *ring, + struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj, + const u8 offset, const u8 len) +{ + const int val_bytes = regmap_get_val_bytes(priv->map_rx); + int err; + + err = regmap_bulk_read(priv->map_rx, + mcp251xfd_get_rx_obj_addr(ring, offset), + hw_rx_obj, + len * ring->obj_size / val_bytes); + + return err; +} + +static int +mcp251xfd_handle_rxif_ring_uinc(const struct mcp251xfd_priv *priv, + struct mcp251xfd_rx_ring *ring, + u8 len) +{ + int offset; + int err; + + if (!len) + return 0; + + ring->head += len; + + /* Increment the RX FIFO tail pointer 'len' times in a + * single SPI message. + * + * Note: + * Calculate offset, so that the SPI message ends on + * the last transfer of the uinc_xfer array, which has + * "cs_change == 0", to properly deactivate the chip + * select. + */ + offset = ARRAY_SIZE(ring->uinc_xfer) - len; + err = spi_sync_transfer(priv->spi, + ring->uinc_xfer + offset, len); + if (err) + return err; + + ring->tail += len; + + return 0; +} + +static int +mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv, + struct mcp251xfd_rx_ring *ring) +{ + struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj = ring->obj; + u8 rx_tail, len, l; + int err, i; + + err = mcp251xfd_get_rx_len(priv, ring, &len); + if (err) + return err; + + while ((l = mcp251xfd_get_rx_linear_len(ring, len))) { + rx_tail = mcp251xfd_get_rx_tail(ring); + + err = mcp251xfd_rx_obj_read(priv, ring, hw_rx_obj, + rx_tail, l); + if (err) + return err; + + for (i = 0; i < l; i++) { + err = mcp251xfd_handle_rxif_one(priv, ring, + (void *)hw_rx_obj + + i * ring->obj_size); + + /* -EBADMSG means we're affected by mcp2518fd + * erratum DS80000789E 6., i.e. the timestamp + * in the RX object is older than the last + * valid received CAN frame. Don't process any + * further and mark processed frames as good. 
+ */ + if (err == -EBADMSG) + return mcp251xfd_handle_rxif_ring_uinc(priv, ring, i); + else if (err) + return err; + } + + err = mcp251xfd_handle_rxif_ring_uinc(priv, ring, l); + if (err) + return err; + + len -= l; + } + + return 0; +} + +int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv) +{ + struct mcp251xfd_rx_ring *ring; + int err, n; + + mcp251xfd_for_each_rx_ring(priv, ring, n) { + /* - if RX IRQ coalescing is active always handle ring 0 + * - only handle rings if RX IRQ is active + */ + if ((ring->nr > 0 || !priv->rx_obj_num_coalesce_irq) && + !(priv->regs_status.rxif & BIT(ring->fifo_nr))) + continue; + + err = mcp251xfd_handle_rxif_ring(priv, ring); + if (err) + return err; + } + + if (priv->rx_coalesce_usecs_irq) + hrtimer_start(&priv->rx_irq_timer, + ns_to_ktime(priv->rx_coalesce_usecs_irq * + NSEC_PER_USEC), + HRTIMER_MODE_REL); + + return 0; +} diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c new file mode 100644 index 000000000000..e94321849fd7 --- /dev/null +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c @@ -0,0 +1,302 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// mcp251xfd - Microchip MCP251xFD Family CAN controller driver +// +// Copyright (c) 2019, 2020, 2021, 2023 Pengutronix, +// Marc Kleine-Budde <kernel@pengutronix.de> +// +// Based on: +// +// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface +// +// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org> +// + +#include <linux/bitfield.h> + +#include "mcp251xfd.h" + +static inline bool mcp251xfd_tx_fifo_sta_empty(u32 fifo_sta) +{ + return fifo_sta & MCP251XFD_REG_FIFOSTA_TFERFFIF; +} + +static inline bool mcp251xfd_tx_fifo_sta_less_than_half_full(u32 fifo_sta) +{ + return fifo_sta & MCP251XFD_REG_FIFOSTA_TFHRFHIF; +} + +static inline int +mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv, + u8 *tef_tail) +{ + u32 tef_ua; + int err; + + err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFUA, &tef_ua); + if (err) + return err; + + *tef_tail = tef_ua / sizeof(struct mcp251xfd_hw_tef_obj); + + return 0; +} + +static int mcp251xfd_check_tef_tail(const struct mcp251xfd_priv *priv) +{ + u8 tef_tail_chip, tef_tail; + int err; + + if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY)) + return 0; + + err = mcp251xfd_tef_tail_get_from_chip(priv, &tef_tail_chip); + if (err) + return err; + + tef_tail = mcp251xfd_get_tef_tail(priv); + if (tef_tail_chip != tef_tail) { + netdev_err(priv->ndev, + "TEF tail of chip (0x%02x) and ours (0x%08x) inconsistent.\n", + tef_tail_chip, tef_tail); + return -EILSEQ; + } + + return 0; +} + +static int +mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv, + const struct mcp251xfd_hw_tef_obj *hw_tef_obj, + unsigned int *frame_len_ptr) +{ + struct net_device_stats *stats = &priv->ndev->stats; + u32 seq, tef_tail_masked, tef_tail; + struct sk_buff *skb; + + /* Use the MCP2517FD mask on the MCP2518FD, too. We only + * compare 7 bits, this is enough to detect old TEF objects. + */ + seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK, + hw_tef_obj->flags); + tef_tail_masked = priv->tef->tail & + field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK); + + /* According to mcp2518fd erratum DS80000789E 6. the FIFOCI + * bits of a FIFOSTA register, here the TX FIFO tail index + * might be corrupted and we might process past the TEF FIFO's + * head into old CAN frames. + * + * Compare the sequence number of the currently processed CAN + * frame with the expected sequence number. 
Abort with + * -EBADMSG if an old CAN frame is detected. + */ + if (seq != tef_tail_masked) { + netdev_dbg(priv->ndev, "%s: chip=0x%02x ring=0x%02x\n", __func__, + seq, tef_tail_masked); + stats->tx_fifo_errors++; + + return -EBADMSG; + } + + tef_tail = mcp251xfd_get_tef_tail(priv); + skb = priv->can.echo_skb[tef_tail]; + if (skb) + mcp251xfd_skb_set_timestamp_raw(priv, skb, hw_tef_obj->ts); + stats->tx_bytes += + can_rx_offload_get_echo_skb_queue_timestamp(&priv->offload, + tef_tail, hw_tef_obj->ts, + frame_len_ptr); + stats->tx_packets++; + priv->tef->tail++; + + return 0; +} + +static int +mcp251xfd_get_tef_len(struct mcp251xfd_priv *priv, u8 *len_p) +{ + const struct mcp251xfd_tx_ring *tx_ring = priv->tx; + const u8 shift = tx_ring->obj_num_shift_to_u8; + u8 chip_tx_tail, tail, len; + u32 fifo_sta; + int err; + + err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(priv->tx->fifo_nr), + &fifo_sta); + if (err) + return err; + + /* If the chip says the TX-FIFO is empty, but there are no TX + * buffers free in the ring, we assume all have been sent. + */ + if (mcp251xfd_tx_fifo_sta_empty(fifo_sta) && + mcp251xfd_get_tx_free(tx_ring) == 0) { + *len_p = tx_ring->obj_num; + return 0; + } + + chip_tx_tail = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta); + + err = mcp251xfd_check_tef_tail(priv); + if (err) + return err; + tail = mcp251xfd_get_tef_tail(priv); + + /* First shift to full u8. The subtraction works on signed + * values, that keeps the difference steady around the u8 + * overflow. The right shift acts on len, which is an u8. + */ + BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(chip_tx_tail)); + BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(tail)); + BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(len)); + + len = (chip_tx_tail << shift) - (tail << shift); + len >>= shift; + + /* According to mcp2518fd erratum DS80000789E 6. the FIFOCI + * bits of a FIFOSTA register, here the TX-FIFO tail index + * might be corrupted. + * + * However here it seems the bit indicating that the TX-FIFO + * is empty (MCP251XFD_REG_FIFOSTA_TFERFFIF) is not correct + * while the TX-FIFO tail index is. + * + * We assume the TX-FIFO is empty, i.e. all pending CAN frames + * have been sent, if: + * - Chip's head and tail index are equal (len == 0). + * - The TX-FIFO is less than half full. + * (The TX-FIFO empty case has already been checked at the + * beginning of this function.) + * - No free buffers in the TX ring. 
+ */ + if (len == 0 && mcp251xfd_tx_fifo_sta_less_than_half_full(fifo_sta) && + mcp251xfd_get_tx_free(tx_ring) == 0) + len = tx_ring->obj_num; + + *len_p = len; + + return 0; +} + +static inline int +mcp251xfd_tef_obj_read(const struct mcp251xfd_priv *priv, + struct mcp251xfd_hw_tef_obj *hw_tef_obj, + const u8 offset, const u8 len) +{ + const struct mcp251xfd_tx_ring *tx_ring = priv->tx; + const int val_bytes = regmap_get_val_bytes(priv->map_rx); + + if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) && + (offset > tx_ring->obj_num || + len > tx_ring->obj_num || + offset + len > tx_ring->obj_num)) { + netdev_err(priv->ndev, + "Trying to read too many TEF objects (max=%d, offset=%d, len=%d).\n", + tx_ring->obj_num, offset, len); + return -ERANGE; + } + + return regmap_bulk_read(priv->map_rx, + mcp251xfd_get_tef_obj_addr(offset), + hw_tef_obj, + sizeof(*hw_tef_obj) / val_bytes * len); +} + +static inline void mcp251xfd_ecc_tefif_successful(struct mcp251xfd_priv *priv) +{ + struct mcp251xfd_ecc *ecc = &priv->ecc; + + ecc->ecc_stat = 0; +} + +int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv) +{ + struct mcp251xfd_hw_tef_obj hw_tef_obj[MCP251XFD_TX_OBJ_NUM_MAX]; + unsigned int total_frame_len = 0; + u8 tef_tail, len, l; + int err, i; + + err = mcp251xfd_get_tef_len(priv, &len); + if (err) + return err; + + tef_tail = mcp251xfd_get_tef_tail(priv); + l = mcp251xfd_get_tef_linear_len(priv, len); + err = mcp251xfd_tef_obj_read(priv, hw_tef_obj, tef_tail, l); + if (err) + return err; + + if (l < len) { + err = mcp251xfd_tef_obj_read(priv, &hw_tef_obj[l], 0, len - l); + if (err) + return err; + } + + for (i = 0; i < len; i++) { + unsigned int frame_len = 0; + + err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i], &frame_len); + /* -EBADMSG means we're affected by mcp2518fd erratum + * DS80000789E 6., i.e. the Sequence Number in the TEF + * doesn't match our tef_tail. Don't process any + * further and mark processed frames as good. + */ + if (err == -EBADMSG) + goto out_netif_wake_queue; + if (err) + return err; + + total_frame_len += frame_len; + } + +out_netif_wake_queue: + len = i; /* number of handled good TEFs */ + if (len) { + struct mcp251xfd_tef_ring *ring = priv->tef; + struct mcp251xfd_tx_ring *tx_ring = priv->tx; + int offset; + + ring->head += len; + + /* Increment the TEF FIFO tail pointer 'len' times in + * a single SPI message. + * + * Note: + * Calculate offset, so that the SPI message ends on + * the last transfer of the uinc_xfer array, which has + * "cs_change == 0", to properly deactivate the chip + * select. + */ + offset = ARRAY_SIZE(ring->uinc_xfer) - len; + err = spi_sync_transfer(priv->spi, + ring->uinc_xfer + offset, len); + if (err) + return err; + + tx_ring->tail += len; + netdev_completed_queue(priv->ndev, len, total_frame_len); + + err = mcp251xfd_check_tef_tail(priv); + if (err) + return err; + } + + mcp251xfd_ecc_tefif_successful(priv); + + if (mcp251xfd_get_tx_free(priv->tx)) { + /* Make sure that anybody stopping the queue after + * this sees the new tx_ring->tail. 
+ */ + smp_mb(); + netif_wake_queue(priv->ndev); + } + + if (priv->tx_coalesce_usecs_irq) + hrtimer_start(&priv->tx_irq_timer, + ns_to_ktime(priv->tx_coalesce_usecs_irq * + NSEC_PER_USEC), + HRTIMER_MODE_REL); + + return 0; +} diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c new file mode 100644 index 000000000000..413a5cb75c13 --- /dev/null +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// mcp251xfd - Microchip MCP251xFD Family CAN controller driver +// +// Copyright (c) 2021, 2023 Pengutronix, +// Marc Kleine-Budde <kernel@pengutronix.de> +// + +#include <linux/clocksource.h> +#include <linux/workqueue.h> + +#include "mcp251xfd.h" + +static u64 mcp251xfd_timestamp_raw_read(struct cyclecounter *cc) +{ + const struct mcp251xfd_priv *priv; + u32 ts_raw = 0; + int err; + + priv = container_of(cc, struct mcp251xfd_priv, cc); + err = mcp251xfd_get_timestamp_raw(priv, &ts_raw); + if (err) + netdev_err(priv->ndev, + "Error %d while reading timestamp. HW timestamps may be inaccurate.", + err); + + return ts_raw; +} + +static void mcp251xfd_timestamp_work(struct work_struct *work) +{ + struct delayed_work *delayed_work = to_delayed_work(work); + struct mcp251xfd_priv *priv; + + priv = container_of(delayed_work, struct mcp251xfd_priv, timestamp); + timecounter_read(&priv->tc); + + schedule_delayed_work(&priv->timestamp, + MCP251XFD_TIMESTAMP_WORK_DELAY_SEC * HZ); +} + +void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv) +{ + struct cyclecounter *cc = &priv->cc; + + cc->read = mcp251xfd_timestamp_raw_read; + cc->mask = CYCLECOUNTER_MASK(32); + cc->shift = 1; + cc->mult = clocksource_hz2mult(priv->can.clock.freq, cc->shift); + + INIT_DELAYED_WORK(&priv->timestamp, mcp251xfd_timestamp_work); +} + +void mcp251xfd_timestamp_start(struct mcp251xfd_priv *priv) +{ + timecounter_init(&priv->tc, &priv->cc, ktime_get_real_ns()); + schedule_delayed_work(&priv->timestamp, + MCP251XFD_TIMESTAMP_WORK_DELAY_SEC * HZ); +} + +void mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv) +{ + cancel_delayed_work_sync(&priv->timestamp); +} diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c new file mode 100644 index 000000000000..747ae3e8a768 --- /dev/null +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c @@ -0,0 +1,244 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// mcp251xfd - Microchip MCP251xFD Family CAN controller driver +// +// Copyright (c) 2019, 2020, 2021 Pengutronix, +// Marc Kleine-Budde <kernel@pengutronix.de> +// +// Based on: +// +// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface +// +// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org> +// + +#include <linux/unaligned.h> +#include <linux/bitfield.h> + +#include "mcp251xfd.h" + +static inline struct +mcp251xfd_tx_obj *mcp251xfd_get_tx_obj_next(struct mcp251xfd_tx_ring *tx_ring) +{ + u8 tx_head; + + tx_head = mcp251xfd_get_tx_head(tx_ring); + + return &tx_ring->obj[tx_head]; +} + +static void +mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv, + struct mcp251xfd_tx_obj *tx_obj, + const struct sk_buff *skb, + unsigned int seq) +{ + const struct canfd_frame *cfd = (struct canfd_frame *)skb->data; + struct mcp251xfd_hw_tx_obj_raw *hw_tx_obj; + union mcp251xfd_tx_obj_load_buf *load_buf; + u8 dlc; + u32 id, flags; + int len_sanitized = 0, len; + + if (cfd->can_id & CAN_EFF_FLAG) { + u32 sid, eid; + + sid = 
FIELD_GET(MCP251XFD_REG_FRAME_EFF_SID_MASK, cfd->can_id); + eid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_EID_MASK, cfd->can_id); + + id = FIELD_PREP(MCP251XFD_OBJ_ID_EID_MASK, eid) | + FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, sid); + + flags = MCP251XFD_OBJ_FLAGS_IDE; + } else { + id = FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, cfd->can_id); + flags = 0; + } + + /* Use the MCP2518FD mask even on the MCP2517FD. It doesn't + * harm, only the lower 7 bits will be transferred into the + * TEF object. + */ + flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, seq); + + if (cfd->can_id & CAN_RTR_FLAG) + flags |= MCP251XFD_OBJ_FLAGS_RTR; + else + len_sanitized = canfd_sanitize_len(cfd->len); + + /* CANFD */ + if (can_is_canfd_skb(skb)) { + if (cfd->flags & CANFD_ESI) + flags |= MCP251XFD_OBJ_FLAGS_ESI; + + flags |= MCP251XFD_OBJ_FLAGS_FDF; + + if (cfd->flags & CANFD_BRS) + flags |= MCP251XFD_OBJ_FLAGS_BRS; + + dlc = can_fd_len2dlc(cfd->len); + } else { + dlc = can_get_cc_dlc((struct can_frame *)cfd, + priv->can.ctrlmode); + } + + flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_DLC_MASK, dlc); + + load_buf = &tx_obj->buf; + if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX) + hw_tx_obj = &load_buf->crc.hw_tx_obj; + else + hw_tx_obj = &load_buf->nocrc.hw_tx_obj; + + put_unaligned_le32(id, &hw_tx_obj->id); + put_unaligned_le32(flags, &hw_tx_obj->flags); + + /* Copy data */ + memcpy(hw_tx_obj->data, cfd->data, cfd->len); + + /* Clear unused data at end of CAN frame */ + if (MCP251XFD_SANITIZE_CAN && len_sanitized) { + int pad_len; + + pad_len = len_sanitized - cfd->len; + if (pad_len) + memset(hw_tx_obj->data + cfd->len, 0x0, pad_len); + } + + /* Number of bytes to be written into the RAM of the controller */ + len = sizeof(hw_tx_obj->id) + sizeof(hw_tx_obj->flags); + if (MCP251XFD_SANITIZE_CAN) + len += round_up(len_sanitized, sizeof(u32)); + else + len += round_up(cfd->len, sizeof(u32)); + + if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX) { + u16 crc; + + mcp251xfd_spi_cmd_crc_set_len_in_ram(&load_buf->crc.cmd, + len); + /* CRC */ + len += sizeof(load_buf->crc.cmd); + crc = mcp251xfd_crc16_compute(&load_buf->crc, len); + put_unaligned_be16(crc, (void *)load_buf + len); + + /* Total length */ + len += sizeof(load_buf->crc.crc); + } else { + len += sizeof(load_buf->nocrc.cmd); + } + + tx_obj->xfer[0].len = len; +} + +static void mcp251xfd_tx_failure_drop(const struct mcp251xfd_priv *priv, + struct mcp251xfd_tx_ring *tx_ring, + int err) +{ + struct net_device *ndev = priv->ndev; + struct net_device_stats *stats = &ndev->stats; + unsigned int frame_len = 0; + u8 tx_head; + + tx_ring->head--; + stats->tx_dropped++; + tx_head = mcp251xfd_get_tx_head(tx_ring); + can_free_echo_skb(ndev, tx_head, &frame_len); + netdev_completed_queue(ndev, 1, frame_len); + netif_wake_queue(ndev); + + if (net_ratelimit()) + netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err); +} + +void mcp251xfd_tx_obj_write_sync(struct work_struct *work) +{ + struct mcp251xfd_priv *priv = container_of(work, struct mcp251xfd_priv, + tx_work); + struct mcp251xfd_tx_obj *tx_obj = priv->tx_work_obj; + struct mcp251xfd_tx_ring *tx_ring = priv->tx; + int err; + + err = spi_sync(priv->spi, &tx_obj->msg); + if (err) + mcp251xfd_tx_failure_drop(priv, tx_ring, err); +} + +static int mcp251xfd_tx_obj_write(const struct mcp251xfd_priv *priv, + struct mcp251xfd_tx_obj *tx_obj) +{ + return spi_async(priv->spi, &tx_obj->msg); +} + +static bool mcp251xfd_tx_busy(const struct mcp251xfd_priv *priv, + struct mcp251xfd_tx_ring *tx_ring) +{ + if 
(mcp251xfd_get_tx_free(tx_ring) > 0) + return false; + + netif_stop_queue(priv->ndev); + + /* Memory barrier before checking tx_free (head and tail) */ + smp_mb(); + + if (mcp251xfd_get_tx_free(tx_ring) == 0) { + netdev_dbg(priv->ndev, + "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n", + tx_ring->head, tx_ring->tail, + tx_ring->head - tx_ring->tail); + + return true; + } + + netif_start_queue(priv->ndev); + + return false; +} + +static bool mcp251xfd_work_busy(struct work_struct *work) +{ + return work_busy(work); +} + +netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb, + struct net_device *ndev) +{ + struct mcp251xfd_priv *priv = netdev_priv(ndev); + struct mcp251xfd_tx_ring *tx_ring = priv->tx; + struct mcp251xfd_tx_obj *tx_obj; + unsigned int frame_len; + u8 tx_head; + int err; + + if (can_dev_dropped_skb(ndev, skb)) + return NETDEV_TX_OK; + + if (mcp251xfd_tx_busy(priv, tx_ring) || + mcp251xfd_work_busy(&priv->tx_work)) + return NETDEV_TX_BUSY; + + tx_obj = mcp251xfd_get_tx_obj_next(tx_ring); + mcp251xfd_tx_obj_from_skb(priv, tx_obj, skb, tx_ring->head); + + /* Stop queue if we occupy the complete TX FIFO */ + tx_head = mcp251xfd_get_tx_head(tx_ring); + tx_ring->head++; + if (mcp251xfd_get_tx_free(tx_ring) == 0) + netif_stop_queue(ndev); + + frame_len = can_skb_get_frame_len(skb); + err = can_put_echo_skb(skb, ndev, tx_head, frame_len); + if (!err) + netdev_sent_queue(priv->ndev, frame_len); + + err = mcp251xfd_tx_obj_write(priv, tx_obj); + if (err == -EBUSY) { + netif_stop_queue(ndev); + priv->tx_work_obj = tx_obj; + queue_work(priv->wq, &priv->tx_work); + } else if (err) { + mcp251xfd_tx_failure_drop(priv, tx_ring, err); + } + + return NETDEV_TX_OK; +} diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h new file mode 100644 index 000000000000..085d7101e595 --- /dev/null +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h @@ -0,0 +1,983 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * mcp251xfd - Microchip MCP251xFD Family CAN controller driver + * + * Copyright (c) 2019, 2020, 2021, 2023 Pengutronix, + * Marc Kleine-Budde <kernel@pengutronix.de> + * Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org> + */ + +#ifndef _MCP251XFD_H +#define _MCP251XFD_H + +#include <linux/bitfield.h> +#include <linux/can/core.h> +#include <linux/can/dev.h> +#include <linux/can/rx-offload.h> +#include <linux/gpio/consumer.h> +#include <linux/gpio/driver.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/regmap.h> +#include <linux/regulator/consumer.h> +#include <linux/spi/spi.h> +#include <linux/timecounter.h> +#include <linux/workqueue.h> + +/* MPC251x registers */ + +/* CAN FD Controller Module SFR */ +#define MCP251XFD_REG_CON 0x00 +#define MCP251XFD_REG_CON_TXBWS_MASK GENMASK(31, 28) +#define MCP251XFD_REG_CON_ABAT BIT(27) +#define MCP251XFD_REG_CON_REQOP_MASK GENMASK(26, 24) +#define MCP251XFD_REG_CON_MODE_MIXED 0 +#define MCP251XFD_REG_CON_MODE_SLEEP 1 +#define MCP251XFD_REG_CON_MODE_INT_LOOPBACK 2 +#define MCP251XFD_REG_CON_MODE_LISTENONLY 3 +#define MCP251XFD_REG_CON_MODE_CONFIG 4 +#define MCP251XFD_REG_CON_MODE_EXT_LOOPBACK 5 +#define MCP251XFD_REG_CON_MODE_CAN2_0 6 +#define MCP251XFD_REG_CON_MODE_RESTRICTED 7 +#define MCP251XFD_REG_CON_OPMOD_MASK GENMASK(23, 21) +#define MCP251XFD_REG_CON_TXQEN BIT(20) +#define MCP251XFD_REG_CON_STEF BIT(19) +#define MCP251XFD_REG_CON_SERR2LOM BIT(18) +#define MCP251XFD_REG_CON_ESIGM BIT(17) +#define MCP251XFD_REG_CON_RTXAT BIT(16) +#define MCP251XFD_REG_CON_BRSDIS 
BIT(12) +#define MCP251XFD_REG_CON_BUSY BIT(11) +#define MCP251XFD_REG_CON_WFT_MASK GENMASK(10, 9) +#define MCP251XFD_REG_CON_WFT_T00FILTER 0x0 +#define MCP251XFD_REG_CON_WFT_T01FILTER 0x1 +#define MCP251XFD_REG_CON_WFT_T10FILTER 0x2 +#define MCP251XFD_REG_CON_WFT_T11FILTER 0x3 +#define MCP251XFD_REG_CON_WAKFIL BIT(8) +#define MCP251XFD_REG_CON_PXEDIS BIT(6) +#define MCP251XFD_REG_CON_ISOCRCEN BIT(5) +#define MCP251XFD_REG_CON_DNCNT_MASK GENMASK(4, 0) + +#define MCP251XFD_REG_NBTCFG 0x04 +#define MCP251XFD_REG_NBTCFG_BRP_MASK GENMASK(31, 24) +#define MCP251XFD_REG_NBTCFG_TSEG1_MASK GENMASK(23, 16) +#define MCP251XFD_REG_NBTCFG_TSEG2_MASK GENMASK(14, 8) +#define MCP251XFD_REG_NBTCFG_SJW_MASK GENMASK(6, 0) + +#define MCP251XFD_REG_DBTCFG 0x08 +#define MCP251XFD_REG_DBTCFG_BRP_MASK GENMASK(31, 24) +#define MCP251XFD_REG_DBTCFG_TSEG1_MASK GENMASK(20, 16) +#define MCP251XFD_REG_DBTCFG_TSEG2_MASK GENMASK(11, 8) +#define MCP251XFD_REG_DBTCFG_SJW_MASK GENMASK(3, 0) + +#define MCP251XFD_REG_TDC 0x0c +#define MCP251XFD_REG_TDC_EDGFLTEN BIT(25) +#define MCP251XFD_REG_TDC_SID11EN BIT(24) +#define MCP251XFD_REG_TDC_TDCMOD_MASK GENMASK(17, 16) +#define MCP251XFD_REG_TDC_TDCMOD_AUTO 2 +#define MCP251XFD_REG_TDC_TDCMOD_MANUAL 1 +#define MCP251XFD_REG_TDC_TDCMOD_DISABLED 0 +#define MCP251XFD_REG_TDC_TDCO_MASK GENMASK(14, 8) +#define MCP251XFD_REG_TDC_TDCV_MASK GENMASK(5, 0) + +#define MCP251XFD_REG_TBC 0x10 + +#define MCP251XFD_REG_TSCON 0x14 +#define MCP251XFD_REG_TSCON_TSRES BIT(18) +#define MCP251XFD_REG_TSCON_TSEOF BIT(17) +#define MCP251XFD_REG_TSCON_TBCEN BIT(16) +#define MCP251XFD_REG_TSCON_TBCPRE_MASK GENMASK(9, 0) + +#define MCP251XFD_REG_VEC 0x18 +#define MCP251XFD_REG_VEC_RXCODE_MASK GENMASK(30, 24) +#define MCP251XFD_REG_VEC_TXCODE_MASK GENMASK(22, 16) +#define MCP251XFD_REG_VEC_FILHIT_MASK GENMASK(12, 8) +#define MCP251XFD_REG_VEC_ICODE_MASK GENMASK(6, 0) + +#define MCP251XFD_REG_INT 0x1c +#define MCP251XFD_REG_INT_IF_MASK GENMASK(15, 0) +#define MCP251XFD_REG_INT_IE_MASK GENMASK(31, 16) +#define MCP251XFD_REG_INT_IVMIE BIT(31) +#define MCP251XFD_REG_INT_WAKIE BIT(30) +#define MCP251XFD_REG_INT_CERRIE BIT(29) +#define MCP251XFD_REG_INT_SERRIE BIT(28) +#define MCP251XFD_REG_INT_RXOVIE BIT(27) +#define MCP251XFD_REG_INT_TXATIE BIT(26) +#define MCP251XFD_REG_INT_SPICRCIE BIT(25) +#define MCP251XFD_REG_INT_ECCIE BIT(24) +#define MCP251XFD_REG_INT_TEFIE BIT(20) +#define MCP251XFD_REG_INT_MODIE BIT(19) +#define MCP251XFD_REG_INT_TBCIE BIT(18) +#define MCP251XFD_REG_INT_RXIE BIT(17) +#define MCP251XFD_REG_INT_TXIE BIT(16) +#define MCP251XFD_REG_INT_IVMIF BIT(15) +#define MCP251XFD_REG_INT_WAKIF BIT(14) +#define MCP251XFD_REG_INT_CERRIF BIT(13) +#define MCP251XFD_REG_INT_SERRIF BIT(12) +#define MCP251XFD_REG_INT_RXOVIF BIT(11) +#define MCP251XFD_REG_INT_TXATIF BIT(10) +#define MCP251XFD_REG_INT_SPICRCIF BIT(9) +#define MCP251XFD_REG_INT_ECCIF BIT(8) +#define MCP251XFD_REG_INT_TEFIF BIT(4) +#define MCP251XFD_REG_INT_MODIF BIT(3) +#define MCP251XFD_REG_INT_TBCIF BIT(2) +#define MCP251XFD_REG_INT_RXIF BIT(1) +#define MCP251XFD_REG_INT_TXIF BIT(0) +/* These IRQ flags must be cleared by SW in the CAN_INT register */ +#define MCP251XFD_REG_INT_IF_CLEARABLE_MASK \ + (MCP251XFD_REG_INT_IVMIF | MCP251XFD_REG_INT_WAKIF | \ + MCP251XFD_REG_INT_CERRIF | MCP251XFD_REG_INT_SERRIF | \ + MCP251XFD_REG_INT_MODIF) + +#define MCP251XFD_REG_RXIF 0x20 +#define MCP251XFD_REG_TXIF 0x24 +#define MCP251XFD_REG_RXOVIF 0x28 +#define MCP251XFD_REG_TXATIF 0x2c +#define MCP251XFD_REG_TXREQ 0x30 + +#define MCP251XFD_REG_TREC 0x34 
+#define MCP251XFD_REG_TREC_TXBO BIT(21) +#define MCP251XFD_REG_TREC_TXBP BIT(20) +#define MCP251XFD_REG_TREC_RXBP BIT(19) +#define MCP251XFD_REG_TREC_TXWARN BIT(18) +#define MCP251XFD_REG_TREC_RXWARN BIT(17) +#define MCP251XFD_REG_TREC_EWARN BIT(16) +#define MCP251XFD_REG_TREC_TEC_MASK GENMASK(15, 8) +#define MCP251XFD_REG_TREC_REC_MASK GENMASK(7, 0) + +#define MCP251XFD_REG_BDIAG0 0x38 +#define MCP251XFD_REG_BDIAG0_DTERRCNT_MASK GENMASK(31, 24) +#define MCP251XFD_REG_BDIAG0_DRERRCNT_MASK GENMASK(23, 16) +#define MCP251XFD_REG_BDIAG0_NTERRCNT_MASK GENMASK(15, 8) +#define MCP251XFD_REG_BDIAG0_NRERRCNT_MASK GENMASK(7, 0) + +#define MCP251XFD_REG_BDIAG1 0x3c +#define MCP251XFD_REG_BDIAG1_DLCMM BIT(31) +#define MCP251XFD_REG_BDIAG1_ESI BIT(30) +#define MCP251XFD_REG_BDIAG1_DCRCERR BIT(29) +#define MCP251XFD_REG_BDIAG1_DSTUFERR BIT(28) +#define MCP251XFD_REG_BDIAG1_DFORMERR BIT(27) +#define MCP251XFD_REG_BDIAG1_DBIT1ERR BIT(25) +#define MCP251XFD_REG_BDIAG1_DBIT0ERR BIT(24) +#define MCP251XFD_REG_BDIAG1_TXBOERR BIT(23) +#define MCP251XFD_REG_BDIAG1_NCRCERR BIT(21) +#define MCP251XFD_REG_BDIAG1_NSTUFERR BIT(20) +#define MCP251XFD_REG_BDIAG1_NFORMERR BIT(19) +#define MCP251XFD_REG_BDIAG1_NACKERR BIT(18) +#define MCP251XFD_REG_BDIAG1_NBIT1ERR BIT(17) +#define MCP251XFD_REG_BDIAG1_NBIT0ERR BIT(16) +#define MCP251XFD_REG_BDIAG1_BERR_MASK \ + (MCP251XFD_REG_BDIAG1_DLCMM | MCP251XFD_REG_BDIAG1_ESI | \ + MCP251XFD_REG_BDIAG1_DCRCERR | MCP251XFD_REG_BDIAG1_DSTUFERR | \ + MCP251XFD_REG_BDIAG1_DFORMERR | MCP251XFD_REG_BDIAG1_DBIT1ERR | \ + MCP251XFD_REG_BDIAG1_DBIT0ERR | MCP251XFD_REG_BDIAG1_TXBOERR | \ + MCP251XFD_REG_BDIAG1_NCRCERR | MCP251XFD_REG_BDIAG1_NSTUFERR | \ + MCP251XFD_REG_BDIAG1_NFORMERR | MCP251XFD_REG_BDIAG1_NACKERR | \ + MCP251XFD_REG_BDIAG1_NBIT1ERR | MCP251XFD_REG_BDIAG1_NBIT0ERR) +#define MCP251XFD_REG_BDIAG1_EFMSGCNT_MASK GENMASK(15, 0) + +#define MCP251XFD_REG_TEFCON 0x40 +#define MCP251XFD_REG_TEFCON_FSIZE_MASK GENMASK(28, 24) +#define MCP251XFD_REG_TEFCON_FRESET BIT(10) +#define MCP251XFD_REG_TEFCON_UINC BIT(8) +#define MCP251XFD_REG_TEFCON_TEFTSEN BIT(5) +#define MCP251XFD_REG_TEFCON_TEFOVIE BIT(3) +#define MCP251XFD_REG_TEFCON_TEFFIE BIT(2) +#define MCP251XFD_REG_TEFCON_TEFHIE BIT(1) +#define MCP251XFD_REG_TEFCON_TEFNEIE BIT(0) + +#define MCP251XFD_REG_TEFSTA 0x44 +#define MCP251XFD_REG_TEFSTA_TEFOVIF BIT(3) +#define MCP251XFD_REG_TEFSTA_TEFFIF BIT(2) +#define MCP251XFD_REG_TEFSTA_TEFHIF BIT(1) +#define MCP251XFD_REG_TEFSTA_TEFNEIF BIT(0) + +#define MCP251XFD_REG_TEFUA 0x48 + +#define MCP251XFD_REG_TXQCON 0x50 +#define MCP251XFD_REG_TXQCON_PLSIZE_MASK GENMASK(31, 29) +#define MCP251XFD_REG_TXQCON_PLSIZE_8 0 +#define MCP251XFD_REG_TXQCON_PLSIZE_12 1 +#define MCP251XFD_REG_TXQCON_PLSIZE_16 2 +#define MCP251XFD_REG_TXQCON_PLSIZE_20 3 +#define MCP251XFD_REG_TXQCON_PLSIZE_24 4 +#define MCP251XFD_REG_TXQCON_PLSIZE_32 5 +#define MCP251XFD_REG_TXQCON_PLSIZE_48 6 +#define MCP251XFD_REG_TXQCON_PLSIZE_64 7 +#define MCP251XFD_REG_TXQCON_FSIZE_MASK GENMASK(28, 24) +#define MCP251XFD_REG_TXQCON_TXAT_UNLIMITED 3 +#define MCP251XFD_REG_TXQCON_TXAT_THREE_SHOT 1 +#define MCP251XFD_REG_TXQCON_TXAT_ONE_SHOT 0 +#define MCP251XFD_REG_TXQCON_TXAT_MASK GENMASK(22, 21) +#define MCP251XFD_REG_TXQCON_TXPRI_MASK GENMASK(20, 16) +#define MCP251XFD_REG_TXQCON_FRESET BIT(10) +#define MCP251XFD_REG_TXQCON_TXREQ BIT(9) +#define MCP251XFD_REG_TXQCON_UINC BIT(8) +#define MCP251XFD_REG_TXQCON_TXEN BIT(7) +#define MCP251XFD_REG_TXQCON_TXATIE BIT(4) +#define MCP251XFD_REG_TXQCON_TXQEIE BIT(2) +#define 
MCP251XFD_REG_TXQCON_TXQNIE BIT(0) + +#define MCP251XFD_REG_TXQSTA 0x54 +#define MCP251XFD_REG_TXQSTA_TXQCI_MASK GENMASK(12, 8) +#define MCP251XFD_REG_TXQSTA_TXABT BIT(7) +#define MCP251XFD_REG_TXQSTA_TXLARB BIT(6) +#define MCP251XFD_REG_TXQSTA_TXERR BIT(5) +#define MCP251XFD_REG_TXQSTA_TXATIF BIT(4) +#define MCP251XFD_REG_TXQSTA_TXQEIF BIT(2) +#define MCP251XFD_REG_TXQSTA_TXQNIF BIT(0) + +#define MCP251XFD_REG_TXQUA 0x58 + +#define MCP251XFD_REG_FIFOCON(x) (0x50 + 0xc * (x)) +#define MCP251XFD_REG_FIFOCON_PLSIZE_MASK GENMASK(31, 29) +#define MCP251XFD_REG_FIFOCON_PLSIZE_8 0 +#define MCP251XFD_REG_FIFOCON_PLSIZE_12 1 +#define MCP251XFD_REG_FIFOCON_PLSIZE_16 2 +#define MCP251XFD_REG_FIFOCON_PLSIZE_20 3 +#define MCP251XFD_REG_FIFOCON_PLSIZE_24 4 +#define MCP251XFD_REG_FIFOCON_PLSIZE_32 5 +#define MCP251XFD_REG_FIFOCON_PLSIZE_48 6 +#define MCP251XFD_REG_FIFOCON_PLSIZE_64 7 +#define MCP251XFD_REG_FIFOCON_FSIZE_MASK GENMASK(28, 24) +#define MCP251XFD_REG_FIFOCON_TXAT_MASK GENMASK(22, 21) +#define MCP251XFD_REG_FIFOCON_TXAT_ONE_SHOT 0 +#define MCP251XFD_REG_FIFOCON_TXAT_THREE_SHOT 1 +#define MCP251XFD_REG_FIFOCON_TXAT_UNLIMITED 3 +#define MCP251XFD_REG_FIFOCON_TXPRI_MASK GENMASK(20, 16) +#define MCP251XFD_REG_FIFOCON_FRESET BIT(10) +#define MCP251XFD_REG_FIFOCON_TXREQ BIT(9) +#define MCP251XFD_REG_FIFOCON_UINC BIT(8) +#define MCP251XFD_REG_FIFOCON_TXEN BIT(7) +#define MCP251XFD_REG_FIFOCON_RTREN BIT(6) +#define MCP251XFD_REG_FIFOCON_RXTSEN BIT(5) +#define MCP251XFD_REG_FIFOCON_TXATIE BIT(4) +#define MCP251XFD_REG_FIFOCON_RXOVIE BIT(3) +#define MCP251XFD_REG_FIFOCON_TFERFFIE BIT(2) +#define MCP251XFD_REG_FIFOCON_TFHRFHIE BIT(1) +#define MCP251XFD_REG_FIFOCON_TFNRFNIE BIT(0) + +#define MCP251XFD_REG_FIFOSTA(x) (0x54 + 0xc * (x)) +#define MCP251XFD_REG_FIFOSTA_FIFOCI_MASK GENMASK(12, 8) +#define MCP251XFD_REG_FIFOSTA_TXABT BIT(7) +#define MCP251XFD_REG_FIFOSTA_TXLARB BIT(6) +#define MCP251XFD_REG_FIFOSTA_TXERR BIT(5) +#define MCP251XFD_REG_FIFOSTA_TXATIF BIT(4) +#define MCP251XFD_REG_FIFOSTA_RXOVIF BIT(3) +#define MCP251XFD_REG_FIFOSTA_TFERFFIF BIT(2) +#define MCP251XFD_REG_FIFOSTA_TFHRFHIF BIT(1) +#define MCP251XFD_REG_FIFOSTA_TFNRFNIF BIT(0) + +#define MCP251XFD_REG_FIFOUA(x) (0x58 + 0xc * (x)) + +#define MCP251XFD_REG_FLTCON(x) (0x1d0 + 0x4 * (x)) +#define MCP251XFD_REG_FLTCON_FLTEN3 BIT(31) +#define MCP251XFD_REG_FLTCON_F3BP_MASK GENMASK(28, 24) +#define MCP251XFD_REG_FLTCON_FLTEN2 BIT(23) +#define MCP251XFD_REG_FLTCON_F2BP_MASK GENMASK(20, 16) +#define MCP251XFD_REG_FLTCON_FLTEN1 BIT(15) +#define MCP251XFD_REG_FLTCON_F1BP_MASK GENMASK(12, 8) +#define MCP251XFD_REG_FLTCON_FLTEN0 BIT(7) +#define MCP251XFD_REG_FLTCON_F0BP_MASK GENMASK(4, 0) +#define MCP251XFD_REG_FLTCON_FLTEN(x) (BIT(7) << 8 * ((x) & 0x3)) +#define MCP251XFD_REG_FLTCON_FLT_MASK(x) (GENMASK(7, 0) << (8 * ((x) & 0x3))) +#define MCP251XFD_REG_FLTCON_FBP(x, fifo) ((fifo) << 8 * ((x) & 0x3)) + +#define MCP251XFD_REG_FLTOBJ(x) (0x1f0 + 0x8 * (x)) +#define MCP251XFD_REG_FLTOBJ_EXIDE BIT(30) +#define MCP251XFD_REG_FLTOBJ_SID11 BIT(29) +#define MCP251XFD_REG_FLTOBJ_EID_MASK GENMASK(28, 11) +#define MCP251XFD_REG_FLTOBJ_SID_MASK GENMASK(10, 0) + +#define MCP251XFD_REG_FLTMASK(x) (0x1f4 + 0x8 * (x)) +#define MCP251XFD_REG_MASK_MIDE BIT(30) +#define MCP251XFD_REG_MASK_MSID11 BIT(29) +#define MCP251XFD_REG_MASK_MEID_MASK GENMASK(28, 11) +#define MCP251XFD_REG_MASK_MSID_MASK GENMASK(10, 0) + +/* RAM */ +#define MCP251XFD_RAM_START 0x400 +#define MCP251XFD_RAM_SIZE SZ_2K + +/* Message Object */ +#define MCP251XFD_OBJ_ID_SID11 BIT(29) +#define 
MCP251XFD_OBJ_ID_EID_MASK GENMASK(28, 11) +#define MCP251XFD_OBJ_ID_SID_MASK GENMASK(10, 0) +#define MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK GENMASK(31, 9) +#define MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK GENMASK(15, 9) +#define MCP251XFD_OBJ_FLAGS_SEQ_MASK MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK +#define MCP251XFD_OBJ_FLAGS_ESI BIT(8) +#define MCP251XFD_OBJ_FLAGS_FDF BIT(7) +#define MCP251XFD_OBJ_FLAGS_BRS BIT(6) +#define MCP251XFD_OBJ_FLAGS_RTR BIT(5) +#define MCP251XFD_OBJ_FLAGS_IDE BIT(4) +#define MCP251XFD_OBJ_FLAGS_DLC_MASK GENMASK(3, 0) + +#define MCP251XFD_REG_FRAME_EFF_SID_MASK GENMASK(28, 18) +#define MCP251XFD_REG_FRAME_EFF_EID_MASK GENMASK(17, 0) + +/* MCP2517/18FD SFR */ +#define MCP251XFD_REG_OSC 0xe00 +#define MCP251XFD_REG_OSC_SCLKRDY BIT(12) +#define MCP251XFD_REG_OSC_OSCRDY BIT(10) +#define MCP251XFD_REG_OSC_PLLRDY BIT(8) +#define MCP251XFD_REG_OSC_CLKODIV_10 3 +#define MCP251XFD_REG_OSC_CLKODIV_4 2 +#define MCP251XFD_REG_OSC_CLKODIV_2 1 +#define MCP251XFD_REG_OSC_CLKODIV_1 0 +#define MCP251XFD_REG_OSC_CLKODIV_MASK GENMASK(6, 5) +#define MCP251XFD_REG_OSC_SCLKDIV BIT(4) +#define MCP251XFD_REG_OSC_LPMEN BIT(3) /* MCP2518FD only */ +#define MCP251XFD_REG_OSC_OSCDIS BIT(2) +#define MCP251XFD_REG_OSC_PLLEN BIT(0) + +#define MCP251XFD_REG_IOCON 0xe04 +#define MCP251XFD_REG_IOCON_INTOD BIT(30) +#define MCP251XFD_REG_IOCON_SOF BIT(29) +#define MCP251XFD_REG_IOCON_TXCANOD BIT(28) +#define MCP251XFD_REG_IOCON_PM1 BIT(25) +#define MCP251XFD_REG_IOCON_PM0 BIT(24) +#define MCP251XFD_REG_IOCON_PM(n) (MCP251XFD_REG_IOCON_PM0 << (n)) +#define MCP251XFD_REG_IOCON_GPIO1 BIT(17) +#define MCP251XFD_REG_IOCON_GPIO0 BIT(16) +#define MCP251XFD_REG_IOCON_GPIO(n) (MCP251XFD_REG_IOCON_GPIO0 << (n)) +#define MCP251XFD_REG_IOCON_GPIO_MASK GENMASK(17, 16) +#define MCP251XFD_REG_IOCON_LAT1 BIT(9) +#define MCP251XFD_REG_IOCON_LAT0 BIT(8) +#define MCP251XFD_REG_IOCON_LAT(n) (MCP251XFD_REG_IOCON_LAT0 << (n)) +#define MCP251XFD_REG_IOCON_LAT_MASK GENMASK(9, 8) +#define MCP251XFD_REG_IOCON_XSTBYEN BIT(6) +#define MCP251XFD_REG_IOCON_TRIS1 BIT(1) +#define MCP251XFD_REG_IOCON_TRIS0 BIT(0) +#define MCP251XFD_REG_IOCON_TRIS(n) (MCP251XFD_REG_IOCON_TRIS0 << (n)) + +#define MCP251XFD_REG_CRC 0xe08 +#define MCP251XFD_REG_CRC_FERRIE BIT(25) +#define MCP251XFD_REG_CRC_CRCERRIE BIT(24) +#define MCP251XFD_REG_CRC_FERRIF BIT(17) +#define MCP251XFD_REG_CRC_CRCERRIF BIT(16) +#define MCP251XFD_REG_CRC_IF_MASK GENMASK(17, 16) +#define MCP251XFD_REG_CRC_MASK GENMASK(15, 0) + +#define MCP251XFD_REG_ECCCON 0xe0c +#define MCP251XFD_REG_ECCCON_PARITY_MASK GENMASK(14, 8) +#define MCP251XFD_REG_ECCCON_DEDIE BIT(2) +#define MCP251XFD_REG_ECCCON_SECIE BIT(1) +#define MCP251XFD_REG_ECCCON_ECCEN BIT(0) + +#define MCP251XFD_REG_ECCSTAT 0xe10 +#define MCP251XFD_REG_ECCSTAT_ERRADDR_MASK GENMASK(27, 16) +#define MCP251XFD_REG_ECCSTAT_IF_MASK GENMASK(2, 1) +#define MCP251XFD_REG_ECCSTAT_DEDIF BIT(2) +#define MCP251XFD_REG_ECCSTAT_SECIF BIT(1) + +#define MCP251XFD_REG_DEVID 0xe14 /* MCP2518FD only */ +#define MCP251XFD_REG_DEVID_ID_MASK GENMASK(7, 4) +#define MCP251XFD_REG_DEVID_REV_MASK GENMASK(3, 0) + +/* SPI commands */ +#define MCP251XFD_SPI_INSTRUCTION_RESET 0x0000 +#define MCP251XFD_SPI_INSTRUCTION_WRITE 0x2000 +#define MCP251XFD_SPI_INSTRUCTION_READ 0x3000 +#define MCP251XFD_SPI_INSTRUCTION_WRITE_CRC 0xa000 +#define MCP251XFD_SPI_INSTRUCTION_READ_CRC 0xb000 +#define MCP251XFD_SPI_INSTRUCTION_WRITE_CRC_SAFE 0xc000 +#define MCP251XFD_SPI_ADDRESS_MASK GENMASK(11, 0) + +#define MCP251XFD_SYSCLOCK_HZ_MAX 40000000 +#define 
MCP251XFD_SYSCLOCK_HZ_MIN 1000000 +#define MCP251XFD_SPICLOCK_HZ_MAX 20000000 +#define MCP251XFD_TIMESTAMP_WORK_DELAY_SEC 45 +static_assert(MCP251XFD_TIMESTAMP_WORK_DELAY_SEC < + CYCLECOUNTER_MASK(32) / MCP251XFD_SYSCLOCK_HZ_MAX / 2); +#define MCP251XFD_OSC_PLL_MULTIPLIER 10 +#define MCP251XFD_OSC_STAB_SLEEP_US (3 * USEC_PER_MSEC) +#define MCP251XFD_OSC_STAB_TIMEOUT_US (10 * MCP251XFD_OSC_STAB_SLEEP_US) +#define MCP251XFD_POLL_SLEEP_US (10) +#define MCP251XFD_POLL_TIMEOUT_US (USEC_PER_MSEC) +#define MCP251XFD_FRAME_LEN_MAX_BITS (736) + +/* Misc */ +#define MCP251XFD_NAPI_WEIGHT 32 +#define MCP251XFD_SOFTRESET_RETRIES_MAX 3 +#define MCP251XFD_READ_CRC_RETRIES_MAX 3 +#define MCP251XFD_ECC_CNT_MAX 2 +#define MCP251XFD_SANITIZE_SPI 1 +#define MCP251XFD_SANITIZE_CAN 1 + +/* FIFO and Ring */ +#define MCP251XFD_FIFO_TEF_NUM 1U +#define MCP251XFD_FIFO_RX_NUM 3U +#define MCP251XFD_FIFO_TX_NUM 1U + +#define MCP251XFD_FIFO_DEPTH 32U + +#define MCP251XFD_RX_OBJ_NUM_MIN 16U +#define MCP251XFD_RX_OBJ_NUM_MAX (MCP251XFD_FIFO_RX_NUM * MCP251XFD_FIFO_DEPTH) +#define MCP251XFD_RX_FIFO_DEPTH_MIN 4U +#define MCP251XFD_RX_FIFO_DEPTH_COALESCE_MIN 8U + +#define MCP251XFD_TX_OBJ_NUM_MIN 2U +#define MCP251XFD_TX_OBJ_NUM_MAX 16U +#define MCP251XFD_TX_OBJ_NUM_CAN_DEFAULT 8U +#define MCP251XFD_TX_OBJ_NUM_CANFD_DEFAULT 4U +#define MCP251XFD_TX_FIFO_DEPTH_MIN 2U +#define MCP251XFD_TX_FIFO_DEPTH_COALESCE_MIN 2U + +static_assert(MCP251XFD_FIFO_TEF_NUM == 1U); +static_assert(MCP251XFD_FIFO_TEF_NUM == MCP251XFD_FIFO_TX_NUM); +static_assert(MCP251XFD_FIFO_RX_NUM <= 4U); + +/* Silence TX MAB overflow warnings */ +#define MCP251XFD_QUIRK_MAB_NO_WARN BIT(0) +/* Use CRC to access registers */ +#define MCP251XFD_QUIRK_CRC_REG BIT(1) +/* Use CRC to access RX/TEF-RAM */ +#define MCP251XFD_QUIRK_CRC_RX BIT(2) +/* Use CRC to access TX-RAM */ +#define MCP251XFD_QUIRK_CRC_TX BIT(3) +/* Enable ECC for RAM */ +#define MCP251XFD_QUIRK_ECC BIT(4) +/* Use Half Duplex SPI transfers */ +#define MCP251XFD_QUIRK_HALF_DUPLEX BIT(5) + +struct mcp251xfd_hw_tef_obj { + u32 id; + u32 flags; + u32 ts; +}; + +/* The tx_obj_raw version is used in spi async, i.e. without + * regmap. We have to take care of endianness ourselves. 
+ */ +struct __packed mcp251xfd_hw_tx_obj_raw { + __le32 id; + __le32 flags; + u8 data[sizeof_field(struct canfd_frame, data)]; +}; + +struct mcp251xfd_hw_tx_obj_can { + u32 id; + u32 flags; + u8 data[sizeof_field(struct can_frame, data)]; +}; + +struct mcp251xfd_hw_tx_obj_canfd { + u32 id; + u32 flags; + u8 data[sizeof_field(struct canfd_frame, data)]; +}; + +struct mcp251xfd_hw_rx_obj_can { + u32 id; + u32 flags; + u32 ts; + u8 data[sizeof_field(struct can_frame, data)]; +}; + +struct mcp251xfd_hw_rx_obj_canfd { + u32 id; + u32 flags; + u32 ts; + u8 data[sizeof_field(struct canfd_frame, data)]; +}; + +struct __packed mcp251xfd_buf_cmd { + __be16 cmd; +}; + +struct __packed mcp251xfd_buf_cmd_crc { + __be16 cmd; + u8 len; +}; + +union mcp251xfd_tx_obj_load_buf { + struct __packed { + struct mcp251xfd_buf_cmd cmd; + struct mcp251xfd_hw_tx_obj_raw hw_tx_obj; + } nocrc; + struct __packed { + struct mcp251xfd_buf_cmd_crc cmd; + struct mcp251xfd_hw_tx_obj_raw hw_tx_obj; + __be16 crc; + } crc; +} ____cacheline_aligned; + +union mcp251xfd_write_reg_buf { + struct __packed { + struct mcp251xfd_buf_cmd cmd; + u8 data[4]; + } nocrc; + struct __packed { + struct mcp251xfd_buf_cmd_crc cmd; + u8 data[4]; + __be16 crc; + } crc; + struct __packed { + struct mcp251xfd_buf_cmd cmd; + u8 data[1]; + __be16 crc; + } safe; +} ____cacheline_aligned; + +struct mcp251xfd_tx_obj { + struct spi_message msg; + struct spi_transfer xfer[2]; + union mcp251xfd_tx_obj_load_buf buf; +}; + +struct mcp251xfd_tef_ring { + unsigned int head; + unsigned int tail; + + /* u8 obj_num equals tx_ring->obj_num */ + /* u8 obj_size equals sizeof(struct mcp251xfd_hw_tef_obj) */ + /* u8 obj_num_shift_to_u8 equals tx_ring->obj_num_shift_to_u8 */ + + union mcp251xfd_write_reg_buf irq_enable_buf; + struct spi_transfer irq_enable_xfer; + struct spi_message irq_enable_msg; + + union mcp251xfd_write_reg_buf uinc_buf; + union mcp251xfd_write_reg_buf uinc_irq_disable_buf; + struct spi_transfer uinc_xfer[MCP251XFD_TX_OBJ_NUM_MAX]; +}; + +struct mcp251xfd_tx_ring { + unsigned int head; + unsigned int tail; + + u16 base; + u8 nr; + u8 fifo_nr; + u8 obj_num; + u8 obj_num_shift_to_u8; + u8 obj_size; + + struct mcp251xfd_tx_obj obj[MCP251XFD_TX_OBJ_NUM_MAX]; + union mcp251xfd_write_reg_buf rts_buf; +}; + +struct mcp251xfd_rx_ring { + unsigned int head; + unsigned int tail; + + /* timestamp of the last valid received CAN frame */ + u64 last_valid; + + u16 base; + u8 nr; + u8 fifo_nr; + u8 obj_num; + u8 obj_num_shift_to_u8; + u8 obj_size; + + union mcp251xfd_write_reg_buf irq_enable_buf; + struct spi_transfer irq_enable_xfer; + struct spi_message irq_enable_msg; + + union mcp251xfd_write_reg_buf uinc_buf; + union mcp251xfd_write_reg_buf uinc_irq_disable_buf; + struct spi_transfer uinc_xfer[MCP251XFD_FIFO_DEPTH]; + struct mcp251xfd_hw_rx_obj_canfd obj[]; +}; + +struct __packed mcp251xfd_map_buf_nocrc { + struct mcp251xfd_buf_cmd cmd; + u8 data[256]; +} ____cacheline_aligned; + +struct __packed mcp251xfd_map_buf_crc { + struct mcp251xfd_buf_cmd_crc cmd; + u8 data[256 - 4]; + __be16 crc; +} ____cacheline_aligned; + +struct mcp251xfd_ecc { + u32 ecc_stat; + int cnt; +}; + +struct mcp251xfd_regs_status { + u32 intf; + u32 rxif; +}; + +enum mcp251xfd_model { + MCP251XFD_MODEL_MCP2517FD = 0x2517, + MCP251XFD_MODEL_MCP2518FD = 0x2518, + MCP251XFD_MODEL_MCP251863 = 0x251863, + MCP251XFD_MODEL_MCP251XFD = 0xffffffff, /* autodetect model */ +}; + +struct mcp251xfd_devtype_data { + enum mcp251xfd_model model; + u32 quirks; +}; + +enum mcp251xfd_flags { + 
MCP251XFD_FLAGS_DOWN, + MCP251XFD_FLAGS_FD_MODE, + + __MCP251XFD_FLAGS_SIZE__ +}; + +struct mcp251xfd_priv { + struct can_priv can; + struct can_rx_offload offload; + struct net_device *ndev; + + struct regmap *map_reg; /* register access */ + struct regmap *map_rx; /* RX/TEF RAM access */ + + struct regmap *map_nocrc; + struct mcp251xfd_map_buf_nocrc *map_buf_nocrc_rx; + struct mcp251xfd_map_buf_nocrc *map_buf_nocrc_tx; + + struct regmap *map_crc; + struct mcp251xfd_map_buf_crc *map_buf_crc_rx; + struct mcp251xfd_map_buf_crc *map_buf_crc_tx; + + struct spi_device *spi; + u32 spi_max_speed_hz_orig; + u32 spi_max_speed_hz_fast; + u32 spi_max_speed_hz_slow; + + struct mcp251xfd_tef_ring tef[MCP251XFD_FIFO_TEF_NUM]; + struct mcp251xfd_rx_ring *rx[MCP251XFD_FIFO_RX_NUM]; + struct mcp251xfd_tx_ring tx[MCP251XFD_FIFO_TX_NUM]; + + struct workqueue_struct *wq; + struct work_struct tx_work; + struct mcp251xfd_tx_obj *tx_work_obj; + + DECLARE_BITMAP(flags, __MCP251XFD_FLAGS_SIZE__); + + u8 rx_ring_num; + u8 rx_obj_num; + u8 rx_obj_num_coalesce_irq; + u8 tx_obj_num_coalesce_irq; + + u32 rx_coalesce_usecs_irq; + u32 tx_coalesce_usecs_irq; + struct hrtimer rx_irq_timer; + struct hrtimer tx_irq_timer; + + struct mcp251xfd_ecc ecc; + struct mcp251xfd_regs_status regs_status; + + struct cyclecounter cc; + struct timecounter tc; + struct delayed_work timestamp; + + struct gpio_desc *rx_int; + struct clk *clk; + bool pll_enable; + struct regulator *reg_vdd; + struct regulator *reg_xceiver; + + struct mcp251xfd_devtype_data devtype_data; + struct can_berr_counter bec; + struct gpio_chip gc; +}; + +#define MCP251XFD_IS(_model) \ +static inline bool \ +mcp251xfd_is_##_model(const struct mcp251xfd_priv *priv) \ +{ \ + return priv->devtype_data.model == MCP251XFD_MODEL_MCP##_model; \ +} + +MCP251XFD_IS(2517FD); +MCP251XFD_IS(2518FD); +MCP251XFD_IS(251863); +MCP251XFD_IS(251XFD); + +static inline bool mcp251xfd_is_fd_mode(const struct mcp251xfd_priv *priv) +{ + /* listen-only mode works like FD mode */ + return priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD); +} + +static inline u8 mcp251xfd_first_byte_set(u32 mask) +{ + return (mask & 0x0000ffff) ? + ((mask & 0x000000ff) ? 0 : 1) : + ((mask & 0x00ff0000) ? 2 : 3); +} + +static inline u8 mcp251xfd_last_byte_set(u32 mask) +{ + return (mask & 0xffff0000) ? + ((mask & 0xff000000) ? 3 : 2) : + ((mask & 0x0000ff00) ? 1 : 0); +} + +static inline __be16 mcp251xfd_cmd_reset(void) +{ + return cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_RESET); +} + +static inline void +mcp251xfd_spi_cmd_read_nocrc(struct mcp251xfd_buf_cmd *cmd, u16 addr) +{ + cmd->cmd = cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_READ | addr); +} + +static inline void +mcp251xfd_spi_cmd_write_nocrc(struct mcp251xfd_buf_cmd *cmd, u16 addr) +{ + cmd->cmd = cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_WRITE | addr); +} + +static inline bool mcp251xfd_reg_in_ram(unsigned int reg) +{ + static const struct regmap_range range = + regmap_reg_range(MCP251XFD_RAM_START, + MCP251XFD_RAM_START + MCP251XFD_RAM_SIZE - 4); + + return regmap_reg_in_range(reg, &range); +} + +static inline void +__mcp251xfd_spi_cmd_crc_set_len(struct mcp251xfd_buf_cmd_crc *cmd, + u16 len, bool in_ram) +{ + /* Number of u32 for RAM access, number of u8 otherwise. 
*/ + if (in_ram) + cmd->len = len >> 2; + else + cmd->len = len; +} + +static inline void +mcp251xfd_spi_cmd_crc_set_len_in_ram(struct mcp251xfd_buf_cmd_crc *cmd, u16 len) +{ + __mcp251xfd_spi_cmd_crc_set_len(cmd, len, true); +} + +static inline void +mcp251xfd_spi_cmd_crc_set_len_in_reg(struct mcp251xfd_buf_cmd_crc *cmd, u16 len) +{ + __mcp251xfd_spi_cmd_crc_set_len(cmd, len, false); +} + +static inline void +mcp251xfd_spi_cmd_read_crc_set_addr(struct mcp251xfd_buf_cmd_crc *cmd, u16 addr) +{ + cmd->cmd = cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_READ_CRC | addr); +} + +static inline void +mcp251xfd_spi_cmd_read_crc(struct mcp251xfd_buf_cmd_crc *cmd, + u16 addr, u16 len) +{ + mcp251xfd_spi_cmd_read_crc_set_addr(cmd, addr); + __mcp251xfd_spi_cmd_crc_set_len(cmd, len, mcp251xfd_reg_in_ram(addr)); +} + +static inline void +mcp251xfd_spi_cmd_write_crc_set_addr(struct mcp251xfd_buf_cmd_crc *cmd, + u16 addr) +{ + cmd->cmd = cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_WRITE_CRC | addr); +} + +static inline void +mcp251xfd_spi_cmd_write_safe_set_addr(struct mcp251xfd_buf_cmd *cmd, + u16 addr) +{ + cmd->cmd = cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_WRITE_CRC_SAFE | addr); +} + +static inline void +mcp251xfd_spi_cmd_write_crc(struct mcp251xfd_buf_cmd_crc *cmd, + u16 addr, u16 len) +{ + mcp251xfd_spi_cmd_write_crc_set_addr(cmd, addr); + __mcp251xfd_spi_cmd_crc_set_len(cmd, len, mcp251xfd_reg_in_ram(addr)); +} + +static inline u8 * +mcp251xfd_spi_cmd_write(const struct mcp251xfd_priv *priv, + union mcp251xfd_write_reg_buf *write_reg_buf, + u16 addr, u8 len) +{ + u8 *data; + + if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) { + if (len == 1) { + mcp251xfd_spi_cmd_write_safe_set_addr(&write_reg_buf->safe.cmd, + addr); + data = write_reg_buf->safe.data; + } else { + mcp251xfd_spi_cmd_write_crc_set_addr(&write_reg_buf->crc.cmd, + addr); + data = write_reg_buf->crc.data; + } + } else { + mcp251xfd_spi_cmd_write_nocrc(&write_reg_buf->nocrc.cmd, + addr); + data = write_reg_buf->nocrc.data; + } + + return data; +} + +static inline int mcp251xfd_get_timestamp_raw(const struct mcp251xfd_priv *priv, + u32 *ts_raw) +{ + return regmap_read(priv->map_reg, MCP251XFD_REG_TBC, ts_raw); +} + +static inline void mcp251xfd_skb_set_timestamp(struct sk_buff *skb, u64 ns) +{ + struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb); + + hwtstamps->hwtstamp = ns_to_ktime(ns); +} + +static inline +void mcp251xfd_skb_set_timestamp_raw(const struct mcp251xfd_priv *priv, + struct sk_buff *skb, u32 ts_raw) +{ + u64 ns; + + ns = timecounter_cyc2time(&priv->tc, ts_raw); + mcp251xfd_skb_set_timestamp(skb, ns); +} + +static inline u16 mcp251xfd_get_tef_obj_addr(u8 n) +{ + return MCP251XFD_RAM_START + + sizeof(struct mcp251xfd_hw_tef_obj) * n; +} + +static inline u16 +mcp251xfd_get_tx_obj_addr(const struct mcp251xfd_tx_ring *ring, u8 n) +{ + return ring->base + ring->obj_size * n; +} + +static inline u16 +mcp251xfd_get_rx_obj_addr(const struct mcp251xfd_rx_ring *ring, u8 n) +{ + return ring->base + ring->obj_size * n; +} + +static inline int +mcp251xfd_tx_tail_get_from_chip(const struct mcp251xfd_priv *priv, + u8 *tx_tail) +{ + u32 fifo_sta; + int err; + + err = regmap_read(priv->map_reg, + MCP251XFD_REG_FIFOSTA(priv->tx->fifo_nr), + &fifo_sta); + if (err) + return err; + + *tx_tail = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta); + + return 0; +} + +static inline u8 mcp251xfd_get_tef_head(const struct mcp251xfd_priv *priv) +{ + return priv->tef->head & (priv->tx->obj_num - 1); +} + +static inline u8 
mcp251xfd_get_tef_tail(const struct mcp251xfd_priv *priv) +{ + return priv->tef->tail & (priv->tx->obj_num - 1); +} + +static inline u8 mcp251xfd_get_tef_linear_len(const struct mcp251xfd_priv *priv, u8 len) +{ + return min_t(u8, len, priv->tx->obj_num - mcp251xfd_get_tef_tail(priv)); +} + +static inline u8 mcp251xfd_get_tx_head(const struct mcp251xfd_tx_ring *ring) +{ + return ring->head & (ring->obj_num - 1); +} + +static inline u8 mcp251xfd_get_tx_tail(const struct mcp251xfd_tx_ring *ring) +{ + return ring->tail & (ring->obj_num - 1); +} + +static inline u8 mcp251xfd_get_tx_free(const struct mcp251xfd_tx_ring *ring) +{ + return ring->obj_num - (ring->head - ring->tail); +} + +static inline int +mcp251xfd_get_tx_nr_by_addr(const struct mcp251xfd_tx_ring *tx_ring, u8 *nr, + u16 addr) +{ + if (addr < mcp251xfd_get_tx_obj_addr(tx_ring, 0) || + addr >= mcp251xfd_get_tx_obj_addr(tx_ring, tx_ring->obj_num)) + return -ENOENT; + + *nr = (addr - mcp251xfd_get_tx_obj_addr(tx_ring, 0)) / + tx_ring->obj_size; + + return 0; +} + +static inline u8 mcp251xfd_get_rx_head(const struct mcp251xfd_rx_ring *ring) +{ + return ring->head & (ring->obj_num - 1); +} + +static inline u8 mcp251xfd_get_rx_tail(const struct mcp251xfd_rx_ring *ring) +{ + return ring->tail & (ring->obj_num - 1); +} + +static inline u8 +mcp251xfd_get_rx_linear_len(const struct mcp251xfd_rx_ring *ring, u8 len) +{ + return min_t(u8, len, ring->obj_num - mcp251xfd_get_rx_tail(ring)); +} + +#define mcp251xfd_for_each_tx_obj(ring, _obj, n) \ + for ((n) = 0, (_obj) = &(ring)->obj[(n)]; \ + (n) < (ring)->obj_num; \ + (n)++, (_obj) = &(ring)->obj[(n)]) + +#define mcp251xfd_for_each_rx_ring(priv, ring, n) \ + for ((n) = 0, (ring) = *((priv)->rx + (n)); \ + (n) < (priv)->rx_ring_num; \ + (n)++, (ring) = *((priv)->rx + (n))) + +int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv); +u16 mcp251xfd_crc16_compute2(const void *cmd, size_t cmd_size, + const void *data, size_t data_size); +u16 mcp251xfd_crc16_compute(const void *data, size_t data_size); +void mcp251xfd_ethtool_init(struct mcp251xfd_priv *priv); +int mcp251xfd_regmap_init(struct mcp251xfd_priv *priv); +extern const struct can_ram_config mcp251xfd_ram_config; +int mcp251xfd_ring_init(struct mcp251xfd_priv *priv); +void mcp251xfd_ring_free(struct mcp251xfd_priv *priv); +int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv); +int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv); +int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv); +void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv); +void mcp251xfd_timestamp_start(struct mcp251xfd_priv *priv); +void mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv); + +void mcp251xfd_tx_obj_write_sync(struct work_struct *work); +netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb, + struct net_device *ndev); + +#if IS_ENABLED(CONFIG_DEV_COREDUMP) +void mcp251xfd_dump(const struct mcp251xfd_priv *priv); +#else +static inline void mcp251xfd_dump(const struct mcp251xfd_priv *priv) +{ +} +#endif + +#endif diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c index 093fc9a529f0..af52285d5a4e 100644 --- a/drivers/net/can/sun4i_can.c +++ b/drivers/net/can/sun4i_can.c @@ -51,16 +51,16 @@ #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> -#include <linux/can/led.h> #include <linux/clk.h> #include <linux/delay.h> +#include <linux/ethtool.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/of_device.h> 
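The head/tail helpers just above never wrap the counters explicitly: head and tail run freely and are only reduced to an array index with `& (obj_num - 1)` when a slot is touched, while the fill level comes from plain unsigned subtraction. That arithmetic assumes the object count is a power of two; the header's TX limits (2 to 16 objects) are consistent with that. Below is a standalone sketch of the same idea, with made-up names and independent of the driver code.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define RING_OBJ_NUM 8u	/* must be a power of two for the masking trick */

struct ring {
	uint32_t head;	/* free-running, bumped on enqueue */
	uint32_t tail;	/* free-running, bumped on dequeue */
};

static uint32_t ring_head_index(const struct ring *r)
{
	return r->head & (RING_OBJ_NUM - 1);
}

static uint32_t ring_tail_index(const struct ring *r)
{
	return r->tail & (RING_OBJ_NUM - 1);
}

static uint32_t ring_free(const struct ring *r)
{
	/* Unsigned wraparound keeps this correct even after the counters
	 * overflow, provided the ring never holds more than RING_OBJ_NUM
	 * entries at once.
	 */
	return RING_OBJ_NUM - (r->head - r->tail);
}

int main(void)
{
	/* Counters close to overflow: two entries currently in flight. */
	struct ring r = { .head = UINT32_MAX - 1, .tail = UINT32_MAX - 3 };

	assert(ring_free(&r) == RING_OBJ_NUM - 2);
	printf("head idx %u, tail idx %u, free %u\n",
	       ring_head_index(&r), ring_tail_index(&r), ring_free(&r));
	return 0;
}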
#include <linux/platform_device.h> +#include <linux/reset.h> #define DRV_NAME "sun4i_can" @@ -90,6 +90,8 @@ #define SUN4I_REG_BUF12_ADDR 0x0070 /* CAN Tx/Rx Buffer 12 */ #define SUN4I_REG_ACPC_ADDR 0x0040 /* CAN Acceptance Code 0 */ #define SUN4I_REG_ACPM_ADDR 0x0044 /* CAN Acceptance Mask 0 */ +#define SUN4I_REG_ACPC_ADDR_D1 0x0028 /* CAN Acceptance Code 0 on the D1 */ +#define SUN4I_REG_ACPM_ADDR_D1 0x002C /* CAN Acceptance Mask 0 on the D1 */ #define SUN4I_REG_RBUF_RBACK_START_ADDR 0x0180 /* CAN transmit buffer start */ #define SUN4I_REG_RBUF_RBACK_END_ADDR 0x01b0 /* CAN transmit buffer end */ @@ -200,11 +202,24 @@ #define SUN4I_CAN_MAX_IRQ 20 #define SUN4I_MODE_MAX_RETRIES 100 +/** + * struct sun4ican_quirks - Differences between SoC variants. + * + * @has_reset: SoC needs reset deasserted. + * @acp_offset: Offset of ACPC and ACPM registers + */ +struct sun4ican_quirks { + bool has_reset; + int acp_offset; +}; + struct sun4ican_priv { struct can_priv can; void __iomem *base; struct clk *clk; + struct reset_control *reset; spinlock_t cmdreg_lock; /* lock for concurrent cmd register writes */ + int acp_offset; }; static const struct can_bittiming_const sun4ican_bittiming_const = { @@ -327,8 +342,8 @@ static int sun4i_can_start(struct net_device *dev) } /* set filters - we accept all */ - writel(0x00000000, priv->base + SUN4I_REG_ACPC_ADDR); - writel(0xFFFFFFFF, priv->base + SUN4I_REG_ACPM_ADDR); + writel(0x00000000, priv->base + SUN4I_REG_ACPC_ADDR + priv->acp_offset); + writel(0xFFFFFFFF, priv->base + SUN4I_REG_ACPM_ADDR + priv->acp_offset); /* clear error counters and error code capture */ writel(0, priv->base + SUN4I_REG_ERRC_ADDR); @@ -418,13 +433,13 @@ static netdev_tx_t sun4ican_start_xmit(struct sk_buff *skb, struct net_device *d canid_t id; int i; - if (can_dropped_invalid_skb(dev, skb)) + if (can_dev_dropped_skb(dev, skb)) return NETDEV_TX_OK; netif_stop_queue(dev); id = cf->can_id; - dlc = cf->can_dlc; + dlc = cf->len; msg_flag_n = dlc; if (id & CAN_RTR_FLAG) @@ -448,7 +463,7 @@ static netdev_tx_t sun4ican_start_xmit(struct sk_buff *skb, struct net_device *d writel(msg_flag_n, priv->base + SUN4I_REG_BUF0_ADDR); - can_put_echo_skb(skb, dev, 0); + can_put_echo_skb(skb, dev, 0, 0); if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) sun4i_can_write_cmdreg(priv, SUN4I_CMD_SELF_RCV_REQ); @@ -475,7 +490,7 @@ static void sun4i_can_rx(struct net_device *dev) return; fi = readl(priv->base + SUN4I_REG_BUF0_ADDR); - cf->can_dlc = get_can_dlc(fi & 0x0F); + cf->len = can_cc_dlc2len(fi & 0x0F); if (fi & SUN4I_MSG_EFF_FLAG) { dreg = SUN4I_REG_BUF5_ADDR; id = (readl(priv->base + SUN4I_REG_BUF1_ADDR) << 21) | @@ -490,21 +505,21 @@ static void sun4i_can_rx(struct net_device *dev) } /* remote frame ? 
*/ - if (fi & SUN4I_MSG_RTR_FLAG) + if (fi & SUN4I_MSG_RTR_FLAG) { id |= CAN_RTR_FLAG; - else - for (i = 0; i < cf->can_dlc; i++) + } else { + for (i = 0; i < cf->len; i++) cf->data[i] = readl(priv->base + dreg + i * 4); + stats->rx_bytes += cf->len; + } + stats->rx_packets++; + cf->can_id = id; sun4i_can_write_cmdreg(priv, SUN4I_CMD_RELEASE_RBUF); - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; netif_rx(skb); - - can_led_event(dev, CAN_LED_EVENT_RX); } static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status) @@ -525,11 +540,6 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status) rxerr = (errc >> 16) & 0xFF; txerr = errc & 0xFF; - if (skb) { - cf->data[6] = txerr; - cf->data[7] = rxerr; - } - if (isrc & SUN4I_INT_DATA_OR) { /* data overrun interrupt */ netdev_dbg(dev, "data overrun interrupt\n"); @@ -560,15 +570,18 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status) else state = CAN_STATE_ERROR_ACTIVE; } + if (likely(skb) && state != CAN_STATE_BUS_OFF) { + cf->can_id |= CAN_ERR_CNT; + cf->data[6] = txerr; + cf->data[7] = rxerr; + } if (isrc & SUN4I_INT_BUS_ERR) { /* bus error interrupt */ netdev_dbg(dev, "bus error interrupt\n"); priv->can.can_stats.bus_error++; - stats->rx_errors++; + ecc = readl(priv->base + SUN4I_REG_STA_ADDR); if (likely(skb)) { - ecc = readl(priv->base + SUN4I_REG_STA_ADDR); - cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; switch (ecc & SUN4I_STA_MASK_ERR) { @@ -586,9 +599,15 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status) >> 16; break; } - /* error occurred during transmission? */ - if ((ecc & SUN4I_STA_ERR_DIR) == 0) + } + + /* error occurred during transmission? */ + if ((ecc & SUN4I_STA_ERR_DIR) == 0) { + if (likely(skb)) cf->data[2] |= CAN_ERR_PROT_TX; + stats->tx_errors++; + } else { + stats->rx_errors++; } } if (isrc & SUN4I_INT_ERR_PASSIVE) { @@ -604,7 +623,6 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status) netdev_dbg(dev, "arbitration lost interrupt\n"); alc = readl(priv->base + SUN4I_REG_STA_ADDR); priv->can.can_stats.arbitration_lost++; - stats->tx_errors++; if (likely(skb)) { cf->can_id |= CAN_ERR_LOSTARB; cf->data[0] = (alc >> 8) & 0x1f; @@ -615,21 +633,18 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status) tx_state = txerr >= rxerr ? state : 0; rx_state = txerr <= rxerr ? state : 0; - if (likely(skb)) - can_change_state(dev, cf, tx_state, rx_state); - else - priv->can.state = state; + /* The skb allocation might fail, but can_change_state() + * handles cf == NULL. 
+ */ + can_change_state(dev, cf, tx_state, rx_state); if (state == CAN_STATE_BUS_OFF) can_bus_off(dev); } - if (likely(skb)) { - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + if (likely(skb)) netif_rx(skb); - } else { + else return -ENOMEM; - } return 0; } @@ -642,8 +657,8 @@ static irqreturn_t sun4i_can_interrupt(int irq, void *dev_id) u8 isrc, status; int n = 0; - while ((isrc = readl(priv->base + SUN4I_REG_INT_ADDR)) && - (n < SUN4I_CAN_MAX_IRQ)) { + while ((n < SUN4I_CAN_MAX_IRQ) && + (isrc = readl(priv->base + SUN4I_REG_INT_ADDR))) { n++; status = readl(priv->base + SUN4I_REG_STA_ADDR); @@ -652,13 +667,9 @@ static irqreturn_t sun4i_can_interrupt(int irq, void *dev_id) if (isrc & SUN4I_INT_TBUF_VLD) { /* transmission complete interrupt */ - stats->tx_bytes += - readl(priv->base + - SUN4I_REG_RBUF_RBACK_START_ADDR) & 0xf; + stats->tx_bytes += can_get_echo_skb(dev, 0, NULL); stats->tx_packets++; - can_get_echo_skb(dev, 0); netif_wake_queue(dev); - can_led_event(dev, CAN_LED_EVENT_TX); } if ((isrc & SUN4I_INT_RBUF_VLD) && !(isrc & SUN4I_INT_DATA_OR)) { @@ -703,6 +714,13 @@ static int sun4ican_open(struct net_device *dev) goto exit_irq; } + /* software reset deassert */ + err = reset_control_deassert(priv->reset); + if (err) { + netdev_err(dev, "could not deassert CAN reset\n"); + goto exit_soft_reset; + } + /* turn on clocking for CAN peripheral block */ err = clk_prepare_enable(priv->clk); if (err) { @@ -716,7 +734,6 @@ static int sun4ican_open(struct net_device *dev) goto exit_can_start; } - can_led_event(dev, CAN_LED_EVENT_OPEN); netif_start_queue(dev); return 0; @@ -724,6 +741,8 @@ static int sun4ican_open(struct net_device *dev) exit_can_start: clk_disable_unprepare(priv->clk); exit_clock: + reset_control_assert(priv->reset); +exit_soft_reset: free_irq(dev->irq, dev); exit_irq: close_candev(dev); @@ -737,10 +756,10 @@ static int sun4ican_close(struct net_device *dev) netif_stop_queue(dev); sun4i_can_stop(dev); clk_disable_unprepare(priv->clk); + reset_control_assert(priv->reset); free_irq(dev->irq, dev); close_candev(dev); - can_led_event(dev, CAN_LED_EVENT_STOP); return 0; } @@ -751,32 +770,79 @@ static const struct net_device_ops sun4ican_netdev_ops = { .ndo_start_xmit = sun4ican_start_xmit, }; +static const struct ethtool_ops sun4ican_ethtool_ops = { + .get_ts_info = ethtool_op_get_ts_info, +}; + +static const struct sun4ican_quirks sun4ican_quirks_a10 = { + .has_reset = false, + .acp_offset = 0, +}; + +static const struct sun4ican_quirks sun4ican_quirks_r40 = { + .has_reset = true, + .acp_offset = 0, +}; + +static const struct sun4ican_quirks sun4ican_quirks_d1 = { + .has_reset = true, + .acp_offset = (SUN4I_REG_ACPC_ADDR_D1 - SUN4I_REG_ACPC_ADDR), +}; + static const struct of_device_id sun4ican_of_match[] = { - {.compatible = "allwinner,sun4i-a10-can"}, - {}, + { + .compatible = "allwinner,sun4i-a10-can", + .data = &sun4ican_quirks_a10 + }, { + .compatible = "allwinner,sun7i-a20-can", + .data = &sun4ican_quirks_a10 + }, { + .compatible = "allwinner,sun8i-r40-can", + .data = &sun4ican_quirks_r40 + }, { + .compatible = "allwinner,sun20i-d1-can", + .data = &sun4ican_quirks_d1 + }, { + /* sentinel */ + }, }; MODULE_DEVICE_TABLE(of, sun4ican_of_match); -static int sun4ican_remove(struct platform_device *pdev) +static void sun4ican_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); unregister_netdev(dev); free_candev(dev); - - return 0; } static int sun4ican_probe(struct platform_device *pdev) { struct device_node *np = 
pdev->dev.of_node; - struct resource *mem; struct clk *clk; + struct reset_control *reset = NULL; void __iomem *addr; int err, irq; struct net_device *dev; struct sun4ican_priv *priv; + const struct sun4ican_quirks *quirks; + + quirks = of_device_get_match_data(&pdev->dev); + if (!quirks) { + dev_err(&pdev->dev, "failed to determine the quirks to use\n"); + err = -ENODEV; + goto exit; + } + + if (quirks->has_reset) { + reset = devm_reset_control_get_exclusive(&pdev->dev, NULL); + if (IS_ERR(reset)) { + dev_err(&pdev->dev, "unable to request reset\n"); + err = PTR_ERR(reset); + goto exit; + } + } clk = of_clk_get(np, 0); if (IS_ERR(clk)) { @@ -787,15 +853,13 @@ static int sun4ican_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) { - dev_err(&pdev->dev, "could not get a valid irq\n"); err = -ENODEV; goto exit; } - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - addr = devm_ioremap_resource(&pdev->dev, mem); + addr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(addr)) { - err = -EBUSY; + err = PTR_ERR(addr); goto exit; } @@ -808,6 +872,7 @@ static int sun4ican_probe(struct platform_device *pdev) } dev->netdev_ops = &sun4ican_netdev_ops; + dev->ethtool_ops = &sun4ican_ethtool_ops; dev->irq = irq; dev->flags |= IFF_ECHO; @@ -822,6 +887,8 @@ static int sun4ican_probe(struct platform_device *pdev) CAN_CTRLMODE_3_SAMPLES; priv->base = addr; priv->clk = clk; + priv->reset = reset; + priv->acp_offset = quirks->acp_offset; spin_lock_init(&priv->cmdreg_lock); platform_set_drvdata(pdev, dev); @@ -833,7 +900,6 @@ static int sun4ican_probe(struct platform_device *pdev) DRV_NAME, err); goto exit_free; } - devm_can_led_init(dev); dev_info(&pdev->dev, "device registered (base=%p, irq=%d)\n", priv->base, dev->irq); @@ -860,4 +926,4 @@ module_platform_driver(sun4i_can_driver); MODULE_AUTHOR("Peter Chen <xingkongcp@gmail.com>"); MODULE_AUTHOR("Gerhard Bertelsmann <info@gerhard-bertelsmann.de>"); MODULE_LICENSE("Dual BSD/GPL"); -MODULE_DESCRIPTION("CAN driver for Allwinner SoCs (A10/A20)"); +MODULE_DESCRIPTION("CAN driver for Allwinner SoCs (A10/A20/D1)"); diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index db6ea936dc3f..1d3dbf28b105 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * TI HECC (CAN) device driver * @@ -5,16 +6,7 @@ * specs for the same is available at <http://www.ti.com> * * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/ - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed as is WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
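Worth calling out from the sun4i probe rework above: instead of hard-coding one SoC, the driver now attaches a small per-variant structure to each compatible string and fetches it with of_device_get_match_data(), so the rest of the probe path only branches on the quirk fields (reset line present, register offset). A minimal sketch of that pattern follows; all foo_* names and the compatible strings are invented, only the kernel API calls are real.

#include <linux/errno.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Hypothetical per-variant description, in the spirit of sun4ican_quirks. */
struct foo_quirks {
	bool has_reset;
	int acp_offset;
};

static const struct foo_quirks foo_quirks_v1 = {
	.has_reset = false,
	.acp_offset = 0,
};

static const struct foo_quirks foo_quirks_v2 = {
	.has_reset = true,
	.acp_offset = -0x18,	/* acceptance registers moved on this variant */
};

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo-v1", .data = &foo_quirks_v1 },
	{ .compatible = "vendor,foo-v2", .data = &foo_quirks_v2 },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, foo_of_match);

static int foo_probe(struct platform_device *pdev)
{
	const struct foo_quirks *quirks;

	/* Returns the .data pointer of the of_device_id entry that matched. */
	quirks = of_device_get_match_data(&pdev->dev);
	if (!quirks)
		return -ENODEV;

	/* The remaining probe code only looks at quirks->has_reset and
	 * adds quirks->acp_offset to the affected register addresses.
	 */
	return 0;
}

static struct platform_driver foo_driver = {
	.probe = foo_probe,
	.driver = {
		.name = "foo-can",
		.of_match_table = foo_of_match,
	},
};
module_platform_driver(foo_driver);
MODULE_LICENSE("GPL");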
- * + * Copyright (C) 2019 Jeroen Hofstee <jhofstee@victronenergy.com> */ #include <linux/module.h> @@ -22,18 +14,18 @@ #include <linux/types.h> #include <linux/interrupt.h> #include <linux/errno.h> +#include <linux/ethtool.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/regulator/consumer.h> #include <linux/can/dev.h> #include <linux/can/error.h> -#include <linux/can/led.h> +#include <linux/can/rx-offload.h> #define DRV_NAME "ti_hecc" #define HECC_MODULE_VERSION "0.7" @@ -44,8 +36,7 @@ MODULE_VERSION(HECC_MODULE_VERSION); #define HECC_MAX_MAILBOXES 32 /* hardware mailboxes - do not change */ #define MAX_TX_PRIO 0x3F /* hardware value - do not change */ -/* - * Important Note: TX mailbox configuration +/* Important Note: TX mailbox configuration * TX mailboxes should be restricted to the number of SKB buffers to avoid * maintaining SKB buffers separately. TX mailboxes should be a power of 2 * for the mailbox logic to work. Top mailbox numbers are reserved for RX @@ -63,29 +54,16 @@ MODULE_VERSION(HECC_MODULE_VERSION); #define HECC_TX_PRIO_MASK (MAX_TX_PRIO << HECC_MB_TX_SHIFT) #define HECC_TX_MB_MASK (HECC_MAX_TX_MBOX - 1) #define HECC_TX_MASK ((HECC_MAX_TX_MBOX - 1) | HECC_TX_PRIO_MASK) -#define HECC_TX_MBOX_MASK (~(BIT(HECC_MAX_TX_MBOX) - 1)) -#define HECC_DEF_NAPI_WEIGHT HECC_MAX_RX_MBOX -/* - * Important Note: RX mailbox configuration - * RX mailboxes are further logically split into two - main and buffer - * mailboxes. The goal is to get all packets into main mailboxes as - * driven by mailbox number and receive priority (higher to lower) and - * buffer mailboxes are used to receive pkts while main mailboxes are being - * processed. This ensures in-order packet reception. - * - * Here are the recommended values for buffer mailbox. Note that RX mailboxes - * start after TX mailboxes: +/* RX mailbox configuration * - * HECC_MAX_RX_MBOX HECC_RX_BUFFER_MBOX No of buffer mailboxes - * 28 12 8 - * 16 20 4 + * The remaining mailboxes are used for reception and are delivered + * based on their timestamp, to avoid a hardware race when CANME is + * changed while CAN-bus traffic is being received. 
*/ - #define HECC_MAX_RX_MBOX (HECC_MAX_MAILBOXES - HECC_MAX_TX_MBOX) -#define HECC_RX_BUFFER_MBOX 12 /* as per table above */ #define HECC_RX_FIRST_MBOX (HECC_MAX_MAILBOXES - 1) -#define HECC_RX_HIGH_MBOX_MASK (~(BIT(HECC_RX_BUFFER_MBOX) - 1)) +#define HECC_RX_LAST_MBOX (HECC_MAX_TX_MBOX) /* TI HECC module registers */ #define HECC_CANME 0x0 /* Mailbox enable */ @@ -95,7 +73,7 @@ MODULE_VERSION(HECC_MODULE_VERSION); #define HECC_CANTA 0x10 /* Transmission acknowledge */ #define HECC_CANAA 0x14 /* Abort acknowledge */ #define HECC_CANRMP 0x18 /* Receive message pending */ -#define HECC_CANRML 0x1C /* Remote message lost */ +#define HECC_CANRML 0x1C /* Receive message lost */ #define HECC_CANRFP 0x20 /* Remote frame pending */ #define HECC_CANGAM 0x24 /* SECC only:Global acceptance mask */ #define HECC_CANMC 0x28 /* Master control */ @@ -117,6 +95,9 @@ MODULE_VERSION(HECC_MODULE_VERSION); #define HECC_CANTIOCE 0x68 /* SCC only:Enhanced TX I/O control */ #define HECC_CANRIOCE 0x6C /* SCC only:Enhanced RX I/O control */ +/* TI HECC RAM registers */ +#define HECC_CANMOTS 0x80 /* Message object time stamp */ + /* Mailbox registers */ #define HECC_CANMID 0x0 #define HECC_CANMCF 0x4 @@ -159,6 +140,8 @@ MODULE_VERSION(HECC_MODULE_VERSION); #define HECC_BUS_ERROR (HECC_CANES_FE | HECC_CANES_BE |\ HECC_CANES_CRCE | HECC_CANES_SE |\ HECC_CANES_ACKE) +#define HECC_CANES_FLAGS (HECC_BUS_ERROR | HECC_CANES_BO |\ + HECC_CANES_EP | HECC_CANES_EW) #define HECC_CANMCF_RTR BIT(4) /* Remote transmit request */ @@ -193,7 +176,7 @@ static const struct can_bittiming_const ti_hecc_bittiming_const = { struct ti_hecc_priv { struct can_priv can; /* MUST be first member/field */ - struct napi_struct napi; + struct can_rx_offload offload; struct net_device *ndev; struct clk *clk; void __iomem *base; @@ -203,7 +186,6 @@ struct ti_hecc_priv { spinlock_t mbx_lock; /* CANME register needs protection */ u32 tx_head; u32 tx_tail; - u32 rx_next; struct regulator *reg_xceiver; }; @@ -227,8 +209,13 @@ static inline void hecc_write_lam(struct ti_hecc_priv *priv, u32 mbxno, u32 val) __raw_writel(val, priv->hecc_ram + mbxno * 4); } +static inline u32 hecc_read_stamp(struct ti_hecc_priv *priv, u32 mbxno) +{ + return __raw_readl(priv->hecc_ram + HECC_CANMOTS + mbxno * 4); +} + static inline void hecc_write_mbx(struct ti_hecc_priv *priv, u32 mbxno, - u32 reg, u32 val) + u32 reg, u32 val) { __raw_writel(val, priv->mbx + mbxno * 0x10 + reg); } @@ -249,13 +236,13 @@ static inline u32 hecc_read(struct ti_hecc_priv *priv, int reg) } static inline void hecc_set_bit(struct ti_hecc_priv *priv, int reg, - u32 bit_mask) + u32 bit_mask) { hecc_write(priv, reg, hecc_read(priv, reg) | bit_mask); } static inline void hecc_clear_bit(struct ti_hecc_priv *priv, int reg, - u32 bit_mask) + u32 bit_mask) { hecc_write(priv, reg, hecc_read(priv, reg) & ~bit_mask); } @@ -277,8 +264,8 @@ static int ti_hecc_set_btc(struct ti_hecc_priv *priv) if (bit_timing->brp > 4) can_btc |= HECC_CANBTC_SAM; else - netdev_warn(priv->ndev, "WARN: Triple" - "sampling not set due to h/w limitations"); + netdev_warn(priv->ndev, + "WARN: Triple sampling not set due to h/w limitations"); } can_btc |= ((bit_timing->sjw - 1) & 0x3) << 8; can_btc |= ((bit_timing->brp - 1) & 0xFF) << 16; @@ -314,8 +301,7 @@ static void ti_hecc_reset(struct net_device *ndev) /* Set change control request and wait till enabled */ hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_CCR); - /* - * INFO: It has been observed that at times CCE bit may not be + /* INFO: It has been observed that at times CCE bit 
may not be * set and hw seems to be ok even if this bit is not set so * timing out with a timing of 1ms to respect the specs */ @@ -325,8 +311,7 @@ static void ti_hecc_reset(struct net_device *ndev) udelay(10); } - /* - * Note: On HECC, BTC can be programmed only in initialization mode, so + /* Note: On HECC, BTC can be programmed only in initialization mode, so * it is expected that the can bittiming parameters are set via ip * utility before the device is opened */ @@ -335,13 +320,11 @@ static void ti_hecc_reset(struct net_device *ndev) /* Clear CCR (and CANMC register) and wait for CCE = 0 enable */ hecc_write(priv, HECC_CANMC, 0); - /* - * INFO: CAN net stack handles bus off and hence disabling auto-bus-on + /* INFO: CAN net stack handles bus off and hence disabling auto-bus-on * hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_ABO); */ - /* - * INFO: It has been observed that at times CCE bit may not be + /* INFO: It has been observed that at times CCE bit may not be * set and hw seems to be ok even if this bit is not set so */ cnt = HECC_CCE_WAIT_COUNT; @@ -374,8 +357,8 @@ static void ti_hecc_start(struct net_device *ndev) /* put HECC in initialization mode and set btc */ ti_hecc_reset(ndev); - priv->tx_head = priv->tx_tail = HECC_TX_MASK; - priv->rx_next = HECC_RX_FIRST_MBOX; + priv->tx_head = HECC_TX_MASK; + priv->tx_tail = HECC_TX_MASK; /* Enable local and global acceptance mask registers */ hecc_write(priv, HECC_CANGAM, HECC_SET_REG); @@ -392,8 +375,18 @@ static void ti_hecc_start(struct net_device *ndev) hecc_set_bit(priv, HECC_CANMIM, mbx_mask); } - /* Prevent message over-write & Enable interrupts */ - hecc_write(priv, HECC_CANOPC, HECC_SET_REG); + /* Enable tx interrupts */ + hecc_set_bit(priv, HECC_CANMIM, BIT(HECC_MAX_TX_MBOX) - 1); + + /* Prevent message over-write to create a rx fifo, but not for + * the lowest priority mailbox, since that allows detecting + * overflows instead of the hardware silently dropping the + * messages. + */ + mbx_mask = ~BIT_U32(HECC_RX_LAST_MBOX); + hecc_write(priv, HECC_CANOPC, mbx_mask); + + /* Enable interrupts */ if (priv->use_hecc1int) { hecc_write(priv, HECC_CANMIL, HECC_SET_REG); hecc_write(priv, HECC_CANGIM, HECC_CANGIM_DEF_MASK | @@ -401,7 +394,7 @@ static void ti_hecc_start(struct net_device *ndev) } else { hecc_write(priv, HECC_CANMIL, 0); hecc_write(priv, HECC_CANGIM, - HECC_CANGIM_DEF_MASK | HECC_CANGIM_I0EN); + HECC_CANGIM_DEF_MASK | HECC_CANGIM_I0EN); } priv->can.state = CAN_STATE_ERROR_ACTIVE; } @@ -410,6 +403,9 @@ static void ti_hecc_stop(struct net_device *ndev) { struct ti_hecc_priv *priv = netdev_priv(ndev); + /* Disable the CPK; stop sending, erroring and acking */ + hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_CCR); + /* Disable interrupts and disable mailboxes */ hecc_write(priv, HECC_CANGIM, 0); hecc_write(priv, HECC_CANMIM, 0); @@ -435,7 +431,7 @@ static int ti_hecc_do_set_mode(struct net_device *ndev, enum can_mode mode) } static int ti_hecc_get_berr_counter(const struct net_device *ndev, - struct can_berr_counter *bec) + struct can_berr_counter *bec) { struct ti_hecc_priv *priv = netdev_priv(ndev); @@ -445,11 +441,10 @@ static int ti_hecc_get_berr_counter(const struct net_device *ndev, return 0; } -/* - * ti_hecc_xmit: HECC Transmit +/* ti_hecc_xmit: HECC Transmit * * The transmit mailboxes start from 0 to HECC_MAX_TX_MBOX. In HECC the - * priority of the mailbox for tranmission is dependent upon priority setting + * priority of the mailbox for transmission is dependent upon priority setting * field in mailbox registers. 
The mailbox with highest value in priority field * is transmitted first. Only when two mailboxes have the same value in * priority field the highest numbered mailbox is transmitted first. @@ -474,7 +469,7 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev) u32 mbxno, mbx_mask, data; unsigned long flags; - if (can_dropped_invalid_skb(ndev, skb)) + if (can_dev_dropped_skb(ndev, skb)) return NETDEV_TX_OK; mbxno = get_tx_head_mb(priv); @@ -484,14 +479,14 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev) spin_unlock_irqrestore(&priv->mbx_lock, flags); netif_stop_queue(ndev); netdev_err(priv->ndev, - "BUG: TX mbx not ready tx_head=%08X, tx_tail=%08X\n", - priv->tx_head, priv->tx_tail); + "BUG: TX mbx not ready tx_head=%08X, tx_tail=%08X\n", + priv->tx_head, priv->tx_tail); return NETDEV_TX_BUSY; } spin_unlock_irqrestore(&priv->mbx_lock, flags); /* Prepare mailbox for transmission */ - data = cf->can_dlc | (get_tx_head_prio(priv) << 8); + data = cf->len | (get_tx_head_prio(priv) << 8); if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */ data |= HECC_CANMCF_RTR; hecc_write_mbx(priv, mbxno, HECC_CANMCF, data); @@ -502,261 +497,243 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev) data = (cf->can_id & CAN_SFF_MASK) << 18; hecc_write_mbx(priv, mbxno, HECC_CANMID, data); hecc_write_mbx(priv, mbxno, HECC_CANMDL, - be32_to_cpu(*(__be32 *)(cf->data))); - if (cf->can_dlc > 4) + be32_to_cpu(*(__be32 *)(cf->data))); + if (cf->len > 4) hecc_write_mbx(priv, mbxno, HECC_CANMDH, - be32_to_cpu(*(__be32 *)(cf->data + 4))); + be32_to_cpu(*(__be32 *)(cf->data + 4))); else *(u32 *)(cf->data + 4) = 0; - can_put_echo_skb(skb, ndev, mbxno); + can_put_echo_skb(skb, ndev, mbxno, 0); spin_lock_irqsave(&priv->mbx_lock, flags); --priv->tx_head; if ((hecc_read(priv, HECC_CANME) & BIT(get_tx_head_mb(priv))) || - (priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK) { + (priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK) { netif_stop_queue(ndev); } hecc_set_bit(priv, HECC_CANME, mbx_mask); spin_unlock_irqrestore(&priv->mbx_lock, flags); - hecc_clear_bit(priv, HECC_CANMD, mbx_mask); - hecc_set_bit(priv, HECC_CANMIM, mbx_mask); hecc_write(priv, HECC_CANTRS, mbx_mask); return NETDEV_TX_OK; } -static int ti_hecc_rx_pkt(struct ti_hecc_priv *priv, int mbxno) +static inline +struct ti_hecc_priv *rx_offload_to_priv(struct can_rx_offload *offload) { - struct net_device_stats *stats = &priv->ndev->stats; - struct can_frame *cf; + return container_of(offload, struct ti_hecc_priv, offload); +} + +static struct sk_buff *ti_hecc_mailbox_read(struct can_rx_offload *offload, + unsigned int mbxno, u32 *timestamp, + bool drop) +{ + struct ti_hecc_priv *priv = rx_offload_to_priv(offload); struct sk_buff *skb; + struct can_frame *cf; u32 data, mbx_mask; - unsigned long flags; - skb = alloc_can_skb(priv->ndev, &cf); - if (!skb) { - if (printk_ratelimit()) - netdev_err(priv->ndev, - "ti_hecc_rx_pkt: alloc_can_skb() failed\n"); - return -ENOMEM; + mbx_mask = BIT(mbxno); + + if (unlikely(drop)) { + skb = ERR_PTR(-ENOBUFS); + goto mark_as_read; + } + + skb = alloc_can_skb(offload->dev, &cf); + if (unlikely(!skb)) { + skb = ERR_PTR(-ENOMEM); + goto mark_as_read; } - mbx_mask = BIT(mbxno); data = hecc_read_mbx(priv, mbxno, HECC_CANMID); if (data & HECC_CANMID_IDE) cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG; else cf->can_id = (data >> 18) & CAN_SFF_MASK; + data = hecc_read_mbx(priv, mbxno, HECC_CANMCF); if (data & HECC_CANMCF_RTR) cf->can_id |= 
CAN_RTR_FLAG; - cf->can_dlc = get_can_dlc(data & 0xF); + cf->len = can_cc_dlc2len(data & 0xF); + data = hecc_read_mbx(priv, mbxno, HECC_CANMDL); *(__be32 *)(cf->data) = cpu_to_be32(data); - if (cf->can_dlc > 4) { + if (cf->len > 4) { data = hecc_read_mbx(priv, mbxno, HECC_CANMDH); *(__be32 *)(cf->data + 4) = cpu_to_be32(data); } - spin_lock_irqsave(&priv->mbx_lock, flags); - hecc_clear_bit(priv, HECC_CANME, mbx_mask); - hecc_write(priv, HECC_CANRMP, mbx_mask); - /* enable mailbox only if it is part of rx buffer mailboxes */ - if (priv->rx_next < HECC_RX_BUFFER_MBOX) - hecc_set_bit(priv, HECC_CANME, mbx_mask); - spin_unlock_irqrestore(&priv->mbx_lock, flags); - - stats->rx_bytes += cf->can_dlc; - can_led_event(priv->ndev, CAN_LED_EVENT_RX); - netif_receive_skb(skb); - stats->rx_packets++; - - return 0; -} - -/* - * ti_hecc_rx_poll - HECC receive pkts - * - * The receive mailboxes start from highest numbered mailbox till last xmit - * mailbox. On CAN frame reception the hardware places the data into highest - * numbered mailbox that matches the CAN ID filter. Since all receive mailboxes - * have same filtering (ALL CAN frames) packets will arrive in the highest - * available RX mailbox and we need to ensure in-order packet reception. - * - * To ensure the packets are received in the right order we logically divide - * the RX mailboxes into main and buffer mailboxes. Packets are received as per - * mailbox priotity (higher to lower) in the main bank and once it is full we - * disable further reception into main mailboxes. While the main mailboxes are - * processed in NAPI, further packets are received in buffer mailboxes. - * - * We maintain a RX next mailbox counter to process packets and once all main - * mailboxe packets are passed to the upper stack we enable all of them but - * continue to process packets received in buffer mailboxes. With each packet - * received from buffer mailbox we enable it immediately so as to handle the - * overflow from higher mailboxes. - */ -static int ti_hecc_rx_poll(struct napi_struct *napi, int quota) -{ - struct net_device *ndev = napi->dev; - struct ti_hecc_priv *priv = netdev_priv(ndev); - u32 num_pkts = 0; - u32 mbx_mask; - unsigned long pending_pkts, flags; - - if (!netif_running(ndev)) - return 0; - while ((pending_pkts = hecc_read(priv, HECC_CANRMP)) && - num_pkts < quota) { - mbx_mask = BIT(priv->rx_next); /* next rx mailbox to process */ - if (mbx_mask & pending_pkts) { - if (ti_hecc_rx_pkt(priv, priv->rx_next) < 0) - return num_pkts; - ++num_pkts; - } else if (priv->rx_next > HECC_RX_BUFFER_MBOX) { - break; /* pkt not received yet */ - } - --priv->rx_next; - if (priv->rx_next == HECC_RX_BUFFER_MBOX) { - /* enable high bank mailboxes */ - spin_lock_irqsave(&priv->mbx_lock, flags); - mbx_mask = hecc_read(priv, HECC_CANME); - mbx_mask |= HECC_RX_HIGH_MBOX_MASK; - hecc_write(priv, HECC_CANME, mbx_mask); - spin_unlock_irqrestore(&priv->mbx_lock, flags); - } else if (priv->rx_next == HECC_MAX_TX_MBOX - 1) { - priv->rx_next = HECC_RX_FIRST_MBOX; - break; - } - } + *timestamp = hecc_read_stamp(priv, mbxno); + + /* Check for FIFO overrun. + * + * All but the last RX mailbox have activated overwrite + * protection. So skip check for overrun, if we're not + * handling the last RX mailbox. + * + * As the overwrite protection for the last RX mailbox is + * disabled, the CAN core might update while we're reading + * it. This means the skb might be inconsistent. + * + * Return an error to let rx-offload discard this CAN frame. 
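Zooming out from the mailbox_read() hunk above: the patch drops the driver's private NAPI poll and hands reception to the generic can_rx_offload framework. The driver supplies only a mailbox_read() callback plus the first/last RX mailbox numbers, and the core sorts frames by the timestamp returned through the callback's third argument, which is what preserves the in-order delivery described in the earlier RX-mailbox comment. A rough sketch of that wiring is shown below with invented foo_* names and placeholder mailbox numbers; only the rx-offload calls themselves are real, and the hardware access details are elided.

#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>
#include <linux/err.h>
#include <linux/netdevice.h>

struct foo_priv {
	struct can_priv can;		/* must remain the first member */
	struct can_rx_offload offload;
	void __iomem *base;
};

/* Called by the rx-offload core for every pending mailbox. Returning an
 * ERR_PTR() lets the core discard the frame for this slot, while the
 * callback still acknowledges the mailbox to the hardware (as the
 * mark_as_read label above does).
 */
static struct sk_buff *foo_mailbox_read(struct can_rx_offload *offload,
					unsigned int mb, u32 *timestamp,
					bool drop)
{
	struct foo_priv *priv = container_of(offload, struct foo_priv, offload);
	struct can_frame *cf;
	struct sk_buff *skb;

	if (drop)
		return ERR_PTR(-ENOBUFS);

	skb = alloc_can_skb(offload->dev, &cf);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* ... fill cf->can_id, cf->len and cf->data from priv->base ... */
	*timestamp = 0;		/* hardware timestamp of mailbox 'mb' */

	return skb;
}

static int foo_setup_rx_offload(struct net_device *ndev)
{
	struct foo_priv *priv = netdev_priv(ndev);

	priv->offload.mailbox_read = foo_mailbox_read;
	priv->offload.mb_first = 31;	/* highest RX mailbox, read first */
	priv->offload.mb_last = 16;	/* lowest RX mailbox */

	return can_rx_offload_add_timestamp(ndev, &priv->offload);
}

/* The hard IRQ handler then reduces to
 *	can_rx_offload_irq_offload_timestamp(&priv->offload, pending_mask);
 *	can_rx_offload_irq_finish(&priv->offload);
 * and the framework's NAPI context delivers the frames in timestamp order.
 */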
+ */ + if (unlikely(mbxno == HECC_RX_LAST_MBOX && + hecc_read(priv, HECC_CANRML) & mbx_mask)) + skb = ERR_PTR(-ENOBUFS); - /* Enable packet interrupt if all pkts are handled */ - if (hecc_read(priv, HECC_CANRMP) == 0) { - napi_complete(napi); - /* Re-enable RX mailbox interrupts */ - mbx_mask = hecc_read(priv, HECC_CANMIM); - mbx_mask |= HECC_TX_MBOX_MASK; - hecc_write(priv, HECC_CANMIM, mbx_mask); - } else { - /* repoll is done only if whole budget is used */ - num_pkts = quota; - } + mark_as_read: + hecc_write(priv, HECC_CANRMP, mbx_mask); - return num_pkts; + return skb; } static int ti_hecc_error(struct net_device *ndev, int int_status, - int err_status) + int err_status) { struct ti_hecc_priv *priv = netdev_priv(ndev); - struct net_device_stats *stats = &ndev->stats; struct can_frame *cf; struct sk_buff *skb; + u32 timestamp; + int err; - /* propagate the error condition to the can stack */ - skb = alloc_can_err_skb(ndev, &cf); - if (!skb) { - if (printk_ratelimit()) - netdev_err(priv->ndev, - "ti_hecc_error: alloc_can_err_skb() failed\n"); - return -ENOMEM; - } - - if (int_status & HECC_CANGIF_WLIF) { /* warning level int */ - if ((int_status & HECC_CANGIF_BOIF) == 0) { - priv->can.state = CAN_STATE_ERROR_WARNING; - ++priv->can.can_stats.error_warning; - cf->can_id |= CAN_ERR_CRTL; - if (hecc_read(priv, HECC_CANTEC) > 96) - cf->data[1] |= CAN_ERR_CRTL_TX_WARNING; - if (hecc_read(priv, HECC_CANREC) > 96) - cf->data[1] |= CAN_ERR_CRTL_RX_WARNING; - } - hecc_set_bit(priv, HECC_CANES, HECC_CANES_EW); - netdev_dbg(priv->ndev, "Error Warning interrupt\n"); - hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR); - } - - if (int_status & HECC_CANGIF_EPIF) { /* error passive int */ - if ((int_status & HECC_CANGIF_BOIF) == 0) { - priv->can.state = CAN_STATE_ERROR_PASSIVE; - ++priv->can.can_stats.error_passive; - cf->can_id |= CAN_ERR_CRTL; - if (hecc_read(priv, HECC_CANTEC) > 127) - cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE; - if (hecc_read(priv, HECC_CANREC) > 127) - cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; + if (err_status & HECC_BUS_ERROR) { + /* propagate the error condition to the can stack */ + skb = alloc_can_err_skb(ndev, &cf); + if (!skb) { + if (net_ratelimit()) + netdev_err(priv->ndev, + "%s: alloc_can_err_skb() failed\n", + __func__); + return -ENOMEM; } - hecc_set_bit(priv, HECC_CANES, HECC_CANES_EP); - netdev_dbg(priv->ndev, "Error passive interrupt\n"); - hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR); - } - /* - * Need to check busoff condition in error status register too to - * ensure warning interrupts don't hog the system - */ - if ((int_status & HECC_CANGIF_BOIF) || (err_status & HECC_CANES_BO)) { - priv->can.state = CAN_STATE_BUS_OFF; - cf->can_id |= CAN_ERR_BUSOFF; - hecc_set_bit(priv, HECC_CANES, HECC_CANES_BO); - hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR); - /* Disable all interrupts in bus-off to avoid int hog */ - hecc_write(priv, HECC_CANGIM, 0); - ++priv->can.can_stats.bus_off; - can_bus_off(ndev); - } - - if (err_status & HECC_BUS_ERROR) { ++priv->can.can_stats.bus_error; cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; - if (err_status & HECC_CANES_FE) { - hecc_set_bit(priv, HECC_CANES, HECC_CANES_FE); + if (err_status & HECC_CANES_FE) cf->data[2] |= CAN_ERR_PROT_FORM; - } - if (err_status & HECC_CANES_BE) { - hecc_set_bit(priv, HECC_CANES, HECC_CANES_BE); + if (err_status & HECC_CANES_BE) cf->data[2] |= CAN_ERR_PROT_BIT; - } - if (err_status & HECC_CANES_SE) { - hecc_set_bit(priv, HECC_CANES, HECC_CANES_SE); + if (err_status & HECC_CANES_SE) cf->data[2] |= 
CAN_ERR_PROT_STUFF; - } - if (err_status & HECC_CANES_CRCE) { - hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE); + if (err_status & HECC_CANES_CRCE) cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; - } - if (err_status & HECC_CANES_ACKE) { - hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE); + if (err_status & HECC_CANES_ACKE) cf->data[3] = CAN_ERR_PROT_LOC_ACK; - } + + timestamp = hecc_read(priv, HECC_CANLNT); + err = can_rx_offload_queue_timestamp(&priv->offload, skb, + timestamp); + if (err) + ndev->stats.rx_fifo_errors++; } - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; - netif_rx(skb); + hecc_write(priv, HECC_CANES, HECC_CANES_FLAGS); return 0; } +static void ti_hecc_change_state(struct net_device *ndev, + enum can_state rx_state, + enum can_state tx_state) +{ + struct ti_hecc_priv *priv = netdev_priv(ndev); + struct can_frame *cf; + struct sk_buff *skb; + u32 timestamp; + int err; + + skb = alloc_can_err_skb(priv->ndev, &cf); + if (unlikely(!skb)) { + priv->can.state = max(tx_state, rx_state); + return; + } + + can_change_state(priv->ndev, cf, tx_state, rx_state); + + if (max(tx_state, rx_state) != CAN_STATE_BUS_OFF) { + cf->can_id |= CAN_ERR_CNT; + cf->data[6] = hecc_read(priv, HECC_CANTEC); + cf->data[7] = hecc_read(priv, HECC_CANREC); + } + + timestamp = hecc_read(priv, HECC_CANLNT); + err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); + if (err) + ndev->stats.rx_fifo_errors++; +} + static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id) { struct net_device *ndev = (struct net_device *)dev_id; struct ti_hecc_priv *priv = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; - u32 mbxno, mbx_mask, int_status, err_status; - unsigned long ack, flags; + u32 mbxno, mbx_mask, int_status, err_status, stamp; + unsigned long flags, rx_pending; + u32 handled = 0; int_status = hecc_read(priv, - (priv->use_hecc1int) ? HECC_CANGIF1 : HECC_CANGIF0); + priv->use_hecc1int ? + HECC_CANGIF1 : HECC_CANGIF0); if (!int_status) return IRQ_NONE; err_status = hecc_read(priv, HECC_CANES); - if (err_status & (HECC_BUS_ERROR | HECC_CANES_BO | - HECC_CANES_EP | HECC_CANES_EW)) - ti_hecc_error(ndev, int_status, err_status); + if (unlikely(err_status & HECC_CANES_FLAGS)) + ti_hecc_error(ndev, int_status, err_status); + + if (unlikely(int_status & HECC_CANGIM_DEF_MASK)) { + enum can_state rx_state, tx_state; + u32 rec = hecc_read(priv, HECC_CANREC); + u32 tec = hecc_read(priv, HECC_CANTEC); + + if (int_status & HECC_CANGIF_WLIF) { + handled |= HECC_CANGIF_WLIF; + rx_state = rec >= tec ? CAN_STATE_ERROR_WARNING : 0; + tx_state = rec <= tec ? CAN_STATE_ERROR_WARNING : 0; + netdev_dbg(priv->ndev, "Error Warning interrupt\n"); + ti_hecc_change_state(ndev, rx_state, tx_state); + } + + if (int_status & HECC_CANGIF_EPIF) { + handled |= HECC_CANGIF_EPIF; + rx_state = rec >= tec ? CAN_STATE_ERROR_PASSIVE : 0; + tx_state = rec <= tec ? 
CAN_STATE_ERROR_PASSIVE : 0; + netdev_dbg(priv->ndev, "Error passive interrupt\n"); + ti_hecc_change_state(ndev, rx_state, tx_state); + } + + if (int_status & HECC_CANGIF_BOIF) { + handled |= HECC_CANGIF_BOIF; + rx_state = CAN_STATE_BUS_OFF; + tx_state = CAN_STATE_BUS_OFF; + netdev_dbg(priv->ndev, "Bus off interrupt\n"); + + /* Disable all interrupts */ + hecc_write(priv, HECC_CANGIM, 0); + can_bus_off(ndev); + ti_hecc_change_state(ndev, rx_state, tx_state); + } + } else if (unlikely(priv->can.state != CAN_STATE_ERROR_ACTIVE)) { + enum can_state new_state, tx_state, rx_state; + u32 rec = hecc_read(priv, HECC_CANREC); + u32 tec = hecc_read(priv, HECC_CANTEC); + + if (rec >= 128 || tec >= 128) + new_state = CAN_STATE_ERROR_PASSIVE; + else if (rec >= 96 || tec >= 96) + new_state = CAN_STATE_ERROR_WARNING; + else + new_state = CAN_STATE_ERROR_ACTIVE; + + if (new_state < priv->can.state) { + rx_state = rec >= tec ? new_state : 0; + tx_state = rec <= tec ? new_state : 0; + ti_hecc_change_state(ndev, rx_state, tx_state); + } + } if (int_status & HECC_CANGIF_GMIF) { while (priv->tx_tail - priv->tx_head > 0) { @@ -764,44 +741,43 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id) mbx_mask = BIT(mbxno); if (!(mbx_mask & hecc_read(priv, HECC_CANTA))) break; - hecc_clear_bit(priv, HECC_CANMIM, mbx_mask); hecc_write(priv, HECC_CANTA, mbx_mask); spin_lock_irqsave(&priv->mbx_lock, flags); hecc_clear_bit(priv, HECC_CANME, mbx_mask); spin_unlock_irqrestore(&priv->mbx_lock, flags); - stats->tx_bytes += hecc_read_mbx(priv, mbxno, - HECC_CANMCF) & 0xF; + stamp = hecc_read_stamp(priv, mbxno); + stats->tx_bytes += + can_rx_offload_get_echo_skb_queue_timestamp(&priv->offload, + mbxno, stamp, NULL); stats->tx_packets++; - can_led_event(ndev, CAN_LED_EVENT_TX); - can_get_echo_skb(ndev, mbxno); --priv->tx_tail; } /* restart queue if wrap-up or if queue stalled on last pkt */ - if (((priv->tx_head == priv->tx_tail) && - ((priv->tx_head & HECC_TX_MASK) != HECC_TX_MASK)) || - (((priv->tx_tail & HECC_TX_MASK) == HECC_TX_MASK) && - ((priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK))) + if ((priv->tx_head == priv->tx_tail && + ((priv->tx_head & HECC_TX_MASK) != HECC_TX_MASK)) || + (((priv->tx_tail & HECC_TX_MASK) == HECC_TX_MASK) && + ((priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK))) netif_wake_queue(ndev); - /* Disable RX mailbox interrupts and let NAPI reenable them */ - if (hecc_read(priv, HECC_CANRMP)) { - ack = hecc_read(priv, HECC_CANMIM); - ack &= BIT(HECC_MAX_TX_MBOX) - 1; - hecc_write(priv, HECC_CANMIM, ack); - napi_schedule(&priv->napi); + /* offload RX mailboxes and let NAPI deliver them */ + while ((rx_pending = hecc_read(priv, HECC_CANRMP))) { + can_rx_offload_irq_offload_timestamp(&priv->offload, + rx_pending); } } /* clear all interrupt conditions - read back to avoid spurious ints */ if (priv->use_hecc1int) { - hecc_write(priv, HECC_CANGIF1, HECC_SET_REG); + hecc_write(priv, HECC_CANGIF1, handled); int_status = hecc_read(priv, HECC_CANGIF1); } else { - hecc_write(priv, HECC_CANGIF0, HECC_SET_REG); + hecc_write(priv, HECC_CANGIF0, handled); int_status = hecc_read(priv, HECC_CANGIF0); } + can_rx_offload_irq_finish(&priv->offload); + return IRQ_HANDLED; } @@ -811,7 +787,7 @@ static int ti_hecc_open(struct net_device *ndev) int err; err = request_irq(ndev->irq, ti_hecc_interrupt, IRQF_SHARED, - ndev->name, ndev); + ndev->name, ndev); if (err) { netdev_err(ndev, "error requesting interrupt\n"); return err; @@ -828,10 +804,8 @@ static int ti_hecc_open(struct net_device *ndev) return err; } - 
can_led_event(ndev, CAN_LED_EVENT_OPEN); - ti_hecc_start(ndev); - napi_enable(&priv->napi); + can_rx_offload_enable(&priv->offload); netif_start_queue(ndev); return 0; @@ -842,14 +816,12 @@ static int ti_hecc_close(struct net_device *ndev) struct ti_hecc_priv *priv = netdev_priv(ndev); netif_stop_queue(ndev); - napi_disable(&priv->napi); + can_rx_offload_disable(&priv->offload); ti_hecc_stop(ndev); free_irq(ndev->irq, ndev); close_candev(ndev); ti_hecc_transceiver_switch(priv, 0); - can_led_event(ndev, CAN_LED_EVENT_STOP); - return 0; } @@ -857,7 +829,10 @@ static const struct net_device_ops ti_hecc_netdev_ops = { .ndo_open = ti_hecc_open, .ndo_stop = ti_hecc_close, .ndo_start_xmit = ti_hecc_xmit, - .ndo_change_mtu = can_change_mtu, +}; + +static const struct ethtool_ops ti_hecc_ethtool_ops = { + .get_ts_info = ethtool_op_get_ts_info, }; static const struct of_device_id ti_hecc_dt_ids[] = { @@ -873,7 +848,6 @@ static int ti_hecc_probe(struct platform_device *pdev) struct net_device *ndev = (struct net_device *)0; struct ti_hecc_priv *priv; struct device_node *np = pdev->dev.of_node; - struct resource *res, *irq; struct regulator *reg_xceiver; int err = -ENODEV; @@ -894,48 +868,34 @@ static int ti_hecc_probe(struct platform_device *pdev) priv = netdev_priv(ndev); /* handle hecc memory */ - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hecc"); - if (!res) { - dev_err(&pdev->dev, "can't get IORESOURCE_MEM hecc\n"); - return -EINVAL; - } - - priv->base = devm_ioremap_resource(&pdev->dev, res); + priv->base = devm_platform_ioremap_resource_byname(pdev, "hecc"); if (IS_ERR(priv->base)) { dev_err(&pdev->dev, "hecc ioremap failed\n"); - return PTR_ERR(priv->base); + err = PTR_ERR(priv->base); + goto probe_exit_candev; } /* handle hecc-ram memory */ - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hecc-ram"); - if (!res) { - dev_err(&pdev->dev, "can't get IORESOURCE_MEM hecc-ram\n"); - return -EINVAL; - } - - priv->hecc_ram = devm_ioremap_resource(&pdev->dev, res); + priv->hecc_ram = devm_platform_ioremap_resource_byname(pdev, + "hecc-ram"); if (IS_ERR(priv->hecc_ram)) { dev_err(&pdev->dev, "hecc-ram ioremap failed\n"); - return PTR_ERR(priv->hecc_ram); + err = PTR_ERR(priv->hecc_ram); + goto probe_exit_candev; } /* handle mbx memory */ - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mbx"); - if (!res) { - dev_err(&pdev->dev, "can't get IORESOURCE_MEM mbx\n"); - return -EINVAL; - } - - priv->mbx = devm_ioremap_resource(&pdev->dev, res); + priv->mbx = devm_platform_ioremap_resource_byname(pdev, "mbx"); if (IS_ERR(priv->mbx)) { dev_err(&pdev->dev, "mbx ioremap failed\n"); - return PTR_ERR(priv->mbx); + err = PTR_ERR(priv->mbx); + goto probe_exit_candev; } - irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!irq) { - dev_err(&pdev->dev, "No irq resource\n"); - goto probe_exit; + ndev->irq = platform_get_irq(pdev, 0); + if (ndev->irq < 0) { + err = ndev->irq; + goto probe_exit_candev; } priv->ndev = ndev; @@ -948,11 +908,11 @@ static int ti_hecc_probe(struct platform_device *pdev) priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; spin_lock_init(&priv->mbx_lock); - ndev->irq = irq->start; ndev->flags |= IFF_ECHO; platform_set_drvdata(pdev, ndev); SET_NETDEV_DEV(ndev, &pdev->dev); ndev->netdev_ops = &ti_hecc_netdev_ops; + ndev->ethtool_ops = &ti_hecc_ethtool_ops; priv->clk = clk_get(&pdev->dev, "hecc_ck"); if (IS_ERR(priv->clk)) { @@ -962,37 +922,46 @@ static int ti_hecc_probe(struct platform_device *pdev) goto probe_exit_candev; } priv->can.clock.freq = 
clk_get_rate(priv->clk); - netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll, - HECC_DEF_NAPI_WEIGHT); err = clk_prepare_enable(priv->clk); if (err) { dev_err(&pdev->dev, "clk_prepare_enable() failed\n"); - goto probe_exit_clk; + goto probe_exit_release_clk; + } + + priv->offload.mailbox_read = ti_hecc_mailbox_read; + priv->offload.mb_first = HECC_RX_FIRST_MBOX; + priv->offload.mb_last = HECC_RX_LAST_MBOX; + err = can_rx_offload_add_timestamp(ndev, &priv->offload); + if (err) { + dev_err(&pdev->dev, "can_rx_offload_add_timestamp() failed\n"); + goto probe_exit_disable_clk; } err = register_candev(ndev); if (err) { dev_err(&pdev->dev, "register_candev() failed\n"); - goto probe_exit_clk; + goto probe_exit_offload; } - devm_can_led_init(ndev); - dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n", - priv->base, (u32) ndev->irq); + priv->base, (u32)ndev->irq); return 0; -probe_exit_clk: +probe_exit_offload: + can_rx_offload_del(&priv->offload); +probe_exit_disable_clk: + clk_disable_unprepare(priv->clk); +probe_exit_release_clk: clk_put(priv->clk); probe_exit_candev: free_candev(ndev); -probe_exit: + return err; } -static int ti_hecc_remove(struct platform_device *pdev) +static void ti_hecc_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct ti_hecc_priv *priv = netdev_priv(ndev); @@ -1000,9 +969,8 @@ static int ti_hecc_remove(struct platform_device *pdev) unregister_candev(ndev); clk_disable_unprepare(priv->clk); clk_put(priv->clk); + can_rx_offload_del(&priv->offload); free_candev(ndev); - - return 0; } #ifdef CONFIG_PM diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig index 750d04d9e2ae..cf65a90816b9 100644 --- a/drivers/net/can/usb/Kconfig +++ b/drivers/net/can/usb/Kconfig @@ -1,36 +1,73 @@ +# SPDX-License-Identifier: GPL-2.0-only menu "CAN USB interfaces" depends on USB config CAN_8DEV_USB tristate "8 devices USB2CAN interface" - ---help--- + help This driver supports the USB2CAN interface from 8 devices (http://www.8devices.com). config CAN_EMS_USB tristate "EMS CPC-USB/ARM7 CAN/USB interface" - ---help--- + help This driver is for the one channel CPC-USB/ARM7 CAN/USB interface from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de). -config CAN_ESD_USB2 - tristate "ESD USB/2 CAN/USB interface" - ---help--- - This driver supports the CAN-USB/2 interface - from esd electronic system design gmbh (http://www.esd.eu). +config CAN_ESD_USB + tristate "esd electronics gmbh CAN/USB interfaces" + help + This driver adds support for several CAN/USB interfaces + from esd electronics gmbh (https://www.esd.eu). + + The drivers supports the following devices: + - esd CAN-USB/2 + - esd CAN-USB/3-FD + - esd CAN-USB/Micro + + To compile this driver as a module, choose M here: the module + will be called esd_usb. + +config CAN_ETAS_ES58X + tristate "ETAS ES58X CAN/USB interfaces" + select CRC16 + select NET_DEVLINK + help + This driver supports the ES581.4, ES582.1 and ES584.1 interfaces + from ETAS GmbH (https://www.etas.com/en/products/es58x.php). + + To compile this driver as a module, choose M here: the module + will be called etas_es58x. + +config CAN_F81604 + tristate "Fintek F81604 USB to 2CAN interface" + help + This driver supports the Fintek F81604 USB to 2CAN interface. + The device can support CAN2.0A/B protocol and also support + 2 output pins to control external terminator (optional). + + To compile this driver as a module, choose M here: the module will + be called f81604. 
+ + (see also https://www.fintek.com.tw). config CAN_GS_USB - tristate "Geschwister Schneider UG interfaces" - ---help--- - This driver supports the Geschwister Schneider and bytewerk.org - candleLight USB CAN interfaces USB/CAN devices + tristate "Geschwister Schneider UG and candleLight compatible interfaces" + select CAN_RX_OFFLOAD + help + This driver supports the Geschwister Schneider and + bytewerk.org candleLight compatible + (https://github.com/candle-usb/candleLight_fw) USB/CAN + interfaces. + If unsure choose N, choose Y for built in support, M to compile as module (module will be named: gs_usb). config CAN_KVASER_USB tristate "Kvaser CAN/USB interface" - ---help--- + select NET_DEVLINK + help This driver adds support for Kvaser CAN/USB devices like Kvaser Leaf Light, Kvaser USBcan II and Kvaser Memorator Pro 5xHS. @@ -51,9 +88,12 @@ config CAN_KVASER_USB - Kvaser Leaf Light "China" - Kvaser BlackBird SemiPro - Kvaser USBcan R + - Kvaser USBcan R v2 - Kvaser Leaf Light v2 + - Kvaser Leaf Light R v2 - Kvaser Mini PCI Express HS - Kvaser Mini PCI Express 2xHS + - Kvaser Mini PCIe 1xCAN - Kvaser USBcan Light 2xHS - Kvaser USBcan II HS/HS - Kvaser USBcan II HS/LS @@ -63,16 +103,25 @@ config CAN_KVASER_USB - Scania VCI2 (if you have the Kvaser logo on top) - Kvaser BlackBird v2 - Kvaser Leaf Pro HS v2 + - Kvaser Leaf v3 + - Kvaser Hybrid CAN/LIN - Kvaser Hybrid 2xCAN/LIN + - Kvaser Hybrid Pro CAN/LIN - Kvaser Hybrid Pro 2xCAN/LIN - Kvaser Memorator 2xHS v2 - Kvaser Memorator Pro 2xHS v2 - Kvaser Memorator Pro 5xHS - Kvaser USBcan Light 4xHS - Kvaser USBcan Pro 2xHS v2 + - Kvaser USBcan Pro 4xHS + - Kvaser USBcan Pro 5xCAN - Kvaser USBcan Pro 5xHS + - Kvaser U100 + - Kvaser U100P + - Kvaser U100S - ATI Memorator Pro 2xHS v2 - ATI USBcan Pro 2xHS v2 + - Vining 800 If unsure, say N. @@ -81,15 +130,26 @@ config CAN_KVASER_USB config CAN_MCBA_USB tristate "Microchip CAN BUS Analyzer interface" - ---help--- + help This driver supports the CAN BUS Analyzer interface from Microchip (http://www.microchip.com/development-tools/). +config CAN_NCT6694 + tristate "Nuvoton NCT6694 Socket CANfd support" + depends on MFD_NCT6694 + select CAN_RX_OFFLOAD + help + If you say yes to this option, support will be included for Nuvoton + NCT6694, a USB device to socket CANfd controller. + + This driver can also be built as a module. If so, the module will + be called nct6694_canfd. + config CAN_PEAK_USB tristate "PEAK PCAN-USB/USB Pro interfaces for CAN 2.0b/CAN-FD" - ---help--- + help This driver supports the PEAK-System Technik USB adapters that enable - access to the CAN bus, with repect to the CAN 2.0b and/or CAN-FD + access to the CAN bus, with respect to the CAN 2.0b and/or CAN-FD standards, that is: PCAN-USB single CAN 2.0b channel USB adapter @@ -101,15 +161,9 @@ config CAN_PEAK_USB (see also http://www.peak-system.com). -config CAN_MCBA_USB - tristate "Microchip CAN BUS Analyzer interface" - ---help--- - This driver supports the CAN BUS Analyzer interface - from Microchip (http://www.microchip.com/development-tools/). - config CAN_UCAN tristate "Theobroma Systems UCAN interface" - ---help--- + help This driver supports the Theobroma Systems UCAN USB-CAN interface. 
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile index aa0f17c0b2ed..fcafb1ac262e 100644 --- a/drivers/net/can/usb/Makefile +++ b/drivers/net/can/usb/Makefile @@ -5,9 +5,12 @@ obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o -obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o +obj-$(CONFIG_CAN_ESD_USB) += esd_usb.o +obj-$(CONFIG_CAN_ETAS_ES58X) += etas_es58x/ +obj-$(CONFIG_CAN_F81604) += f81604.o obj-$(CONFIG_CAN_GS_USB) += gs_usb.o obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb/ obj-$(CONFIG_CAN_MCBA_USB) += mcba_usb.o +obj-$(CONFIG_CAN_NCT6694) += nct6694_canfd.o obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/ obj-$(CONFIG_CAN_UCAN) += ucan.o diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index b7dfd4109d24..de8e212a1366 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -1,21 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * CAN driver for EMS Dr. Thomas Wuensche CPC-USB/ARM7 * * Copyright (C) 2004-2009 EMS Dr. Thomas Wuensche - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published - * by the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ +#include <linux/ethtool.h> #include <linux/signal.h> #include <linux/slab.h> #include <linux/module.h> @@ -206,7 +195,7 @@ struct __packed ems_cpc_msg { __le32 ts_sec; /* timestamp in seconds */ __le32 ts_nsec; /* timestamp in nano seconds */ - union { + union __packed { u8 generic[64]; struct cpc_can_msg can_msg; struct cpc_can_params can_params; @@ -242,7 +231,6 @@ struct ems_tx_urb_context { struct ems_usb *dev; u32 echo_index; - u8 dlc; }; struct ems_usb { @@ -267,6 +255,8 @@ struct ems_usb { unsigned int free_slots; /* remember number of available slots */ struct ems_cpc_msg active_params; /* active controller parameters */ + void *rxbuf[MAX_RX_URBS]; + dma_addr_t rxbuf_dma[MAX_RX_URBS]; }; static void ems_usb_read_interrupt_callback(struct urb *urb) @@ -318,7 +308,7 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg) return; cf->can_id = le32_to_cpu(msg->msg.can_msg.id); - cf->can_dlc = get_can_dlc(msg->msg.can_msg.length & 0xF); + cf->len = can_cc_dlc2len(msg->msg.can_msg.length & 0xF); if (msg->type == CPC_MSG_TYPE_EXT_CAN_FRAME || msg->type == CPC_MSG_TYPE_EXT_RTR_FRAME) @@ -328,12 +318,13 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg) msg->type == CPC_MSG_TYPE_EXT_RTR_FRAME) { cf->can_id |= CAN_RTR_FLAG; } else { - for (i = 0; i < cf->can_dlc; i++) + for (i = 0; i < cf->len; i++) cf->data[i] = msg->msg.can_msg.msg[i]; - } + stats->rx_bytes += cf->len; + } stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + netif_rx(skb); } @@ -344,15 +335,14 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg) struct net_device_stats *stats = &dev->netdev->stats; skb = alloc_can_err_skb(dev->netdev, &cf); - if (skb == NULL) - return; if (msg->type == CPC_MSG_TYPE_CAN_STATE) { u8 state = msg->msg.can_state; if 
(state & SJA1000_SR_BS) { dev->can.state = CAN_STATE_BUS_OFF; - cf->can_id |= CAN_ERR_BUSOFF; + if (skb) + cf->can_id |= CAN_ERR_BUSOFF; dev->can.can_stats.bus_off++; can_bus_off(dev->netdev); @@ -370,46 +360,53 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg) /* bus error interrupt */ dev->can.can_stats.bus_error++; - stats->rx_errors++; - cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + if (skb) { + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; - switch (ecc & SJA1000_ECC_MASK) { - case SJA1000_ECC_BIT: - cf->data[2] |= CAN_ERR_PROT_BIT; - break; - case SJA1000_ECC_FORM: - cf->data[2] |= CAN_ERR_PROT_FORM; - break; - case SJA1000_ECC_STUFF: - cf->data[2] |= CAN_ERR_PROT_STUFF; - break; - default: - cf->data[3] = ecc & SJA1000_ECC_SEG; - break; + switch (ecc & SJA1000_ECC_MASK) { + case SJA1000_ECC_BIT: + cf->data[2] |= CAN_ERR_PROT_BIT; + break; + case SJA1000_ECC_FORM: + cf->data[2] |= CAN_ERR_PROT_FORM; + break; + case SJA1000_ECC_STUFF: + cf->data[2] |= CAN_ERR_PROT_STUFF; + break; + default: + cf->data[3] = ecc & SJA1000_ECC_SEG; + break; + } } /* Error occurred during transmission? */ - if ((ecc & SJA1000_ECC_DIR) == 0) - cf->data[2] |= CAN_ERR_PROT_TX; + if ((ecc & SJA1000_ECC_DIR) == 0) { + stats->tx_errors++; + if (skb) + cf->data[2] |= CAN_ERR_PROT_TX; + } else { + stats->rx_errors++; + } - if (dev->can.state == CAN_STATE_ERROR_WARNING || - dev->can.state == CAN_STATE_ERROR_PASSIVE) { + if (skb && (dev->can.state == CAN_STATE_ERROR_WARNING || + dev->can.state == CAN_STATE_ERROR_PASSIVE)) { cf->can_id |= CAN_ERR_CRTL; cf->data[1] = (txerr > rxerr) ? CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE; } } else if (msg->type == CPC_MSG_TYPE_OVERRUN) { - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + if (skb) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + } stats->rx_over_errors++; stats->rx_errors++; } - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; - netif_rx(skb); + if (skb) + netif_rx(skb); } /* @@ -528,9 +525,8 @@ static void ems_usb_write_bulk_callback(struct urb *urb) /* transmission complete interrupt */ netdev->stats.tx_packets++; - netdev->stats.tx_bytes += context->dlc; - - can_get_echo_skb(netdev, context->echo_index); + netdev->stats.tx_bytes += can_get_echo_skb(netdev, context->echo_index, + NULL); /* Release context */ context->echo_index = MAX_TX_URBS; @@ -599,6 +595,7 @@ static int ems_usb_start(struct ems_usb *dev) for (i = 0; i < MAX_RX_URBS; i++) { struct urb *urb = NULL; u8 *buf = NULL; + dma_addr_t buf_dma; /* create a URB, and a buffer for it */ urb = usb_alloc_urb(0, GFP_KERNEL); @@ -608,7 +605,7 @@ static int ems_usb_start(struct ems_usb *dev) } buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL, - &urb->transfer_dma); + &buf_dma); if (!buf) { netdev_err(netdev, "No memory left for USB buffer\n"); usb_free_urb(urb); @@ -616,6 +613,8 @@ static int ems_usb_start(struct ems_usb *dev) break; } + urb->transfer_dma = buf_dma; + usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 2), buf, RX_BUFFER_SIZE, ems_usb_read_bulk_callback, dev); @@ -631,6 +630,9 @@ static int ems_usb_start(struct ems_usb *dev) break; } + dev->rxbuf[i] = buf; + dev->rxbuf_dma[i] = buf_dma; + /* Drop reference, USB core will take care of freeing it */ usb_free_urb(urb); } @@ -696,6 +698,10 @@ static void unlink_all_urbs(struct ems_usb *dev) usb_kill_anchored_urbs(&dev->rx_submitted); + for (i = 0; i < MAX_RX_URBS; ++i) + usb_free_coherent(dev->udev, RX_BUFFER_SIZE, + dev->rxbuf[i], 
dev->rxbuf_dma[i]); + usb_kill_anchored_urbs(&dev->tx_submitted); atomic_set(&dev->active_tx_urbs, 0); @@ -749,7 +755,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne size_t size = CPC_HEADER_SIZE + CPC_MSG_HEADER_LEN + sizeof(struct cpc_can_msg); - if (can_dropped_invalid_skb(netdev, skb)) + if (can_dev_dropped_skb(netdev, skb)) return NETDEV_TX_OK; /* create a URB, and a buffer for it, and copy the data to the URB */ @@ -767,7 +773,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne msg = (struct ems_cpc_msg *)&buf[CPC_HEADER_SIZE]; msg->msg.can_msg.id = cpu_to_le32(cf->can_id & CAN_ERR_MASK); - msg->msg.can_msg.length = cf->can_dlc; + msg->msg.can_msg.length = cf->len; if (cf->can_id & CAN_RTR_FLAG) { msg->type = cf->can_id & CAN_EFF_FLAG ? @@ -778,10 +784,10 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne msg->type = cf->can_id & CAN_EFF_FLAG ? CPC_CMD_TYPE_EXT_CAN_FRAME : CPC_CMD_TYPE_CAN_FRAME; - for (i = 0; i < cf->can_dlc; i++) + for (i = 0; i < cf->len; i++) msg->msg.can_msg.msg[i] = cf->data[i]; - msg->length = CPC_CAN_MSG_MIN_SIZE + cf->can_dlc; + msg->length = CPC_CAN_MSG_MIN_SIZE + cf->len; } for (i = 0; i < MAX_TX_URBS; i++) { @@ -806,24 +812,22 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne context->dev = dev; context->echo_index = i; - context->dlc = cf->can_dlc; usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), buf, size, ems_usb_write_bulk_callback, context); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; usb_anchor_urb(urb, &dev->tx_submitted); - can_put_echo_skb(skb, netdev, context->echo_index); + can_put_echo_skb(skb, netdev, context->echo_index, 0); atomic_inc(&dev->active_tx_urbs); err = usb_submit_urb(urb, GFP_ATOMIC); if (unlikely(err)) { - can_free_echo_skb(netdev, context->echo_index); + can_free_echo_skb(netdev, context->echo_index, NULL); usb_unanchor_urb(urb); usb_free_coherent(dev->udev, size, buf, urb->transfer_dma); - dev_kfree_skb(skb); atomic_dec(&dev->active_tx_urbs); @@ -881,11 +885,14 @@ static const struct net_device_ops ems_usb_netdev_ops = { .ndo_open = ems_usb_open, .ndo_stop = ems_usb_close, .ndo_start_xmit = ems_usb_start_xmit, - .ndo_change_mtu = can_change_mtu, +}; + +static const struct ethtool_ops ems_usb_ethtool_ops = { + .get_ts_info = ethtool_op_get_ts_info, }; static const struct can_bittiming_const ems_usb_bittiming_const = { - .name = "ems_usb", + .name = KBUILD_MODNAME, .tseg1_min = 1, .tseg1_max = 16, .tseg2_min = 1, @@ -995,6 +1002,7 @@ static int ems_usb_probe(struct usb_interface *intf, dev->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; netdev->netdev_ops = &ems_usb_netdev_ops; + netdev->ethtool_ops = &ems_usb_ethtool_ops; netdev->flags |= IFF_ECHO; /* we support local echo */ @@ -1065,7 +1073,6 @@ static void ems_usb_disconnect(struct usb_interface *intf) if (dev) { unregister_netdev(dev->netdev); - free_candev(dev->netdev); unlink_all_urbs(dev); @@ -1073,12 +1080,14 @@ static void ems_usb_disconnect(struct usb_interface *intf) kfree(dev->intr_in_buffer); kfree(dev->tx_msg_buffer); + + free_candev(dev->netdev); } } /* usb specific object needed to register this driver with the usb subsystem */ static struct usb_driver ems_usb_driver = { - .name = "ems_usb", + .name = KBUILD_MODNAME, .probe = ems_usb_probe, .disconnect = ems_usb_disconnect, .id_table = ems_usb_table, diff --git a/drivers/net/can/usb/esd_usb.c b/drivers/net/can/usb/esd_usb.c new file mode 100644 index 
000000000000..08da507faef4
--- /dev/null
+++ b/drivers/net/can/usb/esd_usb.c
@@ -0,0 +1,1398 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CAN driver for esd electronics gmbh CAN-USB/2, CAN-USB/3 and CAN-USB/Micro
+ *
+ * Copyright (C) 2010-2012 esd electronic system design gmbh, Matthias Fuchs <socketcan@esd.eu>
+ * Copyright (C) 2022-2024 esd electronics gmbh, Frank Jungclaus <frank.jungclaus@esd.eu>
+ */
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/err.h>
+#include <linux/ethtool.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/units.h>
+#include <linux/usb.h>
+
+MODULE_AUTHOR("Matthias Fuchs <socketcan@esd.eu>");
+MODULE_AUTHOR("Frank Jungclaus <frank.jungclaus@esd.eu>");
+MODULE_DESCRIPTION("CAN driver for esd electronics gmbh CAN-USB/2, CAN-USB/3 and CAN-USB/Micro interfaces");
+MODULE_LICENSE("GPL v2");
+
+/* USB vendor and product ID */
+#define ESD_USB_ESDGMBH_VENDOR_ID 0x0ab4
+#define ESD_USB_CANUSB2_PRODUCT_ID 0x0010
+#define ESD_USB_CANUSBM_PRODUCT_ID 0x0011
+#define ESD_USB_CANUSB3_PRODUCT_ID 0x0014
+
+/* CAN controller clock frequencies */
+#define ESD_USB_2_CAN_CLOCK (60 * MEGA) /* Hz */
+#define ESD_USB_M_CAN_CLOCK (36 * MEGA) /* Hz */
+#define ESD_USB_3_CAN_CLOCK (80 * MEGA) /* Hz */
+
+/* Maximum number of CAN nets */
+#define ESD_USB_MAX_NETS 2
+
+/* USB commands */
+#define ESD_USB_CMD_VERSION 1 /* also used for VERSION_REPLY */
+#define ESD_USB_CMD_CAN_RX 2 /* device to host only */
+#define ESD_USB_CMD_CAN_TX 3 /* also used for TX_DONE */
+#define ESD_USB_CMD_SETBAUD 4 /* also used for SETBAUD_REPLY */
+#define ESD_USB_CMD_TS 5 /* also used for TS_REPLY */
+#define ESD_USB_CMD_IDADD 6 /* also used for IDADD_REPLY */
+
+/* esd CAN message flags - dlc field */
+#define ESD_USB_RTR BIT(4)
+#define ESD_USB_NO_BRS BIT(4)
+#define ESD_USB_ESI BIT(5)
+#define ESD_USB_FD BIT(7)
+
+/* esd CAN message flags - id field */
+#define ESD_USB_EXTID BIT(29)
+#define ESD_USB_EVENT BIT(30)
+#define ESD_USB_IDMASK GENMASK(28, 0)
+
+/* esd CAN event ids */
+#define ESD_USB_EV_CAN_ERROR_EXT 2 /* CAN controller specific diagnostic data */
+
+/* baudrate message flags */
+#define ESD_USB_LOM BIT(30) /* Listen Only Mode */
+#define ESD_USB_UBR BIT(31) /* User Bit Rate (controller BTR) in bits 0..27 */
+#define ESD_USB_NO_BAUDRATE GENMASK(30, 0) /* bit rate unconfigured */
+
+/* bit timing esd CAN-USB */
+#define ESD_USB_2_TSEG1_SHIFT 16
+#define ESD_USB_2_TSEG2_SHIFT 20
+#define ESD_USB_2_SJW_SHIFT 14
+#define ESD_USB_M_SJW_SHIFT 24
+#define ESD_USB_TRIPLE_SAMPLES BIT(23)
+
+/* Transmitter Delay Compensation */
+#define ESD_USB_3_TDC_MODE_AUTO 0
+
+/* esd IDADD message */
+#define ESD_USB_ID_ENABLE BIT(7)
+#define ESD_USB_MAX_ID_SEGMENT 64
+
+/* SJA1000 ECC register (emulated by usb firmware) */
+#define ESD_USB_SJA1000_ECC_SEG GENMASK(4, 0)
+#define ESD_USB_SJA1000_ECC_DIR BIT(5)
+#define ESD_USB_SJA1000_ECC_ERR GENMASK(2, 1)
+#define ESD_USB_SJA1000_ECC_BIT 0x00
+#define ESD_USB_SJA1000_ECC_FORM BIT(6)
+#define ESD_USB_SJA1000_ECC_STUFF BIT(7)
+#define ESD_USB_SJA1000_ECC_MASK GENMASK(7, 6)
+
+/* esd bus state event codes */
+#define ESD_USB_BUSSTATE_MASK GENMASK(7, 6)
+#define ESD_USB_BUSSTATE_WARN BIT(6)
+#define ESD_USB_BUSSTATE_ERRPASSIVE BIT(7)
+#define ESD_USB_BUSSTATE_BUSOFF GENMASK(7, 6)
+
+#define ESD_USB_RX_BUFFER_SIZE 1024
+#define ESD_USB_MAX_RX_URBS 4
+#define ESD_USB_MAX_TX_URBS 16 /* must be power of 2 */
+
+/* Modes for CAN-USB/3, to be
used for esd_usb_3_set_baudrate_msg_x.mode */ +#define ESD_USB_3_BAUDRATE_MODE_DISABLE 0 /* remove from bus */ +#define ESD_USB_3_BAUDRATE_MODE_INDEX 1 /* ESD (CiA) bit rate idx */ +#define ESD_USB_3_BAUDRATE_MODE_BTR_CTRL 2 /* BTR values (controller)*/ +#define ESD_USB_3_BAUDRATE_MODE_BTR_CANONICAL 3 /* BTR values (canonical) */ +#define ESD_USB_3_BAUDRATE_MODE_NUM 4 /* numerical bit rate */ +#define ESD_USB_3_BAUDRATE_MODE_AUTOBAUD 5 /* autobaud */ + +/* Flags for CAN-USB/3, to be used for esd_usb_3_set_baudrate_msg_x.flags */ +#define ESD_USB_3_BAUDRATE_FLAG_FD BIT(0) /* enable CAN FD mode */ +#define ESD_USB_3_BAUDRATE_FLAG_LOM BIT(1) /* enable listen only mode */ +#define ESD_USB_3_BAUDRATE_FLAG_STM BIT(2) /* enable self test mode */ +#define ESD_USB_3_BAUDRATE_FLAG_TRS BIT(3) /* enable triple sampling */ +#define ESD_USB_3_BAUDRATE_FLAG_TXP BIT(4) /* enable transmit pause */ + +struct esd_usb_header_msg { + u8 len; /* total message length in 32bit words */ + u8 cmd; + u8 rsvd[2]; +}; + +struct esd_usb_version_msg { + u8 len; /* total message length in 32bit words */ + u8 cmd; + u8 rsvd; + u8 flags; + __le32 drv_version; +}; + +struct esd_usb_version_reply_msg { + u8 len; /* total message length in 32bit words */ + u8 cmd; + u8 nets; + u8 features; + __le32 version; + u8 name[16]; + __le32 rsvd; + __le32 ts; +}; + +struct esd_usb_rx_msg { + u8 len; /* total message length in 32bit words */ + u8 cmd; + u8 net; + u8 dlc; + __le32 ts; + __le32 id; /* upper 3 bits contain flags */ + union { + u8 data[CAN_MAX_DLEN]; + u8 data_fd[CANFD_MAX_DLEN]; + struct { + u8 status; /* CAN Controller Status */ + u8 ecc; /* Error Capture Register */ + u8 rec; /* RX Error Counter */ + u8 tec; /* TX Error Counter */ + } ev_can_err_ext; /* For ESD_EV_CAN_ERROR_EXT */ + }; +}; + +struct esd_usb_tx_msg { + u8 len; /* total message length in 32bit words */ + u8 cmd; + u8 net; + u8 dlc; + u32 hnd; /* opaque handle, not used by device */ + __le32 id; /* upper 3 bits contain flags */ + union { + u8 data[CAN_MAX_DLEN]; + u8 data_fd[CANFD_MAX_DLEN]; + }; +}; + +struct esd_usb_tx_done_msg { + u8 len; /* total message length in 32bit words */ + u8 cmd; + u8 net; + u8 status; + u32 hnd; /* opaque handle, not used by device */ + __le32 ts; +}; + +struct esd_usb_id_filter_msg { + u8 len; /* total message length in 32bit words */ + u8 cmd; + u8 net; + u8 option; + __le32 mask[ESD_USB_MAX_ID_SEGMENT + 1]; /* +1 for 29bit extended IDs */ +}; + +struct esd_usb_set_baudrate_msg { + u8 len; /* total message length in 32bit words */ + u8 cmd; + u8 net; + u8 rsvd; + __le32 baud; +}; + +/* CAN-USB/3 baudrate configuration, used for nominal as well as for data bit rate */ +struct esd_usb_3_baudrate_cfg { + __le16 brp; /* bit rate pre-scaler */ + __le16 tseg1; /* time segment before sample point */ + __le16 tseg2; /* time segment after sample point */ + __le16 sjw; /* synchronization jump Width */ +}; + +/* In principle, the esd CAN-USB/3 supports Transmitter Delay Compensation (TDC), + * but currently only the automatic TDC mode is supported by this driver. + * An implementation for manual TDC configuration will follow. 
+ * + * For information about struct esd_usb_3_tdc_cfg, see + * NTCAN Application Developers Manual, 6.2.25 NTCAN_TDC_CFG + related chapters + * https://esd.eu/fileadmin/esd/docs/manuals/NTCAN_Part1_Function_API_Manual_en_56.pdf + */ +struct esd_usb_3_tdc_cfg { + u8 tdc_mode; /* transmitter delay compensation mode */ + u8 ssp_offset; /* secondary sample point offset in mtq */ + s8 ssp_shift; /* secondary sample point shift in mtq */ + u8 tdc_filter; /* TDC filter in mtq */ +}; + +/* Extended version of the above set_baudrate_msg for a CAN-USB/3 + * to define the CAN bit timing configuration of the CAN controller in + * CAN FD mode as well as in Classical CAN mode. + * + * The payload of this command is a NTCAN_BAUDRATE_X structure according to + * esd electronics gmbh, NTCAN Application Developers Manual, 6.2.15 NTCAN_BAUDRATE_X + * https://esd.eu/fileadmin/esd/docs/manuals/NTCAN_Part1_Function_API_Manual_en_56.pdf + */ +struct esd_usb_3_set_baudrate_msg_x { + u8 len; /* total message length in 32bit words */ + u8 cmd; + u8 net; + u8 rsvd; /*reserved */ + /* Payload ... */ + __le16 mode; /* mode word, see ESD_USB_3_BAUDRATE_MODE_xxx */ + __le16 flags; /* control flags, see ESD_USB_3_BAUDRATE_FLAG_xxx */ + struct esd_usb_3_tdc_cfg tdc; /* TDC configuration */ + struct esd_usb_3_baudrate_cfg nom; /* nominal bit rate */ + struct esd_usb_3_baudrate_cfg data; /* data bit rate */ +}; + +/* Main message type used between library and application */ +union __packed esd_usb_msg { + struct esd_usb_header_msg hdr; + struct esd_usb_version_msg version; + struct esd_usb_version_reply_msg version_reply; + struct esd_usb_rx_msg rx; + struct esd_usb_tx_msg tx; + struct esd_usb_tx_done_msg txdone; + struct esd_usb_set_baudrate_msg setbaud; + struct esd_usb_3_set_baudrate_msg_x setbaud_x; + struct esd_usb_id_filter_msg filter; +}; + +static struct usb_device_id esd_usb_table[] = { + {USB_DEVICE(ESD_USB_ESDGMBH_VENDOR_ID, ESD_USB_CANUSB2_PRODUCT_ID)}, + {USB_DEVICE(ESD_USB_ESDGMBH_VENDOR_ID, ESD_USB_CANUSBM_PRODUCT_ID)}, + {USB_DEVICE(ESD_USB_ESDGMBH_VENDOR_ID, ESD_USB_CANUSB3_PRODUCT_ID)}, + {} +}; +MODULE_DEVICE_TABLE(usb, esd_usb_table); + +struct esd_usb_net_priv; + +struct esd_tx_urb_context { + struct esd_usb_net_priv *priv; + u32 echo_index; +}; + +struct esd_usb { + struct usb_device *udev; + struct esd_usb_net_priv *nets[ESD_USB_MAX_NETS]; + + struct usb_anchor rx_submitted; + + int net_count; + u32 version; + int rxinitdone; + int in_usb_disconnect; + void *rxbuf[ESD_USB_MAX_RX_URBS]; + dma_addr_t rxbuf_dma[ESD_USB_MAX_RX_URBS]; +}; + +struct esd_usb_net_priv { + struct can_priv can; /* must be the first member */ + + atomic_t active_tx_jobs; + struct usb_anchor tx_submitted; + struct esd_tx_urb_context tx_contexts[ESD_USB_MAX_TX_URBS]; + + struct esd_usb *usb; + struct net_device *netdev; + int index; + u8 old_state; + struct can_berr_counter bec; +}; + +static void esd_usb_rx_event(struct esd_usb_net_priv *priv, + union esd_usb_msg *msg) +{ + struct net_device_stats *stats = &priv->netdev->stats; + struct can_frame *cf; + struct sk_buff *skb; + u32 id = le32_to_cpu(msg->rx.id) & ESD_USB_IDMASK; + + if (id == ESD_USB_EV_CAN_ERROR_EXT) { + u8 state = msg->rx.ev_can_err_ext.status; + u8 ecc = msg->rx.ev_can_err_ext.ecc; + + priv->bec.rxerr = msg->rx.ev_can_err_ext.rec; + priv->bec.txerr = msg->rx.ev_can_err_ext.tec; + + netdev_dbg(priv->netdev, + "CAN_ERR_EV_EXT: dlc=%#02x state=%02x ecc=%02x rec=%02x tec=%02x\n", + msg->rx.dlc, state, ecc, + priv->bec.rxerr, priv->bec.txerr); + + /* if 
berr-reporting is off, only pass through on state change ... */ + if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) && + state == priv->old_state) + return; + + skb = alloc_can_err_skb(priv->netdev, &cf); + if (!skb) + stats->rx_dropped++; + + if (state != priv->old_state) { + enum can_state tx_state, rx_state; + enum can_state new_state = CAN_STATE_ERROR_ACTIVE; + + priv->old_state = state; + + switch (state & ESD_USB_BUSSTATE_MASK) { + case ESD_USB_BUSSTATE_BUSOFF: + new_state = CAN_STATE_BUS_OFF; + can_bus_off(priv->netdev); + break; + case ESD_USB_BUSSTATE_WARN: + new_state = CAN_STATE_ERROR_WARNING; + break; + case ESD_USB_BUSSTATE_ERRPASSIVE: + new_state = CAN_STATE_ERROR_PASSIVE; + break; + default: + new_state = CAN_STATE_ERROR_ACTIVE; + priv->bec.txerr = 0; + priv->bec.rxerr = 0; + break; + } + + if (new_state != priv->can.state) { + tx_state = (priv->bec.txerr >= priv->bec.rxerr) ? new_state : 0; + rx_state = (priv->bec.txerr <= priv->bec.rxerr) ? new_state : 0; + can_change_state(priv->netdev, cf, + tx_state, rx_state); + } + } else if (skb) { + priv->can.can_stats.bus_error++; + stats->rx_errors++; + + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + + switch (ecc & ESD_USB_SJA1000_ECC_MASK) { + case ESD_USB_SJA1000_ECC_BIT: + cf->data[2] |= CAN_ERR_PROT_BIT; + break; + case ESD_USB_SJA1000_ECC_FORM: + cf->data[2] |= CAN_ERR_PROT_FORM; + break; + case ESD_USB_SJA1000_ECC_STUFF: + cf->data[2] |= CAN_ERR_PROT_STUFF; + break; + default: + break; + } + + /* Error occurred during transmission? */ + if (!(ecc & ESD_USB_SJA1000_ECC_DIR)) + cf->data[2] |= CAN_ERR_PROT_TX; + + /* Bit stream position in CAN frame as the error was detected */ + cf->data[3] = ecc & ESD_USB_SJA1000_ECC_SEG; + } + + if (skb) { + cf->can_id |= CAN_ERR_CNT; + cf->data[6] = priv->bec.txerr; + cf->data[7] = priv->bec.rxerr; + + netif_rx(skb); + } + } +} + +static void esd_usb_rx_can_msg(struct esd_usb_net_priv *priv, + union esd_usb_msg *msg) +{ + struct net_device_stats *stats = &priv->netdev->stats; + struct can_frame *cf; + struct canfd_frame *cfd; + struct sk_buff *skb; + u32 id; + u8 len; + + if (!netif_device_present(priv->netdev)) + return; + + id = le32_to_cpu(msg->rx.id); + + if (id & ESD_USB_EVENT) { + esd_usb_rx_event(priv, msg); + } else { + if (msg->rx.dlc & ESD_USB_FD) { + skb = alloc_canfd_skb(priv->netdev, &cfd); + } else { + skb = alloc_can_skb(priv->netdev, &cf); + cfd = (struct canfd_frame *)cf; + } + + if (skb == NULL) { + stats->rx_dropped++; + return; + } + + cfd->can_id = id & ESD_USB_IDMASK; + + if (msg->rx.dlc & ESD_USB_FD) { + /* masking by 0x0F is already done within can_fd_dlc2len() */ + cfd->len = can_fd_dlc2len(msg->rx.dlc); + len = cfd->len; + if ((msg->rx.dlc & ESD_USB_NO_BRS) == 0) + cfd->flags |= CANFD_BRS; + if (msg->rx.dlc & ESD_USB_ESI) + cfd->flags |= CANFD_ESI; + } else { + can_frame_set_cc_len(cf, msg->rx.dlc & ~ESD_USB_RTR, priv->can.ctrlmode); + len = cf->len; + if (msg->rx.dlc & ESD_USB_RTR) { + cf->can_id |= CAN_RTR_FLAG; + len = 0; + } + } + + if (id & ESD_USB_EXTID) + cfd->can_id |= CAN_EFF_FLAG; + + memcpy(cfd->data, msg->rx.data_fd, len); + stats->rx_bytes += len; + stats->rx_packets++; + + netif_rx(skb); + } +} + +static void esd_usb_tx_done_msg(struct esd_usb_net_priv *priv, + union esd_usb_msg *msg) +{ + struct net_device_stats *stats = &priv->netdev->stats; + struct net_device *netdev = priv->netdev; + struct esd_tx_urb_context *context; + + if (!netif_device_present(netdev)) + return; + + context = &priv->tx_contexts[msg->txdone.hnd & 
(ESD_USB_MAX_TX_URBS - 1)]; + + if (!msg->txdone.status) { + stats->tx_packets++; + stats->tx_bytes += can_get_echo_skb(netdev, context->echo_index, + NULL); + } else { + stats->tx_errors++; + can_free_echo_skb(netdev, context->echo_index, NULL); + } + + /* Release context */ + context->echo_index = ESD_USB_MAX_TX_URBS; + atomic_dec(&priv->active_tx_jobs); + + netif_wake_queue(netdev); +} + +static void esd_usb_read_bulk_callback(struct urb *urb) +{ + struct esd_usb *dev = urb->context; + int err; + int pos = 0; + int i; + + switch (urb->status) { + case 0: /* success */ + break; + + case -ENOENT: + case -EPIPE: + case -EPROTO: + case -ESHUTDOWN: + return; + + default: + dev_info(dev->udev->dev.parent, + "Rx URB aborted (%pe)\n", ERR_PTR(urb->status)); + goto resubmit_urb; + } + + while (pos < urb->actual_length) { + union esd_usb_msg *msg; + + msg = (union esd_usb_msg *)(urb->transfer_buffer + pos); + + switch (msg->hdr.cmd) { + case ESD_USB_CMD_CAN_RX: + if (msg->rx.net >= dev->net_count) { + dev_err(dev->udev->dev.parent, "format error\n"); + break; + } + + esd_usb_rx_can_msg(dev->nets[msg->rx.net], msg); + break; + + case ESD_USB_CMD_CAN_TX: + if (msg->txdone.net >= dev->net_count) { + dev_err(dev->udev->dev.parent, "format error\n"); + break; + } + + esd_usb_tx_done_msg(dev->nets[msg->txdone.net], + msg); + break; + } + + pos += msg->hdr.len * sizeof(u32); /* convert to # of bytes */ + + if (pos > urb->actual_length) { + dev_err(dev->udev->dev.parent, "format error\n"); + break; + } + } + +resubmit_urb: + usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1), + urb->transfer_buffer, ESD_USB_RX_BUFFER_SIZE, + esd_usb_read_bulk_callback, dev); + + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err == -ENODEV) { + for (i = 0; i < dev->net_count; i++) { + if (dev->nets[i]) + netif_device_detach(dev->nets[i]->netdev); + } + } else if (err) { + dev_err(dev->udev->dev.parent, + "failed resubmitting read bulk urb: %pe\n", ERR_PTR(err)); + } +} + +/* callback for bulk IN urb */ +static void esd_usb_write_bulk_callback(struct urb *urb) +{ + struct esd_tx_urb_context *context = urb->context; + struct esd_usb_net_priv *priv; + struct net_device *netdev; + size_t size = sizeof(union esd_usb_msg); + + WARN_ON(!context); + + priv = context->priv; + netdev = priv->netdev; + + /* free up our allocated buffer */ + usb_free_coherent(urb->dev, size, + urb->transfer_buffer, urb->transfer_dma); + + if (!netif_device_present(netdev)) + return; + + if (urb->status) + netdev_info(netdev, "Tx URB aborted (%pe)\n", ERR_PTR(urb->status)); + + netif_trans_update(netdev); +} + +static ssize_t firmware_show(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct usb_interface *intf = to_usb_interface(d); + struct esd_usb *dev = usb_get_intfdata(intf); + + return sprintf(buf, "%d.%d.%d\n", + (dev->version >> 12) & 0xf, + (dev->version >> 8) & 0xf, + dev->version & 0xff); +} +static DEVICE_ATTR_RO(firmware); + +static ssize_t hardware_show(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct usb_interface *intf = to_usb_interface(d); + struct esd_usb *dev = usb_get_intfdata(intf); + + return sprintf(buf, "%d.%d.%d\n", + (dev->version >> 28) & 0xf, + (dev->version >> 24) & 0xf, + (dev->version >> 16) & 0xff); +} +static DEVICE_ATTR_RO(hardware); + +static ssize_t nets_show(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct usb_interface *intf = to_usb_interface(d); + struct esd_usb *dev = usb_get_intfdata(intf); + + return sprintf(buf, "%d", 
dev->net_count); +} +static DEVICE_ATTR_RO(nets); + +static int esd_usb_send_msg(struct esd_usb *dev, union esd_usb_msg *msg) +{ + int actual_length; + + return usb_bulk_msg(dev->udev, + usb_sndbulkpipe(dev->udev, 2), + msg, + msg->hdr.len * sizeof(u32), /* convert to # of bytes */ + &actual_length, + 1000); +} + +static int esd_usb_wait_msg(struct esd_usb *dev, + union esd_usb_msg *msg) +{ + int actual_length; + + return usb_bulk_msg(dev->udev, + usb_rcvbulkpipe(dev->udev, 1), + msg, + sizeof(*msg), + &actual_length, + 1000); +} + +static int esd_usb_setup_rx_urbs(struct esd_usb *dev) +{ + int i, err = 0; + + if (dev->rxinitdone) + return 0; + + for (i = 0; i < ESD_USB_MAX_RX_URBS; i++) { + struct urb *urb = NULL; + u8 *buf = NULL; + dma_addr_t buf_dma; + + /* create a URB, and a buffer for it */ + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) { + err = -ENOMEM; + break; + } + + buf = usb_alloc_coherent(dev->udev, ESD_USB_RX_BUFFER_SIZE, GFP_KERNEL, + &buf_dma); + if (!buf) { + dev_warn(dev->udev->dev.parent, + "No memory left for USB buffer\n"); + err = -ENOMEM; + goto freeurb; + } + + urb->transfer_dma = buf_dma; + + usb_fill_bulk_urb(urb, dev->udev, + usb_rcvbulkpipe(dev->udev, 1), + buf, ESD_USB_RX_BUFFER_SIZE, + esd_usb_read_bulk_callback, dev); + urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + usb_anchor_urb(urb, &dev->rx_submitted); + + err = usb_submit_urb(urb, GFP_KERNEL); + if (err) { + usb_unanchor_urb(urb); + usb_free_coherent(dev->udev, ESD_USB_RX_BUFFER_SIZE, buf, + urb->transfer_dma); + goto freeurb; + } + + dev->rxbuf[i] = buf; + dev->rxbuf_dma[i] = buf_dma; + +freeurb: + /* Drop reference, USB core will take care of freeing it */ + usb_free_urb(urb); + if (err) + break; + } + + /* Did we submit any URBs */ + if (i == 0) { + dev_err(dev->udev->dev.parent, "couldn't setup read URBs\n"); + return err; + } + + /* Warn if we've couldn't transmit all the URBs */ + if (i < ESD_USB_MAX_RX_URBS) { + dev_warn(dev->udev->dev.parent, + "rx performance may be slow\n"); + } + + dev->rxinitdone = 1; + return 0; +} + +/* Start interface */ +static int esd_usb_start(struct esd_usb_net_priv *priv) +{ + struct esd_usb *dev = priv->usb; + struct net_device *netdev = priv->netdev; + union esd_usb_msg *msg; + int err, i; + + msg = kmalloc(sizeof(*msg), GFP_KERNEL); + if (!msg) { + err = -ENOMEM; + goto out; + } + + /* Enable all IDs + * The IDADD message takes up to 64 32 bit bitmasks (2048 bits). + * Each bit represents one 11 bit CAN identifier. A set bit + * enables reception of the corresponding CAN identifier. A cleared + * bit disabled this identifier. An additional bitmask value + * following the CAN 2.0A bits is used to enable reception of + * extended CAN frames. Only the LSB of this final mask is checked + * for the complete 29 bit ID range. The IDADD message also allows + * filter configuration for an ID subset. In this case you can add + * the number of the starting bitmask (0..64) to the filter.option + * field followed by only some bitmasks. 
+ */ + msg->hdr.cmd = ESD_USB_CMD_IDADD; + msg->hdr.len = sizeof(struct esd_usb_id_filter_msg) / sizeof(u32); /* # of 32bit words */ + msg->filter.net = priv->index; + msg->filter.option = ESD_USB_ID_ENABLE; /* start with segment 0 */ + for (i = 0; i < ESD_USB_MAX_ID_SEGMENT; i++) + msg->filter.mask[i] = cpu_to_le32(GENMASK(31, 0)); + /* enable 29bit extended IDs */ + msg->filter.mask[ESD_USB_MAX_ID_SEGMENT] = cpu_to_le32(BIT(0)); + + err = esd_usb_send_msg(dev, msg); + if (err) + goto out; + + err = esd_usb_setup_rx_urbs(dev); + if (err) + goto out; + + priv->can.state = CAN_STATE_ERROR_ACTIVE; + +out: + if (err == -ENODEV) + netif_device_detach(netdev); + if (err) + netdev_err(netdev, "couldn't start device: %pe\n", ERR_PTR(err)); + + kfree(msg); + return err; +} + +static void unlink_all_urbs(struct esd_usb *dev) +{ + struct esd_usb_net_priv *priv; + int i, j; + + usb_kill_anchored_urbs(&dev->rx_submitted); + + for (i = 0; i < ESD_USB_MAX_RX_URBS; ++i) + usb_free_coherent(dev->udev, ESD_USB_RX_BUFFER_SIZE, + dev->rxbuf[i], dev->rxbuf_dma[i]); + + for (i = 0; i < dev->net_count; i++) { + priv = dev->nets[i]; + if (priv) { + usb_kill_anchored_urbs(&priv->tx_submitted); + atomic_set(&priv->active_tx_jobs, 0); + + for (j = 0; j < ESD_USB_MAX_TX_URBS; j++) + priv->tx_contexts[j].echo_index = ESD_USB_MAX_TX_URBS; + } + } +} + +static int esd_usb_open(struct net_device *netdev) +{ + struct esd_usb_net_priv *priv = netdev_priv(netdev); + int err; + + /* common open */ + err = open_candev(netdev); + if (err) + return err; + + /* finally start device */ + err = esd_usb_start(priv); + if (err) { + close_candev(netdev); + return err; + } + + netif_start_queue(netdev); + + return 0; +} + +static netdev_tx_t esd_usb_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct esd_usb_net_priv *priv = netdev_priv(netdev); + struct esd_usb *dev = priv->usb; + struct esd_tx_urb_context *context = NULL; + struct net_device_stats *stats = &netdev->stats; + struct canfd_frame *cfd = (struct canfd_frame *)skb->data; + union esd_usb_msg *msg; + struct urb *urb; + u8 *buf; + int i, err; + int ret = NETDEV_TX_OK; + size_t size = sizeof(union esd_usb_msg); + + if (can_dev_dropped_skb(netdev, skb)) + return NETDEV_TX_OK; + + /* create a URB, and a buffer for it, and copy the data to the URB */ + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!urb) { + stats->tx_dropped++; + dev_kfree_skb(skb); + goto nourbmem; + } + + buf = usb_alloc_coherent(dev->udev, size, GFP_ATOMIC, + &urb->transfer_dma); + if (!buf) { + netdev_err(netdev, "No memory left for USB buffer\n"); + stats->tx_dropped++; + dev_kfree_skb(skb); + goto nobufmem; + } + + msg = (union esd_usb_msg *)buf; + + /* minimal length as # of 32bit words */ + msg->hdr.len = offsetof(struct esd_usb_tx_msg, data) / sizeof(u32); + msg->hdr.cmd = ESD_USB_CMD_CAN_TX; + msg->tx.net = priv->index; + + if (can_is_canfd_skb(skb)) { + msg->tx.dlc = can_fd_len2dlc(cfd->len); + msg->tx.dlc |= ESD_USB_FD; + + if ((cfd->flags & CANFD_BRS) == 0) + msg->tx.dlc |= ESD_USB_NO_BRS; + } else { + msg->tx.dlc = can_get_cc_dlc((struct can_frame *)cfd, priv->can.ctrlmode); + + if (cfd->can_id & CAN_RTR_FLAG) + msg->tx.dlc |= ESD_USB_RTR; + } + + msg->tx.id = cpu_to_le32(cfd->can_id & CAN_ERR_MASK); + + if (cfd->can_id & CAN_EFF_FLAG) + msg->tx.id |= cpu_to_le32(ESD_USB_EXTID); + + memcpy(msg->tx.data_fd, cfd->data, cfd->len); + + /* round up, then divide by 4 to add the payload length as # of 32bit words */ + msg->hdr.len += DIV_ROUND_UP(cfd->len, sizeof(u32)); + + for (i = 0; i 
< ESD_USB_MAX_TX_URBS; i++) { + if (priv->tx_contexts[i].echo_index == ESD_USB_MAX_TX_URBS) { + context = &priv->tx_contexts[i]; + break; + } + } + + /* This may never happen */ + if (!context) { + netdev_warn(netdev, "couldn't find free context\n"); + ret = NETDEV_TX_BUSY; + goto releasebuf; + } + + context->priv = priv; + context->echo_index = i; + + /* hnd must not be 0 - MSB is stripped in txdone handling */ + msg->tx.hnd = BIT(31) | i; /* returned in TX done message */ + + usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), buf, + msg->hdr.len * sizeof(u32), /* convert to # of bytes */ + esd_usb_write_bulk_callback, context); + + urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + + usb_anchor_urb(urb, &priv->tx_submitted); + + can_put_echo_skb(skb, netdev, context->echo_index, 0); + + atomic_inc(&priv->active_tx_jobs); + + /* Slow down tx path */ + if (atomic_read(&priv->active_tx_jobs) >= ESD_USB_MAX_TX_URBS) + netif_stop_queue(netdev); + + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err) { + can_free_echo_skb(netdev, context->echo_index, NULL); + + atomic_dec(&priv->active_tx_jobs); + usb_unanchor_urb(urb); + + stats->tx_dropped++; + + if (err == -ENODEV) + netif_device_detach(netdev); + else + netdev_warn(netdev, "failed tx_urb %pe\n", ERR_PTR(err)); + + goto releasebuf; + } + + netif_trans_update(netdev); + + /* Release our reference to this URB, the USB core will eventually free + * it entirely. + */ + usb_free_urb(urb); + + return NETDEV_TX_OK; + +releasebuf: + usb_free_coherent(dev->udev, size, buf, urb->transfer_dma); + +nobufmem: + usb_free_urb(urb); + +nourbmem: + return ret; +} + +/* Stop interface */ +static int esd_usb_stop(struct esd_usb_net_priv *priv) +{ + union esd_usb_msg *msg; + int err; + int i; + + msg = kmalloc(sizeof(*msg), GFP_KERNEL); + if (!msg) + return -ENOMEM; + + /* Disable all IDs (see esd_usb_start()) */ + msg->hdr.cmd = ESD_USB_CMD_IDADD; + msg->hdr.len = sizeof(struct esd_usb_id_filter_msg) / sizeof(u32);/* # of 32bit words */ + msg->filter.net = priv->index; + msg->filter.option = ESD_USB_ID_ENABLE; /* start with segment 0 */ + for (i = 0; i <= ESD_USB_MAX_ID_SEGMENT; i++) + msg->filter.mask[i] = 0; + err = esd_usb_send_msg(priv->usb, msg); + if (err < 0) { + netdev_err(priv->netdev, "sending idadd message failed: %pe\n", ERR_PTR(err)); + goto bail; + } + + /* set CAN controller to reset mode */ + msg->hdr.len = sizeof(struct esd_usb_set_baudrate_msg) / sizeof(u32); /* # of 32bit words */ + msg->hdr.cmd = ESD_USB_CMD_SETBAUD; + msg->setbaud.net = priv->index; + msg->setbaud.rsvd = 0; + msg->setbaud.baud = cpu_to_le32(ESD_USB_NO_BAUDRATE); + err = esd_usb_send_msg(priv->usb, msg); + if (err < 0) + netdev_err(priv->netdev, "sending setbaud message failed: %pe\n", ERR_PTR(err)); + +bail: + kfree(msg); + + return err; +} + +static int esd_usb_close(struct net_device *netdev) +{ + struct esd_usb_net_priv *priv = netdev_priv(netdev); + int err = 0; + + if (!priv->usb->in_usb_disconnect) { + /* It's moot to try this in usb_disconnect()! 
*/ + err = esd_usb_stop(priv); + } + + priv->can.state = CAN_STATE_STOPPED; + + netif_stop_queue(netdev); + + close_candev(netdev); + + return err; +} + +static const struct net_device_ops esd_usb_netdev_ops = { + .ndo_open = esd_usb_open, + .ndo_stop = esd_usb_close, + .ndo_start_xmit = esd_usb_start_xmit, +}; + +static const struct ethtool_ops esd_usb_ethtool_ops = { + .get_ts_info = ethtool_op_get_ts_info, +}; + +static const struct can_bittiming_const esd_usb_2_bittiming_const = { + .name = "esd_usb_2", + .tseg1_min = 1, + .tseg1_max = 16, + .tseg2_min = 1, + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 1024, + .brp_inc = 1, +}; + +static int esd_usb_2_set_bittiming(struct net_device *netdev) +{ + const struct can_bittiming_const *btc = &esd_usb_2_bittiming_const; + struct esd_usb_net_priv *priv = netdev_priv(netdev); + struct can_bittiming *bt = &priv->can.bittiming; + union esd_usb_msg *msg; + int err; + u32 canbtr; + int sjw_shift; + + canbtr = ESD_USB_UBR; + if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) + canbtr |= ESD_USB_LOM; + + canbtr |= (bt->brp - 1) & (btc->brp_max - 1); + + if (le16_to_cpu(priv->usb->udev->descriptor.idProduct) == + ESD_USB_CANUSBM_PRODUCT_ID) + sjw_shift = ESD_USB_M_SJW_SHIFT; + else + sjw_shift = ESD_USB_2_SJW_SHIFT; + + canbtr |= ((bt->sjw - 1) & (btc->sjw_max - 1)) + << sjw_shift; + canbtr |= ((bt->prop_seg + bt->phase_seg1 - 1) + & (btc->tseg1_max - 1)) + << ESD_USB_2_TSEG1_SHIFT; + canbtr |= ((bt->phase_seg2 - 1) & (btc->tseg2_max - 1)) + << ESD_USB_2_TSEG2_SHIFT; + if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) + canbtr |= ESD_USB_TRIPLE_SAMPLES; + + msg = kmalloc(sizeof(*msg), GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.len = sizeof(struct esd_usb_set_baudrate_msg) / sizeof(u32); /* # of 32bit words */ + msg->hdr.cmd = ESD_USB_CMD_SETBAUD; + msg->setbaud.net = priv->index; + msg->setbaud.rsvd = 0; + msg->setbaud.baud = cpu_to_le32(canbtr); + + netdev_dbg(netdev, "setting BTR=%#x\n", canbtr); + + err = esd_usb_send_msg(priv->usb, msg); + + kfree(msg); + return err; +} + +/* Nominal bittiming constants, see + * Microchip SAM E70/S70/V70/V71, Data Sheet, Rev. G - 07/2022 + * 48.6.8 MCAN Nominal Bit Timing and Prescaler Register + */ +static const struct can_bittiming_const esd_usb_3_nom_bittiming_const = { + .name = "esd_usb_3", + .tseg1_min = 2, + .tseg1_max = 256, + .tseg2_min = 2, + .tseg2_max = 128, + .sjw_max = 128, + .brp_min = 1, + .brp_max = 512, + .brp_inc = 1, +}; + +/* Data bittiming constants, see + * Microchip SAM E70/S70/V70/V71, Data Sheet, Rev. 
G - 07/2022 + * 48.6.4 MCAN Data Bit Timing and Prescaler Register + */ +static const struct can_bittiming_const esd_usb_3_data_bittiming_const = { + .name = "esd_usb_3", + .tseg1_min = 2, + .tseg1_max = 32, + .tseg2_min = 1, + .tseg2_max = 16, + .sjw_max = 8, + .brp_min = 1, + .brp_max = 32, + .brp_inc = 1, +}; + +static int esd_usb_3_set_bittiming(struct net_device *netdev) +{ + const struct can_bittiming_const *nom_btc = &esd_usb_3_nom_bittiming_const; + const struct can_bittiming_const *data_btc = &esd_usb_3_data_bittiming_const; + struct esd_usb_net_priv *priv = netdev_priv(netdev); + struct can_bittiming *nom_bt = &priv->can.bittiming; + struct can_bittiming *data_bt = &priv->can.fd.data_bittiming; + struct esd_usb_3_set_baudrate_msg_x *baud_x; + union esd_usb_msg *msg; + u16 flags = 0; + int err; + + msg = kmalloc(sizeof(*msg), GFP_KERNEL); + if (!msg) + return -ENOMEM; + + baud_x = &msg->setbaud_x; + + /* Canonical is the most reasonable mode for SocketCAN on CAN-USB/3 ... */ + baud_x->mode = cpu_to_le16(ESD_USB_3_BAUDRATE_MODE_BTR_CANONICAL); + + if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) + flags |= ESD_USB_3_BAUDRATE_FLAG_LOM; + + baud_x->nom.brp = cpu_to_le16(nom_bt->brp & (nom_btc->brp_max - 1)); + baud_x->nom.sjw = cpu_to_le16(nom_bt->sjw & (nom_btc->sjw_max - 1)); + baud_x->nom.tseg1 = cpu_to_le16((nom_bt->prop_seg + nom_bt->phase_seg1) + & (nom_btc->tseg1_max - 1)); + baud_x->nom.tseg2 = cpu_to_le16(nom_bt->phase_seg2 & (nom_btc->tseg2_max - 1)); + + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { + baud_x->data.brp = cpu_to_le16(data_bt->brp & (data_btc->brp_max - 1)); + baud_x->data.sjw = cpu_to_le16(data_bt->sjw & (data_btc->sjw_max - 1)); + baud_x->data.tseg1 = cpu_to_le16((data_bt->prop_seg + data_bt->phase_seg1) + & (data_btc->tseg1_max - 1)); + baud_x->data.tseg2 = cpu_to_le16(data_bt->phase_seg2 & (data_btc->tseg2_max - 1)); + flags |= ESD_USB_3_BAUDRATE_FLAG_FD; + } + + /* Currently this driver only supports the automatic TDC mode */ + baud_x->tdc.tdc_mode = ESD_USB_3_TDC_MODE_AUTO; + baud_x->tdc.ssp_offset = 0; + baud_x->tdc.ssp_shift = 0; + baud_x->tdc.tdc_filter = 0; + + baud_x->flags = cpu_to_le16(flags); + baud_x->net = priv->index; + baud_x->rsvd = 0; + + /* set len as # of 32bit words */ + msg->hdr.len = sizeof(struct esd_usb_3_set_baudrate_msg_x) / sizeof(u32); + msg->hdr.cmd = ESD_USB_CMD_SETBAUD; + + netdev_dbg(netdev, + "ctrlmode=%#x/%#x, esd-net=%u, esd-mode=%#x, esd-flags=%#x\n", + priv->can.ctrlmode, priv->can.ctrlmode_supported, + priv->index, le16_to_cpu(baud_x->mode), flags); + + err = esd_usb_send_msg(priv->usb, msg); + + kfree(msg); + return err; +} + +static int esd_usb_get_berr_counter(const struct net_device *netdev, + struct can_berr_counter *bec) +{ + struct esd_usb_net_priv *priv = netdev_priv(netdev); + + bec->txerr = priv->bec.txerr; + bec->rxerr = priv->bec.rxerr; + + return 0; +} + +static int esd_usb_set_mode(struct net_device *netdev, enum can_mode mode) +{ + switch (mode) { + case CAN_MODE_START: + netif_wake_queue(netdev); + break; + + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int esd_usb_probe_one_net(struct usb_interface *intf, int index) +{ + struct esd_usb *dev = usb_get_intfdata(intf); + struct net_device *netdev; + struct esd_usb_net_priv *priv; + int err = 0; + int i; + + netdev = alloc_candev(sizeof(*priv), ESD_USB_MAX_TX_URBS); + if (!netdev) { + dev_err(&intf->dev, "couldn't alloc candev\n"); + err = -ENOMEM; + goto done; + } + + priv = netdev_priv(netdev); + + init_usb_anchor(&priv->tx_submitted); + 
atomic_set(&priv->active_tx_jobs, 0); + + for (i = 0; i < ESD_USB_MAX_TX_URBS; i++) + priv->tx_contexts[i].echo_index = ESD_USB_MAX_TX_URBS; + + priv->usb = dev; + priv->netdev = netdev; + priv->index = index; + + priv->can.state = CAN_STATE_STOPPED; + priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY | + CAN_CTRLMODE_CC_LEN8_DLC | + CAN_CTRLMODE_BERR_REPORTING; + + switch (le16_to_cpu(dev->udev->descriptor.idProduct)) { + case ESD_USB_CANUSB3_PRODUCT_ID: + priv->can.clock.freq = ESD_USB_3_CAN_CLOCK; + priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD; + priv->can.bittiming_const = &esd_usb_3_nom_bittiming_const; + priv->can.fd.data_bittiming_const = &esd_usb_3_data_bittiming_const; + priv->can.do_set_bittiming = esd_usb_3_set_bittiming; + priv->can.fd.do_set_data_bittiming = esd_usb_3_set_bittiming; + break; + + case ESD_USB_CANUSBM_PRODUCT_ID: + priv->can.clock.freq = ESD_USB_M_CAN_CLOCK; + priv->can.bittiming_const = &esd_usb_2_bittiming_const; + priv->can.do_set_bittiming = esd_usb_2_set_bittiming; + break; + + case ESD_USB_CANUSB2_PRODUCT_ID: + default: + priv->can.clock.freq = ESD_USB_2_CAN_CLOCK; + priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; + priv->can.bittiming_const = &esd_usb_2_bittiming_const; + priv->can.do_set_bittiming = esd_usb_2_set_bittiming; + break; + } + + priv->can.do_set_mode = esd_usb_set_mode; + priv->can.do_get_berr_counter = esd_usb_get_berr_counter; + + netdev->flags |= IFF_ECHO; /* we support local echo */ + + netdev->netdev_ops = &esd_usb_netdev_ops; + netdev->ethtool_ops = &esd_usb_ethtool_ops; + + SET_NETDEV_DEV(netdev, &intf->dev); + netdev->dev_id = index; + + err = register_candev(netdev); + if (err) { + dev_err(&intf->dev, "couldn't register CAN device: %pe\n", ERR_PTR(err)); + free_candev(netdev); + err = -ENOMEM; + goto done; + } + + dev->nets[index] = priv; + netdev_info(netdev, "registered\n"); + +done: + return err; +} + +/* probe function for new USB devices + * + * check version information and number of available + * CAN interfaces + */ +static int esd_usb_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct esd_usb *dev; + union esd_usb_msg *msg; + int i, err; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) { + err = -ENOMEM; + goto done; + } + + dev->udev = interface_to_usbdev(intf); + + init_usb_anchor(&dev->rx_submitted); + + usb_set_intfdata(intf, dev); + + msg = kmalloc(sizeof(*msg), GFP_KERNEL); + if (!msg) { + err = -ENOMEM; + goto free_msg; + } + + /* query number of CAN interfaces (nets) */ + msg->hdr.cmd = ESD_USB_CMD_VERSION; + msg->hdr.len = sizeof(struct esd_usb_version_msg) / sizeof(u32); /* # of 32bit words */ + msg->version.rsvd = 0; + msg->version.flags = 0; + msg->version.drv_version = 0; + + err = esd_usb_send_msg(dev, msg); + if (err < 0) { + dev_err(&intf->dev, "sending version message failed\n"); + goto free_msg; + } + + err = esd_usb_wait_msg(dev, msg); + if (err < 0) { + dev_err(&intf->dev, "no version message answer\n"); + goto free_msg; + } + + dev->net_count = (int)msg->version_reply.nets; + dev->version = le32_to_cpu(msg->version_reply.version); + + if (device_create_file(&intf->dev, &dev_attr_firmware)) + dev_err(&intf->dev, + "Couldn't create device file for firmware\n"); + + if (device_create_file(&intf->dev, &dev_attr_hardware)) + dev_err(&intf->dev, + "Couldn't create device file for hardware\n"); + + if (device_create_file(&intf->dev, &dev_attr_nets)) + dev_err(&intf->dev, + "Couldn't create device file for nets\n"); + + /* do per device probing */ + for (i = 0; 
i < dev->net_count; i++) + esd_usb_probe_one_net(intf, i); + +free_msg: + kfree(msg); + if (err) + kfree(dev); +done: + return err; +} + +/* called by the usb core when the device is removed from the system */ +static void esd_usb_disconnect(struct usb_interface *intf) +{ + struct esd_usb *dev = usb_get_intfdata(intf); + struct net_device *netdev; + int i; + + device_remove_file(&intf->dev, &dev_attr_firmware); + device_remove_file(&intf->dev, &dev_attr_hardware); + device_remove_file(&intf->dev, &dev_attr_nets); + + usb_set_intfdata(intf, NULL); + + if (dev) { + dev->in_usb_disconnect = 1; + for (i = 0; i < dev->net_count; i++) { + if (dev->nets[i]) { + netdev = dev->nets[i]->netdev; + netdev_info(netdev, "unregister\n"); + unregister_netdev(netdev); + free_candev(netdev); + } + } + unlink_all_urbs(dev); + kfree(dev); + } +} + +/* usb specific object needed to register this driver with the usb subsystem */ +static struct usb_driver esd_usb_driver = { + .name = KBUILD_MODNAME, + .probe = esd_usb_probe, + .disconnect = esd_usb_disconnect, + .id_table = esd_usb_table, +}; + +module_usb_driver(esd_usb_driver); diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c deleted file mode 100644 index 5820fd5b69b5..000000000000 --- a/drivers/net/can/usb/esd_usb2.c +++ /dev/null @@ -1,1153 +0,0 @@ -/* - * CAN driver for esd CAN-USB/2 and CAN-USB/Micro - * - * Copyright (C) 2010-2012 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published - * by the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
- */ -#include <linux/signal.h> -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/netdevice.h> -#include <linux/usb.h> - -#include <linux/can.h> -#include <linux/can/dev.h> -#include <linux/can/error.h> - -MODULE_AUTHOR("Matthias Fuchs <matthias.fuchs@esd.eu>"); -MODULE_DESCRIPTION("CAN driver for esd CAN-USB/2 and CAN-USB/Micro interfaces"); -MODULE_LICENSE("GPL v2"); - -/* Define these values to match your devices */ -#define USB_ESDGMBH_VENDOR_ID 0x0ab4 -#define USB_CANUSB2_PRODUCT_ID 0x0010 -#define USB_CANUSBM_PRODUCT_ID 0x0011 - -#define ESD_USB2_CAN_CLOCK 60000000 -#define ESD_USBM_CAN_CLOCK 36000000 -#define ESD_USB2_MAX_NETS 2 - -/* USB2 commands */ -#define CMD_VERSION 1 /* also used for VERSION_REPLY */ -#define CMD_CAN_RX 2 /* device to host only */ -#define CMD_CAN_TX 3 /* also used for TX_DONE */ -#define CMD_SETBAUD 4 /* also used for SETBAUD_REPLY */ -#define CMD_TS 5 /* also used for TS_REPLY */ -#define CMD_IDADD 6 /* also used for IDADD_REPLY */ - -/* esd CAN message flags - dlc field */ -#define ESD_RTR 0x10 - -/* esd CAN message flags - id field */ -#define ESD_EXTID 0x20000000 -#define ESD_EVENT 0x40000000 -#define ESD_IDMASK 0x1fffffff - -/* esd CAN event ids used by this driver */ -#define ESD_EV_CAN_ERROR_EXT 2 - -/* baudrate message flags */ -#define ESD_USB2_UBR 0x80000000 -#define ESD_USB2_LOM 0x40000000 -#define ESD_USB2_NO_BAUDRATE 0x7fffffff -#define ESD_USB2_TSEG1_MIN 1 -#define ESD_USB2_TSEG1_MAX 16 -#define ESD_USB2_TSEG1_SHIFT 16 -#define ESD_USB2_TSEG2_MIN 1 -#define ESD_USB2_TSEG2_MAX 8 -#define ESD_USB2_TSEG2_SHIFT 20 -#define ESD_USB2_SJW_MAX 4 -#define ESD_USB2_SJW_SHIFT 14 -#define ESD_USBM_SJW_SHIFT 24 -#define ESD_USB2_BRP_MIN 1 -#define ESD_USB2_BRP_MAX 1024 -#define ESD_USB2_BRP_INC 1 -#define ESD_USB2_3_SAMPLES 0x00800000 - -/* esd IDADD message */ -#define ESD_ID_ENABLE 0x80 -#define ESD_MAX_ID_SEGMENT 64 - -/* SJA1000 ECC register (emulated by usb2 firmware) */ -#define SJA1000_ECC_SEG 0x1F -#define SJA1000_ECC_DIR 0x20 -#define SJA1000_ECC_ERR 0x06 -#define SJA1000_ECC_BIT 0x00 -#define SJA1000_ECC_FORM 0x40 -#define SJA1000_ECC_STUFF 0x80 -#define SJA1000_ECC_MASK 0xc0 - -/* esd bus state event codes */ -#define ESD_BUSSTATE_MASK 0xc0 -#define ESD_BUSSTATE_WARN 0x40 -#define ESD_BUSSTATE_ERRPASSIVE 0x80 -#define ESD_BUSSTATE_BUSOFF 0xc0 - -#define RX_BUFFER_SIZE 1024 -#define MAX_RX_URBS 4 -#define MAX_TX_URBS 16 /* must be power of 2 */ - -struct header_msg { - u8 len; /* len is always the total message length in 32bit words */ - u8 cmd; - u8 rsvd[2]; -}; - -struct version_msg { - u8 len; - u8 cmd; - u8 rsvd; - u8 flags; - __le32 drv_version; -}; - -struct version_reply_msg { - u8 len; - u8 cmd; - u8 nets; - u8 features; - __le32 version; - u8 name[16]; - __le32 rsvd; - __le32 ts; -}; - -struct rx_msg { - u8 len; - u8 cmd; - u8 net; - u8 dlc; - __le32 ts; - __le32 id; /* upper 3 bits contain flags */ - u8 data[8]; -}; - -struct tx_msg { - u8 len; - u8 cmd; - u8 net; - u8 dlc; - u32 hnd; /* opaque handle, not used by device */ - __le32 id; /* upper 3 bits contain flags */ - u8 data[8]; -}; - -struct tx_done_msg { - u8 len; - u8 cmd; - u8 net; - u8 status; - u32 hnd; /* opaque handle, not used by device */ - __le32 ts; -}; - -struct id_filter_msg { - u8 len; - u8 cmd; - u8 net; - u8 option; - __le32 mask[ESD_MAX_ID_SEGMENT + 1]; -}; - -struct set_baudrate_msg { - u8 len; - u8 cmd; - u8 net; - u8 rsvd; - __le32 baud; -}; - -/* Main message type used between library and application */ -struct __attribute__ ((packed)) 
esd_usb2_msg { - union { - struct header_msg hdr; - struct version_msg version; - struct version_reply_msg version_reply; - struct rx_msg rx; - struct tx_msg tx; - struct tx_done_msg txdone; - struct set_baudrate_msg setbaud; - struct id_filter_msg filter; - } msg; -}; - -static struct usb_device_id esd_usb2_table[] = { - {USB_DEVICE(USB_ESDGMBH_VENDOR_ID, USB_CANUSB2_PRODUCT_ID)}, - {USB_DEVICE(USB_ESDGMBH_VENDOR_ID, USB_CANUSBM_PRODUCT_ID)}, - {} -}; -MODULE_DEVICE_TABLE(usb, esd_usb2_table); - -struct esd_usb2_net_priv; - -struct esd_tx_urb_context { - struct esd_usb2_net_priv *priv; - u32 echo_index; - int dlc; -}; - -struct esd_usb2 { - struct usb_device *udev; - struct esd_usb2_net_priv *nets[ESD_USB2_MAX_NETS]; - - struct usb_anchor rx_submitted; - - int net_count; - u32 version; - int rxinitdone; -}; - -struct esd_usb2_net_priv { - struct can_priv can; /* must be the first member */ - - atomic_t active_tx_jobs; - struct usb_anchor tx_submitted; - struct esd_tx_urb_context tx_contexts[MAX_TX_URBS]; - - struct esd_usb2 *usb2; - struct net_device *netdev; - int index; - u8 old_state; - struct can_berr_counter bec; -}; - -static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv, - struct esd_usb2_msg *msg) -{ - struct net_device_stats *stats = &priv->netdev->stats; - struct can_frame *cf; - struct sk_buff *skb; - u32 id = le32_to_cpu(msg->msg.rx.id) & ESD_IDMASK; - - if (id == ESD_EV_CAN_ERROR_EXT) { - u8 state = msg->msg.rx.data[0]; - u8 ecc = msg->msg.rx.data[1]; - u8 txerr = msg->msg.rx.data[2]; - u8 rxerr = msg->msg.rx.data[3]; - - skb = alloc_can_err_skb(priv->netdev, &cf); - if (skb == NULL) { - stats->rx_dropped++; - return; - } - - if (state != priv->old_state) { - priv->old_state = state; - - switch (state & ESD_BUSSTATE_MASK) { - case ESD_BUSSTATE_BUSOFF: - priv->can.state = CAN_STATE_BUS_OFF; - cf->can_id |= CAN_ERR_BUSOFF; - priv->can.can_stats.bus_off++; - can_bus_off(priv->netdev); - break; - case ESD_BUSSTATE_WARN: - priv->can.state = CAN_STATE_ERROR_WARNING; - priv->can.can_stats.error_warning++; - break; - case ESD_BUSSTATE_ERRPASSIVE: - priv->can.state = CAN_STATE_ERROR_PASSIVE; - priv->can.can_stats.error_passive++; - break; - default: - priv->can.state = CAN_STATE_ERROR_ACTIVE; - break; - } - } else { - priv->can.can_stats.bus_error++; - stats->rx_errors++; - - cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; - - switch (ecc & SJA1000_ECC_MASK) { - case SJA1000_ECC_BIT: - cf->data[2] |= CAN_ERR_PROT_BIT; - break; - case SJA1000_ECC_FORM: - cf->data[2] |= CAN_ERR_PROT_FORM; - break; - case SJA1000_ECC_STUFF: - cf->data[2] |= CAN_ERR_PROT_STUFF; - break; - default: - cf->data[3] = ecc & SJA1000_ECC_SEG; - break; - } - - /* Error occurred during transmission? */ - if (!(ecc & SJA1000_ECC_DIR)) - cf->data[2] |= CAN_ERR_PROT_TX; - - if (priv->can.state == CAN_STATE_ERROR_WARNING || - priv->can.state == CAN_STATE_ERROR_PASSIVE) { - cf->data[1] = (txerr > rxerr) ? 
- CAN_ERR_CRTL_TX_PASSIVE : - CAN_ERR_CRTL_RX_PASSIVE; - } - cf->data[6] = txerr; - cf->data[7] = rxerr; - } - - priv->bec.txerr = txerr; - priv->bec.rxerr = rxerr; - - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; - netif_rx(skb); - } -} - -static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv, - struct esd_usb2_msg *msg) -{ - struct net_device_stats *stats = &priv->netdev->stats; - struct can_frame *cf; - struct sk_buff *skb; - int i; - u32 id; - - if (!netif_device_present(priv->netdev)) - return; - - id = le32_to_cpu(msg->msg.rx.id); - - if (id & ESD_EVENT) { - esd_usb2_rx_event(priv, msg); - } else { - skb = alloc_can_skb(priv->netdev, &cf); - if (skb == NULL) { - stats->rx_dropped++; - return; - } - - cf->can_id = id & ESD_IDMASK; - cf->can_dlc = get_can_dlc(msg->msg.rx.dlc & ~ESD_RTR); - - if (id & ESD_EXTID) - cf->can_id |= CAN_EFF_FLAG; - - if (msg->msg.rx.dlc & ESD_RTR) { - cf->can_id |= CAN_RTR_FLAG; - } else { - for (i = 0; i < cf->can_dlc; i++) - cf->data[i] = msg->msg.rx.data[i]; - } - - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; - netif_rx(skb); - } - - return; -} - -static void esd_usb2_tx_done_msg(struct esd_usb2_net_priv *priv, - struct esd_usb2_msg *msg) -{ - struct net_device_stats *stats = &priv->netdev->stats; - struct net_device *netdev = priv->netdev; - struct esd_tx_urb_context *context; - - if (!netif_device_present(netdev)) - return; - - context = &priv->tx_contexts[msg->msg.txdone.hnd & (MAX_TX_URBS - 1)]; - - if (!msg->msg.txdone.status) { - stats->tx_packets++; - stats->tx_bytes += context->dlc; - can_get_echo_skb(netdev, context->echo_index); - } else { - stats->tx_errors++; - can_free_echo_skb(netdev, context->echo_index); - } - - /* Release context */ - context->echo_index = MAX_TX_URBS; - atomic_dec(&priv->active_tx_jobs); - - netif_wake_queue(netdev); -} - -static void esd_usb2_read_bulk_callback(struct urb *urb) -{ - struct esd_usb2 *dev = urb->context; - int retval; - int pos = 0; - int i; - - switch (urb->status) { - case 0: /* success */ - break; - - case -ENOENT: - case -EPIPE: - case -EPROTO: - case -ESHUTDOWN: - return; - - default: - dev_info(dev->udev->dev.parent, - "Rx URB aborted (%d)\n", urb->status); - goto resubmit_urb; - } - - while (pos < urb->actual_length) { - struct esd_usb2_msg *msg; - - msg = (struct esd_usb2_msg *)(urb->transfer_buffer + pos); - - switch (msg->msg.hdr.cmd) { - case CMD_CAN_RX: - if (msg->msg.rx.net >= dev->net_count) { - dev_err(dev->udev->dev.parent, "format error\n"); - break; - } - - esd_usb2_rx_can_msg(dev->nets[msg->msg.rx.net], msg); - break; - - case CMD_CAN_TX: - if (msg->msg.txdone.net >= dev->net_count) { - dev_err(dev->udev->dev.parent, "format error\n"); - break; - } - - esd_usb2_tx_done_msg(dev->nets[msg->msg.txdone.net], - msg); - break; - } - - pos += msg->msg.hdr.len << 2; - - if (pos > urb->actual_length) { - dev_err(dev->udev->dev.parent, "format error\n"); - break; - } - } - -resubmit_urb: - usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1), - urb->transfer_buffer, RX_BUFFER_SIZE, - esd_usb2_read_bulk_callback, dev); - - retval = usb_submit_urb(urb, GFP_ATOMIC); - if (retval == -ENODEV) { - for (i = 0; i < dev->net_count; i++) { - if (dev->nets[i]) - netif_device_detach(dev->nets[i]->netdev); - } - } else if (retval) { - dev_err(dev->udev->dev.parent, - "failed resubmitting read bulk urb: %d\n", retval); - } - - return; -} - -/* - * callback for bulk IN urb - */ -static void esd_usb2_write_bulk_callback(struct urb *urb) -{ - struct esd_tx_urb_context 
*context = urb->context; - struct esd_usb2_net_priv *priv; - struct net_device *netdev; - size_t size = sizeof(struct esd_usb2_msg); - - WARN_ON(!context); - - priv = context->priv; - netdev = priv->netdev; - - /* free up our allocated buffer */ - usb_free_coherent(urb->dev, size, - urb->transfer_buffer, urb->transfer_dma); - - if (!netif_device_present(netdev)) - return; - - if (urb->status) - netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status); - - netif_trans_update(netdev); -} - -static ssize_t show_firmware(struct device *d, - struct device_attribute *attr, char *buf) -{ - struct usb_interface *intf = to_usb_interface(d); - struct esd_usb2 *dev = usb_get_intfdata(intf); - - return sprintf(buf, "%d.%d.%d\n", - (dev->version >> 12) & 0xf, - (dev->version >> 8) & 0xf, - dev->version & 0xff); -} -static DEVICE_ATTR(firmware, 0444, show_firmware, NULL); - -static ssize_t show_hardware(struct device *d, - struct device_attribute *attr, char *buf) -{ - struct usb_interface *intf = to_usb_interface(d); - struct esd_usb2 *dev = usb_get_intfdata(intf); - - return sprintf(buf, "%d.%d.%d\n", - (dev->version >> 28) & 0xf, - (dev->version >> 24) & 0xf, - (dev->version >> 16) & 0xff); -} -static DEVICE_ATTR(hardware, 0444, show_hardware, NULL); - -static ssize_t show_nets(struct device *d, - struct device_attribute *attr, char *buf) -{ - struct usb_interface *intf = to_usb_interface(d); - struct esd_usb2 *dev = usb_get_intfdata(intf); - - return sprintf(buf, "%d", dev->net_count); -} -static DEVICE_ATTR(nets, 0444, show_nets, NULL); - -static int esd_usb2_send_msg(struct esd_usb2 *dev, struct esd_usb2_msg *msg) -{ - int actual_length; - - return usb_bulk_msg(dev->udev, - usb_sndbulkpipe(dev->udev, 2), - msg, - msg->msg.hdr.len << 2, - &actual_length, - 1000); -} - -static int esd_usb2_wait_msg(struct esd_usb2 *dev, - struct esd_usb2_msg *msg) -{ - int actual_length; - - return usb_bulk_msg(dev->udev, - usb_rcvbulkpipe(dev->udev, 1), - msg, - sizeof(*msg), - &actual_length, - 1000); -} - -static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev) -{ - int i, err = 0; - - if (dev->rxinitdone) - return 0; - - for (i = 0; i < MAX_RX_URBS; i++) { - struct urb *urb = NULL; - u8 *buf = NULL; - - /* create a URB, and a buffer for it */ - urb = usb_alloc_urb(0, GFP_KERNEL); - if (!urb) { - err = -ENOMEM; - break; - } - - buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL, - &urb->transfer_dma); - if (!buf) { - dev_warn(dev->udev->dev.parent, - "No memory left for USB buffer\n"); - err = -ENOMEM; - goto freeurb; - } - - usb_fill_bulk_urb(urb, dev->udev, - usb_rcvbulkpipe(dev->udev, 1), - buf, RX_BUFFER_SIZE, - esd_usb2_read_bulk_callback, dev); - urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; - usb_anchor_urb(urb, &dev->rx_submitted); - - err = usb_submit_urb(urb, GFP_KERNEL); - if (err) { - usb_unanchor_urb(urb); - usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf, - urb->transfer_dma); - } - -freeurb: - /* Drop reference, USB core will take care of freeing it */ - usb_free_urb(urb); - if (err) - break; - } - - /* Did we submit any URBs */ - if (i == 0) { - dev_err(dev->udev->dev.parent, "couldn't setup read URBs\n"); - return err; - } - - /* Warn if we've couldn't transmit all the URBs */ - if (i < MAX_RX_URBS) { - dev_warn(dev->udev->dev.parent, - "rx performance may be slow\n"); - } - - dev->rxinitdone = 1; - return 0; -} - -/* - * Start interface - */ -static int esd_usb2_start(struct esd_usb2_net_priv *priv) -{ - struct esd_usb2 *dev = priv->usb2; - struct net_device *netdev = 
priv->netdev; - struct esd_usb2_msg *msg; - int err, i; - - msg = kmalloc(sizeof(*msg), GFP_KERNEL); - if (!msg) { - err = -ENOMEM; - goto out; - } - - /* - * Enable all IDs - * The IDADD message takes up to 64 32 bit bitmasks (2048 bits). - * Each bit represents one 11 bit CAN identifier. A set bit - * enables reception of the corresponding CAN identifier. A cleared - * bit disabled this identifier. An additional bitmask value - * following the CAN 2.0A bits is used to enable reception of - * extended CAN frames. Only the LSB of this final mask is checked - * for the complete 29 bit ID range. The IDADD message also allows - * filter configuration for an ID subset. In this case you can add - * the number of the starting bitmask (0..64) to the filter.option - * field followed by only some bitmasks. - */ - msg->msg.hdr.cmd = CMD_IDADD; - msg->msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT; - msg->msg.filter.net = priv->index; - msg->msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */ - for (i = 0; i < ESD_MAX_ID_SEGMENT; i++) - msg->msg.filter.mask[i] = cpu_to_le32(0xffffffff); - /* enable 29bit extended IDs */ - msg->msg.filter.mask[ESD_MAX_ID_SEGMENT] = cpu_to_le32(0x00000001); - - err = esd_usb2_send_msg(dev, msg); - if (err) - goto out; - - err = esd_usb2_setup_rx_urbs(dev); - if (err) - goto out; - - priv->can.state = CAN_STATE_ERROR_ACTIVE; - -out: - if (err == -ENODEV) - netif_device_detach(netdev); - if (err) - netdev_err(netdev, "couldn't start device: %d\n", err); - - kfree(msg); - return err; -} - -static void unlink_all_urbs(struct esd_usb2 *dev) -{ - struct esd_usb2_net_priv *priv; - int i, j; - - usb_kill_anchored_urbs(&dev->rx_submitted); - for (i = 0; i < dev->net_count; i++) { - priv = dev->nets[i]; - if (priv) { - usb_kill_anchored_urbs(&priv->tx_submitted); - atomic_set(&priv->active_tx_jobs, 0); - - for (j = 0; j < MAX_TX_URBS; j++) - priv->tx_contexts[j].echo_index = MAX_TX_URBS; - } - } -} - -static int esd_usb2_open(struct net_device *netdev) -{ - struct esd_usb2_net_priv *priv = netdev_priv(netdev); - int err; - - /* common open */ - err = open_candev(netdev); - if (err) - return err; - - /* finally start device */ - err = esd_usb2_start(priv); - if (err) { - netdev_warn(netdev, "couldn't start device: %d\n", err); - close_candev(netdev); - return err; - } - - netif_start_queue(netdev); - - return 0; -} - -static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb, - struct net_device *netdev) -{ - struct esd_usb2_net_priv *priv = netdev_priv(netdev); - struct esd_usb2 *dev = priv->usb2; - struct esd_tx_urb_context *context = NULL; - struct net_device_stats *stats = &netdev->stats; - struct can_frame *cf = (struct can_frame *)skb->data; - struct esd_usb2_msg *msg; - struct urb *urb; - u8 *buf; - int i, err; - int ret = NETDEV_TX_OK; - size_t size = sizeof(struct esd_usb2_msg); - - if (can_dropped_invalid_skb(netdev, skb)) - return NETDEV_TX_OK; - - /* create a URB, and a buffer for it, and copy the data to the URB */ - urb = usb_alloc_urb(0, GFP_ATOMIC); - if (!urb) { - stats->tx_dropped++; - dev_kfree_skb(skb); - goto nourbmem; - } - - buf = usb_alloc_coherent(dev->udev, size, GFP_ATOMIC, - &urb->transfer_dma); - if (!buf) { - netdev_err(netdev, "No memory left for USB buffer\n"); - stats->tx_dropped++; - dev_kfree_skb(skb); - goto nobufmem; - } - - msg = (struct esd_usb2_msg *)buf; - - msg->msg.hdr.len = 3; /* minimal length */ - msg->msg.hdr.cmd = CMD_CAN_TX; - msg->msg.tx.net = priv->index; - msg->msg.tx.dlc = cf->can_dlc; - msg->msg.tx.id = cpu_to_le32(cf->can_id 
& CAN_ERR_MASK); - - if (cf->can_id & CAN_RTR_FLAG) - msg->msg.tx.dlc |= ESD_RTR; - - if (cf->can_id & CAN_EFF_FLAG) - msg->msg.tx.id |= cpu_to_le32(ESD_EXTID); - - for (i = 0; i < cf->can_dlc; i++) - msg->msg.tx.data[i] = cf->data[i]; - - msg->msg.hdr.len += (cf->can_dlc + 3) >> 2; - - for (i = 0; i < MAX_TX_URBS; i++) { - if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) { - context = &priv->tx_contexts[i]; - break; - } - } - - /* - * This may never happen. - */ - if (!context) { - netdev_warn(netdev, "couldn't find free context\n"); - ret = NETDEV_TX_BUSY; - goto releasebuf; - } - - context->priv = priv; - context->echo_index = i; - context->dlc = cf->can_dlc; - - /* hnd must not be 0 - MSB is stripped in txdone handling */ - msg->msg.tx.hnd = 0x80000000 | i; /* returned in TX done message */ - - usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), buf, - msg->msg.hdr.len << 2, - esd_usb2_write_bulk_callback, context); - - urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; - - usb_anchor_urb(urb, &priv->tx_submitted); - - can_put_echo_skb(skb, netdev, context->echo_index); - - atomic_inc(&priv->active_tx_jobs); - - /* Slow down tx path */ - if (atomic_read(&priv->active_tx_jobs) >= MAX_TX_URBS) - netif_stop_queue(netdev); - - err = usb_submit_urb(urb, GFP_ATOMIC); - if (err) { - can_free_echo_skb(netdev, context->echo_index); - - atomic_dec(&priv->active_tx_jobs); - usb_unanchor_urb(urb); - - stats->tx_dropped++; - - if (err == -ENODEV) - netif_device_detach(netdev); - else - netdev_warn(netdev, "failed tx_urb %d\n", err); - - goto releasebuf; - } - - netif_trans_update(netdev); - - /* - * Release our reference to this URB, the USB core will eventually free - * it entirely. - */ - usb_free_urb(urb); - - return NETDEV_TX_OK; - -releasebuf: - usb_free_coherent(dev->udev, size, buf, urb->transfer_dma); - -nobufmem: - usb_free_urb(urb); - -nourbmem: - return ret; -} - -static int esd_usb2_close(struct net_device *netdev) -{ - struct esd_usb2_net_priv *priv = netdev_priv(netdev); - struct esd_usb2_msg *msg; - int i; - - msg = kmalloc(sizeof(*msg), GFP_KERNEL); - if (!msg) - return -ENOMEM; - - /* Disable all IDs (see esd_usb2_start()) */ - msg->msg.hdr.cmd = CMD_IDADD; - msg->msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT; - msg->msg.filter.net = priv->index; - msg->msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */ - for (i = 0; i <= ESD_MAX_ID_SEGMENT; i++) - msg->msg.filter.mask[i] = 0; - if (esd_usb2_send_msg(priv->usb2, msg) < 0) - netdev_err(netdev, "sending idadd message failed\n"); - - /* set CAN controller to reset mode */ - msg->msg.hdr.len = 2; - msg->msg.hdr.cmd = CMD_SETBAUD; - msg->msg.setbaud.net = priv->index; - msg->msg.setbaud.rsvd = 0; - msg->msg.setbaud.baud = cpu_to_le32(ESD_USB2_NO_BAUDRATE); - if (esd_usb2_send_msg(priv->usb2, msg) < 0) - netdev_err(netdev, "sending setbaud message failed\n"); - - priv->can.state = CAN_STATE_STOPPED; - - netif_stop_queue(netdev); - - close_candev(netdev); - - kfree(msg); - - return 0; -} - -static const struct net_device_ops esd_usb2_netdev_ops = { - .ndo_open = esd_usb2_open, - .ndo_stop = esd_usb2_close, - .ndo_start_xmit = esd_usb2_start_xmit, - .ndo_change_mtu = can_change_mtu, -}; - -static const struct can_bittiming_const esd_usb2_bittiming_const = { - .name = "esd_usb2", - .tseg1_min = ESD_USB2_TSEG1_MIN, - .tseg1_max = ESD_USB2_TSEG1_MAX, - .tseg2_min = ESD_USB2_TSEG2_MIN, - .tseg2_max = ESD_USB2_TSEG2_MAX, - .sjw_max = ESD_USB2_SJW_MAX, - .brp_min = ESD_USB2_BRP_MIN, - .brp_max = ESD_USB2_BRP_MAX, - .brp_inc = 
ESD_USB2_BRP_INC, -}; - -static int esd_usb2_set_bittiming(struct net_device *netdev) -{ - struct esd_usb2_net_priv *priv = netdev_priv(netdev); - struct can_bittiming *bt = &priv->can.bittiming; - struct esd_usb2_msg *msg; - int err; - u32 canbtr; - int sjw_shift; - - canbtr = ESD_USB2_UBR; - if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) - canbtr |= ESD_USB2_LOM; - - canbtr |= (bt->brp - 1) & (ESD_USB2_BRP_MAX - 1); - - if (le16_to_cpu(priv->usb2->udev->descriptor.idProduct) == - USB_CANUSBM_PRODUCT_ID) - sjw_shift = ESD_USBM_SJW_SHIFT; - else - sjw_shift = ESD_USB2_SJW_SHIFT; - - canbtr |= ((bt->sjw - 1) & (ESD_USB2_SJW_MAX - 1)) - << sjw_shift; - canbtr |= ((bt->prop_seg + bt->phase_seg1 - 1) - & (ESD_USB2_TSEG1_MAX - 1)) - << ESD_USB2_TSEG1_SHIFT; - canbtr |= ((bt->phase_seg2 - 1) & (ESD_USB2_TSEG2_MAX - 1)) - << ESD_USB2_TSEG2_SHIFT; - if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) - canbtr |= ESD_USB2_3_SAMPLES; - - msg = kmalloc(sizeof(*msg), GFP_KERNEL); - if (!msg) - return -ENOMEM; - - msg->msg.hdr.len = 2; - msg->msg.hdr.cmd = CMD_SETBAUD; - msg->msg.setbaud.net = priv->index; - msg->msg.setbaud.rsvd = 0; - msg->msg.setbaud.baud = cpu_to_le32(canbtr); - - netdev_info(netdev, "setting BTR=%#x\n", canbtr); - - err = esd_usb2_send_msg(priv->usb2, msg); - - kfree(msg); - return err; -} - -static int esd_usb2_get_berr_counter(const struct net_device *netdev, - struct can_berr_counter *bec) -{ - struct esd_usb2_net_priv *priv = netdev_priv(netdev); - - bec->txerr = priv->bec.txerr; - bec->rxerr = priv->bec.rxerr; - - return 0; -} - -static int esd_usb2_set_mode(struct net_device *netdev, enum can_mode mode) -{ - switch (mode) { - case CAN_MODE_START: - netif_wake_queue(netdev); - break; - - default: - return -EOPNOTSUPP; - } - - return 0; -} - -static int esd_usb2_probe_one_net(struct usb_interface *intf, int index) -{ - struct esd_usb2 *dev = usb_get_intfdata(intf); - struct net_device *netdev; - struct esd_usb2_net_priv *priv; - int err = 0; - int i; - - netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS); - if (!netdev) { - dev_err(&intf->dev, "couldn't alloc candev\n"); - err = -ENOMEM; - goto done; - } - - priv = netdev_priv(netdev); - - init_usb_anchor(&priv->tx_submitted); - atomic_set(&priv->active_tx_jobs, 0); - - for (i = 0; i < MAX_TX_URBS; i++) - priv->tx_contexts[i].echo_index = MAX_TX_URBS; - - priv->usb2 = dev; - priv->netdev = netdev; - priv->index = index; - - priv->can.state = CAN_STATE_STOPPED; - priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY; - - if (le16_to_cpu(dev->udev->descriptor.idProduct) == - USB_CANUSBM_PRODUCT_ID) - priv->can.clock.freq = ESD_USBM_CAN_CLOCK; - else { - priv->can.clock.freq = ESD_USB2_CAN_CLOCK; - priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; - } - - priv->can.bittiming_const = &esd_usb2_bittiming_const; - priv->can.do_set_bittiming = esd_usb2_set_bittiming; - priv->can.do_set_mode = esd_usb2_set_mode; - priv->can.do_get_berr_counter = esd_usb2_get_berr_counter; - - netdev->flags |= IFF_ECHO; /* we support local echo */ - - netdev->netdev_ops = &esd_usb2_netdev_ops; - - SET_NETDEV_DEV(netdev, &intf->dev); - netdev->dev_id = index; - - err = register_candev(netdev); - if (err) { - dev_err(&intf->dev, "couldn't register CAN device: %d\n", err); - free_candev(netdev); - err = -ENOMEM; - goto done; - } - - dev->nets[index] = priv; - netdev_info(netdev, "device %s registered\n", netdev->name); - -done: - return err; -} - -/* - * probe function for new USB2 devices - * - * check version information and number of available - 
* CAN interfaces - */ -static int esd_usb2_probe(struct usb_interface *intf, - const struct usb_device_id *id) -{ - struct esd_usb2 *dev; - struct esd_usb2_msg *msg; - int i, err; - - dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (!dev) { - err = -ENOMEM; - goto done; - } - - dev->udev = interface_to_usbdev(intf); - - init_usb_anchor(&dev->rx_submitted); - - usb_set_intfdata(intf, dev); - - msg = kmalloc(sizeof(*msg), GFP_KERNEL); - if (!msg) { - err = -ENOMEM; - goto free_msg; - } - - /* query number of CAN interfaces (nets) */ - msg->msg.hdr.cmd = CMD_VERSION; - msg->msg.hdr.len = 2; - msg->msg.version.rsvd = 0; - msg->msg.version.flags = 0; - msg->msg.version.drv_version = 0; - - err = esd_usb2_send_msg(dev, msg); - if (err < 0) { - dev_err(&intf->dev, "sending version message failed\n"); - goto free_msg; - } - - err = esd_usb2_wait_msg(dev, msg); - if (err < 0) { - dev_err(&intf->dev, "no version message answer\n"); - goto free_msg; - } - - dev->net_count = (int)msg->msg.version_reply.nets; - dev->version = le32_to_cpu(msg->msg.version_reply.version); - - if (device_create_file(&intf->dev, &dev_attr_firmware)) - dev_err(&intf->dev, - "Couldn't create device file for firmware\n"); - - if (device_create_file(&intf->dev, &dev_attr_hardware)) - dev_err(&intf->dev, - "Couldn't create device file for hardware\n"); - - if (device_create_file(&intf->dev, &dev_attr_nets)) - dev_err(&intf->dev, - "Couldn't create device file for nets\n"); - - /* do per device probing */ - for (i = 0; i < dev->net_count; i++) - esd_usb2_probe_one_net(intf, i); - -free_msg: - kfree(msg); - if (err) - kfree(dev); -done: - return err; -} - -/* - * called by the usb core when the device is removed from the system - */ -static void esd_usb2_disconnect(struct usb_interface *intf) -{ - struct esd_usb2 *dev = usb_get_intfdata(intf); - struct net_device *netdev; - int i; - - device_remove_file(&intf->dev, &dev_attr_firmware); - device_remove_file(&intf->dev, &dev_attr_hardware); - device_remove_file(&intf->dev, &dev_attr_nets); - - usb_set_intfdata(intf, NULL); - - if (dev) { - for (i = 0; i < dev->net_count; i++) { - if (dev->nets[i]) { - netdev = dev->nets[i]->netdev; - unregister_netdev(netdev); - free_candev(netdev); - } - } - unlink_all_urbs(dev); - kfree(dev); - } -} - -/* usb specific object needed to register this driver with the usb subsystem */ -static struct usb_driver esd_usb2_driver = { - .name = "esd_usb2", - .probe = esd_usb2_probe, - .disconnect = esd_usb2_disconnect, - .id_table = esd_usb2_table, -}; - -module_usb_driver(esd_usb2_driver); diff --git a/drivers/net/can/usb/etas_es58x/Makefile b/drivers/net/can/usb/etas_es58x/Makefile new file mode 100644 index 000000000000..d6667ebe259f --- /dev/null +++ b/drivers/net/can/usb/etas_es58x/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_CAN_ETAS_ES58X) += etas_es58x.o +etas_es58x-y = es58x_core.o es58x_devlink.o es581_4.o es58x_fd.o diff --git a/drivers/net/can/usb/etas_es58x/es581_4.c b/drivers/net/can/usb/etas_es58x/es581_4.c new file mode 100644 index 000000000000..1888ca1de7b6 --- /dev/null +++ b/drivers/net/can/usb/etas_es58x/es581_4.c @@ -0,0 +1,507 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Driver for ETAS GmbH ES58X USB CAN(-FD) Bus Interfaces. + * + * File es581_4.c: Adds support to ETAS ES581.4. + * + * Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved. + * Copyright (c) 2020 ETAS K.K.. All rights reserved. 
+ * Copyright (c) 2020-2022 Vincent Mailhol <mailhol.vincent@wanadoo.fr> + */ + +#include <linux/unaligned.h> +#include <linux/kernel.h> +#include <linux/units.h> + +#include "es58x_core.h" +#include "es581_4.h" + +/** + * es581_4_sizeof_rx_tx_msg() - Calculate the actual length of the + * structure of a rx or tx message. + * @msg: message of variable length, must have a dlc field. + * + * Even if RTR frames have actually no payload, the ES58X devices + * still expect it. Must be a macro in order to accept several types + * (struct es581_4_tx_can_msg and struct es581_4_rx_can_msg) as an + * input. + * + * Return: length of the message. + */ +#define es581_4_sizeof_rx_tx_msg(msg) \ + offsetof(typeof(msg), data[can_cc_dlc2len((msg).dlc)]) + +static u16 es581_4_get_msg_len(const union es58x_urb_cmd *urb_cmd) +{ + return get_unaligned_le16(&urb_cmd->es581_4_urb_cmd.msg_len); +} + +static int es581_4_echo_msg(struct es58x_device *es58x_dev, + const struct es581_4_urb_cmd *es581_4_urb_cmd) +{ + struct net_device *netdev; + const struct es581_4_bulk_echo_msg *bulk_echo_msg; + const struct es581_4_echo_msg *echo_msg; + u64 *tstamps = es58x_dev->timestamps; + u16 msg_len; + u32 first_packet_idx, packet_idx; + unsigned int dropped = 0; + int i, num_element, ret; + + bulk_echo_msg = &es581_4_urb_cmd->bulk_echo_msg; + msg_len = get_unaligned_le16(&es581_4_urb_cmd->msg_len) - + sizeof(bulk_echo_msg->channel_no); + num_element = es58x_msg_num_element(es58x_dev->dev, + bulk_echo_msg->echo_msg, msg_len); + if (num_element <= 0) + return num_element; + + ret = es58x_get_netdev(es58x_dev, bulk_echo_msg->channel_no, + ES581_4_CHANNEL_IDX_OFFSET, &netdev); + if (ret) + return ret; + + echo_msg = &bulk_echo_msg->echo_msg[0]; + first_packet_idx = get_unaligned_le32(&echo_msg->packet_idx); + packet_idx = first_packet_idx; + for (i = 0; i < num_element; i++) { + u32 tmp_idx; + + echo_msg = &bulk_echo_msg->echo_msg[i]; + tmp_idx = get_unaligned_le32(&echo_msg->packet_idx); + if (tmp_idx == packet_idx - 1) { + if (net_ratelimit()) + netdev_warn(netdev, + "Received echo packet idx %u twice\n", + packet_idx - 1); + dropped++; + continue; + } + if (tmp_idx != packet_idx) { + netdev_err(netdev, "Echo packet idx jumped from %u to %u\n", + packet_idx - 1, echo_msg->packet_idx); + return -EBADMSG; + } + + tstamps[i] = get_unaligned_le64(&echo_msg->timestamp); + packet_idx++; + } + + netdev->stats.tx_dropped += dropped; + return es58x_can_get_echo_skb(netdev, first_packet_idx, + tstamps, num_element - dropped); +} + +static int es581_4_rx_can_msg(struct es58x_device *es58x_dev, + const struct es581_4_urb_cmd *es581_4_urb_cmd, + u16 msg_len) +{ + const struct device *dev = es58x_dev->dev; + struct net_device *netdev; + int pkts, num_element, channel_no, ret; + + num_element = es58x_msg_num_element(dev, es581_4_urb_cmd->rx_can_msg, + msg_len); + if (num_element <= 0) + return num_element; + + channel_no = es581_4_urb_cmd->rx_can_msg[0].channel_no; + ret = es58x_get_netdev(es58x_dev, channel_no, + ES581_4_CHANNEL_IDX_OFFSET, &netdev); + if (ret) + return ret; + + if (!netif_running(netdev)) { + if (net_ratelimit()) + netdev_info(netdev, + "%s: %s is down, dropping %d rx packets\n", + __func__, netdev->name, num_element); + netdev->stats.rx_dropped += num_element; + return 0; + } + + for (pkts = 0; pkts < num_element; pkts++) { + const struct es581_4_rx_can_msg *rx_can_msg = + &es581_4_urb_cmd->rx_can_msg[pkts]; + u64 tstamp = get_unaligned_le64(&rx_can_msg->timestamp); + canid_t can_id = 
get_unaligned_le32(&rx_can_msg->can_id); + + if (channel_no != rx_can_msg->channel_no) + return -EBADMSG; + + ret = es58x_rx_can_msg(netdev, tstamp, rx_can_msg->data, + can_id, rx_can_msg->flags, + rx_can_msg->dlc); + if (ret) + break; + } + + return ret; +} + +static int es581_4_rx_err_msg(struct es58x_device *es58x_dev, + const struct es581_4_rx_err_msg *rx_err_msg) +{ + struct net_device *netdev; + enum es58x_err error = get_unaligned_le32(&rx_err_msg->error); + int ret; + + ret = es58x_get_netdev(es58x_dev, rx_err_msg->channel_no, + ES581_4_CHANNEL_IDX_OFFSET, &netdev); + if (ret) + return ret; + + return es58x_rx_err_msg(netdev, error, 0, + get_unaligned_le64(&rx_err_msg->timestamp)); +} + +static int es581_4_rx_event_msg(struct es58x_device *es58x_dev, + const struct es581_4_rx_event_msg *rx_event_msg) +{ + struct net_device *netdev; + enum es58x_event event = get_unaligned_le32(&rx_event_msg->event); + int ret; + + ret = es58x_get_netdev(es58x_dev, rx_event_msg->channel_no, + ES581_4_CHANNEL_IDX_OFFSET, &netdev); + if (ret) + return ret; + + return es58x_rx_err_msg(netdev, 0, event, + get_unaligned_le64(&rx_event_msg->timestamp)); +} + +static int es581_4_rx_cmd_ret_u32(struct es58x_device *es58x_dev, + const struct es581_4_urb_cmd *es581_4_urb_cmd, + enum es58x_ret_type ret_type) +{ + struct net_device *netdev; + const struct es581_4_rx_cmd_ret *rx_cmd_ret; + u16 msg_len = get_unaligned_le16(&es581_4_urb_cmd->msg_len); + int ret; + + ret = es58x_check_msg_len(es58x_dev->dev, + es581_4_urb_cmd->rx_cmd_ret, msg_len); + if (ret) + return ret; + + rx_cmd_ret = &es581_4_urb_cmd->rx_cmd_ret; + + ret = es58x_get_netdev(es58x_dev, rx_cmd_ret->channel_no, + ES581_4_CHANNEL_IDX_OFFSET, &netdev); + if (ret) + return ret; + + return es58x_rx_cmd_ret_u32(netdev, ret_type, + get_unaligned_le32(&rx_cmd_ret->rx_cmd_ret_le32)); +} + +static int es581_4_tx_ack_msg(struct es58x_device *es58x_dev, + const struct es581_4_urb_cmd *es581_4_urb_cmd) +{ + struct net_device *netdev; + const struct es581_4_tx_ack_msg *tx_ack_msg; + u16 msg_len = get_unaligned_le16(&es581_4_urb_cmd->msg_len); + int ret; + + tx_ack_msg = &es581_4_urb_cmd->tx_ack_msg; + ret = es58x_check_msg_len(es58x_dev->dev, *tx_ack_msg, msg_len); + if (ret) + return ret; + + if (tx_ack_msg->rx_cmd_ret_u8 != ES58X_RET_U8_OK) + return es58x_rx_cmd_ret_u8(es58x_dev->dev, + ES58X_RET_TYPE_TX_MSG, + tx_ack_msg->rx_cmd_ret_u8); + + ret = es58x_get_netdev(es58x_dev, tx_ack_msg->channel_no, + ES581_4_CHANNEL_IDX_OFFSET, &netdev); + if (ret) + return ret; + + return es58x_tx_ack_msg(netdev, + get_unaligned_le16(&tx_ack_msg->tx_free_entries), + ES58X_RET_U32_OK); +} + +static int es581_4_dispatch_rx_cmd(struct es58x_device *es58x_dev, + const struct es581_4_urb_cmd *es581_4_urb_cmd) +{ + const struct device *dev = es58x_dev->dev; + u16 msg_len = get_unaligned_le16(&es581_4_urb_cmd->msg_len); + enum es581_4_rx_type rx_type = es581_4_urb_cmd->rx_can_msg[0].rx_type; + int ret = 0; + + switch (rx_type) { + case ES581_4_RX_TYPE_MESSAGE: + return es581_4_rx_can_msg(es58x_dev, es581_4_urb_cmd, msg_len); + + case ES581_4_RX_TYPE_ERROR: + ret = es58x_check_msg_len(dev, es581_4_urb_cmd->rx_err_msg, + msg_len); + if (ret < 0) + return ret; + return es581_4_rx_err_msg(es58x_dev, + &es581_4_urb_cmd->rx_err_msg); + + case ES581_4_RX_TYPE_EVENT: + ret = es58x_check_msg_len(dev, es581_4_urb_cmd->rx_event_msg, + msg_len); + if (ret < 0) + return ret; + return es581_4_rx_event_msg(es58x_dev, + &es581_4_urb_cmd->rx_event_msg); + + default: + dev_err(dev, "%s: Unknown 
rx_type 0x%02X\n", __func__, rx_type); + return -EBADRQC; + } +} + +static int es581_4_handle_urb_cmd(struct es58x_device *es58x_dev, + const union es58x_urb_cmd *urb_cmd) +{ + const struct es581_4_urb_cmd *es581_4_urb_cmd; + struct device *dev = es58x_dev->dev; + u16 msg_len = es581_4_get_msg_len(urb_cmd); + int ret; + + es581_4_urb_cmd = &urb_cmd->es581_4_urb_cmd; + + if (es581_4_urb_cmd->cmd_type != ES581_4_CAN_COMMAND_TYPE) { + dev_err(dev, "%s: Unknown command type (0x%02X)\n", + __func__, es581_4_urb_cmd->cmd_type); + return -EBADRQC; + } + + switch ((enum es581_4_cmd_id)es581_4_urb_cmd->cmd_id) { + case ES581_4_CMD_ID_SET_BITTIMING: + return es581_4_rx_cmd_ret_u32(es58x_dev, es581_4_urb_cmd, + ES58X_RET_TYPE_SET_BITTIMING); + + case ES581_4_CMD_ID_ENABLE_CHANNEL: + return es581_4_rx_cmd_ret_u32(es58x_dev, es581_4_urb_cmd, + ES58X_RET_TYPE_ENABLE_CHANNEL); + + case ES581_4_CMD_ID_TX_MSG: + return es581_4_tx_ack_msg(es58x_dev, es581_4_urb_cmd); + + case ES581_4_CMD_ID_RX_MSG: + return es581_4_dispatch_rx_cmd(es58x_dev, es581_4_urb_cmd); + + case ES581_4_CMD_ID_RESET_RX: + ret = es581_4_rx_cmd_ret_u32(es58x_dev, es581_4_urb_cmd, + ES58X_RET_TYPE_RESET_RX); + return ret; + + case ES581_4_CMD_ID_RESET_TX: + ret = es581_4_rx_cmd_ret_u32(es58x_dev, es581_4_urb_cmd, + ES58X_RET_TYPE_RESET_TX); + return ret; + + case ES581_4_CMD_ID_DISABLE_CHANNEL: + return es581_4_rx_cmd_ret_u32(es58x_dev, es581_4_urb_cmd, + ES58X_RET_TYPE_DISABLE_CHANNEL); + + case ES581_4_CMD_ID_TIMESTAMP: + ret = es58x_check_msg_len(dev, es581_4_urb_cmd->timestamp, + msg_len); + if (ret < 0) + return ret; + es58x_rx_timestamp(es58x_dev, + get_unaligned_le64(&es581_4_urb_cmd->timestamp)); + return 0; + + case ES581_4_CMD_ID_ECHO: + return es581_4_echo_msg(es58x_dev, es581_4_urb_cmd); + + case ES581_4_CMD_ID_DEVICE_ERR: + ret = es58x_check_msg_len(dev, es581_4_urb_cmd->rx_cmd_ret_u8, + msg_len); + if (ret) + return ret; + return es58x_rx_cmd_ret_u8(dev, ES58X_RET_TYPE_DEVICE_ERR, + es581_4_urb_cmd->rx_cmd_ret_u8); + + default: + dev_warn(dev, "%s: Unexpected command ID: 0x%02X\n", + __func__, es581_4_urb_cmd->cmd_id); + return -EBADRQC; + } +} + +static void es581_4_fill_urb_header(union es58x_urb_cmd *urb_cmd, u8 cmd_type, + u8 cmd_id, u8 channel_idx, u16 msg_len) +{ + struct es581_4_urb_cmd *es581_4_urb_cmd = &urb_cmd->es581_4_urb_cmd; + + es581_4_urb_cmd->SOF = cpu_to_le16(es581_4_param.tx_start_of_frame); + es581_4_urb_cmd->cmd_type = cmd_type; + es581_4_urb_cmd->cmd_id = cmd_id; + es581_4_urb_cmd->msg_len = cpu_to_le16(msg_len); +} + +static int es581_4_tx_can_msg(struct es58x_priv *priv, + const struct sk_buff *skb) +{ + struct es58x_device *es58x_dev = priv->es58x_dev; + union es58x_urb_cmd *urb_cmd = priv->tx_urb->transfer_buffer; + struct es581_4_urb_cmd *es581_4_urb_cmd = &urb_cmd->es581_4_urb_cmd; + struct can_frame *cf = (struct can_frame *)skb->data; + struct es581_4_tx_can_msg *tx_can_msg; + u16 msg_len; + int ret; + + if (can_is_canfd_skb(skb)) + return -EMSGSIZE; + + if (priv->tx_can_msg_cnt == 0) { + msg_len = sizeof(es581_4_urb_cmd->bulk_tx_can_msg.num_can_msg); + es581_4_fill_urb_header(urb_cmd, ES581_4_CAN_COMMAND_TYPE, + ES581_4_CMD_ID_TX_MSG, + priv->channel_idx, msg_len); + es581_4_urb_cmd->bulk_tx_can_msg.num_can_msg = 0; + } else { + msg_len = es581_4_get_msg_len(urb_cmd); + } + + ret = es58x_check_msg_max_len(es58x_dev->dev, + es581_4_urb_cmd->bulk_tx_can_msg, + msg_len + sizeof(*tx_can_msg)); + if (ret) + return ret; + + /* Fill message contents. 
*/ + tx_can_msg = (typeof(tx_can_msg))&es581_4_urb_cmd->raw_msg[msg_len]; + put_unaligned_le32(es58x_get_raw_can_id(cf), &tx_can_msg->can_id); + put_unaligned_le32(priv->tx_head, &tx_can_msg->packet_idx); + put_unaligned_le16((u16)es58x_get_flags(skb), &tx_can_msg->flags); + tx_can_msg->channel_no = priv->channel_idx + ES581_4_CHANNEL_IDX_OFFSET; + tx_can_msg->dlc = can_get_cc_dlc(cf, priv->can.ctrlmode); + + memcpy(tx_can_msg->data, cf->data, cf->len); + + /* Calculate new sizes. */ + es581_4_urb_cmd->bulk_tx_can_msg.num_can_msg++; + msg_len += es581_4_sizeof_rx_tx_msg(*tx_can_msg); + priv->tx_urb->transfer_buffer_length = es58x_get_urb_cmd_len(es58x_dev, + msg_len); + es581_4_urb_cmd->msg_len = cpu_to_le16(msg_len); + + return 0; +} + +static int es581_4_set_bittiming(struct es58x_priv *priv) +{ + struct es581_4_tx_conf_msg tx_conf_msg = { 0 }; + struct can_bittiming *bt = &priv->can.bittiming; + + tx_conf_msg.bitrate = cpu_to_le32(bt->bitrate); + /* bt->sample_point is in tenth of percent. Convert it to percent. */ + tx_conf_msg.sample_point = cpu_to_le32(bt->sample_point / 10U); + tx_conf_msg.samples_per_bit = cpu_to_le32(ES58X_SAMPLES_PER_BIT_ONE); + tx_conf_msg.bit_time = cpu_to_le32(can_bit_time(bt)); + tx_conf_msg.sjw = cpu_to_le32(bt->sjw); + tx_conf_msg.sync_edge = cpu_to_le32(ES58X_SYNC_EDGE_SINGLE); + tx_conf_msg.physical_layer = + cpu_to_le32(ES58X_PHYSICAL_LAYER_HIGH_SPEED); + tx_conf_msg.echo_mode = cpu_to_le32(ES58X_ECHO_ON); + tx_conf_msg.channel_no = priv->channel_idx + ES581_4_CHANNEL_IDX_OFFSET; + + return es58x_send_msg(priv->es58x_dev, ES581_4_CAN_COMMAND_TYPE, + ES581_4_CMD_ID_SET_BITTIMING, &tx_conf_msg, + sizeof(tx_conf_msg), priv->channel_idx); +} + +static int es581_4_enable_channel(struct es58x_priv *priv) +{ + int ret; + u8 msg = priv->channel_idx + ES581_4_CHANNEL_IDX_OFFSET; + + ret = es581_4_set_bittiming(priv); + if (ret) + return ret; + + return es58x_send_msg(priv->es58x_dev, ES581_4_CAN_COMMAND_TYPE, + ES581_4_CMD_ID_ENABLE_CHANNEL, &msg, sizeof(msg), + priv->channel_idx); +} + +static int es581_4_disable_channel(struct es58x_priv *priv) +{ + u8 msg = priv->channel_idx + ES581_4_CHANNEL_IDX_OFFSET; + + return es58x_send_msg(priv->es58x_dev, ES581_4_CAN_COMMAND_TYPE, + ES581_4_CMD_ID_DISABLE_CHANNEL, &msg, sizeof(msg), + priv->channel_idx); +} + +static int es581_4_reset_device(struct es58x_device *es58x_dev) +{ + return es58x_send_msg(es58x_dev, ES581_4_CAN_COMMAND_TYPE, + ES581_4_CMD_ID_RESET_DEVICE, + ES58X_EMPTY_MSG, 0, ES58X_CHANNEL_IDX_NA); +} + +static int es581_4_get_timestamp(struct es58x_device *es58x_dev) +{ + return es58x_send_msg(es58x_dev, ES581_4_CAN_COMMAND_TYPE, + ES581_4_CMD_ID_TIMESTAMP, + ES58X_EMPTY_MSG, 0, ES58X_CHANNEL_IDX_NA); +} + +/* Nominal bittiming constants for ES581.4 as specified in the + * microcontroller datasheet: "Stellaris(R) LM3S5B91 Microcontroller" + * table 17-4 "CAN Protocol Ranges" from Texas Instruments. 
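+ *
+ * Purely illustrative example (values not taken from the datasheet): with
+ * the 50 MHz CAN clock declared in es581_4_param below, brp = 5, tseg1 = 7
+ * and tseg2 = 2 give 1 + 7 + 2 = 10 time quanta per bit, i.e. a 1 Mbit/s
+ * nominal bitrate with a sample point at 8/10 = 80%, well inside the
+ * ranges listed here.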
+ */ +static const struct can_bittiming_const es581_4_bittiming_const = { + .name = "ES581.4", + .tseg1_min = 1, + .tseg1_max = 8, + .tseg2_min = 1, + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 128, + .brp_inc = 1 +}; + +const struct es58x_parameters es581_4_param = { + .bittiming_const = &es581_4_bittiming_const, + .data_bittiming_const = NULL, + .tdc_const = NULL, + .bitrate_max = 1 * MEGA /* BPS */, + .clock = {.freq = 50 * MEGA /* Hz */}, + .ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC, + .tx_start_of_frame = 0xAFAF, + .rx_start_of_frame = 0xFAFA, + .tx_urb_cmd_max_len = ES581_4_TX_URB_CMD_MAX_LEN, + .rx_urb_cmd_max_len = ES581_4_RX_URB_CMD_MAX_LEN, + /* Size of internal device TX queue is 330. + * + * However, we witnessed some ES58X_ERR_PROT_CRC errors from + * the device and thus, echo_skb_max was lowered to the + * empirical value of 75 which seems stable and then rounded + * down to become a power of two. + * + * Root cause of those ES58X_ERR_PROT_CRC errors is still + * unclear. + */ + .fifo_mask = 63, /* echo_skb_max = 64 */ + .dql_min_limit = CAN_FRAME_LEN_MAX * 50, /* Empirical value. */ + .tx_bulk_max = ES581_4_TX_BULK_MAX, + .urb_cmd_header_len = ES581_4_URB_CMD_HEADER_LEN, + .rx_urb_max = ES58X_RX_URBS_MAX, + .tx_urb_max = ES58X_TX_URBS_MAX +}; + +const struct es58x_operators es581_4_ops = { + .get_msg_len = es581_4_get_msg_len, + .handle_urb_cmd = es581_4_handle_urb_cmd, + .fill_urb_header = es581_4_fill_urb_header, + .tx_can_msg = es581_4_tx_can_msg, + .enable_channel = es581_4_enable_channel, + .disable_channel = es581_4_disable_channel, + .reset_device = es581_4_reset_device, + .get_timestamp = es581_4_get_timestamp +}; diff --git a/drivers/net/can/usb/etas_es58x/es581_4.h b/drivers/net/can/usb/etas_es58x/es581_4.h new file mode 100644 index 000000000000..667ecb77168c --- /dev/null +++ b/drivers/net/can/usb/etas_es58x/es581_4.h @@ -0,0 +1,207 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* Driver for ETAS GmbH ES58X USB CAN(-FD) Bus Interfaces. + * + * File es581_4.h: Definitions and declarations specific to ETAS + * ES581.4. + * + * Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved. + * Copyright (c) 2020 ETAS K.K.. All rights reserved. + * Copyright (c) 2020, 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr> + */ + +#ifndef __ES581_4_H__ +#define __ES581_4_H__ + +#include <linux/types.h> + +#define ES581_4_NUM_CAN_CH 2 +#define ES581_4_CHANNEL_IDX_OFFSET 1 + +#define ES581_4_TX_BULK_MAX 25 +#define ES581_4_RX_BULK_MAX 30 +#define ES581_4_ECHO_BULK_MAX 30 + +enum es581_4_cmd_type { + ES581_4_CAN_COMMAND_TYPE = 0x45 +}; + +enum es581_4_cmd_id { + ES581_4_CMD_ID_OPEN_CHANNEL = 0x01, + ES581_4_CMD_ID_CLOSE_CHANNEL = 0x02, + ES581_4_CMD_ID_SET_BITTIMING = 0x03, + ES581_4_CMD_ID_ENABLE_CHANNEL = 0x04, + ES581_4_CMD_ID_TX_MSG = 0x05, + ES581_4_CMD_ID_RX_MSG = 0x06, + ES581_4_CMD_ID_RESET_RX = 0x0A, + ES581_4_CMD_ID_RESET_TX = 0x0B, + ES581_4_CMD_ID_DISABLE_CHANNEL = 0x0C, + ES581_4_CMD_ID_TIMESTAMP = 0x0E, + ES581_4_CMD_ID_RESET_DEVICE = 0x28, + ES581_4_CMD_ID_ECHO = 0x71, + ES581_4_CMD_ID_DEVICE_ERR = 0x72 +}; + +enum es581_4_rx_type { + ES581_4_RX_TYPE_MESSAGE = 1, + ES581_4_RX_TYPE_ERROR = 3, + ES581_4_RX_TYPE_EVENT = 4 +}; + +/** + * struct es581_4_tx_conf_msg - Channel configuration. + * @bitrate: Bitrate. + * @sample_point: Sample point is in percent [0..100]. + * @samples_per_bit: type enum es58x_samples_per_bit. + * @bit_time: Number of time quanta in one bit. + * @sjw: Synchronization Jump Width. 
+ * @sync_edge: type enum es58x_sync_edge. + * @physical_layer: type enum es58x_physical_layer. + * @echo_mode: type enum es58x_echo_mode. + * @channel_no: Channel number, starting from 1. Not to be confused + * with channed_idx of the ES58X FD which starts from 0. + */ +struct es581_4_tx_conf_msg { + __le32 bitrate; + __le32 sample_point; + __le32 samples_per_bit; + __le32 bit_time; + __le32 sjw; + __le32 sync_edge; + __le32 physical_layer; + __le32 echo_mode; + u8 channel_no; +} __packed; + +struct es581_4_tx_can_msg { + __le32 can_id; + __le32 packet_idx; + __le16 flags; + u8 channel_no; + u8 dlc; + u8 data[CAN_MAX_DLEN]; +} __packed; + +/* The ES581.4 allows bulk transfer. */ +struct es581_4_bulk_tx_can_msg { + u8 num_can_msg; + /* Using type "u8[]" instead of "struct es581_4_tx_can_msg[]" + * for tx_msg_buf because each member has a flexible size. + */ + u8 tx_can_msg_buf[ES581_4_TX_BULK_MAX * + sizeof(struct es581_4_tx_can_msg)]; +} __packed; + +struct es581_4_echo_msg { + __le64 timestamp; + __le32 packet_idx; +} __packed; + +struct es581_4_bulk_echo_msg { + u8 channel_no; + struct es581_4_echo_msg echo_msg[ES581_4_ECHO_BULK_MAX]; +} __packed; + +/* Normal Rx CAN Message */ +struct es581_4_rx_can_msg { + __le64 timestamp; + u8 rx_type; /* type enum es581_4_rx_type */ + u8 flags; /* type enum es58x_flag */ + u8 channel_no; + u8 dlc; + __le32 can_id; + u8 data[CAN_MAX_DLEN]; +} __packed; + +struct es581_4_rx_err_msg { + __le64 timestamp; + __le16 rx_type; /* type enum es581_4_rx_type */ + __le16 flags; /* type enum es58x_flag */ + u8 channel_no; + u8 __padding[2]; + u8 dlc; + __le32 tag; /* Related to the CAN filtering. Unused in this module */ + __le32 can_id; + __le32 error; /* type enum es58x_error */ + __le32 destination; /* Unused in this module */ +} __packed; + +struct es581_4_rx_event_msg { + __le64 timestamp; + __le16 rx_type; /* type enum es581_4_rx_type */ + u8 channel_no; + u8 __padding; + __le32 tag; /* Related to the CAN filtering. Unused in this module */ + __le32 event; /* type enum es58x_event */ + __le32 destination; /* Unused in this module */ +} __packed; + +struct es581_4_tx_ack_msg { + __le16 tx_free_entries; /* Number of remaining free entries in the device TX queue */ + u8 channel_no; + u8 rx_cmd_ret_u8; /* type enum es58x_cmd_ret_code_u8 */ +} __packed; + +struct es581_4_rx_cmd_ret { + __le32 rx_cmd_ret_le32; + u8 channel_no; + u8 __padding[3]; +} __packed; + +/** + * struct es581_4_urb_cmd - Commands received from or sent to the + * ES581.4 device. + * @SOF: Start of Frame. + * @cmd_type: Command Type (type: enum es581_4_cmd_type). The CRC + * calculation starts at this position. + * @cmd_id: Command ID (type: enum es581_4_cmd_id). + * @msg_len: Length of the message, excluding CRC (i.e. length of the + * union). + * @tx_conf_msg: Channel configuration. + * @bulk_tx_can_msg: Tx messages. + * @rx_can_msg: Array of Rx messages. + * @bulk_echo_msg: Tx message being looped back. + * @rx_err_msg: Error message. + * @rx_event_msg: Event message. + * @tx_ack_msg: Tx acknowledgment message. + * @rx_cmd_ret: Command return code. + * @timestamp: Timestamp reply. + * @rx_cmd_ret_u8: Rx 8 bits return code (type: enum + * es58x_cmd_ret_code_u8). + * @raw_msg: Message raw payload. + * @reserved_for_crc16_do_not_use: The structure ends with a + * CRC16. Because the structures in above union are of variable + * lengths, we can not predict the offset of the CRC in + * advance. Use functions es58x_get_crc() and es58x_set_crc() to + * manipulate it. 
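+ *
+ * Rough on-wire layout (illustration only, field sizes in bytes):
+ *
+ *   | SOF | cmd_type | cmd_id | msg_len | payload (msg_len) | CRC16 |
+ *   |  2  |    1     |   1    |    2    |      variable     |   2   |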
+ */ +struct es581_4_urb_cmd { + __le16 SOF; + u8 cmd_type; + u8 cmd_id; + __le16 msg_len; + + union { + struct es581_4_tx_conf_msg tx_conf_msg; + struct es581_4_bulk_tx_can_msg bulk_tx_can_msg; + struct es581_4_rx_can_msg rx_can_msg[ES581_4_RX_BULK_MAX]; + struct es581_4_bulk_echo_msg bulk_echo_msg; + struct es581_4_rx_err_msg rx_err_msg; + struct es581_4_rx_event_msg rx_event_msg; + struct es581_4_tx_ack_msg tx_ack_msg; + struct es581_4_rx_cmd_ret rx_cmd_ret; + __le64 timestamp; + u8 rx_cmd_ret_u8; + DECLARE_FLEX_ARRAY(u8, raw_msg); + } __packed; + + __le16 reserved_for_crc16_do_not_use; +} __packed; + +#define ES581_4_URB_CMD_HEADER_LEN (offsetof(struct es581_4_urb_cmd, raw_msg)) +#define ES581_4_TX_URB_CMD_MAX_LEN \ + ES58X_SIZEOF_URB_CMD(struct es581_4_urb_cmd, bulk_tx_can_msg) +#define ES581_4_RX_URB_CMD_MAX_LEN \ + ES58X_SIZEOF_URB_CMD(struct es581_4_urb_cmd, rx_can_msg) + +#endif /* __ES581_4_H__ */ diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c new file mode 100644 index 000000000000..f799233c2b72 --- /dev/null +++ b/drivers/net/can/usb/etas_es58x/es58x_core.c @@ -0,0 +1,2272 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Driver for ETAS GmbH ES58X USB CAN(-FD) Bus Interfaces. + * + * File es58x_core.c: Core logic to manage the network devices and the + * USB interface. + * + * Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved. + * Copyright (c) 2020 ETAS K.K.. All rights reserved. + * Copyright (c) 2020-2025 Vincent Mailhol <mailhol@kernel.org> + */ + +#include <linux/unaligned.h> +#include <linux/crc16.h> +#include <linux/ethtool.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/usb.h> +#include <net/devlink.h> + +#include "es58x_core.h" + +MODULE_AUTHOR("Vincent Mailhol <mailhol.vincent@wanadoo.fr>"); +MODULE_AUTHOR("Arunachalam Santhanam <arunachalam.santhanam@in.bosch.com>"); +MODULE_DESCRIPTION("Socket CAN driver for ETAS ES58X USB adapters"); +MODULE_LICENSE("GPL v2"); + +#define ES58X_VENDOR_ID 0x108C +#define ES581_4_PRODUCT_ID 0x0159 +#define ES582_1_PRODUCT_ID 0x0168 +#define ES584_1_PRODUCT_ID 0x0169 + +/* ES58X FD has some interface protocols unsupported by this driver. */ +#define ES58X_FD_INTERFACE_PROTOCOL 0 + +/* Table of devices which work with this driver. */ +static const struct usb_device_id es58x_id_table[] = { + { + /* ETAS GmbH ES581.4 USB dual-channel CAN Bus Interface module. */ + USB_DEVICE(ES58X_VENDOR_ID, ES581_4_PRODUCT_ID), + .driver_info = ES58X_DUAL_CHANNEL + }, { + /* ETAS GmbH ES582.1 USB dual-channel CAN FD Bus Interface module. */ + USB_DEVICE_INTERFACE_PROTOCOL(ES58X_VENDOR_ID, ES582_1_PRODUCT_ID, + ES58X_FD_INTERFACE_PROTOCOL), + .driver_info = ES58X_DUAL_CHANNEL | ES58X_FD_FAMILY + }, { + /* ETAS GmbH ES584.1 USB single-channel CAN FD Bus Interface module. */ + USB_DEVICE_INTERFACE_PROTOCOL(ES58X_VENDOR_ID, ES584_1_PRODUCT_ID, + ES58X_FD_INTERFACE_PROTOCOL), + .driver_info = ES58X_FD_FAMILY + }, { + /* Terminating entry */ + } +}; + +MODULE_DEVICE_TABLE(usb, es58x_id_table); + +#define es58x_print_hex_dump(buf, len) \ + print_hex_dump(KERN_DEBUG, \ + KBUILD_MODNAME " " __stringify(buf) ": ", \ + DUMP_PREFIX_NONE, 16, 1, buf, len, false) + +#define es58x_print_hex_dump_debug(buf, len) \ + print_hex_dump_debug(KBUILD_MODNAME " " __stringify(buf) ": ",\ + DUMP_PREFIX_NONE, 16, 1, buf, len, false) + +/* The last two bytes of an ES58X command is a CRC16. 
The first two + * bytes (the start of frame) are skipped and the CRC calculation + * starts on the third byte. + */ +#define ES58X_CRC_CALC_OFFSET sizeof_field(union es58x_urb_cmd, sof) + +/** + * es58x_calculate_crc() - Compute the crc16 of a given URB. + * @urb_cmd: The URB command for which we want to calculate the CRC. + * @urb_len: Length of @urb_cmd. Must be at least bigger than 4 + * (ES58X_CRC_CALC_OFFSET + sizeof(crc)) + * + * Return: crc16 value. + */ +static u16 es58x_calculate_crc(const union es58x_urb_cmd *urb_cmd, u16 urb_len) +{ + u16 crc; + ssize_t len = urb_len - ES58X_CRC_CALC_OFFSET - sizeof(crc); + + crc = crc16(0, &urb_cmd->raw_cmd[ES58X_CRC_CALC_OFFSET], len); + return crc; +} + +/** + * es58x_get_crc() - Get the CRC value of a given URB. + * @urb_cmd: The URB command for which we want to get the CRC. + * @urb_len: Length of @urb_cmd. Must be at least bigger than 4 + * (ES58X_CRC_CALC_OFFSET + sizeof(crc)) + * + * Return: crc16 value. + */ +static u16 es58x_get_crc(const union es58x_urb_cmd *urb_cmd, u16 urb_len) +{ + u16 crc; + const __le16 *crc_addr; + + crc_addr = (__le16 *)&urb_cmd->raw_cmd[urb_len - sizeof(crc)]; + crc = get_unaligned_le16(crc_addr); + return crc; +} + +/** + * es58x_set_crc() - Set the CRC value of a given URB. + * @urb_cmd: The URB command for which we want to get the CRC. + * @urb_len: Length of @urb_cmd. Must be at least bigger than 4 + * (ES58X_CRC_CALC_OFFSET + sizeof(crc)) + */ +static void es58x_set_crc(union es58x_urb_cmd *urb_cmd, u16 urb_len) +{ + u16 crc; + __le16 *crc_addr; + + crc = es58x_calculate_crc(urb_cmd, urb_len); + crc_addr = (__le16 *)&urb_cmd->raw_cmd[urb_len - sizeof(crc)]; + put_unaligned_le16(crc, crc_addr); +} + +/** + * es58x_check_crc() - Validate the CRC value of a given URB. + * @es58x_dev: ES58X device. + * @urb_cmd: The URB command for which we want to check the CRC. + * @urb_len: Length of @urb_cmd. Must be at least bigger than 4 + * (ES58X_CRC_CALC_OFFSET + sizeof(crc)) + * + * Return: zero on success, -EBADMSG if the CRC check fails. + */ +static int es58x_check_crc(struct es58x_device *es58x_dev, + const union es58x_urb_cmd *urb_cmd, u16 urb_len) +{ + u16 calculated_crc = es58x_calculate_crc(urb_cmd, urb_len); + u16 expected_crc = es58x_get_crc(urb_cmd, urb_len); + + if (expected_crc != calculated_crc) { + dev_err_ratelimited(es58x_dev->dev, + "%s: Bad CRC, urb_len: %d\n", + __func__, urb_len); + return -EBADMSG; + } + + return 0; +} + +/** + * es58x_timestamp_to_ns() - Convert a timestamp value received from a + * ES58X device to nanoseconds. + * @timestamp: Timestamp received from a ES58X device. + * + * The timestamp received from ES58X is expressed in multiples of 0.5 + * micro seconds. This function converts it in to nanoseconds. + * + * Return: Timestamp value in nanoseconds. + */ +static u64 es58x_timestamp_to_ns(u64 timestamp) +{ + const u64 es58x_timestamp_ns_mult_coef = 500ULL; + + return es58x_timestamp_ns_mult_coef * timestamp; +} + +/** + * es58x_set_skb_timestamp() - Set the hardware timestamp of an skb. + * @netdev: CAN network device. + * @skb: socket buffer of a CAN message. + * @timestamp: Timestamp received from an ES58X device. + * + * Used for both received and echo messages. 
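+ *
+ * Worked example (illustrative values): a raw device timestamp of
+ * 2,000,000 ticks corresponds to 2,000,000 * 500 ns = 1 second on the
+ * device clock; adding realtime_diff_ns then shifts it onto the kernel
+ * CLOCK_REALTIME base so that it can be compared with system time.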
+ */ +static void es58x_set_skb_timestamp(struct net_device *netdev, + struct sk_buff *skb, u64 timestamp) +{ + struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev; + struct skb_shared_hwtstamps *hwts; + + hwts = skb_hwtstamps(skb); + /* Ignoring overflow (overflow on 64 bits timestamp with nano + * second precision would occur after more than 500 years). + */ + hwts->hwtstamp = ns_to_ktime(es58x_timestamp_to_ns(timestamp) + + es58x_dev->realtime_diff_ns); +} + +/** + * es58x_rx_timestamp() - Handle a received timestamp. + * @es58x_dev: ES58X device. + * @timestamp: Timestamp received from a ES58X device. + * + * Calculate the difference between the ES58X device and the kernel + * internal clocks. This difference will be later used as an offset to + * convert the timestamps of RX and echo messages to match the kernel + * system time (e.g. convert to UNIX time). + */ +void es58x_rx_timestamp(struct es58x_device *es58x_dev, u64 timestamp) +{ + u64 ktime_real_ns = ktime_get_real_ns(); + u64 device_timestamp = es58x_timestamp_to_ns(timestamp); + + dev_dbg(es58x_dev->dev, "%s: request round-trip time: %llu ns\n", + __func__, ktime_real_ns - es58x_dev->ktime_req_ns); + + es58x_dev->realtime_diff_ns = + (es58x_dev->ktime_req_ns + ktime_real_ns) / 2 - device_timestamp; + es58x_dev->ktime_req_ns = 0; + + dev_dbg(es58x_dev->dev, + "%s: Device timestamp: %llu, diff with kernel: %llu\n", + __func__, device_timestamp, es58x_dev->realtime_diff_ns); +} + +/** + * es58x_set_realtime_diff_ns() - Calculate difference between the + * clocks of the ES58X device and the kernel + * @es58x_dev: ES58X device. + * + * Request a timestamp from the ES58X device. Once the answer is + * received, the timestamp difference will be set by the callback + * function es58x_rx_timestamp(). + * + * Return: zero on success, errno when any error occurs. + */ +static int es58x_set_realtime_diff_ns(struct es58x_device *es58x_dev) +{ + if (es58x_dev->ktime_req_ns) { + dev_warn(es58x_dev->dev, + "%s: Previous request to set timestamp has not completed yet\n", + __func__); + return -EBUSY; + } + + es58x_dev->ktime_req_ns = ktime_get_real_ns(); + return es58x_dev->ops->get_timestamp(es58x_dev); +} + +/** + * es58x_is_can_state_active() - Is the network device in an active + * CAN state? + * @netdev: CAN network device. + * + * The device is considered active if it is able to send or receive + * CAN frames, that is to say if it is in any of + * CAN_STATE_ERROR_ACTIVE, CAN_STATE_ERROR_WARNING or + * CAN_STATE_ERROR_PASSIVE states. + * + * Caution: when recovering from a bus-off, + * net/core/dev.c#can_restart() will call + * net/core/dev.c#can_flush_echo_skb() without using any kind of + * locks. For this reason, it is critical to guarantee that no TX or + * echo operations (i.e. any access to priv->echo_skb[]) can be done + * while this function is returning false. + * + * Return: true if the device is active, else returns false. + */ +static bool es58x_is_can_state_active(struct net_device *netdev) +{ + return es58x_priv(netdev)->can.state < CAN_STATE_BUS_OFF; +} + +/** + * es58x_is_echo_skb_threshold_reached() - Determine the limit of how + * many skb slots can be taken before we should stop the network + * queue. + * @priv: ES58X private parameters related to the network device. + * + * We need to save enough free skb slots in order to be able to do + * bulk send. This function can be used to determine when to wake or + * stop the network queue in regard to the number of skb slots already + * taken if the echo FIFO. 
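+ *
+ * For instance, with the ES581.4 parameters from es581_4.c
+ * (echo_skb_max = 64, tx_bulk_max = 25), the threshold evaluates to
+ * 64 - 25 + 1 = 40: the queue is stopped once 40 echo skb slots are in
+ * use, which always leaves room for one more full bulk transfer.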
+ * + * Return: boolean. + */ +static bool es58x_is_echo_skb_threshold_reached(struct es58x_priv *priv) +{ + u32 num_echo_skb = priv->tx_head - priv->tx_tail; + u32 threshold = priv->can.echo_skb_max - + priv->es58x_dev->param->tx_bulk_max + 1; + + return num_echo_skb >= threshold; +} + +/** + * es58x_can_free_echo_skb_tail() - Remove the oldest echo skb of the + * echo FIFO. + * @netdev: CAN network device. + * + * Naming convention: the tail is the beginning of the FIFO, i.e. the + * first skb to have entered the FIFO. + */ +static void es58x_can_free_echo_skb_tail(struct net_device *netdev) +{ + struct es58x_priv *priv = es58x_priv(netdev); + u16 fifo_mask = priv->es58x_dev->param->fifo_mask; + unsigned int frame_len = 0; + + can_free_echo_skb(netdev, priv->tx_tail & fifo_mask, &frame_len); + netdev_completed_queue(netdev, 1, frame_len); + + priv->tx_tail++; + + netdev->stats.tx_dropped++; +} + +/** + * es58x_can_get_echo_skb_recovery() - Try to re-sync the echo FIFO. + * @netdev: CAN network device. + * @rcv_packet_idx: Index + * + * This function should not be called under normal circumstances. In + * the unlikely case that one or several URB packages get dropped by + * the device, the index will get out of sync. Try to recover by + * dropping the echo skb packets with older indexes. + * + * Return: zero if recovery was successful, -EINVAL otherwise. + */ +static int es58x_can_get_echo_skb_recovery(struct net_device *netdev, + u32 rcv_packet_idx) +{ + struct es58x_priv *priv = es58x_priv(netdev); + int ret = 0; + + netdev->stats.tx_errors++; + + if (net_ratelimit()) + netdev_warn(netdev, + "Bad echo packet index: %u. First index: %u, end index %u, num_echo_skb: %02u/%02u\n", + rcv_packet_idx, priv->tx_tail, priv->tx_head, + priv->tx_head - priv->tx_tail, + priv->can.echo_skb_max); + + if ((s32)(rcv_packet_idx - priv->tx_tail) < 0) { + if (net_ratelimit()) + netdev_warn(netdev, + "Received echo index is from the past. Ignoring it\n"); + ret = -EINVAL; + } else if ((s32)(rcv_packet_idx - priv->tx_head) >= 0) { + if (net_ratelimit()) + netdev_err(netdev, + "Received echo index is from the future. Ignoring it\n"); + ret = -EINVAL; + } else { + if (net_ratelimit()) + netdev_warn(netdev, + "Recovery: dropping %u echo skb from index %u to %u\n", + rcv_packet_idx - priv->tx_tail, + priv->tx_tail, rcv_packet_idx - 1); + while (priv->tx_tail != rcv_packet_idx) { + if (priv->tx_tail == priv->tx_head) + return -EINVAL; + es58x_can_free_echo_skb_tail(netdev); + } + } + return ret; +} + +/** + * es58x_can_get_echo_skb() - Get the skb from the echo FIFO and loop + * it back locally. + * @netdev: CAN network device. + * @rcv_packet_idx: Index of the first packet received from the device. + * @tstamps: Array of hardware timestamps received from a ES58X device. + * @pkts: Number of packets (and so, length of @tstamps). + * + * Callback function for when we receive a self reception + * acknowledgment. Retrieves the skb from the echo FIFO, sets its + * hardware timestamp (the actual time it was sent) and loops it back + * locally. + * + * The device has to be active (i.e. network interface UP and not in + * bus off state or restarting). + * + * Packet indexes must be consecutive (i.e. index of first packet is + * @rcv_packet_idx, index of second packet is @rcv_packet_idx + 1 and + * index of last packet is @rcv_packet_idx + @pkts - 1). + * + * Return: zero on success. 
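+ *
+ * As an illustration, @rcv_packet_idx = 42 with @pkts = 3 acknowledges
+ * the echo skbs stored in slots 42 & fifo_mask, 43 & fifo_mask and
+ * 44 & fifo_mask, in that order.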
+ */ +int es58x_can_get_echo_skb(struct net_device *netdev, u32 rcv_packet_idx, + u64 *tstamps, unsigned int pkts) +{ + struct es58x_priv *priv = es58x_priv(netdev); + unsigned int rx_total_frame_len = 0; + unsigned int num_echo_skb = priv->tx_head - priv->tx_tail; + int i; + u16 fifo_mask = priv->es58x_dev->param->fifo_mask; + + if (!netif_running(netdev)) { + if (net_ratelimit()) + netdev_info(netdev, + "%s: %s is down, dropping %d echo packets\n", + __func__, netdev->name, pkts); + netdev->stats.tx_dropped += pkts; + return 0; + } else if (!es58x_is_can_state_active(netdev)) { + if (net_ratelimit()) + netdev_dbg(netdev, + "Bus is off or device is restarting. Ignoring %u echo packets from index %u\n", + pkts, rcv_packet_idx); + /* stats.tx_dropped will be (or was already) + * incremented by + * drivers/net/can/net/dev.c:can_flush_echo_skb(). + */ + return 0; + } else if (num_echo_skb == 0) { + if (net_ratelimit()) + netdev_warn(netdev, + "Received %u echo packets from index: %u but echo skb queue is empty.\n", + pkts, rcv_packet_idx); + netdev->stats.tx_dropped += pkts; + return 0; + } + + if (priv->tx_tail != rcv_packet_idx) { + if (es58x_can_get_echo_skb_recovery(netdev, rcv_packet_idx) < 0) { + if (net_ratelimit()) + netdev_warn(netdev, + "Could not find echo skb for echo packet index: %u\n", + rcv_packet_idx); + return 0; + } + } + if (num_echo_skb < pkts) { + int pkts_drop = pkts - num_echo_skb; + + if (net_ratelimit()) + netdev_err(netdev, + "Received %u echo packets but have only %d echo skb. Dropping %d echo skb\n", + pkts, num_echo_skb, pkts_drop); + netdev->stats.tx_dropped += pkts_drop; + pkts -= pkts_drop; + } + + for (i = 0; i < pkts; i++) { + unsigned int skb_idx = priv->tx_tail & fifo_mask; + struct sk_buff *skb = priv->can.echo_skb[skb_idx]; + unsigned int frame_len = 0; + + if (skb) + es58x_set_skb_timestamp(netdev, skb, tstamps[i]); + + netdev->stats.tx_bytes += can_get_echo_skb(netdev, skb_idx, + &frame_len); + rx_total_frame_len += frame_len; + + priv->tx_tail++; + } + + netdev_completed_queue(netdev, pkts, rx_total_frame_len); + netdev->stats.tx_packets += pkts; + + priv->err_passive_before_rtx_success = 0; + if (!es58x_is_echo_skb_threshold_reached(priv)) + netif_wake_queue(netdev); + + return 0; +} + +/** + * es58x_can_reset_echo_fifo() - Reset the echo FIFO. + * @netdev: CAN network device. + * + * The echo_skb array of struct can_priv will be flushed by + * drivers/net/can/dev.c:can_flush_echo_skb(). This function resets + * the parameters of the struct es58x_priv of our device and reset the + * queue (c.f. BQL). + */ +static void es58x_can_reset_echo_fifo(struct net_device *netdev) +{ + struct es58x_priv *priv = es58x_priv(netdev); + + priv->tx_tail = 0; + priv->tx_head = 0; + priv->tx_urb = NULL; + priv->err_passive_before_rtx_success = 0; + netdev_reset_queue(netdev); +} + +/** + * es58x_flush_pending_tx_msg() - Reset the buffer for transmission messages. + * @netdev: CAN network device. + * + * es58x_start_xmit() will queue up to tx_bulk_max messages in + * &tx_urb buffer and do a bulk send of all messages in one single URB + * (c.f. xmit_more flag). When the device recovers from a bus off + * state or when the device stops, the tx_urb buffer might still have + * pending messages in it and thus need to be flushed. 
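+ *
+ * For example (ES581.4 values, for illustration): up to tx_bulk_max = 25
+ * frames may be accumulated in the URB before it is submitted, so up to
+ * 25 queued echo skbs may have to be dropped here when the device stops
+ * or recovers from bus off.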
+ */ +static void es58x_flush_pending_tx_msg(struct net_device *netdev) +{ + struct es58x_priv *priv = es58x_priv(netdev); + struct es58x_device *es58x_dev = priv->es58x_dev; + + if (priv->tx_urb) { + netdev_warn(netdev, "%s: dropping %d TX messages\n", + __func__, priv->tx_can_msg_cnt); + netdev->stats.tx_dropped += priv->tx_can_msg_cnt; + while (priv->tx_can_msg_cnt > 0) { + unsigned int frame_len = 0; + u16 fifo_mask = priv->es58x_dev->param->fifo_mask; + + priv->tx_head--; + priv->tx_can_msg_cnt--; + can_free_echo_skb(netdev, priv->tx_head & fifo_mask, + &frame_len); + netdev_completed_queue(netdev, 1, frame_len); + } + usb_anchor_urb(priv->tx_urb, &priv->es58x_dev->tx_urbs_idle); + atomic_inc(&es58x_dev->tx_urbs_idle_cnt); + usb_free_urb(priv->tx_urb); + } + priv->tx_urb = NULL; +} + +/** + * es58x_tx_ack_msg() - Handle acknowledgment messages. + * @netdev: CAN network device. + * @tx_free_entries: Number of free entries in the device transmit FIFO. + * @rx_cmd_ret_u32: error code as returned by the ES58X device. + * + * ES58X sends an acknowledgment message after a transmission request + * is done. This is mandatory for the ES581.4 but is optional (and + * deactivated in this driver) for the ES58X_FD family. + * + * Under normal circumstances, this function should never throw an + * error message. + * + * Return: zero on success, errno when any error occurs. + */ +int es58x_tx_ack_msg(struct net_device *netdev, u16 tx_free_entries, + enum es58x_ret_u32 rx_cmd_ret_u32) +{ + struct es58x_priv *priv = es58x_priv(netdev); + + if (tx_free_entries <= priv->es58x_dev->param->tx_bulk_max) { + if (net_ratelimit()) + netdev_err(netdev, + "Only %d entries left in device queue, num_echo_skb: %d/%d\n", + tx_free_entries, + priv->tx_head - priv->tx_tail, + priv->can.echo_skb_max); + netif_stop_queue(netdev); + } + + return es58x_rx_cmd_ret_u32(netdev, ES58X_RET_TYPE_TX_MSG, + rx_cmd_ret_u32); +} + +/** + * es58x_rx_can_msg() - Handle a received a CAN message. + * @netdev: CAN network device. + * @timestamp: Hardware time stamp (only relevant in rx branches). + * @data: CAN payload. + * @can_id: CAN ID. + * @es58x_flags: Please refer to enum es58x_flag. + * @dlc: Data Length Code (raw value). + * + * Fill up a CAN skb and post it. + * + * This function handles the case where the DLC of a classical CAN + * frame is greater than CAN_MAX_DLEN (c.f. the len8_dlc field of + * struct can_frame). + * + * Return: zero on success. 
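+ *
+ * For instance (illustrative): a Classical CAN frame received with a raw
+ * DLC of 12 still carries at most 8 data bytes (can_cc_dlc2len() caps the
+ * length), while can_frame_set_cc_len() preserves the raw DLC in len8_dlc
+ * when the CAN_CTRLMODE_CC_LEN8_DLC mode is enabled.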
+ */ +int es58x_rx_can_msg(struct net_device *netdev, u64 timestamp, const u8 *data, + canid_t can_id, enum es58x_flag es58x_flags, u8 dlc) +{ + struct canfd_frame *cfd; + struct can_frame *ccf; + struct sk_buff *skb; + u8 len; + bool is_can_fd = !!(es58x_flags & ES58X_FLAG_FD_DATA); + + if (dlc > CAN_MAX_RAW_DLC) { + netdev_err(netdev, + "%s: DLC is %d but maximum should be %d\n", + __func__, dlc, CAN_MAX_RAW_DLC); + return -EMSGSIZE; + } + + if (is_can_fd) { + len = can_fd_dlc2len(dlc); + skb = alloc_canfd_skb(netdev, &cfd); + } else { + len = can_cc_dlc2len(dlc); + skb = alloc_can_skb(netdev, &ccf); + cfd = (struct canfd_frame *)ccf; + } + if (!skb) { + netdev->stats.rx_dropped++; + return 0; + } + + cfd->can_id = can_id; + if (es58x_flags & ES58X_FLAG_EFF) + cfd->can_id |= CAN_EFF_FLAG; + if (is_can_fd) { + cfd->len = len; + if (es58x_flags & ES58X_FLAG_FD_BRS) + cfd->flags |= CANFD_BRS; + if (es58x_flags & ES58X_FLAG_FD_ESI) + cfd->flags |= CANFD_ESI; + } else { + can_frame_set_cc_len(ccf, dlc, es58x_priv(netdev)->can.ctrlmode); + if (es58x_flags & ES58X_FLAG_RTR) { + ccf->can_id |= CAN_RTR_FLAG; + len = 0; + } + } + memcpy(cfd->data, data, len); + netdev->stats.rx_packets++; + netdev->stats.rx_bytes += len; + + es58x_set_skb_timestamp(netdev, skb, timestamp); + netif_rx(skb); + + es58x_priv(netdev)->err_passive_before_rtx_success = 0; + + return 0; +} + +/** + * es58x_rx_err_msg() - Handle a received CAN event or error message. + * @netdev: CAN network device. + * @error: Error code. + * @event: Event code. + * @timestamp: Timestamp received from an ES58X device. + * + * Handle the errors and events received by the ES58X device, create + * a CAN error skb and post it. + * + * In some rare cases the devices might get stuck alternating between + * CAN_STATE_ERROR_PASSIVE and CAN_STATE_ERROR_WARNING. To prevent + * this behavior, we force a bus off state if the device goes into + * CAN_STATE_ERROR_PASSIVE for ES58X_CONSECUTIVE_ERR_PASSIVE_MAX consecutive + * times with no successful transmission or reception in between. + * + * Once the device is in bus off state, the only way to restart it is + * through the drivers/net/can/dev.c:can_restart() function. The + * device is technically capable of recovering by itself under certain + * circumstances, however, allowing self recovery would create + * complex race conditions with drivers/net/can/dev.c:can_restart() + * and thus was not implemented. To activate automatic restart, please + * set the restart-ms parameter (e.g. ip link set can0 type can + * restart-ms 100). + * + * If the bus is really unstable, this function will try to send a + * lot of log messages. Those are rate limited (i.e. you will see + * messages such as "net_ratelimit: XXX callbacks suppressed" in + * dmesg). + * + * Return: zero on success, errno when any error occurs.
+ */ +int es58x_rx_err_msg(struct net_device *netdev, enum es58x_err error, + enum es58x_event event, u64 timestamp) +{ + struct es58x_priv *priv = es58x_priv(netdev); + struct can_priv *can = netdev_priv(netdev); + struct can_device_stats *can_stats = &can->can_stats; + struct can_frame *cf = NULL; + struct sk_buff *skb; + int ret = 0; + + if (!netif_running(netdev)) { + if (net_ratelimit()) + netdev_info(netdev, "%s: %s is down, dropping packet\n", + __func__, netdev->name); + netdev->stats.rx_dropped++; + return 0; + } + + if (error == ES58X_ERR_OK && event == ES58X_EVENT_OK) { + netdev_err(netdev, "%s: Both error and event are zero\n", + __func__); + return -EINVAL; + } + + skb = alloc_can_err_skb(netdev, &cf); + + switch (error) { + case ES58X_ERR_OK: /* 0: No error */ + break; + + case ES58X_ERR_PROT_STUFF: + if (net_ratelimit()) + netdev_dbg(netdev, "Error BITSTUFF\n"); + if (cf) + cf->data[2] |= CAN_ERR_PROT_STUFF; + break; + + case ES58X_ERR_PROT_FORM: + if (net_ratelimit()) + netdev_dbg(netdev, "Error FORMAT\n"); + if (cf) + cf->data[2] |= CAN_ERR_PROT_FORM; + break; + + case ES58X_ERR_ACK: + if (net_ratelimit()) + netdev_dbg(netdev, "Error ACK\n"); + if (cf) + cf->can_id |= CAN_ERR_ACK; + break; + + case ES58X_ERR_PROT_BIT: + if (net_ratelimit()) + netdev_dbg(netdev, "Error BIT\n"); + if (cf) + cf->data[2] |= CAN_ERR_PROT_BIT; + break; + + case ES58X_ERR_PROT_CRC: + if (net_ratelimit()) + netdev_dbg(netdev, "Error CRC\n"); + if (cf) + cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; + break; + + case ES58X_ERR_PROT_BIT1: + if (net_ratelimit()) + netdev_dbg(netdev, + "Error: expected a recessive bit but monitored a dominant one\n"); + if (cf) + cf->data[2] |= CAN_ERR_PROT_BIT1; + break; + + case ES58X_ERR_PROT_BIT0: + if (net_ratelimit()) + netdev_dbg(netdev, + "Error expected a dominant bit but monitored a recessive one\n"); + if (cf) + cf->data[2] |= CAN_ERR_PROT_BIT0; + break; + + case ES58X_ERR_PROT_OVERLOAD: + if (net_ratelimit()) + netdev_dbg(netdev, "Error OVERLOAD\n"); + if (cf) + cf->data[2] |= CAN_ERR_PROT_OVERLOAD; + break; + + case ES58X_ERR_PROT_UNSPEC: + if (net_ratelimit()) + netdev_dbg(netdev, "Unspecified error\n"); + if (cf) + cf->can_id |= CAN_ERR_PROT; + break; + + default: + if (net_ratelimit()) + netdev_err(netdev, + "%s: Unspecified error code 0x%04X\n", + __func__, (int)error); + if (cf) + cf->can_id |= CAN_ERR_PROT; + break; + } + + switch (event) { + case ES58X_EVENT_OK: /* 0: No event */ + break; + + case ES58X_EVENT_CRTL_ACTIVE: + if (can->state == CAN_STATE_BUS_OFF) { + netdev_err(netdev, + "%s: state transition: BUS OFF -> ACTIVE\n", + __func__); + } + if (net_ratelimit()) + netdev_dbg(netdev, "Event CAN BUS ACTIVE\n"); + if (cf) + cf->data[1] |= CAN_ERR_CRTL_ACTIVE; + can->state = CAN_STATE_ERROR_ACTIVE; + break; + + case ES58X_EVENT_CRTL_PASSIVE: + if (net_ratelimit()) + netdev_dbg(netdev, "Event CAN BUS PASSIVE\n"); + /* Either TX or RX error count reached passive state + * but we do not know which. Setting both flags by + * default. + */ + if (cf) { + cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; + cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE; + } + if (can->state < CAN_STATE_BUS_OFF) + can->state = CAN_STATE_ERROR_PASSIVE; + can_stats->error_passive++; + if (priv->err_passive_before_rtx_success < U8_MAX) + priv->err_passive_before_rtx_success++; + break; + + case ES58X_EVENT_CRTL_WARNING: + if (net_ratelimit()) + netdev_dbg(netdev, "Event CAN BUS WARNING\n"); + /* Either TX or RX error count reached warning state + * but we do not know which. 
Setting both flags by + * default. + */ + if (cf) { + cf->data[1] |= CAN_ERR_CRTL_RX_WARNING; + cf->data[1] |= CAN_ERR_CRTL_TX_WARNING; + } + if (can->state < CAN_STATE_BUS_OFF) + can->state = CAN_STATE_ERROR_WARNING; + can_stats->error_warning++; + break; + + case ES58X_EVENT_BUSOFF: + if (net_ratelimit()) + netdev_dbg(netdev, "Event CAN BUS OFF\n"); + if (cf) + cf->can_id |= CAN_ERR_BUSOFF; + can_stats->bus_off++; + netif_stop_queue(netdev); + if (can->state != CAN_STATE_BUS_OFF) { + can->state = CAN_STATE_BUS_OFF; + can_bus_off(netdev); + ret = can->do_set_mode(netdev, CAN_MODE_STOP); + } + break; + + case ES58X_EVENT_SINGLE_WIRE: + if (net_ratelimit()) + netdev_warn(netdev, + "Lost connection on either CAN high or CAN low\n"); + /* Lost connection on either CAN high or CAN + * low. Setting both flags by default. + */ + if (cf) { + cf->data[4] |= CAN_ERR_TRX_CANH_NO_WIRE; + cf->data[4] |= CAN_ERR_TRX_CANL_NO_WIRE; + } + break; + + default: + if (net_ratelimit()) + netdev_err(netdev, + "%s: Unspecified event code 0x%04X\n", + __func__, (int)event); + if (cf) + cf->can_id |= CAN_ERR_CRTL; + break; + } + + if (cf) { + if (cf->data[1]) + cf->can_id |= CAN_ERR_CRTL; + if (cf->data[2] || cf->data[3]) { + cf->can_id |= CAN_ERR_PROT; + can_stats->bus_error++; + } + if (cf->data[4]) + cf->can_id |= CAN_ERR_TRX; + + es58x_set_skb_timestamp(netdev, skb, timestamp); + netif_rx(skb); + } + + if ((event & ES58X_EVENT_CRTL_PASSIVE) && + priv->err_passive_before_rtx_success == ES58X_CONSECUTIVE_ERR_PASSIVE_MAX) { + netdev_info(netdev, + "Got %d consecutive warning events with no successful RX or TX. Forcing bus-off\n", + priv->err_passive_before_rtx_success); + return es58x_rx_err_msg(netdev, ES58X_ERR_OK, + ES58X_EVENT_BUSOFF, timestamp); + } + + return ret; +} + +/** + * es58x_cmd_ret_desc() - Convert a command type to a string. + * @cmd_ret_type: Type of the command which triggered the return code. + * + * The final line (return "<unknown>") should not be reached. If this + * is the case, there is an implementation bug. + * + * Return: a readable description of the @cmd_ret_type. + */ +static const char *es58x_cmd_ret_desc(enum es58x_ret_type cmd_ret_type) +{ + switch (cmd_ret_type) { + case ES58X_RET_TYPE_SET_BITTIMING: + return "Set bittiming"; + case ES58X_RET_TYPE_ENABLE_CHANNEL: + return "Enable channel"; + case ES58X_RET_TYPE_DISABLE_CHANNEL: + return "Disable channel"; + case ES58X_RET_TYPE_TX_MSG: + return "Transmit message"; + case ES58X_RET_TYPE_RESET_RX: + return "Reset RX"; + case ES58X_RET_TYPE_RESET_TX: + return "Reset TX"; + case ES58X_RET_TYPE_DEVICE_ERR: + return "Device error"; + } + + return "<unknown>"; +}; + +/** + * es58x_rx_cmd_ret_u8() - Handle the command's return code received + * from the ES58X device. + * @dev: Device, only used for the dev_XXX() print functions. + * @cmd_ret_type: Type of the command which triggered the return code. + * @rx_cmd_ret_u8: Command error code as returned by the ES58X device. + * + * Handles the 8 bits command return code. Those are specific to the + * ES581.4 device. The return value will eventually be used by + * es58x_handle_urb_cmd() function which will take proper actions in + * case of critical issues such and memory errors or bad CRC values. + * + * In contrast with es58x_rx_cmd_ret_u32(), the network device is + * unknown. + * + * Return: zero on success, return errno when any error occurs. 
+ */ +int es58x_rx_cmd_ret_u8(struct device *dev, + enum es58x_ret_type cmd_ret_type, + enum es58x_ret_u8 rx_cmd_ret_u8) +{ + const char *ret_desc = es58x_cmd_ret_desc(cmd_ret_type); + + switch (rx_cmd_ret_u8) { + case ES58X_RET_U8_OK: + dev_dbg_ratelimited(dev, "%s: OK\n", ret_desc); + return 0; + + case ES58X_RET_U8_ERR_UNSPECIFIED_FAILURE: + dev_err(dev, "%s: unspecified failure\n", ret_desc); + return -EBADMSG; + + case ES58X_RET_U8_ERR_NO_MEM: + dev_err(dev, "%s: device ran out of memory\n", ret_desc); + return -ENOMEM; + + case ES58X_RET_U8_ERR_BAD_CRC: + dev_err(dev, "%s: CRC of previous command is incorrect\n", + ret_desc); + return -EIO; + + default: + dev_err(dev, "%s: returned unknown value: 0x%02X\n", + ret_desc, rx_cmd_ret_u8); + return -EBADMSG; + } +} + +/** + * es58x_rx_cmd_ret_u32() - Handle the command return code received + * from the ES58X device. + * @netdev: CAN network device. + * @cmd_ret_type: Type of the command which triggered the return code. + * @rx_cmd_ret_u32: error code as returned by the ES58X device. + * + * Handles the 32 bits command return code. The return value will + * eventually be used by es58x_handle_urb_cmd() function which will + * take proper actions in case of critical issues such and memory + * errors or bad CRC values. + * + * Return: zero on success, errno when any error occurs. + */ +int es58x_rx_cmd_ret_u32(struct net_device *netdev, + enum es58x_ret_type cmd_ret_type, + enum es58x_ret_u32 rx_cmd_ret_u32) +{ + struct es58x_priv *priv = es58x_priv(netdev); + const struct es58x_operators *ops = priv->es58x_dev->ops; + const char *ret_desc = es58x_cmd_ret_desc(cmd_ret_type); + + switch (rx_cmd_ret_u32) { + case ES58X_RET_U32_OK: + switch (cmd_ret_type) { + case ES58X_RET_TYPE_ENABLE_CHANNEL: + es58x_can_reset_echo_fifo(netdev); + priv->can.state = CAN_STATE_ERROR_ACTIVE; + netif_wake_queue(netdev); + netdev_info(netdev, + "%s: %s (Serial Number %s): CAN%d channel becomes ready\n", + ret_desc, priv->es58x_dev->udev->product, + priv->es58x_dev->udev->serial, + priv->channel_idx + 1); + break; + + case ES58X_RET_TYPE_TX_MSG: + if (IS_ENABLED(CONFIG_VERBOSE_DEBUG) && net_ratelimit()) + netdev_vdbg(netdev, "%s: OK\n", ret_desc); + break; + + default: + netdev_dbg(netdev, "%s: OK\n", ret_desc); + break; + } + return 0; + + case ES58X_RET_U32_ERR_UNSPECIFIED_FAILURE: + if (cmd_ret_type == ES58X_RET_TYPE_ENABLE_CHANNEL) { + int ret; + + netdev_warn(netdev, + "%s: channel is already opened, closing and re-opening it to reflect new configuration\n", + ret_desc); + ret = ops->disable_channel(es58x_priv(netdev)); + if (ret) + return ret; + return ops->enable_channel(es58x_priv(netdev)); + } + if (cmd_ret_type == ES58X_RET_TYPE_DISABLE_CHANNEL) { + netdev_info(netdev, + "%s: channel is already closed\n", ret_desc); + return 0; + } + netdev_err(netdev, + "%s: unspecified failure\n", ret_desc); + return -EBADMSG; + + case ES58X_RET_U32_ERR_NO_MEM: + netdev_err(netdev, "%s: device ran out of memory\n", ret_desc); + return -ENOMEM; + + case ES58X_RET_U32_WARN_PARAM_ADJUSTED: + netdev_warn(netdev, + "%s: some incompatible parameters have been adjusted\n", + ret_desc); + return 0; + + case ES58X_RET_U32_WARN_TX_MAYBE_REORDER: + netdev_warn(netdev, + "%s: TX messages might have been reordered\n", + ret_desc); + return 0; + + case ES58X_RET_U32_ERR_TIMEDOUT: + netdev_err(netdev, "%s: command timed out\n", ret_desc); + return -ETIMEDOUT; + + case ES58X_RET_U32_ERR_FIFO_FULL: + netdev_warn(netdev, "%s: fifo is full\n", ret_desc); + return 0; + + case 
ES58X_RET_U32_ERR_BAD_CONFIG: + netdev_err(netdev, "%s: bad configuration\n", ret_desc); + return -EINVAL; + + case ES58X_RET_U32_ERR_NO_RESOURCE: + netdev_err(netdev, "%s: no resource available\n", ret_desc); + return -EBUSY; + + default: + netdev_err(netdev, "%s returned unknown value: 0x%08X\n", + ret_desc, rx_cmd_ret_u32); + return -EBADMSG; + } +} + +/** + * es58x_increment_rx_errors() - Increment the network devices' error + * count. + * @es58x_dev: ES58X device. + * + * If an error occurs on the early stages on receiving an URB command, + * we might not be able to figure out on which network device the + * error occurred. In such case, we arbitrarily increment the error + * count of all the network devices attached to our ES58X device. + */ +static void es58x_increment_rx_errors(struct es58x_device *es58x_dev) +{ + int i; + + for (i = 0; i < es58x_dev->num_can_ch; i++) + if (es58x_dev->netdev[i]) + es58x_dev->netdev[i]->stats.rx_errors++; +} + +/** + * es58x_handle_urb_cmd() - Handle the URB command + * @es58x_dev: ES58X device. + * @urb_cmd: The URB command received from the ES58X device, might not + * be aligned. + * + * Sends the URB command to the device specific function. Manages the + * errors thrown back by those functions. + */ +static void es58x_handle_urb_cmd(struct es58x_device *es58x_dev, + const union es58x_urb_cmd *urb_cmd) +{ + const struct es58x_operators *ops = es58x_dev->ops; + size_t cmd_len; + int i, ret; + + ret = ops->handle_urb_cmd(es58x_dev, urb_cmd); + switch (ret) { + case 0: /* OK */ + return; + + case -ENODEV: + dev_err_ratelimited(es58x_dev->dev, "Device is not ready\n"); + break; + + case -EINVAL: + case -EMSGSIZE: + case -EBADRQC: + case -EBADMSG: + case -ECHRNG: + case -ETIMEDOUT: + cmd_len = es58x_get_urb_cmd_len(es58x_dev, + ops->get_msg_len(urb_cmd)); + dev_err(es58x_dev->dev, + "ops->handle_urb_cmd() returned error %pe", + ERR_PTR(ret)); + es58x_print_hex_dump(urb_cmd, cmd_len); + break; + + case -EFAULT: + case -ENOMEM: + case -EIO: + default: + dev_crit(es58x_dev->dev, + "ops->handle_urb_cmd() returned error %pe, detaching all network devices\n", + ERR_PTR(ret)); + for (i = 0; i < es58x_dev->num_can_ch; i++) + if (es58x_dev->netdev[i]) + netif_device_detach(es58x_dev->netdev[i]); + if (es58x_dev->ops->reset_device) + es58x_dev->ops->reset_device(es58x_dev); + break; + } + + /* Because the urb command could not fully be parsed, + * channel_id is not confirmed. Incrementing rx_errors count + * of all channels. + */ + es58x_increment_rx_errors(es58x_dev); +} + +/** + * es58x_check_rx_urb() - Check the length and format of the URB command. + * @es58x_dev: ES58X device. + * @urb_cmd: The URB command received from the ES58X device, might not + * be aligned. + * @urb_actual_len: The actual length of the URB command. + * + * Check if the first message of the received urb is valid, that is to + * say that both the header and the length are coherent. + * + * Return: + * the length of the first message of the URB on success. + * + * -ENODATA if the URB command is incomplete (in which case, the URB + * command should be buffered and combined with the next URB to try to + * reconstitute the URB command). + * + * -EOVERFLOW if the length is bigger than the maximum expected one. + * + * -EBADRQC if the start of frame does not match the expected value. 
+ */ +static signed int es58x_check_rx_urb(struct es58x_device *es58x_dev, + const union es58x_urb_cmd *urb_cmd, + u32 urb_actual_len) +{ + const struct device *dev = es58x_dev->dev; + const struct es58x_parameters *param = es58x_dev->param; + u16 sof, msg_len; + signed int urb_cmd_len, ret; + + if (urb_actual_len < param->urb_cmd_header_len) { + dev_vdbg(dev, + "%s: Received %d bytes [%*ph]: header incomplete\n", + __func__, urb_actual_len, urb_actual_len, + urb_cmd->raw_cmd); + return -ENODATA; + } + + sof = get_unaligned_le16(&urb_cmd->sof); + if (sof != param->rx_start_of_frame) { + dev_err_ratelimited(es58x_dev->dev, + "%s: Expected sequence 0x%04X for start of frame but got 0x%04X.\n", + __func__, param->rx_start_of_frame, sof); + return -EBADRQC; + } + + msg_len = es58x_dev->ops->get_msg_len(urb_cmd); + urb_cmd_len = es58x_get_urb_cmd_len(es58x_dev, msg_len); + if (urb_cmd_len > param->rx_urb_cmd_max_len) { + dev_err_ratelimited(es58x_dev->dev, + "%s: Biggest expected size for rx urb_cmd is %u but receive a command of size %d\n", + __func__, + param->rx_urb_cmd_max_len, urb_cmd_len); + return -EOVERFLOW; + } else if (urb_actual_len < urb_cmd_len) { + dev_vdbg(dev, "%s: Received %02d/%02d bytes\n", + __func__, urb_actual_len, urb_cmd_len); + return -ENODATA; + } + + ret = es58x_check_crc(es58x_dev, urb_cmd, urb_cmd_len); + if (ret) + return ret; + + return urb_cmd_len; +} + +/** + * es58x_copy_to_cmd_buf() - Copy an array to the URB command buffer. + * @es58x_dev: ES58X device. + * @raw_cmd: the buffer we want to copy. + * @raw_cmd_len: length of @raw_cmd. + * + * Concatenates @raw_cmd_len bytes of @raw_cmd to the end of the URB + * command buffer. + * + * Return: zero on success, -EMSGSIZE if not enough space is available + * to do the copy. + */ +static int es58x_copy_to_cmd_buf(struct es58x_device *es58x_dev, + u8 *raw_cmd, int raw_cmd_len) +{ + if (es58x_dev->rx_cmd_buf_len + raw_cmd_len > + es58x_dev->param->rx_urb_cmd_max_len) + return -EMSGSIZE; + + memcpy(&es58x_dev->rx_cmd_buf.raw_cmd[es58x_dev->rx_cmd_buf_len], + raw_cmd, raw_cmd_len); + es58x_dev->rx_cmd_buf_len += raw_cmd_len; + + return 0; +} + +/** + * es58x_split_urb_try_recovery() - Try to recover bad URB sequences. + * @es58x_dev: ES58X device. + * @raw_cmd: pointer to the buffer we want to copy. + * @raw_cmd_len: length of @raw_cmd. + * + * Under some rare conditions, we might get incorrect URBs from the + * device. From our observations, one of the valid URB gets replaced + * by one from the past. The full root cause is not identified. + * + * This function looks for the next start of frame in the urb buffer + * in order to try to recover. + * + * Such behavior was not observed on the devices of the ES58X FD + * family and only seems to impact the ES581.4. + * + * Return: the number of bytes dropped on success, -EBADMSG if recovery failed. + */ +static int es58x_split_urb_try_recovery(struct es58x_device *es58x_dev, + u8 *raw_cmd, size_t raw_cmd_len) +{ + union es58x_urb_cmd *urb_cmd; + signed int urb_cmd_len; + u16 sof; + int dropped_bytes = 0; + + es58x_increment_rx_errors(es58x_dev); + + while (raw_cmd_len > sizeof(sof)) { + urb_cmd = (union es58x_urb_cmd *)raw_cmd; + sof = get_unaligned_le16(&urb_cmd->sof); + + if (sof == es58x_dev->param->rx_start_of_frame) { + urb_cmd_len = es58x_check_rx_urb(es58x_dev, + urb_cmd, raw_cmd_len); + if ((urb_cmd_len == -ENODATA) || urb_cmd_len > 0) { + dev_info_ratelimited(es58x_dev->dev, + "Recovery successful! 
Dropped %d bytes (urb_cmd_len: %d)\n", + dropped_bytes, + urb_cmd_len); + return dropped_bytes; + } + } + raw_cmd++; + raw_cmd_len--; + dropped_bytes++; + } + + dev_warn_ratelimited(es58x_dev->dev, "%s: Recovery failed\n", __func__); + return -EBADMSG; +} + +/** + * es58x_handle_incomplete_cmd() - Reconstitute an URB command from + * different URB pieces. + * @es58x_dev: ES58X device. + * @urb: last urb buffer received. + * + * The device might split the URB commands in an arbitrary amount of + * pieces. This function concatenates those in an URB buffer until a + * full URB command is reconstituted and consume it. + * + * Return: + * number of bytes consumed from @urb if successful. + * + * -ENODATA if the URB command is still incomplete. + * + * -EBADMSG if the URB command is incorrect. + */ +static signed int es58x_handle_incomplete_cmd(struct es58x_device *es58x_dev, + struct urb *urb) +{ + size_t cpy_len; + signed int urb_cmd_len, tmp_cmd_buf_len, ret; + + tmp_cmd_buf_len = es58x_dev->rx_cmd_buf_len; + cpy_len = min_t(int, es58x_dev->param->rx_urb_cmd_max_len - + es58x_dev->rx_cmd_buf_len, urb->actual_length); + ret = es58x_copy_to_cmd_buf(es58x_dev, urb->transfer_buffer, cpy_len); + if (ret < 0) + return ret; + + urb_cmd_len = es58x_check_rx_urb(es58x_dev, &es58x_dev->rx_cmd_buf, + es58x_dev->rx_cmd_buf_len); + if (urb_cmd_len == -ENODATA) { + return -ENODATA; + } else if (urb_cmd_len < 0) { + dev_err_ratelimited(es58x_dev->dev, + "Could not reconstitute incomplete command from previous URB, dropping %d bytes\n", + tmp_cmd_buf_len + urb->actual_length); + dev_err_ratelimited(es58x_dev->dev, + "Error code: %pe, es58x_dev->rx_cmd_buf_len: %d, urb->actual_length: %u\n", + ERR_PTR(urb_cmd_len), + tmp_cmd_buf_len, urb->actual_length); + es58x_print_hex_dump(&es58x_dev->rx_cmd_buf, tmp_cmd_buf_len); + es58x_print_hex_dump(urb->transfer_buffer, urb->actual_length); + return urb->actual_length; + } + + es58x_handle_urb_cmd(es58x_dev, &es58x_dev->rx_cmd_buf); + return urb_cmd_len - tmp_cmd_buf_len; /* consumed length */ +} + +/** + * es58x_split_urb() - Cut the received URB in individual URB commands. + * @es58x_dev: ES58X device. + * @urb: last urb buffer received. + * + * The device might send urb in bulk format (i.e. several URB commands + * concatenated together). This function will split all the commands + * contained in the urb. + * + * Return: + * number of bytes consumed from @urb if successful. + * + * -ENODATA if the URB command is incomplete. + * + * -EBADMSG if the URB command is incorrect. 
+ */ +static signed int es58x_split_urb(struct es58x_device *es58x_dev, + struct urb *urb) +{ + union es58x_urb_cmd *urb_cmd; + u8 *raw_cmd = urb->transfer_buffer; + s32 raw_cmd_len = urb->actual_length; + int ret; + + if (es58x_dev->rx_cmd_buf_len != 0) { + ret = es58x_handle_incomplete_cmd(es58x_dev, urb); + if (ret != -ENODATA) + es58x_dev->rx_cmd_buf_len = 0; + if (ret < 0) + return ret; + + raw_cmd += ret; + raw_cmd_len -= ret; + } + + while (raw_cmd_len > 0) { + if (raw_cmd[0] == ES58X_HEARTBEAT) { + raw_cmd++; + raw_cmd_len--; + continue; + } + urb_cmd = (union es58x_urb_cmd *)raw_cmd; + ret = es58x_check_rx_urb(es58x_dev, urb_cmd, raw_cmd_len); + if (ret > 0) { + es58x_handle_urb_cmd(es58x_dev, urb_cmd); + } else if (ret == -ENODATA) { + es58x_copy_to_cmd_buf(es58x_dev, raw_cmd, raw_cmd_len); + return -ENODATA; + } else if (ret < 0) { + ret = es58x_split_urb_try_recovery(es58x_dev, raw_cmd, + raw_cmd_len); + if (ret < 0) + return ret; + } + raw_cmd += ret; + raw_cmd_len -= ret; + } + + return 0; +} + +/** + * es58x_read_bulk_callback() - Callback for reading data from device. + * @urb: last urb buffer received. + * + * This function gets eventually called each time an URB is received + * from the ES58X device. + * + * Checks urb status, calls read function and resubmits urb read + * operation. + */ +static void es58x_read_bulk_callback(struct urb *urb) +{ + struct es58x_device *es58x_dev = urb->context; + const struct device *dev = es58x_dev->dev; + int i, ret; + + switch (urb->status) { + case 0: /* success */ + break; + + case -EOVERFLOW: + dev_err_ratelimited(dev, "%s: error %pe\n", + __func__, ERR_PTR(urb->status)); + es58x_print_hex_dump_debug(urb->transfer_buffer, + urb->transfer_buffer_length); + goto resubmit_urb; + + case -EPROTO: + dev_warn_ratelimited(dev, "%s: error %pe. Device unplugged?\n", + __func__, ERR_PTR(urb->status)); + goto free_urb; + + case -ENOENT: + case -EPIPE: + dev_err_ratelimited(dev, "%s: error %pe\n", + __func__, ERR_PTR(urb->status)); + goto free_urb; + + case -ESHUTDOWN: + dev_dbg_ratelimited(dev, "%s: error %pe\n", + __func__, ERR_PTR(urb->status)); + goto free_urb; + + default: + dev_err_ratelimited(dev, "%s: error %pe\n", + __func__, ERR_PTR(urb->status)); + goto resubmit_urb; + } + + ret = es58x_split_urb(es58x_dev, urb); + if ((ret != -ENODATA) && ret < 0) { + dev_err(es58x_dev->dev, "es58x_split_urb() returned error %pe", + ERR_PTR(ret)); + es58x_print_hex_dump_debug(urb->transfer_buffer, + urb->actual_length); + + /* Because the urb command could not be parsed, + * channel_id is not confirmed. Incrementing rx_errors + * count of all channels. + */ + es58x_increment_rx_errors(es58x_dev); + } + + resubmit_urb: + ret = usb_submit_urb(urb, GFP_ATOMIC); + if (ret == -ENODEV) { + for (i = 0; i < es58x_dev->num_can_ch; i++) + if (es58x_dev->netdev[i]) + netif_device_detach(es58x_dev->netdev[i]); + } else if (ret) + dev_err_ratelimited(dev, + "Failed resubmitting read bulk urb: %pe\n", + ERR_PTR(ret)); + return; + + free_urb: + usb_free_coherent(urb->dev, urb->transfer_buffer_length, + urb->transfer_buffer, urb->transfer_dma); +} + +/** + * es58x_write_bulk_callback() - Callback after writing data to the device. + * @urb: urb buffer which was previously submitted. + * + * This function gets eventually called each time an URB was sent to + * the ES58X device. + * + * Puts the @urb back to the urbs idle anchor and tries to restart the + * network queue. 
+ */ +static void es58x_write_bulk_callback(struct urb *urb) +{ + struct net_device *netdev = urb->context; + struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev; + + switch (urb->status) { + case 0: /* success */ + break; + + case -EOVERFLOW: + if (net_ratelimit()) + netdev_err(netdev, "%s: error %pe\n", + __func__, ERR_PTR(urb->status)); + es58x_print_hex_dump(urb->transfer_buffer, + urb->transfer_buffer_length); + break; + + case -ENOENT: + if (net_ratelimit()) + netdev_dbg(netdev, "%s: error %pe\n", + __func__, ERR_PTR(urb->status)); + usb_free_coherent(urb->dev, + es58x_dev->param->tx_urb_cmd_max_len, + urb->transfer_buffer, urb->transfer_dma); + return; + + default: + if (net_ratelimit()) + netdev_info(netdev, "%s: error %pe\n", + __func__, ERR_PTR(urb->status)); + break; + } + + usb_anchor_urb(urb, &es58x_dev->tx_urbs_idle); + atomic_inc(&es58x_dev->tx_urbs_idle_cnt); +} + +/** + * es58x_alloc_urb() - Allocate memory for an URB and its transfer + * buffer. + * @es58x_dev: ES58X device. + * @urb: URB to be allocated. + * @buf: used to return DMA address of buffer. + * @buf_len: requested buffer size. + * @mem_flags: affect whether allocation may block. + * + * Allocates an URB and its @transfer_buffer and set its @transfer_dma + * address. + * + * This function is used at start-up to allocate all RX URBs at once + * and during run time for TX URBs. + * + * Return: zero on success, -ENOMEM if no memory is available. + */ +static int es58x_alloc_urb(struct es58x_device *es58x_dev, struct urb **urb, + u8 **buf, size_t buf_len, gfp_t mem_flags) +{ + *urb = usb_alloc_urb(0, mem_flags); + if (!*urb) { + dev_err(es58x_dev->dev, "No memory left for URBs\n"); + return -ENOMEM; + } + + *buf = usb_alloc_coherent(es58x_dev->udev, buf_len, + mem_flags, &(*urb)->transfer_dma); + if (!*buf) { + dev_err(es58x_dev->dev, "No memory left for USB buffer\n"); + usb_free_urb(*urb); + return -ENOMEM; + } + + (*urb)->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + + return 0; +} + +/** + * es58x_get_tx_urb() - Get an URB for transmission. + * @es58x_dev: ES58X device. + * + * Gets an URB from the idle urbs anchor or allocate a new one if the + * anchor is empty. + * + * If there are more than ES58X_TX_URBS_MAX in the idle anchor, do + * some garbage collection. The garbage collection is done here + * instead of within es58x_write_bulk_callback() because + * usb_free_coherent() should not be used in IRQ context: + * c.f. WARN_ON(irqs_disabled()) in dma_free_attrs(). + * + * Return: a pointer to an URB on success, NULL if no memory is + * available. + */ +static struct urb *es58x_get_tx_urb(struct es58x_device *es58x_dev) +{ + atomic_t *idle_cnt = &es58x_dev->tx_urbs_idle_cnt; + struct urb *urb = usb_get_from_anchor(&es58x_dev->tx_urbs_idle); + + if (!urb) { + size_t tx_buf_len; + u8 *buf; + + tx_buf_len = es58x_dev->param->tx_urb_cmd_max_len; + if (es58x_alloc_urb(es58x_dev, &urb, &buf, tx_buf_len, + GFP_ATOMIC)) + return NULL; + + usb_fill_bulk_urb(urb, es58x_dev->udev, es58x_dev->tx_pipe, + buf, tx_buf_len, es58x_write_bulk_callback, + NULL); + return urb; + } + + while (atomic_dec_return(idle_cnt) > ES58X_TX_URBS_MAX) { + /* Garbage collector */ + struct urb *tmp = usb_get_from_anchor(&es58x_dev->tx_urbs_idle); + + if (!tmp) + break; + usb_free_coherent(tmp->dev, + es58x_dev->param->tx_urb_cmd_max_len, + tmp->transfer_buffer, tmp->transfer_dma); + usb_free_urb(tmp); + } + + return urb; +} + +/** + * es58x_submit_urb() - Send data to the device. + * @es58x_dev: ES58X device. 
+ * @urb: URB to be sent. + * @netdev: CAN network device. + * + * Return: zero on success, errno when any error occurs. + */ +static int es58x_submit_urb(struct es58x_device *es58x_dev, struct urb *urb, + struct net_device *netdev) +{ + int ret; + + es58x_set_crc(urb->transfer_buffer, urb->transfer_buffer_length); + urb->context = netdev; + usb_anchor_urb(urb, &es58x_dev->tx_urbs_busy); + ret = usb_submit_urb(urb, GFP_ATOMIC); + if (ret) { + netdev_err(netdev, "%s: USB send urb failure: %pe\n", + __func__, ERR_PTR(ret)); + usb_unanchor_urb(urb); + usb_free_coherent(urb->dev, + es58x_dev->param->tx_urb_cmd_max_len, + urb->transfer_buffer, urb->transfer_dma); + } + usb_free_urb(urb); + + return ret; +} + +/** + * es58x_send_msg() - Prepare an URB and submit it. + * @es58x_dev: ES58X device. + * @cmd_type: Command type. + * @cmd_id: Command ID. + * @msg: ES58X message to be sent. + * @msg_len: Length of @msg. + * @channel_idx: Index of the network device. + * + * Creates an URB command from a given message, sets the header and the + * CRC and then submits it. + * + * Return: zero on success, errno when any error occurs. + */ +int es58x_send_msg(struct es58x_device *es58x_dev, u8 cmd_type, u8 cmd_id, + const void *msg, u16 msg_len, int channel_idx) +{ + struct net_device *netdev; + union es58x_urb_cmd *urb_cmd; + struct urb *urb; + int urb_cmd_len; + + if (channel_idx == ES58X_CHANNEL_IDX_NA) + netdev = es58x_dev->netdev[0]; /* Default to first channel */ + else + netdev = es58x_dev->netdev[channel_idx]; + + urb_cmd_len = es58x_get_urb_cmd_len(es58x_dev, msg_len); + if (urb_cmd_len > es58x_dev->param->tx_urb_cmd_max_len) + return -EOVERFLOW; + + urb = es58x_get_tx_urb(es58x_dev); + if (!urb) + return -ENOMEM; + + urb_cmd = urb->transfer_buffer; + es58x_dev->ops->fill_urb_header(urb_cmd, cmd_type, cmd_id, + channel_idx, msg_len); + memcpy(&urb_cmd->raw_cmd[es58x_dev->param->urb_cmd_header_len], + msg, msg_len); + urb->transfer_buffer_length = urb_cmd_len; + + return es58x_submit_urb(es58x_dev, urb, netdev); +} + +/** + * es58x_alloc_rx_urbs() - Allocate RX URBs. + * @es58x_dev: ES58X device. + * + * Allocate URBs for reception and anchor them. + * + * Return: zero on success, errno when any error occurs. + */ +static int es58x_alloc_rx_urbs(struct es58x_device *es58x_dev) +{ + const struct device *dev = es58x_dev->dev; + const struct es58x_parameters *param = es58x_dev->param; + u16 rx_buf_len = usb_maxpacket(es58x_dev->udev, es58x_dev->rx_pipe); + struct urb *urb; + u8 *buf; + int i; + int ret = -EINVAL; + + for (i = 0; i < param->rx_urb_max; i++) { + ret = es58x_alloc_urb(es58x_dev, &urb, &buf, rx_buf_len, + GFP_KERNEL); + if (ret) + break; + + usb_fill_bulk_urb(urb, es58x_dev->udev, es58x_dev->rx_pipe, + buf, rx_buf_len, es58x_read_bulk_callback, + es58x_dev); + usb_anchor_urb(urb, &es58x_dev->rx_urbs); + + ret = usb_submit_urb(urb, GFP_KERNEL); + if (ret) { + usb_unanchor_urb(urb); + usb_free_coherent(es58x_dev->udev, rx_buf_len, + buf, urb->transfer_dma); + usb_free_urb(urb); + break; + } + usb_free_urb(urb); + } + + if (i == 0) { + dev_err(dev, "%s: Could not setup any rx URBs\n", __func__); + return ret; + } + dev_dbg(dev, "%s: Allocated %d rx URBs each of size %u\n", + __func__, i, rx_buf_len); + + return ret; +} + +/** + * es58x_free_urbs() - Free all the TX and RX URBs. + * @es58x_dev: ES58X device. 
+ */ +static void es58x_free_urbs(struct es58x_device *es58x_dev) +{ + struct urb *urb; + + if (!usb_wait_anchor_empty_timeout(&es58x_dev->tx_urbs_busy, 1000)) { + dev_err(es58x_dev->dev, "%s: Timeout, some TX urbs still remain\n", + __func__); + usb_kill_anchored_urbs(&es58x_dev->tx_urbs_busy); + } + + while ((urb = usb_get_from_anchor(&es58x_dev->tx_urbs_idle)) != NULL) { + usb_free_coherent(urb->dev, es58x_dev->param->tx_urb_cmd_max_len, + urb->transfer_buffer, urb->transfer_dma); + usb_free_urb(urb); + atomic_dec(&es58x_dev->tx_urbs_idle_cnt); + } + if (atomic_read(&es58x_dev->tx_urbs_idle_cnt)) + dev_err(es58x_dev->dev, + "All idle urbs were freed but tx_urb_idle_cnt is %d\n", + atomic_read(&es58x_dev->tx_urbs_idle_cnt)); + + usb_kill_anchored_urbs(&es58x_dev->rx_urbs); +} + +/** + * es58x_open() - Enable the network device. + * @netdev: CAN network device. + * + * Called when the network transitions to the up state. Allocate the + * URB resources if needed and open the channel. + * + * Return: zero on success, errno when any error occurs. + */ +static int es58x_open(struct net_device *netdev) +{ + struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev; + int ret; + + if (!es58x_dev->opened_channel_cnt) { + ret = es58x_alloc_rx_urbs(es58x_dev); + if (ret) + return ret; + + ret = es58x_set_realtime_diff_ns(es58x_dev); + if (ret) + goto free_urbs; + } + + ret = open_candev(netdev); + if (ret) + goto free_urbs; + + ret = es58x_dev->ops->enable_channel(es58x_priv(netdev)); + if (ret) + goto free_urbs; + + es58x_dev->opened_channel_cnt++; + netif_start_queue(netdev); + + return ret; + + free_urbs: + if (!es58x_dev->opened_channel_cnt) + es58x_free_urbs(es58x_dev); + netdev_err(netdev, "%s: Could not open the network device: %pe\n", + __func__, ERR_PTR(ret)); + + return ret; +} + +/** + * es58x_stop() - Disable the network device. + * @netdev: CAN network device. + * + * Called when the network transitions to the down state. If all the + * channels of the device are closed, free the URB resources which are + * not needed anymore. + * + * Return: zero on success, errno when any error occurs. + */ +static int es58x_stop(struct net_device *netdev) +{ + struct es58x_priv *priv = es58x_priv(netdev); + struct es58x_device *es58x_dev = priv->es58x_dev; + int ret; + + netif_stop_queue(netdev); + ret = es58x_dev->ops->disable_channel(priv); + if (ret) + return ret; + + priv->can.state = CAN_STATE_STOPPED; + es58x_can_reset_echo_fifo(netdev); + close_candev(netdev); + + es58x_flush_pending_tx_msg(netdev); + + es58x_dev->opened_channel_cnt--; + if (!es58x_dev->opened_channel_cnt) + es58x_free_urbs(es58x_dev); + + return 0; +} + +/** + * es58x_xmit_commit() - Send the bulk urb. + * @netdev: CAN network device. + * + * Do the bulk send. This function should be called only once by bulk + * transmission. + * + * Return: zero on success, errno when any error occurs. + */ +static int es58x_xmit_commit(struct net_device *netdev) +{ + struct es58x_priv *priv = es58x_priv(netdev); + int ret; + + if (!es58x_is_can_state_active(netdev)) + return -ENETDOWN; + + if (es58x_is_echo_skb_threshold_reached(priv)) + netif_stop_queue(netdev); + + ret = es58x_submit_urb(priv->es58x_dev, priv->tx_urb, netdev); + if (ret == 0) + priv->tx_urb = NULL; + + return ret; +} + +/** + * es58x_xmit_more() - Can we put more packets? + * @priv: ES58X private parameters related to the network device. + * + * Return: true if we can put more, false if it is time to send. 
+ */ +static bool es58x_xmit_more(struct es58x_priv *priv) +{ + unsigned int free_slots = + priv->can.echo_skb_max - (priv->tx_head - priv->tx_tail); + + return netdev_xmit_more() && free_slots > 0 && + priv->tx_can_msg_cnt < priv->es58x_dev->param->tx_bulk_max; +} + +/** + * es58x_start_xmit() - Transmit an skb. + * @skb: socket buffer of a CAN message. + * @netdev: CAN network device. + * + * Called when a packet needs to be transmitted. + * + * This function relies on Byte Queue Limits (BQL). The main benefit + * is to increase the throughput by allowing bulk transfers + * (c.f. xmit_more flag). + * + * Queues up to tx_bulk_max messages in &tx_urb buffer and does + * a bulk send of all messages in one single URB. + * + * Return: NETDEV_TX_OK regardless of if we could transmit the @skb or + * had to drop it. + */ +static netdev_tx_t es58x_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct es58x_priv *priv = es58x_priv(netdev); + struct es58x_device *es58x_dev = priv->es58x_dev; + unsigned int frame_len; + int ret; + + if (can_dev_dropped_skb(netdev, skb)) { + if (priv->tx_urb) + goto xmit_commit; + return NETDEV_TX_OK; + } + + if (priv->tx_urb && priv->tx_can_msg_is_fd != can_is_canfd_skb(skb)) { + /* Can not do bulk send with mixed CAN and CAN FD frames. */ + ret = es58x_xmit_commit(netdev); + if (ret) + goto drop_skb; + } + + if (!priv->tx_urb) { + priv->tx_urb = es58x_get_tx_urb(es58x_dev); + if (!priv->tx_urb) { + ret = -ENOMEM; + goto drop_skb; + } + priv->tx_can_msg_cnt = 0; + priv->tx_can_msg_is_fd = can_is_canfd_skb(skb); + } + + ret = es58x_dev->ops->tx_can_msg(priv, skb); + if (ret) + goto drop_skb; + + frame_len = can_skb_get_frame_len(skb); + ret = can_put_echo_skb(skb, netdev, + priv->tx_head & es58x_dev->param->fifo_mask, + frame_len); + if (ret) + goto xmit_failure; + netdev_sent_queue(netdev, frame_len); + + priv->tx_head++; + priv->tx_can_msg_cnt++; + + xmit_commit: + if (!es58x_xmit_more(priv)) { + ret = es58x_xmit_commit(netdev); + if (ret) + goto xmit_failure; + } + + return NETDEV_TX_OK; + + drop_skb: + dev_kfree_skb(skb); + netdev->stats.tx_dropped++; + xmit_failure: + netdev_warn(netdev, "%s: send message failure: %pe\n", + __func__, ERR_PTR(ret)); + netdev->stats.tx_errors++; + es58x_flush_pending_tx_msg(netdev); + return NETDEV_TX_OK; +} + +static const struct net_device_ops es58x_netdev_ops = { + .ndo_open = es58x_open, + .ndo_stop = es58x_stop, + .ndo_start_xmit = es58x_start_xmit, + .ndo_hwtstamp_get = can_hwtstamp_get, + .ndo_hwtstamp_set = can_hwtstamp_set, +}; + +static const struct ethtool_ops es58x_ethtool_ops = { + .get_ts_info = can_ethtool_op_get_ts_info_hwts, +}; + +/** + * es58x_set_mode() - Change network device mode. + * @netdev: CAN network device. + * @mode: either %CAN_MODE_START, %CAN_MODE_STOP or %CAN_MODE_SLEEP + * + * Currently, this function is only used to stop and restart the + * channel during a bus off event (c.f. es58x_rx_err_msg() and + * drivers/net/can/dev.c:can_restart() which are the two only + * callers). + * + * Return: zero on success, errno when any error occurs. 
+ */ +static int es58x_set_mode(struct net_device *netdev, enum can_mode mode) +{ + struct es58x_priv *priv = es58x_priv(netdev); + + switch (mode) { + case CAN_MODE_START: + switch (priv->can.state) { + case CAN_STATE_BUS_OFF: + return priv->es58x_dev->ops->enable_channel(priv); + + case CAN_STATE_STOPPED: + return es58x_open(netdev); + + case CAN_STATE_ERROR_ACTIVE: + case CAN_STATE_ERROR_WARNING: + case CAN_STATE_ERROR_PASSIVE: + default: + return 0; + } + + case CAN_MODE_STOP: + switch (priv->can.state) { + case CAN_STATE_STOPPED: + return 0; + + case CAN_STATE_ERROR_ACTIVE: + case CAN_STATE_ERROR_WARNING: + case CAN_STATE_ERROR_PASSIVE: + case CAN_STATE_BUS_OFF: + default: + return priv->es58x_dev->ops->disable_channel(priv); + } + + case CAN_MODE_SLEEP: + default: + return -EOPNOTSUPP; + } +} + +/** + * es58x_init_priv() - Initialize private parameters. + * @es58x_dev: ES58X device. + * @priv: ES58X private parameters related to the network device. + * @channel_idx: Index of the network device. + * + * Return: zero on success, errno if devlink port could not be + * properly registered. + */ +static int es58x_init_priv(struct es58x_device *es58x_dev, + struct es58x_priv *priv, int channel_idx) +{ + struct devlink_port_attrs attrs = { + .flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL, + }; + const struct es58x_parameters *param = es58x_dev->param; + struct can_priv *can = &priv->can; + + priv->es58x_dev = es58x_dev; + priv->channel_idx = channel_idx; + priv->tx_urb = NULL; + priv->tx_can_msg_cnt = 0; + + can->bittiming_const = param->bittiming_const; + if (param->ctrlmode_supported & CAN_CTRLMODE_FD) { + can->fd.data_bittiming_const = param->data_bittiming_const; + can->fd.tdc_const = param->tdc_const; + } + can->bitrate_max = param->bitrate_max; + can->clock = param->clock; + can->state = CAN_STATE_STOPPED; + can->ctrlmode_supported = param->ctrlmode_supported; + can->do_set_mode = es58x_set_mode; + + devlink_port_attrs_set(&priv->devlink_port, &attrs); + return devlink_port_register(priv_to_devlink(es58x_dev), + &priv->devlink_port, channel_idx); +} + +/** + * es58x_init_netdev() - Initialize the network device. + * @es58x_dev: ES58X device. + * @channel_idx: Index of the network device. + * + * Return: zero on success, errno when any error occurs. + */ +static int es58x_init_netdev(struct es58x_device *es58x_dev, int channel_idx) +{ + struct net_device *netdev; + struct device *dev = es58x_dev->dev; + int ret; + + netdev = alloc_candev(sizeof(struct es58x_priv), + es58x_dev->param->fifo_mask + 1); + if (!netdev) { + dev_err(dev, "Could not allocate candev\n"); + return -ENOMEM; + } + SET_NETDEV_DEV(netdev, dev); + es58x_dev->netdev[channel_idx] = netdev; + ret = es58x_init_priv(es58x_dev, es58x_priv(netdev), channel_idx); + if (ret) + goto free_candev; + SET_NETDEV_DEVLINK_PORT(netdev, &es58x_priv(netdev)->devlink_port); + + netdev->netdev_ops = &es58x_netdev_ops; + netdev->ethtool_ops = &es58x_ethtool_ops; + netdev->flags |= IFF_ECHO; /* We support local echo */ + netdev->dev_port = channel_idx; + + ret = register_candev(netdev); + if (ret) + goto devlink_port_unregister; + + netdev_queue_set_dql_min_limit(netdev_get_tx_queue(netdev, 0), + es58x_dev->param->dql_min_limit); + + return ret; + + devlink_port_unregister: + devlink_port_unregister(&es58x_priv(netdev)->devlink_port); + free_candev: + es58x_dev->netdev[channel_idx] = NULL; + free_candev(netdev); + return ret; +} + +/** + * es58x_free_netdevs() - Release all network resources of the device. + * @es58x_dev: ES58X device. 
+ */ +static void es58x_free_netdevs(struct es58x_device *es58x_dev) +{ + int i; + + for (i = 0; i < es58x_dev->num_can_ch; i++) { + struct net_device *netdev = es58x_dev->netdev[i]; + + if (!netdev) + continue; + unregister_candev(netdev); + devlink_port_unregister(&es58x_priv(netdev)->devlink_port); + es58x_dev->netdev[i] = NULL; + free_candev(netdev); + } +} + +/** + * es58x_init_es58x_dev() - Initialize the ES58X device. + * @intf: USB interface. + * @driver_info: Quirks of the device. + * + * Return: pointer to an ES58X device on success, error pointer when + * any error occurs. + */ +static struct es58x_device *es58x_init_es58x_dev(struct usb_interface *intf, + kernel_ulong_t driver_info) +{ + struct device *dev = &intf->dev; + struct es58x_device *es58x_dev; + struct devlink *devlink; + const struct es58x_parameters *param; + const struct es58x_operators *ops; + struct usb_device *udev = interface_to_usbdev(intf); + struct usb_endpoint_descriptor *ep_in, *ep_out; + int ret; + + dev_info(dev, "Starting %s %s (Serial Number %s)\n", + udev->manufacturer, udev->product, udev->serial); + + ret = usb_find_common_endpoints(intf->cur_altsetting, &ep_in, &ep_out, + NULL, NULL); + if (ret) + return ERR_PTR(ret); + + if (driver_info & ES58X_FD_FAMILY) { + param = &es58x_fd_param; + ops = &es58x_fd_ops; + } else { + param = &es581_4_param; + ops = &es581_4_ops; + } + + devlink = devlink_alloc(&es58x_dl_ops, es58x_sizeof_es58x_device(param), + dev); + if (!devlink) + return ERR_PTR(-ENOMEM); + + es58x_dev = devlink_priv(devlink); + es58x_dev->param = param; + es58x_dev->ops = ops; + es58x_dev->dev = dev; + es58x_dev->udev = udev; + + if (driver_info & ES58X_DUAL_CHANNEL) + es58x_dev->num_can_ch = 2; + else + es58x_dev->num_can_ch = 1; + + init_usb_anchor(&es58x_dev->rx_urbs); + init_usb_anchor(&es58x_dev->tx_urbs_idle); + init_usb_anchor(&es58x_dev->tx_urbs_busy); + atomic_set(&es58x_dev->tx_urbs_idle_cnt, 0); + usb_set_intfdata(intf, es58x_dev); + + es58x_dev->rx_pipe = usb_rcvbulkpipe(es58x_dev->udev, + ep_in->bEndpointAddress); + es58x_dev->tx_pipe = usb_sndbulkpipe(es58x_dev->udev, + ep_out->bEndpointAddress); + + return es58x_dev; +} + +/** + * es58x_probe() - Initialize the USB device. + * @intf: USB interface. + * @id: USB device ID. + * + * Return: zero on success, -ENODEV if the interface is not supported + * or errno when any other error occurs. + */ +static int es58x_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct es58x_device *es58x_dev; + int ch_idx; + + es58x_dev = es58x_init_es58x_dev(intf, id->driver_info); + if (IS_ERR(es58x_dev)) + return PTR_ERR(es58x_dev); + + es58x_parse_product_info(es58x_dev); + devlink_register(priv_to_devlink(es58x_dev)); + + for (ch_idx = 0; ch_idx < es58x_dev->num_can_ch; ch_idx++) { + int ret = es58x_init_netdev(es58x_dev, ch_idx); + + if (ret) { + es58x_free_netdevs(es58x_dev); + return ret; + } + } + + return 0; +} + +/** + * es58x_disconnect() - Disconnect the USB device. + * @intf: USB interface + * + * Called by the usb core when driver is unloaded or device is + * removed. 
+ */ +static void es58x_disconnect(struct usb_interface *intf) +{ + struct es58x_device *es58x_dev = usb_get_intfdata(intf); + + dev_info(&intf->dev, "Disconnecting %s %s\n", + es58x_dev->udev->manufacturer, es58x_dev->udev->product); + + devlink_unregister(priv_to_devlink(es58x_dev)); + es58x_free_netdevs(es58x_dev); + es58x_free_urbs(es58x_dev); + devlink_free(priv_to_devlink(es58x_dev)); + usb_set_intfdata(intf, NULL); +} + +static struct usb_driver es58x_driver = { + .name = KBUILD_MODNAME, + .probe = es58x_probe, + .disconnect = es58x_disconnect, + .id_table = es58x_id_table +}; + +module_usb_driver(es58x_driver); diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.h b/drivers/net/can/usb/etas_es58x/es58x_core.h new file mode 100644 index 000000000000..2e183bdeedd7 --- /dev/null +++ b/drivers/net/can/usb/etas_es58x/es58x_core.h @@ -0,0 +1,750 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* Driver for ETAS GmbH ES58X USB CAN(-FD) Bus Interfaces. + * + * File es58x_core.h: All common definitions and declarations. + * + * Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved. + * Copyright (c) 2020 ETAS K.K.. All rights reserved. + * Copyright (c) 2020-2022 Vincent Mailhol <mailhol.vincent@wanadoo.fr> + */ + +#ifndef __ES58X_COMMON_H__ +#define __ES58X_COMMON_H__ + +#include <linux/can.h> +#include <linux/can/dev.h> +#include <linux/netdevice.h> +#include <linux/types.h> +#include <linux/usb.h> +#include <net/devlink.h> + +#include "es581_4.h" +#include "es58x_fd.h" + +/* Driver constants */ +#define ES58X_RX_URBS_MAX 5 /* Empirical value */ +#define ES58X_TX_URBS_MAX 6 /* Empirical value */ + +#define ES58X_MAX(param) \ + (ES581_4_##param > ES58X_FD_##param ? \ + ES581_4_##param : ES58X_FD_##param) +#define ES58X_TX_BULK_MAX ES58X_MAX(TX_BULK_MAX) +#define ES58X_RX_BULK_MAX ES58X_MAX(RX_BULK_MAX) +#define ES58X_ECHO_BULK_MAX ES58X_MAX(ECHO_BULK_MAX) +#define ES58X_NUM_CAN_CH_MAX ES58X_MAX(NUM_CAN_CH) + +/* Use this when channel index is irrelevant (e.g. device + * timestamp). + */ +#define ES58X_CHANNEL_IDX_NA 0xFF +#define ES58X_EMPTY_MSG NULL + +/* Threshold on consecutive CAN_STATE_ERROR_PASSIVE. If we receive + * ES58X_CONSECUTIVE_ERR_PASSIVE_MAX times the event + * ES58X_ERR_CRTL_PASSIVE in a row without any successful RX or TX, + * we force the device to switch to CAN_STATE_BUS_OFF state. + */ +#define ES58X_CONSECUTIVE_ERR_PASSIVE_MAX 254 + +/* A magic number sent by the ES581.4 to inform it is alive. */ +#define ES58X_HEARTBEAT 0x11 + +/** + * enum es58x_driver_info - Quirks of the device. + * @ES58X_DUAL_CHANNEL: Device has two CAN channels. If this flag is + * not set, it is implied that the device has only one CAN + * channel. + * @ES58X_FD_FAMILY: Device is CAN-FD capable. If this flag is not + * set, the device only supports classical CAN. + */ +enum es58x_driver_info { + ES58X_DUAL_CHANNEL = BIT(0), + ES58X_FD_FAMILY = BIT(1) +}; + +enum es58x_echo { + ES58X_ECHO_OFF = 0, + ES58X_ECHO_ON = 1 +}; + +/** + * enum es58x_physical_layer - Type of the physical layer. + * @ES58X_PHYSICAL_LAYER_HIGH_SPEED: High-speed CAN (c.f. ISO + * 11898-2). + * + * Some products of the ETAS portfolio also support low-speed CAN + * (c.f. ISO 11898-3). However, all the devices in scope of this + * driver do not support the option, thus, the enum has only one + * member. 
+ */ +enum es58x_physical_layer { + ES58X_PHYSICAL_LAYER_HIGH_SPEED = 1 +}; + +enum es58x_samples_per_bit { + ES58X_SAMPLES_PER_BIT_ONE = 1, + ES58X_SAMPLES_PER_BIT_THREE = 2 +}; + +/** + * enum es58x_sync_edge - Synchronization method. + * @ES58X_SYNC_EDGE_SINGLE: ISO CAN specification defines the use of a + * single edge synchronization. The synchronization should be + * done on recessive to dominant level change. + * + * For information, ES582.1 and ES584.1 also support a double + * synchronization, requiring both recessive to dominant then dominant + * to recessive level change. However, this is not supported in + * the SocketCAN framework, thus, the enum has only one member. + */ +enum es58x_sync_edge { + ES58X_SYNC_EDGE_SINGLE = 1 +}; + +/** + * enum es58x_flag - CAN flags for RX/TX messages. + * @ES58X_FLAG_EFF: Extended Frame Format (EFF). + * @ES58X_FLAG_RTR: Remote Transmission Request (RTR). + * @ES58X_FLAG_FD_BRS: Bit rate switch (BRS): second bitrate for + * payload data. + * @ES58X_FLAG_FD_ESI: Error State Indicator (ESI): tells whether the + * transmitting node is in error passive mode. + * @ES58X_FLAG_FD_DATA: CAN FD frame. + */ +enum es58x_flag { + ES58X_FLAG_EFF = BIT(0), + ES58X_FLAG_RTR = BIT(1), + ES58X_FLAG_FD_BRS = BIT(3), + ES58X_FLAG_FD_ESI = BIT(5), + ES58X_FLAG_FD_DATA = BIT(6) +}; + +/** + * enum es58x_err - CAN error detection. + * @ES58X_ERR_OK: No errors. + * @ES58X_ERR_PROT_STUFF: Bit stuffing error: more than 5 consecutive + * equal bits. + * @ES58X_ERR_PROT_FORM: Frame format error. + * @ES58X_ERR_ACK: Received no ACK on transmission. + * @ES58X_ERR_PROT_BIT: Single bit error. + * @ES58X_ERR_PROT_CRC: Incorrect 15, 17 or 21 bit CRC. + * @ES58X_ERR_PROT_BIT1: Unable to send recessive bit: tried to send + * recessive bit 1 but monitored dominant bit 0. + * @ES58X_ERR_PROT_BIT0: Unable to send dominant bit: tried to send + * dominant bit 0 but monitored recessive bit 1. + * @ES58X_ERR_PROT_OVERLOAD: Bus overload. + * @ES58X_ERR_PROT_UNSPEC: Unspecified. + * + * Please refer to ISO 11898-1:2015, section 10.11 "Error detection" + * and section 10.13 "Overload signaling" for additional details. + */ +enum es58x_err { + ES58X_ERR_OK = 0, + ES58X_ERR_PROT_STUFF = BIT(0), + ES58X_ERR_PROT_FORM = BIT(1), + ES58X_ERR_ACK = BIT(2), + ES58X_ERR_PROT_BIT = BIT(3), + ES58X_ERR_PROT_CRC = BIT(4), + ES58X_ERR_PROT_BIT1 = BIT(5), + ES58X_ERR_PROT_BIT0 = BIT(6), + ES58X_ERR_PROT_OVERLOAD = BIT(7), + ES58X_ERR_PROT_UNSPEC = BIT(31) +}; + +/** + * enum es58x_event - CAN error codes returned by the device. + * @ES58X_EVENT_OK: No errors. + * @ES58X_EVENT_CRTL_ACTIVE: Active state: both TX and RX error counts + * are less than 128. + * @ES58X_EVENT_CRTL_PASSIVE: Passive state: either TX or RX error + * count is greater than 127. + * @ES58X_EVENT_CRTL_WARNING: Warning state: either TX or RX error + * count is greater than 96. + * @ES58X_EVENT_BUSOFF: Bus off. + * @ES58X_EVENT_SINGLE_WIRE: Lost connection on either CAN high or CAN + * low. + * + * Please refer to ISO 11898-1:2015, section 12.1.4 "Rules of fault + * confinement" for additional details. + */ +enum es58x_event { + ES58X_EVENT_OK = 0, + ES58X_EVENT_CRTL_ACTIVE = BIT(0), + ES58X_EVENT_CRTL_PASSIVE = BIT(1), + ES58X_EVENT_CRTL_WARNING = BIT(2), + ES58X_EVENT_BUSOFF = BIT(3), + ES58X_EVENT_SINGLE_WIRE = BIT(4) +}; + +/* enum es58x_ret_u8 - Device return error codes, 8 bit format. + * + * Specific to ES581.4.
+ */ +enum es58x_ret_u8 { + ES58X_RET_U8_OK = 0x00, + ES58X_RET_U8_ERR_UNSPECIFIED_FAILURE = 0x80, + ES58X_RET_U8_ERR_NO_MEM = 0x81, + ES58X_RET_U8_ERR_BAD_CRC = 0x99 +}; + +/* enum es58x_ret_u32 - Device return error codes, 32 bit format. + */ +enum es58x_ret_u32 { + ES58X_RET_U32_OK = 0x00000000UL, + ES58X_RET_U32_ERR_UNSPECIFIED_FAILURE = 0x80000000UL, + ES58X_RET_U32_ERR_NO_MEM = 0x80004001UL, + ES58X_RET_U32_WARN_PARAM_ADJUSTED = 0x40004000UL, + ES58X_RET_U32_WARN_TX_MAYBE_REORDER = 0x40004001UL, + ES58X_RET_U32_ERR_TIMEDOUT = 0x80000008UL, + ES58X_RET_U32_ERR_FIFO_FULL = 0x80003002UL, + ES58X_RET_U32_ERR_BAD_CONFIG = 0x80004000UL, + ES58X_RET_U32_ERR_NO_RESOURCE = 0x80004002UL +}; + +/* enum es58x_ret_type - Type of the command returned by the ES58X + * device. + */ +enum es58x_ret_type { + ES58X_RET_TYPE_SET_BITTIMING, + ES58X_RET_TYPE_ENABLE_CHANNEL, + ES58X_RET_TYPE_DISABLE_CHANNEL, + ES58X_RET_TYPE_TX_MSG, + ES58X_RET_TYPE_RESET_RX, + ES58X_RET_TYPE_RESET_TX, + ES58X_RET_TYPE_DEVICE_ERR +}; + +union es58x_urb_cmd { + struct es581_4_urb_cmd es581_4_urb_cmd; + struct es58x_fd_urb_cmd es58x_fd_urb_cmd; + struct { /* Common header parts of all variants */ + __le16 sof; + u8 cmd_type; + u8 cmd_id; + } __packed; + DECLARE_FLEX_ARRAY(u8, raw_cmd); +}; + +/** + * struct es58x_priv - All information specific to a CAN channel. + * @can: struct can_priv must be the first member (Socket CAN relies + * on the fact that function netdev_priv() returns a pointer to + * a struct can_priv). + * @devlink_port: devlink instance for the network interface. + * @es58x_dev: pointer to the corresponding ES58X device. + * @tx_urb: Used as a buffer to concatenate the TX messages and to do + * a bulk send. Please refer to es58x_start_xmit() for more + * details. + * @tx_tail: Index of the oldest packet still pending for + * completion. @tx_tail & fifo_mask represents the beginning + * of the echo skb FIFO, i.e. index of the first element. + * @tx_head: Index of the next packet to be sent to the + * device. @tx_head & fifo_mask represents the end of the + * echo skb FIFO plus one, i.e. the first free index. + * @tx_can_msg_cnt: Number of messages in @tx_urb. + * @tx_can_msg_is_fd: false: all messages in @tx_urb are Classical + * CAN, true: all messages in @tx_urb are CAN FD. Rationale: + * ES58X FD devices do not allow mixing Classical CAN and CAN FD + * frames in one single bulk transmission. + * @err_passive_before_rtx_success: The ES58X device might enter a + * state in which it keeps alternating between error passive + * and active states. This counter keeps track of the number of + * error passive events and if it gets bigger than + * ES58X_CONSECUTIVE_ERR_PASSIVE_MAX, es58x_rx_err_msg() will + * force the status to bus-off. + * @channel_idx: Channel index, starts at zero. + */ +struct es58x_priv { + struct can_priv can; + struct devlink_port devlink_port; + struct es58x_device *es58x_dev; + struct urb *tx_urb; + + u32 tx_tail; + u32 tx_head; + + u8 tx_can_msg_cnt; + bool tx_can_msg_is_fd; + + u8 err_passive_before_rtx_success; + + u8 channel_idx; +}; + +/** + * struct es58x_parameters - Constant parameters of a given hardware + * variant. + * @bittiming_const: Nominal bittiming constant parameters. + * @data_bittiming_const: Data bittiming constant parameters. + * @tdc_const: Transmission Delay Compensation constant parameters. + * @bitrate_max: Maximum bitrate supported by the device. + * @clock: CAN clock parameters. + * @ctrlmode_supported: List of supported modes.
Please refer to + * can/netlink.h file for additional details. + * @tx_start_of_frame: Magic number at the beginning of each TX URB + * command. + * @rx_start_of_frame: Magic number at the beginning of each RX URB + * command. + * @tx_urb_cmd_max_len: Maximum length of a TX URB command. + * @rx_urb_cmd_max_len: Maximum length of a RX URB command. + * @fifo_mask: Bit mask to quickly convert the tx_tail and tx_head + * field of the struct es58x_priv into echo_skb + * indexes. Properties: @fifo_mask = echo_skb_max - 1 where + * echo_skb_max must be a power of two. Also, echo_skb_max must + * not exceed the maximum size of the device internal TX FIFO + * length. This parameter is used to control the network queue + * wake/stop logic. + * @dql_min_limit: Dynamic Queue Limits (DQL) absolute minimum limit + * of bytes allowed to be queued on this network device transmit + * queue. Used by the Byte Queue Limits (BQL) to determine how + * frequently the xmit_more flag will be set to true in + * es58x_start_xmit(). Set this value higher to optimize for + * throughput but be aware that it might have a negative impact + * on the latency! This value can also be set dynamically. Please + * refer to Documentation/ABI/testing/sysfs-class-net-queues for + * more details. + * @tx_bulk_max: Maximum number of TX messages that can be sent in one + * single URB packet. + * @urb_cmd_header_len: Length of the URB command header. + * @rx_urb_max: Number of RX URB to be allocated during device probe. + * @tx_urb_max: Number of TX URB to be allocated during device probe. + */ +struct es58x_parameters { + const struct can_bittiming_const *bittiming_const; + const struct can_bittiming_const *data_bittiming_const; + const struct can_tdc_const *tdc_const; + u32 bitrate_max; + struct can_clock clock; + u32 ctrlmode_supported; + u16 tx_start_of_frame; + u16 rx_start_of_frame; + u16 tx_urb_cmd_max_len; + u16 rx_urb_cmd_max_len; + u16 fifo_mask; + u16 dql_min_limit; + u8 tx_bulk_max; + u8 urb_cmd_header_len; + u8 rx_urb_max; + u8 tx_urb_max; +}; + +/** + * struct es58x_operators - Function pointers used to encode/decode + * the TX/RX messages. + * @get_msg_len: Get field msg_len of the urb_cmd. The offset of + * msg_len inside urb_cmd depends of the device model. + * @handle_urb_cmd: Decode the URB command received from the device + * and dispatch it to the relevant sub function. + * @fill_urb_header: Fill the header of urb_cmd. + * @tx_can_msg: Encode a TX CAN message and add it to the bulk buffer + * cmd_buf of es58x_dev. + * @enable_channel: Start the CAN channel. + * @disable_channel: Stop the CAN channel. + * @reset_device: Full reset of the device. N.B: this feature is only + * present on the ES581.4. For ES58X FD devices, this field is + * set to NULL. + * @get_timestamp: Request a timestamp from the ES58X device. + */ +struct es58x_operators { + u16 (*get_msg_len)(const union es58x_urb_cmd *urb_cmd); + int (*handle_urb_cmd)(struct es58x_device *es58x_dev, + const union es58x_urb_cmd *urb_cmd); + void (*fill_urb_header)(union es58x_urb_cmd *urb_cmd, u8 cmd_type, + u8 cmd_id, u8 channel_idx, u16 cmd_len); + int (*tx_can_msg)(struct es58x_priv *priv, const struct sk_buff *skb); + int (*enable_channel)(struct es58x_priv *priv); + int (*disable_channel)(struct es58x_priv *priv); + int (*reset_device)(struct es58x_device *es58x_dev); + int (*get_timestamp)(struct es58x_device *es58x_dev); +}; + +/** + * struct es58x_sw_version - Version number of the firmware or the + * bootloader. 
+ * @major: Version major number, represented on two digits.
+ * @minor: Version minor number, represented on two digits.
+ * @revision: Version revision number, represented on two digits.
+ *
+ * The firmware and the bootloader share the same format: "xx.xx.xx"
+ * where 'x' is a digit. Both can be retrieved from the product
+ * information string.
+ */
+struct es58x_sw_version {
+ u8 major;
+ u8 minor;
+ u8 revision;
+};
+
+/**
+ * struct es58x_hw_revision - Hardware revision number.
+ * @letter: Revision letter, an alphanumeric character.
+ * @major: Version major number, represented on three digits.
+ * @minor: Version minor number, represented on three digits.
+ *
+ * The hardware revision uses its own format: "axxx/xxx" where 'a' is
+ * an alphanumeric character and 'x' a digit. It can be retrieved from
+ * the product information string.
+ */
+struct es58x_hw_revision {
+ char letter;
+ u16 major;
+ u16 minor;
+};
+
+/**
+ * struct es58x_device - All information specific to an ES58X device.
+ * @dev: Device information.
+ * @udev: USB device information.
+ * @netdev: Array of our CAN channels.
+ * @param: The constant parameters.
+ * @ops: Operators.
+ * @rx_pipe: USB reception pipe.
+ * @tx_pipe: USB transmission pipe.
+ * @rx_urbs: Anchor for received URBs.
+ * @tx_urbs_busy: Anchor for TX URBs which were sent to the device.
+ * @tx_urbs_idle: Anchor for TX URBs which are idle. This driver
+ * allocates the memory for the URBs during the probe. When a TX
+ * URB is needed, it can be taken from this anchor. The network
+ * queue wake/stop logic should prevent this anchor from getting
+ * empty. Please refer to es58x_get_tx_urb() for more details.
+ * @tx_urbs_idle_cnt: Number of URBs in @tx_urbs_idle.
+ * @firmware_version: The firmware version number.
+ * @bootloader_version: The bootloader version number.
+ * @hardware_revision: The hardware revision number.
+ * @ktime_req_ns: Kernel timestamp when es58x_set_realtime_diff_ns()
+ * was called.
+ * @realtime_diff_ns: Difference in nanoseconds between the clocks of
+ * the ES58X device and the kernel.
+ * @timestamps: A temporary buffer to store the timestamps before
+ * feeding them to es58x_can_get_echo_skb(). Can only be used
+ * in RX branches.
+ * @num_can_ch: Number of CAN channels (i.e. number of elements of @netdev).
+ * @opened_channel_cnt: Number of channels opened. Free of race
+ * conditions because its two users (net_device_ops:ndo_open()
+ * and net_device_ops:ndo_close()) guarantee that the network
+ * stack big kernel lock (a.k.a. rtnl_mutex) is held.
+ * @rx_cmd_buf_len: Length of @rx_cmd_buf.
+ * @rx_cmd_buf: The device might split the URB commands into an
+ * arbitrary number of pieces. This buffer is used to concatenate
+ * all those pieces. Can only be used in RX branches. This field
+ * has to be the last one of the structure because it has a
+ * flexible size (c.f. es58x_sizeof_es58x_device() function).
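 *
 * The TX accounting described in &struct es58x_priv (@tx_head, @tx_tail)
 * and &struct es58x_parameters (@fifo_mask) boils down to a classic ring
 * buffer. A minimal sketch of the index arithmetic, illustrative only and
 * not the driver's exact code:
 *
 *	u32 frames_in_flight = priv->tx_head - priv->tx_tail;
 *	u16 fifo_mask = priv->es58x_dev->param->fifo_mask;
 *	u16 echo_skb_idx = priv->tx_head & fifo_mask;
 *
 *	if (frames_in_flight > fifo_mask)
 *		netif_stop_queue(netdev);
 *
 * i.e. the queue is stopped once the echo skb FIFO is full and woken up
 * again as acknowledgments move @tx_tail forward.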
+ */ +struct es58x_device { + struct device *dev; + struct usb_device *udev; + struct net_device *netdev[ES58X_NUM_CAN_CH_MAX]; + + const struct es58x_parameters *param; + const struct es58x_operators *ops; + + unsigned int rx_pipe; + unsigned int tx_pipe; + + struct usb_anchor rx_urbs; + struct usb_anchor tx_urbs_busy; + struct usb_anchor tx_urbs_idle; + atomic_t tx_urbs_idle_cnt; + + struct es58x_sw_version firmware_version; + struct es58x_sw_version bootloader_version; + struct es58x_hw_revision hardware_revision; + + u64 ktime_req_ns; + s64 realtime_diff_ns; + + u64 timestamps[ES58X_ECHO_BULK_MAX]; + + u8 num_can_ch; + u8 opened_channel_cnt; + + u16 rx_cmd_buf_len; + union es58x_urb_cmd rx_cmd_buf; +}; + +/** + * es58x_sizeof_es58x_device() - Calculate the maximum length of + * struct es58x_device. + * @es58x_dev_param: The constant parameters of the device. + * + * The length of struct es58x_device depends on the length of its last + * field: rx_cmd_buf. This macro allows to optimize the memory + * allocation. + * + * Return: length of struct es58x_device. + */ +static inline size_t es58x_sizeof_es58x_device(const struct es58x_parameters + *es58x_dev_param) +{ + return offsetof(struct es58x_device, rx_cmd_buf) + + es58x_dev_param->rx_urb_cmd_max_len; +} + +static inline int __es58x_check_msg_len(const struct device *dev, + const char *stringified_msg, + size_t actual_len, size_t expected_len) +{ + if (expected_len != actual_len) { + dev_err(dev, + "Length of %s is %zu but received command is %zu.\n", + stringified_msg, expected_len, actual_len); + return -EMSGSIZE; + } + return 0; +} + +/** + * es58x_check_msg_len() - Check the size of a received message. + * @dev: Device, used to print error messages. + * @msg: Received message, must not be a pointer. + * @actual_len: Length of the message as advertised in the command header. + * + * Must be a macro in order to accept the different types of messages + * as an input. Can be use with any of the messages which have a fixed + * length. Check for an exact match of the size. + * + * Return: zero on success, -EMSGSIZE if @actual_len differs from the + * expected length. + */ +#define es58x_check_msg_len(dev, msg, actual_len) \ + __es58x_check_msg_len(dev, __stringify(msg), \ + actual_len, sizeof(msg)) + +static inline int __es58x_check_msg_max_len(const struct device *dev, + const char *stringified_msg, + size_t actual_len, + size_t expected_len) +{ + if (actual_len > expected_len) { + dev_err(dev, + "Maximum length for %s is %zu but received command is %zu.\n", + stringified_msg, expected_len, actual_len); + return -EOVERFLOW; + } + return 0; +} + +/** + * es58x_check_msg_max_len() - Check the maximum size of a received message. + * @dev: Device, used to print error messages. + * @msg: Received message, must not be a pointer. + * @actual_len: Length of the message as advertised in the command header. + * + * Must be a macro in order to accept the different types of messages + * as an input. To be used with the messages of variable sizes. Only + * check that the message is not bigger than the maximum expected + * size. + * + * Return: zero on success, -EOVERFLOW if @actual_len is greater than + * the expected length. 
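 *
 * A minimal usage sketch (the message type and the advertised length are
 * made up for illustration):
 *
 *	struct hypothetical_msg {
 *		__le32 field;
 *		u8 buf[8];
 *	} msg;
 *
 *	ret = es58x_check_msg_max_len(dev, msg, 16);
 *
 * Here ret is -EOVERFLOW because the advertised length (16) exceeds
 * sizeof(msg), which is 12. es58x_check_msg_len() works the same way but
 * requires an exact match instead.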
+ */ +#define es58x_check_msg_max_len(dev, msg, actual_len) \ + __es58x_check_msg_max_len(dev, __stringify(msg), \ + actual_len, sizeof(msg)) + +static inline int __es58x_msg_num_element(const struct device *dev, + const char *stringified_msg, + size_t actual_len, size_t msg_len, + size_t elem_len) +{ + size_t actual_num_elem = actual_len / elem_len; + size_t expected_num_elem = msg_len / elem_len; + + if (actual_num_elem == 0) { + dev_err(dev, + "Minimum length for %s is %zu but received command is %zu.\n", + stringified_msg, elem_len, actual_len); + return -EMSGSIZE; + } else if ((actual_len % elem_len) != 0) { + dev_err(dev, + "Received command length: %zu is not a multiple of %s[0]: %zu\n", + actual_len, stringified_msg, elem_len); + return -EMSGSIZE; + } else if (actual_num_elem > expected_num_elem) { + dev_err(dev, + "Array %s is supposed to have %zu elements each of size %zu...\n", + stringified_msg, expected_num_elem, elem_len); + dev_err(dev, + "... But received command has %zu elements (total length %zu).\n", + actual_num_elem, actual_len); + return -EOVERFLOW; + } + return actual_num_elem; +} + +/** + * es58x_msg_num_element() - Check size and give the number of + * elements in a message of array type. + * @dev: Device, used to print error messages. + * @msg: Received message, must be an array. + * @actual_len: Length of the message as advertised in the command + * header. + * + * Must be a macro in order to accept the different types of messages + * as an input. To be used on message of array type. Array's element + * has to be of fixed size (else use es58x_check_msg_max_len()). Check + * that the total length is an exact multiple of the length of a + * single element. + * + * Return: number of elements in the array on success, -EOVERFLOW if + * @actual_len is greater than the expected length, -EMSGSIZE if + * @actual_len is not a multiple of a single element. + */ +#define es58x_msg_num_element(dev, msg, actual_len) \ +({ \ + size_t __elem_len = sizeof((msg)[0]) + __must_be_array(msg); \ + __es58x_msg_num_element(dev, __stringify(msg), actual_len, \ + sizeof(msg), __elem_len); \ +}) + +/** + * es58x_priv() - Get the priv member and cast it to struct es58x_priv. + * @netdev: CAN network device. + * + * Return: ES58X device. + */ +static inline struct es58x_priv *es58x_priv(struct net_device *netdev) +{ + return (struct es58x_priv *)netdev_priv(netdev); +} + +/** + * ES58X_SIZEOF_URB_CMD() - Calculate the maximum length of an urb + * command for a given message field name. + * @es58x_urb_cmd_type: type (either "struct es581_4_urb_cmd" or + * "struct es58x_fd_urb_cmd"). + * @msg_field: name of the message field. + * + * Must be a macro in order to accept the different command types as + * an input. + * + * Return: length of the urb command. + */ +#define ES58X_SIZEOF_URB_CMD(es58x_urb_cmd_type, msg_field) \ + (offsetof(es58x_urb_cmd_type, raw_msg) \ + + sizeof_field(es58x_urb_cmd_type, msg_field) \ + + sizeof_field(es58x_urb_cmd_type, \ + reserved_for_crc16_do_not_use)) + +/** + * es58x_get_urb_cmd_len() - Calculate the actual length of an urb + * command for a given message length. + * @es58x_dev: ES58X device. + * @msg_len: Length of the message. + * + * Add the header and CRC lengths to the message length. + * + * Return: length of the urb command. + */ +static inline size_t es58x_get_urb_cmd_len(struct es58x_device *es58x_dev, + u16 msg_len) +{ + return es58x_dev->param->urb_cmd_header_len + msg_len + sizeof(u16); +} + +/** + * es58x_get_netdev() - Get the network device. 
+ * @es58x_dev: ES58X device. + * @channel_no: The channel number as advertised in the urb command. + * @channel_idx_offset: Some of the ES58x starts channel numbering + * from 0 (ES58X FD), others from 1 (ES581.4). + * @netdev: CAN network device. + * + * Do a sanity check on the index provided by the device. + * + * Return: zero on success, -ECHRNG if the received channel number is + * out of range and -ENODEV if the network device is not yet + * configured. + */ +static inline int es58x_get_netdev(struct es58x_device *es58x_dev, + int channel_no, int channel_idx_offset, + struct net_device **netdev) +{ + int channel_idx = channel_no - channel_idx_offset; + + *netdev = NULL; + if (channel_idx < 0 || channel_idx >= es58x_dev->num_can_ch) + return -ECHRNG; + + *netdev = es58x_dev->netdev[channel_idx]; + if (!*netdev || !netif_device_present(*netdev)) + return -ENODEV; + + return 0; +} + +/** + * es58x_get_raw_can_id() - Get the CAN ID. + * @cf: CAN frame. + * + * Mask the CAN ID in order to only keep the significant bits. + * + * Return: the raw value of the CAN ID. + */ +static inline int es58x_get_raw_can_id(const struct can_frame *cf) +{ + if (cf->can_id & CAN_EFF_FLAG) + return cf->can_id & CAN_EFF_MASK; + else + return cf->can_id & CAN_SFF_MASK; +} + +/** + * es58x_get_flags() - Get the CAN flags. + * @skb: socket buffer of a CAN message. + * + * Return: the CAN flag as an enum es58x_flag. + */ +static inline enum es58x_flag es58x_get_flags(const struct sk_buff *skb) +{ + struct canfd_frame *cf = (struct canfd_frame *)skb->data; + enum es58x_flag es58x_flags = 0; + + if (cf->can_id & CAN_EFF_FLAG) + es58x_flags |= ES58X_FLAG_EFF; + + if (can_is_canfd_skb(skb)) { + es58x_flags |= ES58X_FLAG_FD_DATA; + if (cf->flags & CANFD_BRS) + es58x_flags |= ES58X_FLAG_FD_BRS; + if (cf->flags & CANFD_ESI) + es58x_flags |= ES58X_FLAG_FD_ESI; + } else if (cf->can_id & CAN_RTR_FLAG) + /* Remote frames are only defined in Classical CAN frames */ + es58x_flags |= ES58X_FLAG_RTR; + + return es58x_flags; +} + +/* es58x_core.c. */ +int es58x_can_get_echo_skb(struct net_device *netdev, u32 packet_idx, + u64 *tstamps, unsigned int pkts); +int es58x_tx_ack_msg(struct net_device *netdev, u16 tx_free_entries, + enum es58x_ret_u32 rx_cmd_ret_u32); +int es58x_rx_can_msg(struct net_device *netdev, u64 timestamp, const u8 *data, + canid_t can_id, enum es58x_flag es58x_flags, u8 dlc); +int es58x_rx_err_msg(struct net_device *netdev, enum es58x_err error, + enum es58x_event event, u64 timestamp); +void es58x_rx_timestamp(struct es58x_device *es58x_dev, u64 timestamp); +int es58x_rx_cmd_ret_u8(struct device *dev, enum es58x_ret_type cmd_ret_type, + enum es58x_ret_u8 rx_cmd_ret_u8); +int es58x_rx_cmd_ret_u32(struct net_device *netdev, + enum es58x_ret_type cmd_ret_type, + enum es58x_ret_u32 rx_cmd_ret_u32); +int es58x_send_msg(struct es58x_device *es58x_dev, u8 cmd_type, u8 cmd_id, + const void *msg, u16 cmd_len, int channel_idx); + +/* es58x_devlink.c. */ +void es58x_parse_product_info(struct es58x_device *es58x_dev); +extern const struct devlink_ops es58x_dl_ops; + +/* es581_4.c. */ +extern const struct es58x_parameters es581_4_param; +extern const struct es58x_operators es581_4_ops; + +/* es58x_fd.c. 
*/
+extern const struct es58x_parameters es58x_fd_param;
+extern const struct es58x_operators es58x_fd_ops;
+
+#endif /* __ES58X_COMMON_H__ */
diff --git a/drivers/net/can/usb/etas_es58x/es58x_devlink.c b/drivers/net/can/usb/etas_es58x/es58x_devlink.c
new file mode 100644
index 000000000000..0d155eb1b9e9
--- /dev/null
+++ b/drivers/net/can/usb/etas_es58x/es58x_devlink.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Driver for ETAS GmbH ES58X USB CAN(-FD) Bus Interfaces.
+ *
+ * File es58x_devlink.c: report the product information using devlink.
+ *
+ * Copyright (c) 2022 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+ */
+
+#include <linux/ctype.h>
+#include <linux/device.h>
+#include <linux/usb.h>
+#include <net/devlink.h>
+
+#include "es58x_core.h"
+
+/* USB descriptor index containing the product information string. */
+#define ES58X_PROD_INFO_IDX 6
+
+/**
+ * es58x_parse_sw_version() - Extract boot loader or firmware version.
+ * @es58x_dev: ES58X device.
+ * @prod_info: USB custom string returned by the device.
+ * @prefix: Select which information should be parsed. Set it to "FW"
+ * to parse the firmware version or to "BL" to parse the
+ * bootloader version.
+ *
+ * The @prod_info string contains the firmware and the bootloader
+ * version numbers, each prefixed by a magic string and concatenated with
+ * other numbers. Depending on the device, the firmware (bootloader)
+ * format is either "FW_Vxx.xx.xx" ("BL_Vxx.xx.xx") or "FW:xx.xx.xx"
+ * ("BL:xx.xx.xx") where 'x' represents a digit. @prod_info must
+ * contain the common part of those prefixes: "FW" or "BL".
+ *
+ * Parse @prod_info and store the version number in
+ * &es58x_dev.firmware_version or &es58x_dev.bootloader_version
+ * according to @prefix value.
+ *
+ * Return: zero on success, -EINVAL if @prefix contains an invalid
+ * value and -EBADMSG if @prod_info could not be parsed.
+ */
+static int es58x_parse_sw_version(struct es58x_device *es58x_dev,
+ const char *prod_info, const char *prefix)
+{
+ struct es58x_sw_version *version;
+ int major, minor, revision;
+
+ if (!strcmp(prefix, "FW"))
+ version = &es58x_dev->firmware_version;
+ else if (!strcmp(prefix, "BL"))
+ version = &es58x_dev->bootloader_version;
+ else
+ return -EINVAL;
+
+ /* Go to prefix */
+ prod_info = strstr(prod_info, prefix);
+ if (!prod_info)
+ return -EBADMSG;
+ /* Go to beginning of the version number */
+ while (!isdigit(*prod_info)) {
+ prod_info++;
+ if (!*prod_info)
+ return -EBADMSG;
+ }
+
+ if (sscanf(prod_info, "%2u.%2u.%2u", &major, &minor, &revision) != 3)
+ return -EBADMSG;
+
+ version->major = major;
+ version->minor = minor;
+ version->revision = revision;
+
+ return 0;
+}
+
+/**
+ * es58x_parse_hw_rev() - Extract hardware revision number.
+ * @es58x_dev: ES58X device.
+ * @prod_info: USB custom string returned by the device.
+ *
+ * @prod_info contains the hardware revision prefixed by a magic
+ * string and concatenated together with other numbers. Depending on
+ * the device, the hardware revision format is either
+ * "HW_VER:axxx/xxx" or "HR:axxx/xxx" where 'a' represents a letter
+ * and 'x' a digit.
+ *
+ * Parse @prod_info and store the hardware revision number in
+ * &es58x_dev.hardware_revision.
+ *
+ * Return: zero on success, -EBADMSG if @prod_info could not be
+ * parsed.
+ */
+static int es58x_parse_hw_rev(struct es58x_device *es58x_dev,
+ const char *prod_info)
+{
+ char letter;
+ int major, minor;
+
+ /* The only occurrence of 'H' is in the hardware revision prefix.
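	 * For example, with a (made up) token "HW_VER:A123/456", the
	 * strchr() below lands on the 'H' of "HW_VER", the second strchr()
	 * finds the ':' and the sscanf() then reads 'A', 123 and 456.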
*/ + prod_info = strchr(prod_info, 'H'); + if (!prod_info) + return -EBADMSG; + /* Go to beginning of the hardware revision */ + prod_info = strchr(prod_info, ':'); + if (!prod_info) + return -EBADMSG; + prod_info++; + + if (sscanf(prod_info, "%c%3u/%3u", &letter, &major, &minor) != 3) + return -EBADMSG; + + es58x_dev->hardware_revision.letter = letter; + es58x_dev->hardware_revision.major = major; + es58x_dev->hardware_revision.minor = minor; + + return 0; +} + +/** + * es58x_parse_product_info() - Parse the ES58x product information + * string. + * @es58x_dev: ES58X device. + * + * Retrieve the product information string and parse it to extract the + * firmware version, the bootloader version and the hardware + * revision. + * + * If the function fails, set the version or revision to an invalid + * value and emit an informal message. Continue probing because the + * product information is not critical for the driver to operate. + */ +void es58x_parse_product_info(struct es58x_device *es58x_dev) +{ + static const struct es58x_sw_version sw_version_not_set = { + .major = -1, + .minor = -1, + .revision = -1, + }; + static const struct es58x_hw_revision hw_revision_not_set = { + .letter = '\0', + .major = -1, + .minor = -1, + }; + char *prod_info; + + es58x_dev->firmware_version = sw_version_not_set; + es58x_dev->bootloader_version = sw_version_not_set; + es58x_dev->hardware_revision = hw_revision_not_set; + + prod_info = usb_cache_string(es58x_dev->udev, ES58X_PROD_INFO_IDX); + if (!prod_info) { + dev_warn(es58x_dev->dev, + "could not retrieve the product info string\n"); + return; + } + + if (es58x_parse_sw_version(es58x_dev, prod_info, "FW") || + es58x_parse_sw_version(es58x_dev, prod_info, "BL") || + es58x_parse_hw_rev(es58x_dev, prod_info)) + dev_info(es58x_dev->dev, + "could not parse product info: '%s'\n", prod_info); + + kfree(prod_info); +} + +/** + * es58x_sw_version_is_valid() - Check if the version is a valid number. + * @sw_ver: Version number of either the firmware or the bootloader. + * + * If any of the software version sub-numbers do not fit on two + * digits, the version is invalid, most probably because the product + * string could not be parsed. + * + * Return: @true if the software version is valid, @false otherwise. + */ +static inline bool es58x_sw_version_is_valid(struct es58x_sw_version *sw_ver) +{ + return sw_ver->major < 100 && sw_ver->minor < 100 && + sw_ver->revision < 100; +} + +/** + * es58x_hw_revision_is_valid() - Check if the revision is a valid number. + * @hw_rev: Revision number of the hardware. + * + * If &es58x_hw_revision.letter is not a alphanumeric character or if + * any of the hardware revision sub-numbers do not fit on three + * digits, the revision is invalid, most probably because the product + * string could not be parsed. + * + * Return: @true if the hardware revision is valid, @false otherwise. + */ +static inline bool es58x_hw_revision_is_valid(struct es58x_hw_revision *hw_rev) +{ + return isalnum(hw_rev->letter) && hw_rev->major < 1000 && + hw_rev->minor < 1000; +} + +/** + * es58x_devlink_info_get() - Report the product information. + * @devlink: Devlink. + * @req: skb wrapper where to put requested information. + * @extack: Unused. + * + * Report the firmware version, the bootloader version, the hardware + * revision and the serial number through netlink. + * + * Return: zero on success, errno when any error occurs. 
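 *
 * For illustration, assuming the parsers above were fed a (made up)
 * product information string containing "FW_V01.02.03", "BL_V01.00.02"
 * and "HW_VER:A123/456", this callback would report:
 *
 *	fw            01.02.03	(running)
 *	fw.bootloader 01.00.02	(running)
 *	board.rev     A123/456	(fixed)
 *
 * plus the USB serial number, if the device provides one.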
+ */ +static int es58x_devlink_info_get(struct devlink *devlink, + struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct es58x_device *es58x_dev = devlink_priv(devlink); + struct es58x_sw_version *fw_ver = &es58x_dev->firmware_version; + struct es58x_sw_version *bl_ver = &es58x_dev->bootloader_version; + struct es58x_hw_revision *hw_rev = &es58x_dev->hardware_revision; + char buf[MAX(sizeof("xx.xx.xx"), sizeof("axxx/xxx"))]; + int ret = 0; + + if (es58x_sw_version_is_valid(fw_ver)) { + snprintf(buf, sizeof(buf), "%02u.%02u.%02u", + fw_ver->major, fw_ver->minor, fw_ver->revision); + ret = devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW, + buf); + if (ret) + return ret; + } + + if (es58x_sw_version_is_valid(bl_ver)) { + snprintf(buf, sizeof(buf), "%02u.%02u.%02u", + bl_ver->major, bl_ver->minor, bl_ver->revision); + ret = devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW_BOOTLOADER, + buf); + if (ret) + return ret; + } + + if (es58x_hw_revision_is_valid(hw_rev)) { + snprintf(buf, sizeof(buf), "%c%03u/%03u", + hw_rev->letter, hw_rev->major, hw_rev->minor); + ret = devlink_info_version_fixed_put(req, + DEVLINK_INFO_VERSION_GENERIC_BOARD_REV, + buf); + if (ret) + return ret; + } + + if (es58x_dev->udev->serial) + ret = devlink_info_serial_number_put(req, + es58x_dev->udev->serial); + + return ret; +} + +const struct devlink_ops es58x_dl_ops = { + .info_get = es58x_devlink_info_get, +}; diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.c b/drivers/net/can/usb/etas_es58x/es58x_fd.c new file mode 100644 index 000000000000..6476add1c105 --- /dev/null +++ b/drivers/net/can/usb/etas_es58x/es58x_fd.c @@ -0,0 +1,565 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Driver for ETAS GmbH ES58X USB CAN(-FD) Bus Interfaces. + * + * File es58x_fd.c: Adds support to ETAS ES582.1 and ES584.1 (naming + * convention: we use the term "ES58X FD" when referring to those two + * variants together). + * + * Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved. + * Copyright (c) 2020 ETAS K.K.. All rights reserved. + * Copyright (c) 2020-2022 Vincent Mailhol <mailhol.vincent@wanadoo.fr> + */ + +#include <linux/unaligned.h> +#include <linux/kernel.h> +#include <linux/units.h> + +#include "es58x_core.h" +#include "es58x_fd.h" + +/** + * es58x_fd_sizeof_rx_tx_msg() - Calculate the actual length of the + * structure of a rx or tx message. + * @msg: message of variable length, must have a dlc and a len fields. + * + * Even if RTR frames have actually no payload, the ES58X devices + * still expect it. Must be a macro in order to accept several types + * (struct es58x_fd_tx_can_msg and struct es58x_fd_rx_can_msg) as an + * input. + * + * Return: length of the message. 
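 *
 * Two worked examples, using struct es58x_fd_rx_can_msg (8 bytes of
 * timestamp, 4 of CAN ID, 1 of flags, 1 of dlc or len, then the data):
 *
 *	Classical CAN, dlc = 5: can_cc_dlc2len(5) == 5, so the message
 *		occupies offsetof(struct es58x_fd_rx_can_msg, data[5]),
 *		i.e. 19 bytes.
 *	CAN FD, len = 17: canfd_sanitize_len(17) == 20 (the next valid
 *		CAN FD length), so the message occupies 34 bytes.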
+ */ +#define es58x_fd_sizeof_rx_tx_msg(msg) \ +({ \ + typeof(msg) __msg = (msg); \ + size_t __msg_len; \ + \ + if (__msg.flags & ES58X_FLAG_FD_DATA) \ + __msg_len = canfd_sanitize_len(__msg.len); \ + else \ + __msg_len = can_cc_dlc2len(__msg.dlc); \ + \ + offsetof(typeof(__msg), data[__msg_len]); \ +}) + +static enum es58x_fd_cmd_type es58x_fd_cmd_type(struct net_device *netdev) +{ + u32 ctrlmode = es58x_priv(netdev)->can.ctrlmode; + + if (ctrlmode & (CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO)) + return ES58X_FD_CMD_TYPE_CANFD; + else + return ES58X_FD_CMD_TYPE_CAN; +} + +static u16 es58x_fd_get_msg_len(const union es58x_urb_cmd *urb_cmd) +{ + return get_unaligned_le16(&urb_cmd->es58x_fd_urb_cmd.msg_len); +} + +static int es58x_fd_echo_msg(struct net_device *netdev, + const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd) +{ + struct es58x_priv *priv = es58x_priv(netdev); + const struct es58x_fd_echo_msg *echo_msg; + struct es58x_device *es58x_dev = priv->es58x_dev; + u64 *tstamps = es58x_dev->timestamps; + u16 msg_len = get_unaligned_le16(&es58x_fd_urb_cmd->msg_len); + int i, num_element; + u32 rcv_packet_idx; + + const u32 mask = GENMASK(BITS_PER_TYPE(mask) - 1, + BITS_PER_TYPE(echo_msg->packet_idx)); + + num_element = es58x_msg_num_element(es58x_dev->dev, + es58x_fd_urb_cmd->echo_msg, + msg_len); + if (num_element < 0) + return num_element; + echo_msg = es58x_fd_urb_cmd->echo_msg; + + rcv_packet_idx = (priv->tx_tail & mask) | echo_msg[0].packet_idx; + for (i = 0; i < num_element; i++) { + if ((u8)rcv_packet_idx != echo_msg[i].packet_idx) { + netdev_err(netdev, "Packet idx jumped from %u to %u\n", + (u8)rcv_packet_idx - 1, + echo_msg[i].packet_idx); + return -EBADMSG; + } + + tstamps[i] = get_unaligned_le64(&echo_msg[i].timestamp); + rcv_packet_idx++; + } + + return es58x_can_get_echo_skb(netdev, priv->tx_tail, tstamps, num_element); +} + +static int es58x_fd_rx_can_msg(struct net_device *netdev, + const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd) +{ + struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev; + const u8 *rx_can_msg_buf = es58x_fd_urb_cmd->rx_can_msg_buf; + u16 rx_can_msg_buf_len = get_unaligned_le16(&es58x_fd_urb_cmd->msg_len); + int pkts, ret; + + ret = es58x_check_msg_max_len(es58x_dev->dev, + es58x_fd_urb_cmd->rx_can_msg_buf, + rx_can_msg_buf_len); + if (ret) + return ret; + + for (pkts = 0; rx_can_msg_buf_len > 0; pkts++) { + const struct es58x_fd_rx_can_msg *rx_can_msg = + (const struct es58x_fd_rx_can_msg *)rx_can_msg_buf; + bool is_can_fd = !!(rx_can_msg->flags & ES58X_FLAG_FD_DATA); + /* rx_can_msg_len is the length of the rx_can_msg + * buffer. Not to be confused with rx_can_msg->len + * which is the length of the CAN payload + * rx_can_msg->data. 
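		 * For example, for a Classical CAN message with dlc 3, the
		 * CAN payload is 3 bytes while rx_can_msg_len evaluates to
		 * offsetof(struct es58x_fd_rx_can_msg, data[3]), i.e. 17
		 * bytes consumed from rx_can_msg_buf.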
+ */ + u16 rx_can_msg_len = es58x_fd_sizeof_rx_tx_msg(*rx_can_msg); + + if (rx_can_msg_len > rx_can_msg_buf_len) { + netdev_err(netdev, + "%s: Expected a rx_can_msg of size %d but only %d bytes are left in rx_can_msg_buf\n", + __func__, + rx_can_msg_len, rx_can_msg_buf_len); + return -EMSGSIZE; + } + if (rx_can_msg->len > CANFD_MAX_DLEN) { + netdev_err(netdev, + "%s: Data length is %d but maximum should be %d\n", + __func__, rx_can_msg->len, CANFD_MAX_DLEN); + return -EMSGSIZE; + } + + if (netif_running(netdev)) { + u64 tstamp = get_unaligned_le64(&rx_can_msg->timestamp); + canid_t can_id = get_unaligned_le32(&rx_can_msg->can_id); + u8 dlc; + + if (is_can_fd) + dlc = can_fd_len2dlc(rx_can_msg->len); + else + dlc = rx_can_msg->dlc; + + ret = es58x_rx_can_msg(netdev, tstamp, rx_can_msg->data, + can_id, rx_can_msg->flags, dlc); + if (ret) + break; + } + + rx_can_msg_buf_len -= rx_can_msg_len; + rx_can_msg_buf += rx_can_msg_len; + } + + if (!netif_running(netdev)) { + if (net_ratelimit()) + netdev_info(netdev, + "%s: %s is down, dropping %d rx packets\n", + __func__, netdev->name, pkts); + netdev->stats.rx_dropped += pkts; + } + + return ret; +} + +static int es58x_fd_rx_event_msg(struct net_device *netdev, + const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd) +{ + struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev; + u16 msg_len = get_unaligned_le16(&es58x_fd_urb_cmd->msg_len); + const struct es58x_fd_rx_event_msg *rx_event_msg; + int ret; + + rx_event_msg = &es58x_fd_urb_cmd->rx_event_msg; + ret = es58x_check_msg_len(es58x_dev->dev, *rx_event_msg, msg_len); + if (ret) + return ret; + + return es58x_rx_err_msg(netdev, rx_event_msg->error_code, + rx_event_msg->event_code, + get_unaligned_le64(&rx_event_msg->timestamp)); +} + +static int es58x_fd_rx_cmd_ret_u32(struct net_device *netdev, + const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd, + enum es58x_ret_type cmd_ret_type) +{ + struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev; + u16 msg_len = get_unaligned_le16(&es58x_fd_urb_cmd->msg_len); + int ret; + + ret = es58x_check_msg_len(es58x_dev->dev, + es58x_fd_urb_cmd->rx_cmd_ret_le32, msg_len); + if (ret) + return ret; + + return es58x_rx_cmd_ret_u32(netdev, cmd_ret_type, + get_unaligned_le32(&es58x_fd_urb_cmd->rx_cmd_ret_le32)); +} + +static int es58x_fd_tx_ack_msg(struct net_device *netdev, + const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd) +{ + struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev; + const struct es58x_fd_tx_ack_msg *tx_ack_msg; + u16 msg_len = get_unaligned_le16(&es58x_fd_urb_cmd->msg_len); + int ret; + + tx_ack_msg = &es58x_fd_urb_cmd->tx_ack_msg; + ret = es58x_check_msg_len(es58x_dev->dev, *tx_ack_msg, msg_len); + if (ret) + return ret; + + return es58x_tx_ack_msg(netdev, + get_unaligned_le16(&tx_ack_msg->tx_free_entries), + get_unaligned_le32(&tx_ack_msg->rx_cmd_ret_le32)); +} + +static int es58x_fd_can_cmd_id(struct es58x_device *es58x_dev, + const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd) +{ + struct net_device *netdev; + int ret; + + ret = es58x_get_netdev(es58x_dev, es58x_fd_urb_cmd->channel_idx, + ES58X_FD_CHANNEL_IDX_OFFSET, &netdev); + if (ret) + return ret; + + switch ((enum es58x_fd_can_cmd_id)es58x_fd_urb_cmd->cmd_id) { + case ES58X_FD_CAN_CMD_ID_ENABLE_CHANNEL: + return es58x_fd_rx_cmd_ret_u32(netdev, es58x_fd_urb_cmd, + ES58X_RET_TYPE_ENABLE_CHANNEL); + + case ES58X_FD_CAN_CMD_ID_DISABLE_CHANNEL: + return es58x_fd_rx_cmd_ret_u32(netdev, es58x_fd_urb_cmd, + ES58X_RET_TYPE_DISABLE_CHANNEL); + + case ES58X_FD_CAN_CMD_ID_TX_MSG: + 
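		/* The reply to a TX command is a TX acknowledgment which
		 * carries the number of free entries left in the device TX
		 * FIFO; es58x_fd_tx_ack_msg() below decodes it.
		 */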
return es58x_fd_tx_ack_msg(netdev, es58x_fd_urb_cmd); + + case ES58X_FD_CAN_CMD_ID_ECHO_MSG: + return es58x_fd_echo_msg(netdev, es58x_fd_urb_cmd); + + case ES58X_FD_CAN_CMD_ID_RX_MSG: + return es58x_fd_rx_can_msg(netdev, es58x_fd_urb_cmd); + + case ES58X_FD_CAN_CMD_ID_RESET_RX: + return es58x_fd_rx_cmd_ret_u32(netdev, es58x_fd_urb_cmd, + ES58X_RET_TYPE_RESET_RX); + + case ES58X_FD_CAN_CMD_ID_RESET_TX: + return es58x_fd_rx_cmd_ret_u32(netdev, es58x_fd_urb_cmd, + ES58X_RET_TYPE_RESET_TX); + + case ES58X_FD_CAN_CMD_ID_ERROR_OR_EVENT_MSG: + return es58x_fd_rx_event_msg(netdev, es58x_fd_urb_cmd); + + default: + return -EBADRQC; + } +} + +static int es58x_fd_device_cmd_id(struct es58x_device *es58x_dev, + const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd) +{ + u16 msg_len = get_unaligned_le16(&es58x_fd_urb_cmd->msg_len); + int ret; + + switch ((enum es58x_fd_dev_cmd_id)es58x_fd_urb_cmd->cmd_id) { + case ES58X_FD_DEV_CMD_ID_TIMESTAMP: + ret = es58x_check_msg_len(es58x_dev->dev, + es58x_fd_urb_cmd->timestamp, msg_len); + if (ret) + return ret; + es58x_rx_timestamp(es58x_dev, + get_unaligned_le64(&es58x_fd_urb_cmd->timestamp)); + return 0; + + default: + return -EBADRQC; + } +} + +static int es58x_fd_handle_urb_cmd(struct es58x_device *es58x_dev, + const union es58x_urb_cmd *urb_cmd) +{ + const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd; + int ret; + + es58x_fd_urb_cmd = &urb_cmd->es58x_fd_urb_cmd; + + switch ((enum es58x_fd_cmd_type)es58x_fd_urb_cmd->cmd_type) { + case ES58X_FD_CMD_TYPE_CAN: + case ES58X_FD_CMD_TYPE_CANFD: + ret = es58x_fd_can_cmd_id(es58x_dev, es58x_fd_urb_cmd); + break; + + case ES58X_FD_CMD_TYPE_DEVICE: + ret = es58x_fd_device_cmd_id(es58x_dev, es58x_fd_urb_cmd); + break; + + default: + ret = -EBADRQC; + break; + } + + if (ret == -EBADRQC) + dev_err(es58x_dev->dev, + "%s: Unknown command type (0x%02X) and command ID (0x%02X) combination\n", + __func__, es58x_fd_urb_cmd->cmd_type, + es58x_fd_urb_cmd->cmd_id); + + return ret; +} + +static void es58x_fd_fill_urb_header(union es58x_urb_cmd *urb_cmd, u8 cmd_type, + u8 cmd_id, u8 channel_idx, u16 msg_len) +{ + struct es58x_fd_urb_cmd *es58x_fd_urb_cmd = &urb_cmd->es58x_fd_urb_cmd; + + es58x_fd_urb_cmd->SOF = cpu_to_le16(es58x_fd_param.tx_start_of_frame); + es58x_fd_urb_cmd->cmd_type = cmd_type; + es58x_fd_urb_cmd->cmd_id = cmd_id; + es58x_fd_urb_cmd->channel_idx = channel_idx; + es58x_fd_urb_cmd->msg_len = cpu_to_le16(msg_len); +} + +static int es58x_fd_tx_can_msg(struct es58x_priv *priv, + const struct sk_buff *skb) +{ + struct es58x_device *es58x_dev = priv->es58x_dev; + union es58x_urb_cmd *urb_cmd = priv->tx_urb->transfer_buffer; + struct es58x_fd_urb_cmd *es58x_fd_urb_cmd = &urb_cmd->es58x_fd_urb_cmd; + struct can_frame *cf = (struct can_frame *)skb->data; + struct es58x_fd_tx_can_msg *tx_can_msg; + bool is_fd = can_is_canfd_skb(skb); + u16 msg_len; + int ret; + + if (priv->tx_can_msg_cnt == 0) { + msg_len = 0; + es58x_fd_fill_urb_header(urb_cmd, + is_fd ? ES58X_FD_CMD_TYPE_CANFD + : ES58X_FD_CMD_TYPE_CAN, + ES58X_FD_CAN_CMD_ID_TX_MSG_NO_ACK, + priv->channel_idx, msg_len); + } else { + msg_len = es58x_fd_get_msg_len(urb_cmd); + } + + ret = es58x_check_msg_max_len(es58x_dev->dev, + es58x_fd_urb_cmd->tx_can_msg_buf, + msg_len + sizeof(*tx_can_msg)); + if (ret) + return ret; + + /* Fill message contents. 
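	 * Layout appended at offset msg_len of raw_msg (cf. struct
	 * es58x_fd_tx_can_msg): one byte of packet_idx (the lower eight
	 * bits of tx_head), four bytes of little endian CAN ID, one byte
	 * of flags, one byte of dlc or len, then the payload.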
*/ + tx_can_msg = (typeof(tx_can_msg))&es58x_fd_urb_cmd->raw_msg[msg_len]; + tx_can_msg->packet_idx = (u8)priv->tx_head; + put_unaligned_le32(es58x_get_raw_can_id(cf), &tx_can_msg->can_id); + tx_can_msg->flags = (u8)es58x_get_flags(skb); + if (is_fd) + tx_can_msg->len = cf->len; + else + tx_can_msg->dlc = can_get_cc_dlc(cf, priv->can.ctrlmode); + memcpy(tx_can_msg->data, cf->data, cf->len); + + /* Calculate new sizes */ + msg_len += es58x_fd_sizeof_rx_tx_msg(*tx_can_msg); + priv->tx_urb->transfer_buffer_length = es58x_get_urb_cmd_len(es58x_dev, + msg_len); + put_unaligned_le16(msg_len, &es58x_fd_urb_cmd->msg_len); + + return 0; +} + +static void es58x_fd_convert_bittiming(struct es58x_fd_bittiming *es58x_fd_bt, + struct can_bittiming *bt) +{ + /* The actual value set in the hardware registers is one less + * than the functional value. + */ + const int offset = 1; + + es58x_fd_bt->bitrate = cpu_to_le32(bt->bitrate); + es58x_fd_bt->tseg1 = + cpu_to_le16(bt->prop_seg + bt->phase_seg1 - offset); + es58x_fd_bt->tseg2 = cpu_to_le16(bt->phase_seg2 - offset); + es58x_fd_bt->brp = cpu_to_le16(bt->brp - offset); + es58x_fd_bt->sjw = cpu_to_le16(bt->sjw - offset); +} + +static int es58x_fd_enable_channel(struct es58x_priv *priv) +{ + struct es58x_device *es58x_dev = priv->es58x_dev; + struct net_device *netdev = es58x_dev->netdev[priv->channel_idx]; + struct es58x_fd_tx_conf_msg tx_conf_msg = { 0 }; + u32 ctrlmode; + size_t conf_len = 0; + + es58x_fd_convert_bittiming(&tx_conf_msg.nominal_bittiming, + &priv->can.bittiming); + ctrlmode = priv->can.ctrlmode; + + if (ctrlmode & CAN_CTRLMODE_3_SAMPLES) + tx_conf_msg.samples_per_bit = ES58X_SAMPLES_PER_BIT_THREE; + else + tx_conf_msg.samples_per_bit = ES58X_SAMPLES_PER_BIT_ONE; + tx_conf_msg.sync_edge = ES58X_SYNC_EDGE_SINGLE; + tx_conf_msg.physical_layer = ES58X_PHYSICAL_LAYER_HIGH_SPEED; + tx_conf_msg.echo_mode = ES58X_ECHO_ON; + if (ctrlmode & CAN_CTRLMODE_LISTENONLY) + tx_conf_msg.ctrlmode |= ES58X_FD_CTRLMODE_PASSIVE; + else + tx_conf_msg.ctrlmode |= ES58X_FD_CTRLMODE_ACTIVE; + + if (ctrlmode & CAN_CTRLMODE_FD_NON_ISO) { + tx_conf_msg.ctrlmode |= ES58X_FD_CTRLMODE_FD_NON_ISO; + tx_conf_msg.canfd_enabled = 1; + } else if (ctrlmode & CAN_CTRLMODE_FD) { + tx_conf_msg.ctrlmode |= ES58X_FD_CTRLMODE_FD; + tx_conf_msg.canfd_enabled = 1; + } + + if (tx_conf_msg.canfd_enabled) { + es58x_fd_convert_bittiming(&tx_conf_msg.data_bittiming, + &priv->can.fd.data_bittiming); + + if (can_fd_tdc_is_enabled(&priv->can)) { + tx_conf_msg.tdc_enabled = 1; + tx_conf_msg.tdco = cpu_to_le16(priv->can.fd.tdc.tdco); + tx_conf_msg.tdcf = cpu_to_le16(priv->can.fd.tdc.tdcf); + } + + conf_len = ES58X_FD_CANFD_CONF_LEN; + } else { + conf_len = ES58X_FD_CAN_CONF_LEN; + } + + return es58x_send_msg(es58x_dev, es58x_fd_cmd_type(netdev), + ES58X_FD_CAN_CMD_ID_ENABLE_CHANNEL, + &tx_conf_msg, conf_len, priv->channel_idx); +} + +static int es58x_fd_disable_channel(struct es58x_priv *priv) +{ + /* The type (ES58X_FD_CMD_TYPE_CAN or ES58X_FD_CMD_TYPE_CANFD) does + * not matter here. 
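	 * On the reply path, es58x_fd_handle_urb_cmd() dispatches
	 * ES58X_FD_CMD_TYPE_CAN and ES58X_FD_CMD_TYPE_CANFD to the same
	 * handler anyway.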
+ */ + return es58x_send_msg(priv->es58x_dev, ES58X_FD_CMD_TYPE_CAN, + ES58X_FD_CAN_CMD_ID_DISABLE_CHANNEL, + ES58X_EMPTY_MSG, 0, priv->channel_idx); +} + +static int es58x_fd_get_timestamp(struct es58x_device *es58x_dev) +{ + return es58x_send_msg(es58x_dev, ES58X_FD_CMD_TYPE_DEVICE, + ES58X_FD_DEV_CMD_ID_TIMESTAMP, ES58X_EMPTY_MSG, + 0, ES58X_CHANNEL_IDX_NA); +} + +/* Nominal bittiming constants for ES582.1 and ES584.1 as specified in + * the microcontroller datasheet: "SAM E70/S70/V70/V71 Family" section + * 49.6.8 "MCAN Nominal Bit Timing and Prescaler Register" from + * Microchip. + * + * The values from the specification are the hardware register + * values. To convert them to the functional values, all ranges were + * incremented by 1 (e.g. range [0..n-1] changed to [1..n]). + */ +static const struct can_bittiming_const es58x_fd_nom_bittiming_const = { + .name = "ES582.1/ES584.1", + .tseg1_min = 2, + .tseg1_max = 256, + .tseg2_min = 2, + .tseg2_max = 128, + .sjw_max = 128, + .brp_min = 1, + .brp_max = 512, + .brp_inc = 1 +}; + +/* Data bittiming constants for ES582.1 and ES584.1 as specified in + * the microcontroller datasheet: "SAM E70/S70/V70/V71 Family" section + * 49.6.4 "MCAN Data Bit Timing and Prescaler Register" from + * Microchip. + */ +static const struct can_bittiming_const es58x_fd_data_bittiming_const = { + .name = "ES582.1/ES584.1", + .tseg1_min = 2, + .tseg1_max = 32, + .tseg2_min = 1, + .tseg2_max = 16, + .sjw_max = 8, + .brp_min = 1, + .brp_max = 32, + .brp_inc = 1 +}; + +/* Transmission Delay Compensation constants for ES582.1 and ES584.1 + * as specified in the microcontroller datasheet: "SAM E70/S70/V70/V71 + * Family" section 49.6.15 "MCAN Transmitter Delay Compensation + * Register" from Microchip. + */ +static const struct can_tdc_const es58x_tdc_const = { + .tdcv_min = 0, + .tdcv_max = 0, /* Manual mode not supported. */ + .tdco_min = 0, + .tdco_max = 127, + .tdcf_min = 0, + .tdcf_max = 127 +}; + +const struct es58x_parameters es58x_fd_param = { + .bittiming_const = &es58x_fd_nom_bittiming_const, + .data_bittiming_const = &es58x_fd_data_bittiming_const, + .tdc_const = &es58x_tdc_const, + /* The devices use NXP TJA1044G transievers which guarantee + * the timing for data rates up to 5 Mbps. Bitrates up to 8 + * Mbps work in an optimal environment but are not recommended + * for production environment. + */ + .bitrate_max = 8 * MEGA /* BPS */, + .clock = {.freq = 80 * MEGA /* Hz */}, + .ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | + CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO | + CAN_CTRLMODE_CC_LEN8_DLC | CAN_CTRLMODE_TDC_AUTO, + .tx_start_of_frame = 0xCEFA, /* FACE in little endian */ + .rx_start_of_frame = 0xFECA, /* CAFE in little endian */ + .tx_urb_cmd_max_len = ES58X_FD_TX_URB_CMD_MAX_LEN, + .rx_urb_cmd_max_len = ES58X_FD_RX_URB_CMD_MAX_LEN, + /* Size of internal device TX queue is 500. + * + * However, when reaching value around 278, the device's busy + * LED turns on and thus maximum value of 500 is never reached + * in practice. Also, when this value is too high, some error + * on the echo_msg were witnessed when the device is + * recovering from bus off. + * + * For above reasons, a value that would prevent the device + * from becoming busy was chosen. In practice, BQL would + * prevent the value from even getting closer to below + * maximum, so no impact on performance was measured. + */ + .fifo_mask = 255, /* echo_skb_max = 256 */ + .dql_min_limit = CAN_FRAME_LEN_MAX * 15, /* Empirical value. 
*/ + .tx_bulk_max = ES58X_FD_TX_BULK_MAX, + .urb_cmd_header_len = ES58X_FD_URB_CMD_HEADER_LEN, + .rx_urb_max = ES58X_RX_URBS_MAX, + .tx_urb_max = ES58X_TX_URBS_MAX +}; + +const struct es58x_operators es58x_fd_ops = { + .get_msg_len = es58x_fd_get_msg_len, + .handle_urb_cmd = es58x_fd_handle_urb_cmd, + .fill_urb_header = es58x_fd_fill_urb_header, + .tx_can_msg = es58x_fd_tx_can_msg, + .enable_channel = es58x_fd_enable_channel, + .disable_channel = es58x_fd_disable_channel, + .reset_device = NULL, /* Not implemented in the device firmware. */ + .get_timestamp = es58x_fd_get_timestamp +}; diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.h b/drivers/net/can/usb/etas_es58x/es58x_fd.h new file mode 100644 index 000000000000..c4b19a6a33ae --- /dev/null +++ b/drivers/net/can/usb/etas_es58x/es58x_fd.h @@ -0,0 +1,234 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* Driver for ETAS GmbH ES58X USB CAN(-FD) Bus Interfaces. + * + * File es58x_fd.h: Definitions and declarations specific to ETAS + * ES582.1 and ES584.1 (naming convention: we use the term "ES58X FD" + * when referring to those two variants together). + * + * Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved. + * Copyright (c) 2020 ETAS K.K.. All rights reserved. + * Copyright (c) 2020, 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr> + */ + +#ifndef __ES58X_FD_H__ +#define __ES58X_FD_H__ + +#include <linux/types.h> + +#define ES582_1_NUM_CAN_CH 2 +#define ES584_1_NUM_CAN_CH 1 +#define ES58X_FD_NUM_CAN_CH 2 +#define ES58X_FD_CHANNEL_IDX_OFFSET 0 + +#define ES58X_FD_TX_BULK_MAX 100 +#define ES58X_FD_RX_BULK_MAX 100 +#define ES58X_FD_ECHO_BULK_MAX 100 + +enum es58x_fd_cmd_type { + ES58X_FD_CMD_TYPE_CAN = 0x03, + ES58X_FD_CMD_TYPE_CANFD = 0x04, + ES58X_FD_CMD_TYPE_DEVICE = 0xFF +}; + +/* Command IDs for ES58X_FD_CMD_TYPE_{CAN,CANFD}. */ +enum es58x_fd_can_cmd_id { + ES58X_FD_CAN_CMD_ID_ENABLE_CHANNEL = 0x01, + ES58X_FD_CAN_CMD_ID_DISABLE_CHANNEL = 0x02, + ES58X_FD_CAN_CMD_ID_TX_MSG = 0x05, + ES58X_FD_CAN_CMD_ID_ECHO_MSG = 0x07, + ES58X_FD_CAN_CMD_ID_RX_MSG = 0x10, + ES58X_FD_CAN_CMD_ID_ERROR_OR_EVENT_MSG = 0x11, + ES58X_FD_CAN_CMD_ID_RESET_RX = 0x20, + ES58X_FD_CAN_CMD_ID_RESET_TX = 0x21, + ES58X_FD_CAN_CMD_ID_TX_MSG_NO_ACK = 0x55 +}; + +/* Command IDs for ES58X_FD_CMD_TYPE_DEVICE. */ +enum es58x_fd_dev_cmd_id { + ES58X_FD_DEV_CMD_ID_GETTIMETICKS = 0x01, + ES58X_FD_DEV_CMD_ID_TIMESTAMP = 0x02 +}; + +/** + * enum es58x_fd_ctrlmode - Controller mode. + * @ES58X_FD_CTRLMODE_ACTIVE: send and receive messages. + * @ES58X_FD_CTRLMODE_PASSIVE: only receive messages (monitor). Do not + * send anything, not even the acknowledgment bit. + * @ES58X_FD_CTRLMODE_FD: CAN FD according to ISO11898-1. + * @ES58X_FD_CTRLMODE_FD_NON_ISO: follow Bosch CAN FD Specification + * V1.0 + * @ES58X_FD_CTRLMODE_DISABLE_PROTOCOL_EXCEPTION_HANDLING: How to + * behave when CAN FD reserved bit is monitored as + * dominant. (c.f. ISO 11898-1:2015, section 10.4.2.4 "Control + * field", paragraph "r0 bit"). 0 (not disable = enable): send + * error frame. 1 (disable): goes into bus integration mode + * (c.f. below). + * @ES58X_FD_CTRLMODE_EDGE_FILTER_DURING_BUS_INTEGRATION: 0: Edge + * filtering is disabled. 1: Edge filtering is enabled. Two + * consecutive dominant bits required to detect an edge for hard + * synchronization. 
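 *
 * A sketch of how es58x_fd_enable_channel() derives these bits from the
 * Linux control modes (simplified from the code in es58x_fd.c, local
 * variable names are illustrative):
 *
 *	u8 ctrlmode = 0;
 *
 *	if (can_ctrlmode & CAN_CTRLMODE_LISTENONLY)
 *		ctrlmode |= ES58X_FD_CTRLMODE_PASSIVE;
 *	else
 *		ctrlmode |= ES58X_FD_CTRLMODE_ACTIVE;
 *	if (can_ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
 *		ctrlmode |= ES58X_FD_CTRLMODE_FD_NON_ISO;
 *	else if (can_ctrlmode & CAN_CTRLMODE_FD)
 *		ctrlmode |= ES58X_FD_CTRLMODE_FD;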
+ */ +enum es58x_fd_ctrlmode { + ES58X_FD_CTRLMODE_ACTIVE = 0, + ES58X_FD_CTRLMODE_PASSIVE = BIT(0), + ES58X_FD_CTRLMODE_FD = BIT(4), + ES58X_FD_CTRLMODE_FD_NON_ISO = BIT(5), + ES58X_FD_CTRLMODE_DISABLE_PROTOCOL_EXCEPTION_HANDLING = BIT(6), + ES58X_FD_CTRLMODE_EDGE_FILTER_DURING_BUS_INTEGRATION = BIT(7) +}; + +struct es58x_fd_bittiming { + __le32 bitrate; + __le16 tseg1; /* range: [tseg1_min-1..tseg1_max-1] */ + __le16 tseg2; /* range: [tseg2_min-1..tseg2_max-1] */ + __le16 brp; /* range: [brp_min-1..brp_max-1] */ + __le16 sjw; /* range: [0..sjw_max-1] */ +} __packed; + +/** + * struct es58x_fd_tx_conf_msg - Channel configuration. + * @nominal_bittiming: Nominal bittiming. + * @samples_per_bit: type enum es58x_samples_per_bit. + * @sync_edge: type enum es58x_sync_edge. + * @physical_layer: type enum es58x_physical_layer. + * @echo_mode: type enum es58x_echo_mode. + * @ctrlmode: type enum es58x_fd_ctrlmode. + * @canfd_enabled: boolean (0: Classical CAN, 1: CAN and/or CANFD). + * @data_bittiming: Bittiming for flexible data-rate transmission. + * @tdc_enabled: Transmitter Delay Compensation switch (0: TDC is + * disabled, 1: TDC is enabled). + * @tdco: Transmitter Delay Compensation Offset. + * @tdcf: Transmitter Delay Compensation Filter window. + * + * Please refer to the microcontroller datasheet: "SAM E70/S70/V70/V71 + * Family" section 49 "Controller Area Network (MCAN)" for additional + * information. + */ +struct es58x_fd_tx_conf_msg { + struct es58x_fd_bittiming nominal_bittiming; + u8 samples_per_bit; + u8 sync_edge; + u8 physical_layer; + u8 echo_mode; + u8 ctrlmode; + u8 canfd_enabled; + struct es58x_fd_bittiming data_bittiming; + u8 tdc_enabled; + __le16 tdco; + __le16 tdcf; +} __packed; + +#define ES58X_FD_CAN_CONF_LEN \ + (offsetof(struct es58x_fd_tx_conf_msg, canfd_enabled)) +#define ES58X_FD_CANFD_CONF_LEN (sizeof(struct es58x_fd_tx_conf_msg)) + +struct es58x_fd_tx_can_msg { + u8 packet_idx; + __le32 can_id; + u8 flags; + union { + u8 dlc; /* Only if cmd_id is ES58X_FD_CMD_TYPE_CAN */ + u8 len; /* Only if cmd_id is ES58X_FD_CMD_TYPE_CANFD */ + } __packed; + u8 data[CANFD_MAX_DLEN]; +} __packed; + +#define ES58X_FD_CAN_TX_LEN \ + (offsetof(struct es58x_fd_tx_can_msg, data[CAN_MAX_DLEN])) +#define ES58X_FD_CANFD_TX_LEN (sizeof(struct es58x_fd_tx_can_msg)) + +struct es58x_fd_rx_can_msg { + __le64 timestamp; + __le32 can_id; + u8 flags; + union { + u8 dlc; /* Only if cmd_id is ES58X_FD_CMD_TYPE_CAN */ + u8 len; /* Only if cmd_id is ES58X_FD_CMD_TYPE_CANFD */ + } __packed; + u8 data[CANFD_MAX_DLEN]; +} __packed; + +#define ES58X_FD_CAN_RX_LEN \ + (offsetof(struct es58x_fd_rx_can_msg, data[CAN_MAX_DLEN])) +#define ES58X_FD_CANFD_RX_LEN (sizeof(struct es58x_fd_rx_can_msg)) + +struct es58x_fd_echo_msg { + __le64 timestamp; + u8 packet_idx; +} __packed; + +struct es58x_fd_rx_event_msg { + __le64 timestamp; + __le32 can_id; + u8 flags; /* type enum es58x_flag */ + u8 error_type; /* 0: event, 1: error */ + u8 error_code; + u8 event_code; +} __packed; + +struct es58x_fd_tx_ack_msg { + __le32 rx_cmd_ret_le32; /* type enum es58x_cmd_ret_code_u32 */ + __le16 tx_free_entries; /* Number of remaining free entries in the device TX queue */ +} __packed; + +/** + * struct es58x_fd_urb_cmd - Commands received from or sent to the + * ES58X FD device. + * @SOF: Start of Frame. + * @cmd_type: Command Type (type: enum es58x_fd_cmd_type). The CRC + * calculation starts at this position. + * @cmd_id: Command ID (type: enum es58x_fd_cmd_id). + * @channel_idx: Channel index starting at 0. 
+ * @msg_len: Length of the message, excluding CRC (i.e. length of the + * union). + * @tx_conf_msg: Channel configuration. + * @tx_can_msg_buf: Concatenation of Tx messages. Type is "u8[]" + * instead of "struct es58x_fd_tx_msg[]" because the structure + * has a flexible size. + * @rx_can_msg_buf: Concatenation Rx messages. Type is "u8[]" instead + * of "struct es58x_fd_rx_msg[]" because the structure has a + * flexible size. + * @echo_msg: Array of echo messages (e.g. Tx messages being looped + * back). + * @rx_event_msg: Error or event message. + * @tx_ack_msg: Tx acknowledgment message. + * @timestamp: Timestamp reply. + * @rx_cmd_ret_le32: Rx 32 bits return code (type: enum + * es58x_cmd_ret_code_u32). + * @raw_msg: Message raw payload. + * @reserved_for_crc16_do_not_use: The structure ends with a + * CRC16. Because the structures in above union are of variable + * lengths, we can not predict the offset of the CRC in + * advance. Use functions es58x_get_crc() and es58x_set_crc() to + * manipulate it. + */ +struct es58x_fd_urb_cmd { + __le16 SOF; + u8 cmd_type; + u8 cmd_id; + u8 channel_idx; + __le16 msg_len; + + union { + struct es58x_fd_tx_conf_msg tx_conf_msg; + u8 tx_can_msg_buf[ES58X_FD_TX_BULK_MAX * ES58X_FD_CANFD_TX_LEN]; + u8 rx_can_msg_buf[ES58X_FD_RX_BULK_MAX * ES58X_FD_CANFD_RX_LEN]; + struct es58x_fd_echo_msg echo_msg[ES58X_FD_ECHO_BULK_MAX]; + struct es58x_fd_rx_event_msg rx_event_msg; + struct es58x_fd_tx_ack_msg tx_ack_msg; + __le64 timestamp; + __le32 rx_cmd_ret_le32; + DECLARE_FLEX_ARRAY(u8, raw_msg); + } __packed; + + __le16 reserved_for_crc16_do_not_use; +} __packed; + +#define ES58X_FD_URB_CMD_HEADER_LEN (offsetof(struct es58x_fd_urb_cmd, raw_msg)) +#define ES58X_FD_TX_URB_CMD_MAX_LEN \ + ES58X_SIZEOF_URB_CMD(struct es58x_fd_urb_cmd, tx_can_msg_buf) +#define ES58X_FD_RX_URB_CMD_MAX_LEN \ + ES58X_SIZEOF_URB_CMD(struct es58x_fd_urb_cmd, rx_can_msg_buf) + +#endif /* __ES58X_FD_H__ */ diff --git a/drivers/net/can/usb/f81604.c b/drivers/net/can/usb/f81604.c new file mode 100644 index 000000000000..efe61ece79ea --- /dev/null +++ b/drivers/net/can/usb/f81604.c @@ -0,0 +1,1204 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Fintek F81604 USB-to-2CAN controller driver. 
+ * + * Copyright (C) 2023 Ji-Ze Hong (Peter Hong) <peter_hong@fintek.com.tw> + */ +#include <linux/bitfield.h> +#include <linux/netdevice.h> +#include <linux/units.h> +#include <linux/usb.h> + +#include <linux/can.h> +#include <linux/can/dev.h> +#include <linux/can/error.h> +#include <linux/can/platform/sja1000.h> + +#include <linux/unaligned.h> + +/* vendor and product id */ +#define F81604_VENDOR_ID 0x2c42 +#define F81604_PRODUCT_ID 0x1709 +#define F81604_CAN_CLOCK (12 * MEGA) +#define F81604_MAX_DEV 2 +#define F81604_SET_DEVICE_RETRY 10 + +#define F81604_USB_TIMEOUT 2000 +#define F81604_SET_GET_REGISTER 0xA0 +#define F81604_PORT_OFFSET 0x1000 +#define F81604_MAX_RX_URBS 4 + +#define F81604_CMD_DATA 0x00 + +#define F81604_DLC_LEN_MASK GENMASK(3, 0) +#define F81604_DLC_EFF_BIT BIT(7) +#define F81604_DLC_RTR_BIT BIT(6) + +#define F81604_SFF_SHIFT 5 +#define F81604_EFF_SHIFT 3 + +#define F81604_BRP_MASK GENMASK(5, 0) +#define F81604_SJW_MASK GENMASK(7, 6) + +#define F81604_SEG1_MASK GENMASK(3, 0) +#define F81604_SEG2_MASK GENMASK(6, 4) + +#define F81604_CLEAR_ALC 0 +#define F81604_CLEAR_ECC 1 +#define F81604_CLEAR_OVERRUN 2 + +/* device setting */ +#define F81604_CTRL_MODE_REG 0x80 +#define F81604_TX_ONESHOT (0x03 << 3) +#define F81604_TX_NORMAL (0x01 << 3) +#define F81604_RX_AUTO_RELEASE_BUF BIT(1) +#define F81604_INT_WHEN_CHANGE BIT(0) + +#define F81604_TERMINATOR_REG 0x105 +#define F81604_CAN0_TERM BIT(2) +#define F81604_CAN1_TERM BIT(3) + +#define F81604_TERMINATION_DISABLED CAN_TERMINATION_DISABLED +#define F81604_TERMINATION_ENABLED 120 + +/* SJA1000 registers - manual section 6.4 (Pelican Mode) */ +#define F81604_SJA1000_MOD 0x00 +#define F81604_SJA1000_CMR 0x01 +#define F81604_SJA1000_IR 0x03 +#define F81604_SJA1000_IER 0x04 +#define F81604_SJA1000_ALC 0x0B +#define F81604_SJA1000_ECC 0x0C +#define F81604_SJA1000_RXERR 0x0E +#define F81604_SJA1000_TXERR 0x0F +#define F81604_SJA1000_ACCC0 0x10 +#define F81604_SJA1000_ACCM0 0x14 +#define F81604_MAX_FILTER_CNT 4 + +/* Common registers - manual section 6.5 */ +#define F81604_SJA1000_BTR0 0x06 +#define F81604_SJA1000_BTR1 0x07 +#define F81604_SJA1000_BTR1_SAMPLE_TRIPLE BIT(7) +#define F81604_SJA1000_OCR 0x08 +#define F81604_SJA1000_CDR 0x1F + +/* mode register */ +#define F81604_SJA1000_MOD_RM 0x01 +#define F81604_SJA1000_MOD_LOM 0x02 +#define F81604_SJA1000_MOD_STM 0x04 + +/* commands */ +#define F81604_SJA1000_CMD_CDO 0x08 + +/* interrupt sources */ +#define F81604_SJA1000_IRQ_BEI 0x80 +#define F81604_SJA1000_IRQ_ALI 0x40 +#define F81604_SJA1000_IRQ_EPI 0x20 +#define F81604_SJA1000_IRQ_DOI 0x08 +#define F81604_SJA1000_IRQ_EI 0x04 +#define F81604_SJA1000_IRQ_TI 0x02 +#define F81604_SJA1000_IRQ_RI 0x01 +#define F81604_SJA1000_IRQ_ALL 0xFF +#define F81604_SJA1000_IRQ_OFF 0x00 + +/* status register content */ +#define F81604_SJA1000_SR_BS 0x80 +#define F81604_SJA1000_SR_ES 0x40 +#define F81604_SJA1000_SR_TCS 0x08 + +/* ECC register */ +#define F81604_SJA1000_ECC_SEG 0x1F +#define F81604_SJA1000_ECC_DIR 0x20 +#define F81604_SJA1000_ECC_BIT 0x00 +#define F81604_SJA1000_ECC_FORM 0x40 +#define F81604_SJA1000_ECC_STUFF 0x80 +#define F81604_SJA1000_ECC_MASK 0xc0 + +/* ALC register */ +#define F81604_SJA1000_ALC_MASK 0x1f + +/* table of devices that work with this driver */ +static const struct usb_device_id f81604_table[] = { + { USB_DEVICE(F81604_VENDOR_ID, F81604_PRODUCT_ID) }, + {} /* Terminating entry */ +}; + +MODULE_DEVICE_TABLE(usb, f81604_table); + +static const struct ethtool_ops f81604_ethtool_ops = { + .get_ts_info = 
ethtool_op_get_ts_info, +}; + +static const u16 f81604_termination[] = { F81604_TERMINATION_DISABLED, + F81604_TERMINATION_ENABLED }; + +struct f81604_priv { + struct net_device *netdev[F81604_MAX_DEV]; +}; + +struct f81604_port_priv { + struct can_priv can; + struct net_device *netdev; + struct sk_buff *echo_skb; + + unsigned long clear_flags; + struct work_struct clear_reg_work; + + struct usb_device *dev; + struct usb_interface *intf; + + struct usb_anchor urbs_anchor; +}; + +/* Interrupt endpoint data format: + * Byte 0: Status register. + * Byte 1: Interrupt register. + * Byte 2: Interrupt enable register. + * Byte 3: Arbitration lost capture(ALC) register. + * Byte 4: Error code capture(ECC) register. + * Byte 5: Error warning limit register. + * Byte 6: RX error counter register. + * Byte 7: TX error counter register. + * Byte 8: Reserved. + */ +struct f81604_int_data { + u8 sr; + u8 isrc; + u8 ier; + u8 alc; + u8 ecc; + u8 ewlr; + u8 rxerr; + u8 txerr; + u8 val; +} __packed __aligned(4); + +struct f81604_sff { + __be16 id; + u8 data[CAN_MAX_DLEN]; +} __packed __aligned(2); + +struct f81604_eff { + __be32 id; + u8 data[CAN_MAX_DLEN]; +} __packed __aligned(2); + +struct f81604_can_frame { + u8 cmd; + + /* According for F81604 DLC define: + * bit 3~0: data length (0~8) + * bit6: is RTR flag. + * bit7: is EFF frame. + */ + u8 dlc; + + union { + struct f81604_sff sff; + struct f81604_eff eff; + }; +} __packed __aligned(2); + +static const u8 bulk_in_addr[F81604_MAX_DEV] = { 2, 4 }; +static const u8 bulk_out_addr[F81604_MAX_DEV] = { 1, 3 }; +static const u8 int_in_addr[F81604_MAX_DEV] = { 1, 3 }; + +static int f81604_write(struct usb_device *dev, u16 reg, u8 data) +{ + int ret; + + ret = usb_control_msg_send(dev, 0, F81604_SET_GET_REGISTER, + USB_TYPE_VENDOR | USB_DIR_OUT, 0, reg, + &data, sizeof(data), F81604_USB_TIMEOUT, + GFP_KERNEL); + if (ret) + dev_err(&dev->dev, "%s: reg: %x data: %x failed: %pe\n", + __func__, reg, data, ERR_PTR(ret)); + + return ret; +} + +static int f81604_read(struct usb_device *dev, u16 reg, u8 *data) +{ + int ret; + + ret = usb_control_msg_recv(dev, 0, F81604_SET_GET_REGISTER, + USB_TYPE_VENDOR | USB_DIR_IN, 0, reg, data, + sizeof(*data), F81604_USB_TIMEOUT, + GFP_KERNEL); + + if (ret < 0) + dev_err(&dev->dev, "%s: reg: %x failed: %pe\n", __func__, reg, + ERR_PTR(ret)); + + return ret; +} + +static int f81604_update_bits(struct usb_device *dev, u16 reg, u8 mask, + u8 data) +{ + int ret; + u8 tmp; + + ret = f81604_read(dev, reg, &tmp); + if (ret) + return ret; + + tmp &= ~mask; + tmp |= (mask & data); + + return f81604_write(dev, reg, tmp); +} + +static int f81604_sja1000_write(struct f81604_port_priv *priv, u16 reg, + u8 data) +{ + int port = priv->netdev->dev_port; + int real_reg; + + real_reg = reg + F81604_PORT_OFFSET * port + F81604_PORT_OFFSET; + return f81604_write(priv->dev, real_reg, data); +} + +static int f81604_sja1000_read(struct f81604_port_priv *priv, u16 reg, + u8 *data) +{ + int port = priv->netdev->dev_port; + int real_reg; + + real_reg = reg + F81604_PORT_OFFSET * port + F81604_PORT_OFFSET; + return f81604_read(priv->dev, real_reg, data); +} + +static int f81604_set_reset_mode(struct f81604_port_priv *priv) +{ + int ret, i; + u8 tmp; + + /* disable interrupts */ + ret = f81604_sja1000_write(priv, F81604_SJA1000_IER, + F81604_SJA1000_IRQ_OFF); + if (ret) + return ret; + + for (i = 0; i < F81604_SET_DEVICE_RETRY; i++) { + ret = f81604_sja1000_read(priv, F81604_SJA1000_MOD, &tmp); + if (ret) + return ret; + + /* check reset bit */ + if (tmp & 
F81604_SJA1000_MOD_RM) { + priv->can.state = CAN_STATE_STOPPED; + return 0; + } + + /* reset chip */ + ret = f81604_sja1000_write(priv, F81604_SJA1000_MOD, + F81604_SJA1000_MOD_RM); + if (ret) + return ret; + } + + return -EPERM; +} + +static int f81604_set_normal_mode(struct f81604_port_priv *priv) +{ + u8 tmp, ier = 0; + u8 mod_reg = 0; + int ret, i; + + for (i = 0; i < F81604_SET_DEVICE_RETRY; i++) { + ret = f81604_sja1000_read(priv, F81604_SJA1000_MOD, &tmp); + if (ret) + return ret; + + /* check reset bit */ + if ((tmp & F81604_SJA1000_MOD_RM) == 0) { + priv->can.state = CAN_STATE_ERROR_ACTIVE; + /* enable interrupts, RI handled by bulk-in */ + ier = F81604_SJA1000_IRQ_ALL & ~F81604_SJA1000_IRQ_RI; + if (!(priv->can.ctrlmode & + CAN_CTRLMODE_BERR_REPORTING)) + ier &= ~F81604_SJA1000_IRQ_BEI; + + return f81604_sja1000_write(priv, F81604_SJA1000_IER, + ier); + } + + /* set chip to normal mode */ + if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) + mod_reg |= F81604_SJA1000_MOD_LOM; + if (priv->can.ctrlmode & CAN_CTRLMODE_PRESUME_ACK) + mod_reg |= F81604_SJA1000_MOD_STM; + + ret = f81604_sja1000_write(priv, F81604_SJA1000_MOD, mod_reg); + if (ret) + return ret; + } + + return -EPERM; +} + +static int f81604_chipset_init(struct f81604_port_priv *priv) +{ + int i, ret; + + /* set clock divider and output control register */ + ret = f81604_sja1000_write(priv, F81604_SJA1000_CDR, + CDR_CBP | CDR_PELICAN); + if (ret) + return ret; + + /* set acceptance filter (accept all) */ + for (i = 0; i < F81604_MAX_FILTER_CNT; ++i) { + ret = f81604_sja1000_write(priv, F81604_SJA1000_ACCC0 + i, 0); + if (ret) + return ret; + } + + for (i = 0; i < F81604_MAX_FILTER_CNT; ++i) { + ret = f81604_sja1000_write(priv, F81604_SJA1000_ACCM0 + i, + 0xFF); + if (ret) + return ret; + } + + return f81604_sja1000_write(priv, F81604_SJA1000_OCR, + OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL | + OCR_MODE_NORMAL); +} + +static void f81604_process_rx_packet(struct net_device *netdev, + struct f81604_can_frame *frame) +{ + struct net_device_stats *stats = &netdev->stats; + struct can_frame *cf; + struct sk_buff *skb; + + if (frame->cmd != F81604_CMD_DATA) + return; + + skb = alloc_can_skb(netdev, &cf); + if (!skb) { + stats->rx_dropped++; + return; + } + + cf->len = can_cc_dlc2len(frame->dlc & F81604_DLC_LEN_MASK); + + if (frame->dlc & F81604_DLC_EFF_BIT) { + cf->can_id = get_unaligned_be32(&frame->eff.id) >> + F81604_EFF_SHIFT; + cf->can_id |= CAN_EFF_FLAG; + + if (!(frame->dlc & F81604_DLC_RTR_BIT)) + memcpy(cf->data, frame->eff.data, cf->len); + } else { + cf->can_id = get_unaligned_be16(&frame->sff.id) >> + F81604_SFF_SHIFT; + + if (!(frame->dlc & F81604_DLC_RTR_BIT)) + memcpy(cf->data, frame->sff.data, cf->len); + } + + if (frame->dlc & F81604_DLC_RTR_BIT) + cf->can_id |= CAN_RTR_FLAG; + else + stats->rx_bytes += cf->len; + + stats->rx_packets++; + netif_rx(skb); +} + +static void f81604_read_bulk_callback(struct urb *urb) +{ + struct f81604_can_frame *frame = urb->transfer_buffer; + struct net_device *netdev = urb->context; + int ret; + + if (!netif_device_present(netdev)) + return; + + if (urb->status) + netdev_info(netdev, "%s: URB aborted %pe\n", __func__, + ERR_PTR(urb->status)); + + switch (urb->status) { + case 0: /* success */ + break; + + case -ENOENT: + case -EPIPE: + case -EPROTO: + case -ESHUTDOWN: + return; + + default: + goto resubmit_urb; + } + + if (urb->actual_length != sizeof(*frame)) { + netdev_warn(netdev, "URB length %u not equal to %zu\n", + urb->actual_length, sizeof(*frame)); + goto resubmit_urb; + } + 
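	/* frame follows the layout decoded by f81604_process_rx_packet():
	 * bits 3..0 of dlc carry the data length, bit 6 flags RTR and bit 7
	 * flags an extended ID; the CAN ID itself is left aligned in a big
	 * endian field. For a (made up) standard frame with ID 0x123:
	 *
	 *	get_unaligned_be16(&frame->sff.id) >> F81604_SFF_SHIFT == 0x123
	 */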
+ f81604_process_rx_packet(netdev, frame); + +resubmit_urb: + ret = usb_submit_urb(urb, GFP_ATOMIC); + if (ret == -ENODEV) + netif_device_detach(netdev); + else if (ret) + netdev_err(netdev, + "%s: failed to resubmit read bulk urb: %pe\n", + __func__, ERR_PTR(ret)); +} + +static void f81604_handle_tx(struct f81604_port_priv *priv, + struct f81604_int_data *data) +{ + struct net_device *netdev = priv->netdev; + struct net_device_stats *stats = &netdev->stats; + + /* transmission buffer released */ + if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT && + !(data->sr & F81604_SJA1000_SR_TCS)) { + stats->tx_errors++; + can_free_echo_skb(netdev, 0, NULL); + } else { + /* transmission complete */ + stats->tx_bytes += can_get_echo_skb(netdev, 0, NULL); + stats->tx_packets++; + } + + netif_wake_queue(netdev); +} + +static void f81604_handle_can_bus_errors(struct f81604_port_priv *priv, + struct f81604_int_data *data) +{ + enum can_state can_state = priv->can.state; + struct net_device *netdev = priv->netdev; + struct net_device_stats *stats = &netdev->stats; + struct can_frame *cf; + struct sk_buff *skb; + + /* Note: ALC/ECC will not auto clear by read here, must be cleared by + * read register (via clear_reg_work). + */ + + skb = alloc_can_err_skb(netdev, &cf); + if (skb) { + cf->can_id |= CAN_ERR_CNT; + cf->data[6] = data->txerr; + cf->data[7] = data->rxerr; + } + + if (data->isrc & F81604_SJA1000_IRQ_DOI) { + /* data overrun interrupt */ + netdev_dbg(netdev, "data overrun interrupt\n"); + + if (skb) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + } + + stats->rx_over_errors++; + stats->rx_errors++; + + set_bit(F81604_CLEAR_OVERRUN, &priv->clear_flags); + } + + if (data->isrc & F81604_SJA1000_IRQ_EI) { + /* error warning interrupt */ + netdev_dbg(netdev, "error warning interrupt\n"); + + if (data->sr & F81604_SJA1000_SR_BS) + can_state = CAN_STATE_BUS_OFF; + else if (data->sr & F81604_SJA1000_SR_ES) + can_state = CAN_STATE_ERROR_WARNING; + else + can_state = CAN_STATE_ERROR_ACTIVE; + } + + if (data->isrc & F81604_SJA1000_IRQ_BEI) { + /* bus error interrupt */ + netdev_dbg(netdev, "bus error interrupt\n"); + + priv->can.can_stats.bus_error++; + + if (skb) { + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + + /* set error type */ + switch (data->ecc & F81604_SJA1000_ECC_MASK) { + case F81604_SJA1000_ECC_BIT: + cf->data[2] |= CAN_ERR_PROT_BIT; + break; + case F81604_SJA1000_ECC_FORM: + cf->data[2] |= CAN_ERR_PROT_FORM; + break; + case F81604_SJA1000_ECC_STUFF: + cf->data[2] |= CAN_ERR_PROT_STUFF; + break; + default: + break; + } + + /* set error location */ + cf->data[3] = data->ecc & F81604_SJA1000_ECC_SEG; + } + + /* Error occurred during transmission? 
*/ + if ((data->ecc & F81604_SJA1000_ECC_DIR) == 0) { + stats->tx_errors++; + if (skb) + cf->data[2] |= CAN_ERR_PROT_TX; + } else { + stats->rx_errors++; + } + + set_bit(F81604_CLEAR_ECC, &priv->clear_flags); + } + + if (data->isrc & F81604_SJA1000_IRQ_EPI) { + if (can_state == CAN_STATE_ERROR_PASSIVE) + can_state = CAN_STATE_ERROR_WARNING; + else + can_state = CAN_STATE_ERROR_PASSIVE; + + /* error passive interrupt */ + netdev_dbg(netdev, "error passive interrupt: %d\n", can_state); + } + + if (data->isrc & F81604_SJA1000_IRQ_ALI) { + /* arbitration lost interrupt */ + netdev_dbg(netdev, "arbitration lost interrupt\n"); + + priv->can.can_stats.arbitration_lost++; + + if (skb) { + cf->can_id |= CAN_ERR_LOSTARB; + cf->data[0] = data->alc & F81604_SJA1000_ALC_MASK; + } + + set_bit(F81604_CLEAR_ALC, &priv->clear_flags); + } + + if (can_state != priv->can.state) { + enum can_state tx_state, rx_state; + + tx_state = data->txerr >= data->rxerr ? can_state : 0; + rx_state = data->txerr <= data->rxerr ? can_state : 0; + + can_change_state(netdev, cf, tx_state, rx_state); + + if (can_state == CAN_STATE_BUS_OFF) + can_bus_off(netdev); + } + + if (priv->clear_flags) + schedule_work(&priv->clear_reg_work); + + if (skb) + netif_rx(skb); +} + +static void f81604_read_int_callback(struct urb *urb) +{ + struct f81604_int_data *data = urb->transfer_buffer; + struct net_device *netdev = urb->context; + struct f81604_port_priv *priv; + int ret; + + priv = netdev_priv(netdev); + + if (!netif_device_present(netdev)) + return; + + if (urb->status) + netdev_info(netdev, "%s: Int URB aborted: %pe\n", __func__, + ERR_PTR(urb->status)); + + switch (urb->status) { + case 0: /* success */ + break; + + case -ENOENT: + case -EPIPE: + case -EPROTO: + case -ESHUTDOWN: + return; + + default: + goto resubmit_urb; + } + + /* handle Errors */ + if (data->isrc & (F81604_SJA1000_IRQ_DOI | F81604_SJA1000_IRQ_EI | + F81604_SJA1000_IRQ_BEI | F81604_SJA1000_IRQ_EPI | + F81604_SJA1000_IRQ_ALI)) + f81604_handle_can_bus_errors(priv, data); + + /* handle TX */ + if (priv->can.state != CAN_STATE_BUS_OFF && + (data->isrc & F81604_SJA1000_IRQ_TI)) + f81604_handle_tx(priv, data); + +resubmit_urb: + ret = usb_submit_urb(urb, GFP_ATOMIC); + if (ret == -ENODEV) + netif_device_detach(netdev); + else if (ret) + netdev_err(netdev, "%s: failed to resubmit int urb: %pe\n", + __func__, ERR_PTR(ret)); +} + +static void f81604_unregister_urbs(struct f81604_port_priv *priv) +{ + usb_kill_anchored_urbs(&priv->urbs_anchor); +} + +static int f81604_register_urbs(struct f81604_port_priv *priv) +{ + struct net_device *netdev = priv->netdev; + struct f81604_int_data *int_data; + int id = netdev->dev_port; + struct urb *int_urb; + int rx_urb_cnt; + int ret; + + for (rx_urb_cnt = 0; rx_urb_cnt < F81604_MAX_RX_URBS; ++rx_urb_cnt) { + struct f81604_can_frame *frame; + struct urb *rx_urb; + + rx_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!rx_urb) { + ret = -ENOMEM; + break; + } + + frame = kmalloc(sizeof(*frame), GFP_KERNEL); + if (!frame) { + usb_free_urb(rx_urb); + ret = -ENOMEM; + break; + } + + usb_fill_bulk_urb(rx_urb, priv->dev, + usb_rcvbulkpipe(priv->dev, bulk_in_addr[id]), + frame, sizeof(*frame), + f81604_read_bulk_callback, netdev); + + rx_urb->transfer_flags |= URB_FREE_BUFFER; + usb_anchor_urb(rx_urb, &priv->urbs_anchor); + + ret = usb_submit_urb(rx_urb, GFP_KERNEL); + if (ret) { + usb_unanchor_urb(rx_urb); + usb_free_urb(rx_urb); + break; + } + + /* Drop reference, USB core will take care of freeing it */ + usb_free_urb(rx_urb); + } + + if 
(rx_urb_cnt == 0) { + netdev_warn(netdev, "%s: submit rx urb failed: %pe\n", + __func__, ERR_PTR(ret)); + + goto error; + } + + int_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!int_urb) { + ret = -ENOMEM; + goto error; + } + + int_data = kmalloc(sizeof(*int_data), GFP_KERNEL); + if (!int_data) { + usb_free_urb(int_urb); + ret = -ENOMEM; + goto error; + } + + usb_fill_int_urb(int_urb, priv->dev, + usb_rcvintpipe(priv->dev, int_in_addr[id]), int_data, + sizeof(*int_data), f81604_read_int_callback, netdev, + 1); + + int_urb->transfer_flags |= URB_FREE_BUFFER; + usb_anchor_urb(int_urb, &priv->urbs_anchor); + + ret = usb_submit_urb(int_urb, GFP_KERNEL); + if (ret) { + usb_unanchor_urb(int_urb); + usb_free_urb(int_urb); + + netdev_warn(netdev, "%s: submit int urb failed: %pe\n", + __func__, ERR_PTR(ret)); + goto error; + } + + /* Drop reference, USB core will take care of freeing it */ + usb_free_urb(int_urb); + + return 0; + +error: + f81604_unregister_urbs(priv); + return ret; +} + +static int f81604_start(struct net_device *netdev) +{ + struct f81604_port_priv *priv = netdev_priv(netdev); + int ret; + u8 mode; + u8 tmp; + + mode = F81604_RX_AUTO_RELEASE_BUF | F81604_INT_WHEN_CHANGE; + + /* Set TR/AT mode */ + if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) + mode |= F81604_TX_ONESHOT; + else + mode |= F81604_TX_NORMAL; + + ret = f81604_sja1000_write(priv, F81604_CTRL_MODE_REG, mode); + if (ret) + return ret; + + /* set reset mode */ + ret = f81604_set_reset_mode(priv); + if (ret) + return ret; + + ret = f81604_chipset_init(priv); + if (ret) + return ret; + + /* Clear error counters and error code capture */ + ret = f81604_sja1000_write(priv, F81604_SJA1000_TXERR, 0); + if (ret) + return ret; + + ret = f81604_sja1000_write(priv, F81604_SJA1000_RXERR, 0); + if (ret) + return ret; + + /* Read clear for ECC/ALC/IR register */ + ret = f81604_sja1000_read(priv, F81604_SJA1000_ECC, &tmp); + if (ret) + return ret; + + ret = f81604_sja1000_read(priv, F81604_SJA1000_ALC, &tmp); + if (ret) + return ret; + + ret = f81604_sja1000_read(priv, F81604_SJA1000_IR, &tmp); + if (ret) + return ret; + + ret = f81604_register_urbs(priv); + if (ret) + return ret; + + ret = f81604_set_normal_mode(priv); + if (ret) { + f81604_unregister_urbs(priv); + return ret; + } + + return 0; +} + +static int f81604_set_bittiming(struct net_device *dev) +{ + struct f81604_port_priv *priv = netdev_priv(dev); + struct can_bittiming *bt = &priv->can.bittiming; + u8 btr0, btr1; + int ret; + + btr0 = FIELD_PREP(F81604_BRP_MASK, bt->brp - 1) | + FIELD_PREP(F81604_SJW_MASK, bt->sjw - 1); + + btr1 = FIELD_PREP(F81604_SEG1_MASK, + bt->prop_seg + bt->phase_seg1 - 1) | + FIELD_PREP(F81604_SEG2_MASK, bt->phase_seg2 - 1); + + if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) + btr1 |= F81604_SJA1000_BTR1_SAMPLE_TRIPLE; + + ret = f81604_sja1000_write(priv, F81604_SJA1000_BTR0, btr0); + if (ret) { + netdev_warn(dev, "%s: Set BTR0 failed: %pe\n", __func__, + ERR_PTR(ret)); + return ret; + } + + ret = f81604_sja1000_write(priv, F81604_SJA1000_BTR1, btr1); + if (ret) { + netdev_warn(dev, "%s: Set BTR1 failed: %pe\n", __func__, + ERR_PTR(ret)); + return ret; + } + + return 0; +} + +static int f81604_set_mode(struct net_device *netdev, enum can_mode mode) +{ + int ret; + + switch (mode) { + case CAN_MODE_START: + ret = f81604_start(netdev); + if (!ret && netif_queue_stopped(netdev)) + netif_wake_queue(netdev); + break; + + default: + ret = -EOPNOTSUPP; + } + + return ret; +} + +static void f81604_write_bulk_callback(struct urb *urb) +{ + struct 
net_device *netdev = urb->context; + + if (!netif_device_present(netdev)) + return; + + if (urb->status) + netdev_info(netdev, "%s: Tx URB error: %pe\n", __func__, + ERR_PTR(urb->status)); +} + +static void f81604_clear_reg_work(struct work_struct *work) +{ + struct f81604_port_priv *priv; + u8 tmp; + + priv = container_of(work, struct f81604_port_priv, clear_reg_work); + + /* dummy read for clear Arbitration lost capture(ALC) register. */ + if (test_and_clear_bit(F81604_CLEAR_ALC, &priv->clear_flags)) + f81604_sja1000_read(priv, F81604_SJA1000_ALC, &tmp); + + /* dummy read for clear Error code capture(ECC) register. */ + if (test_and_clear_bit(F81604_CLEAR_ECC, &priv->clear_flags)) + f81604_sja1000_read(priv, F81604_SJA1000_ECC, &tmp); + + /* dummy write for clear data overrun flag. */ + if (test_and_clear_bit(F81604_CLEAR_OVERRUN, &priv->clear_flags)) + f81604_sja1000_write(priv, F81604_SJA1000_CMR, + F81604_SJA1000_CMD_CDO); +} + +static netdev_tx_t f81604_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct can_frame *cf = (struct can_frame *)skb->data; + struct f81604_port_priv *priv = netdev_priv(netdev); + struct net_device_stats *stats = &netdev->stats; + struct f81604_can_frame *frame; + struct urb *write_urb; + int ret; + + if (can_dev_dropped_skb(netdev, skb)) + return NETDEV_TX_OK; + + netif_stop_queue(netdev); + + write_urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!write_urb) + goto nomem_urb; + + frame = kzalloc(sizeof(*frame), GFP_ATOMIC); + if (!frame) + goto nomem_buf; + + usb_fill_bulk_urb(write_urb, priv->dev, + usb_sndbulkpipe(priv->dev, + bulk_out_addr[netdev->dev_port]), + frame, sizeof(*frame), f81604_write_bulk_callback, + priv->netdev); + + write_urb->transfer_flags |= URB_FREE_BUFFER; + + frame->cmd = F81604_CMD_DATA; + frame->dlc = cf->len; + + if (cf->can_id & CAN_RTR_FLAG) + frame->dlc |= F81604_DLC_RTR_BIT; + + if (cf->can_id & CAN_EFF_FLAG) { + u32 id = (cf->can_id & CAN_EFF_MASK) << F81604_EFF_SHIFT; + + put_unaligned_be32(id, &frame->eff.id); + + frame->dlc |= F81604_DLC_EFF_BIT; + + if (!(cf->can_id & CAN_RTR_FLAG)) + memcpy(&frame->eff.data, cf->data, cf->len); + } else { + u32 id = (cf->can_id & CAN_SFF_MASK) << F81604_SFF_SHIFT; + + put_unaligned_be16(id, &frame->sff.id); + + if (!(cf->can_id & CAN_RTR_FLAG)) + memcpy(&frame->sff.data, cf->data, cf->len); + } + + can_put_echo_skb(skb, netdev, 0, 0); + + ret = usb_submit_urb(write_urb, GFP_ATOMIC); + if (ret) { + netdev_err(netdev, "%s: failed to resubmit tx bulk urb: %pe\n", + __func__, ERR_PTR(ret)); + + can_free_echo_skb(netdev, 0, NULL); + stats->tx_dropped++; + stats->tx_errors++; + + if (ret == -ENODEV) + netif_device_detach(netdev); + else + netif_wake_queue(netdev); + } + + /* let usb core take care of this urb */ + usb_free_urb(write_urb); + + return NETDEV_TX_OK; + +nomem_buf: + usb_free_urb(write_urb); + +nomem_urb: + dev_kfree_skb(skb); + stats->tx_dropped++; + stats->tx_errors++; + netif_wake_queue(netdev); + + return NETDEV_TX_OK; +} + +static int f81604_get_berr_counter(const struct net_device *netdev, + struct can_berr_counter *bec) +{ + struct f81604_port_priv *priv = netdev_priv(netdev); + u8 txerr, rxerr; + int ret; + + ret = f81604_sja1000_read(priv, F81604_SJA1000_TXERR, &txerr); + if (ret) + return ret; + + ret = f81604_sja1000_read(priv, F81604_SJA1000_RXERR, &rxerr); + if (ret) + return ret; + + bec->txerr = txerr; + bec->rxerr = rxerr; + + return 0; +} + +/* Open USB device */ +static int f81604_open(struct net_device *netdev) +{ + int ret; + + ret = 
open_candev(netdev); + if (ret) + return ret; + + ret = f81604_start(netdev); + if (ret) { + if (ret == -ENODEV) + netif_device_detach(netdev); + + close_candev(netdev); + return ret; + } + + netif_start_queue(netdev); + return 0; +} + +/* Close USB device */ +static int f81604_close(struct net_device *netdev) +{ + struct f81604_port_priv *priv = netdev_priv(netdev); + + f81604_set_reset_mode(priv); + + netif_stop_queue(netdev); + cancel_work_sync(&priv->clear_reg_work); + close_candev(netdev); + + f81604_unregister_urbs(priv); + + return 0; +} + +static const struct net_device_ops f81604_netdev_ops = { + .ndo_open = f81604_open, + .ndo_stop = f81604_close, + .ndo_start_xmit = f81604_start_xmit, +}; + +static const struct can_bittiming_const f81604_bittiming_const = { + .name = KBUILD_MODNAME, + .tseg1_min = 1, + .tseg1_max = 16, + .tseg2_min = 1, + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 64, + .brp_inc = 1, +}; + +/* Called by the usb core when driver is unloaded or device is removed */ +static void f81604_disconnect(struct usb_interface *intf) +{ + struct f81604_priv *priv = usb_get_intfdata(intf); + int i; + + for (i = 0; i < ARRAY_SIZE(priv->netdev); ++i) { + if (!priv->netdev[i]) + continue; + + unregister_netdev(priv->netdev[i]); + free_candev(priv->netdev[i]); + } +} + +static int __f81604_set_termination(struct usb_device *dev, int idx, u16 term) +{ + u8 mask, data = 0; + + if (idx == 0) + mask = F81604_CAN0_TERM; + else + mask = F81604_CAN1_TERM; + + if (term) + data = mask; + + return f81604_update_bits(dev, F81604_TERMINATOR_REG, mask, data); +} + +static int f81604_set_termination(struct net_device *netdev, u16 term) +{ + struct f81604_port_priv *port_priv = netdev_priv(netdev); + + ASSERT_RTNL(); + + return __f81604_set_termination(port_priv->dev, netdev->dev_port, + term); +} + +static int f81604_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct usb_device *dev = interface_to_usbdev(intf); + struct net_device *netdev; + struct f81604_priv *priv; + int i, ret; + + priv = devm_kzalloc(&intf->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + usb_set_intfdata(intf, priv); + + for (i = 0; i < ARRAY_SIZE(priv->netdev); ++i) { + ret = __f81604_set_termination(dev, i, 0); + if (ret) { + dev_err(&intf->dev, + "Setting termination of CH#%d failed: %pe\n", + i, ERR_PTR(ret)); + return ret; + } + } + + for (i = 0; i < ARRAY_SIZE(priv->netdev); ++i) { + struct f81604_port_priv *port_priv; + + netdev = alloc_candev(sizeof(*port_priv), 1); + if (!netdev) { + dev_err(&intf->dev, "Couldn't alloc candev: %d\n", i); + ret = -ENOMEM; + + goto failure_cleanup; + } + + port_priv = netdev_priv(netdev); + + INIT_WORK(&port_priv->clear_reg_work, f81604_clear_reg_work); + init_usb_anchor(&port_priv->urbs_anchor); + + port_priv->intf = intf; + port_priv->dev = dev; + port_priv->netdev = netdev; + port_priv->can.clock.freq = F81604_CAN_CLOCK; + + port_priv->can.termination_const = f81604_termination; + port_priv->can.termination_const_cnt = + ARRAY_SIZE(f81604_termination); + port_priv->can.bittiming_const = &f81604_bittiming_const; + port_priv->can.do_set_bittiming = f81604_set_bittiming; + port_priv->can.do_set_mode = f81604_set_mode; + port_priv->can.do_set_termination = f81604_set_termination; + port_priv->can.do_get_berr_counter = f81604_get_berr_counter; + port_priv->can.ctrlmode_supported = + CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_3_SAMPLES | + CAN_CTRLMODE_ONE_SHOT | CAN_CTRLMODE_BERR_REPORTING | + CAN_CTRLMODE_PRESUME_ACK; + + 
netdev->ethtool_ops = &f81604_ethtool_ops; + netdev->netdev_ops = &f81604_netdev_ops; + netdev->flags |= IFF_ECHO; + netdev->dev_port = i; + + SET_NETDEV_DEV(netdev, &intf->dev); + + ret = register_candev(netdev); + if (ret) { + netdev_err(netdev, "register CAN device failed: %pe\n", + ERR_PTR(ret)); + free_candev(netdev); + + goto failure_cleanup; + } + + priv->netdev[i] = netdev; + } + + return 0; + +failure_cleanup: + f81604_disconnect(intf); + return ret; +} + +static struct usb_driver f81604_driver = { + .name = KBUILD_MODNAME, + .probe = f81604_probe, + .disconnect = f81604_disconnect, + .id_table = f81604_table, +}; + +module_usb_driver(f81604_driver); + +MODULE_AUTHOR("Ji-Ze Hong (Peter Hong) <peter_hong@fintek.com.tw>"); +MODULE_DESCRIPTION("Fintek F81604 USB to 2xCANBUS"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 17c21ad3b95e..e29e85b67fd4 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -1,41 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0-only /* CAN driver for Geschwister Schneider USB/CAN devices * and bytewerk.org candleLight USB CAN interfaces. * * Copyright (C) 2013-2016 Geschwister Schneider Technologie-, * Entwicklungs- und Vertriebs UG (Haftungsbeschränkt). * Copyright (C) 2016 Hubert Denkmair + * Copyright (c) 2023 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de> * * Many thanks to all socketcan devs! - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published - * by the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. */ +#include <linux/bitfield.h> +#include <linux/clocksource.h> +#include <linux/ethtool.h> #include <linux/init.h> -#include <linux/signal.h> #include <linux/module.h> #include <linux/netdevice.h> +#include <linux/signal.h> +#include <linux/timecounter.h> +#include <linux/units.h> #include <linux/usb.h> +#include <linux/workqueue.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> +#include <linux/can/rx-offload.h> /* Device specific constants */ -#define USB_GSUSB_1_VENDOR_ID 0x1d50 -#define USB_GSUSB_1_PRODUCT_ID 0x606f +#define USB_GS_USB_1_VENDOR_ID 0x1d50 +#define USB_GS_USB_1_PRODUCT_ID 0x606f -#define USB_CANDLELIGHT_VENDOR_ID 0x1209 +#define USB_CANDLELIGHT_VENDOR_ID 0x1209 #define USB_CANDLELIGHT_PRODUCT_ID 0x2323 -#define GSUSB_ENDPOINT_IN 1 -#define GSUSB_ENDPOINT_OUT 2 +#define USB_CES_CANEXT_FD_VENDOR_ID 0x1cd2 +#define USB_CES_CANEXT_FD_PRODUCT_ID 0x606f + +#define USB_ABE_CANDEBUGGER_FD_VENDOR_ID 0x16d0 +#define USB_ABE_CANDEBUGGER_FD_PRODUCT_ID 0x10b8 + +#define USB_XYLANTA_SAINT3_VENDOR_ID 0x16d0 +#define USB_XYLANTA_SAINT3_PRODUCT_ID 0x0f30 + +#define USB_CANNECTIVITY_VENDOR_ID 0x1209 +#define USB_CANNECTIVITY_PRODUCT_ID 0xca01 + +/* Timestamp 32 bit timer runs at 1 MHz (1 µs tick). 
Worker accounts + * for timer overflow (will be after ~71 minutes) + */ +#define GS_USB_TIMESTAMP_TIMER_HZ (1 * HZ_PER_MHZ) +#define GS_USB_TIMESTAMP_WORK_DELAY_SEC 1800 +static_assert(GS_USB_TIMESTAMP_WORK_DELAY_SEC < + CYCLECOUNTER_MASK(32) / GS_USB_TIMESTAMP_TIMER_HZ / 2); /* Device specific constants */ enum gs_usb_breq { @@ -47,6 +64,14 @@ enum gs_usb_breq { GS_USB_BREQ_DEVICE_CONFIG, GS_USB_BREQ_TIMESTAMP, GS_USB_BREQ_IDENTIFY, + GS_USB_BREQ_GET_USER_ID, + GS_USB_BREQ_QUIRK_CANTACT_PRO_DATA_BITTIMING = GS_USB_BREQ_GET_USER_ID, + GS_USB_BREQ_SET_USER_ID, + GS_USB_BREQ_DATA_BITTIMING, + GS_USB_BREQ_BT_CONST_EXT, + GS_USB_BREQ_SET_TERMINATION, + GS_USB_BREQ_GET_TERMINATION, + GS_USB_BREQ_GET_STATE, }; enum gs_can_mode { @@ -70,85 +95,196 @@ enum gs_can_identify_mode { GS_CAN_IDENTIFY_ON }; +enum gs_can_termination_state { + GS_CAN_TERMINATION_STATE_OFF = 0, + GS_CAN_TERMINATION_STATE_ON +}; + +#define GS_USB_TERMINATION_DISABLED CAN_TERMINATION_DISABLED +#define GS_USB_TERMINATION_ENABLED 120 + /* data types passed between host and device */ + +/* The firmware on the original USB2CAN by Geschwister Schneider + * Technologie Entwicklungs- und Vertriebs UG exchanges all data + * between the host and the device in host byte order. This is done + * with the struct gs_host_config::byte_order member, which is sent + * first to indicate the desired byte order. + * + * The widely used open source firmware candleLight doesn't support + * this feature and exchanges the data in little endian byte order. + */ struct gs_host_config { - u32 byte_order; + __le32 byte_order; } __packed; -/* All data exchanged between host and device is exchanged in host byte order, - * thanks to the struct gs_host_config byte_order member, which is sent first - * to indicate the desired byte order. 
- */ struct gs_device_config { u8 reserved1; u8 reserved2; u8 reserved3; u8 icount; - u32 sw_version; - u32 hw_version; + __le32 sw_version; + __le32 hw_version; } __packed; -#define GS_CAN_MODE_NORMAL 0 -#define GS_CAN_MODE_LISTEN_ONLY BIT(0) -#define GS_CAN_MODE_LOOP_BACK BIT(1) -#define GS_CAN_MODE_TRIPLE_SAMPLE BIT(2) -#define GS_CAN_MODE_ONE_SHOT BIT(3) +#define GS_CAN_MODE_NORMAL 0 +#define GS_CAN_MODE_LISTEN_ONLY BIT(0) +#define GS_CAN_MODE_LOOP_BACK BIT(1) +#define GS_CAN_MODE_TRIPLE_SAMPLE BIT(2) +#define GS_CAN_MODE_ONE_SHOT BIT(3) +#define GS_CAN_MODE_HW_TIMESTAMP BIT(4) +/* GS_CAN_FEATURE_IDENTIFY BIT(5) */ +/* GS_CAN_FEATURE_USER_ID BIT(6) */ +#define GS_CAN_MODE_PAD_PKTS_TO_MAX_PKT_SIZE BIT(7) +#define GS_CAN_MODE_FD BIT(8) +/* GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX BIT(9) */ +/* GS_CAN_FEATURE_BT_CONST_EXT BIT(10) */ +/* GS_CAN_FEATURE_TERMINATION BIT(11) */ +#define GS_CAN_MODE_BERR_REPORTING BIT(12) +/* GS_CAN_FEATURE_GET_STATE BIT(13) */ struct gs_device_mode { - u32 mode; - u32 flags; + __le32 mode; + __le32 flags; } __packed; struct gs_device_state { - u32 state; - u32 rxerr; - u32 txerr; + __le32 state; + __le32 rxerr; + __le32 txerr; } __packed; struct gs_device_bittiming { - u32 prop_seg; - u32 phase_seg1; - u32 phase_seg2; - u32 sjw; - u32 brp; + __le32 prop_seg; + __le32 phase_seg1; + __le32 phase_seg2; + __le32 sjw; + __le32 brp; } __packed; struct gs_identify_mode { - u32 mode; + __le32 mode; +} __packed; + +struct gs_device_termination_state { + __le32 state; } __packed; -#define GS_CAN_FEATURE_LISTEN_ONLY BIT(0) -#define GS_CAN_FEATURE_LOOP_BACK BIT(1) -#define GS_CAN_FEATURE_TRIPLE_SAMPLE BIT(2) -#define GS_CAN_FEATURE_ONE_SHOT BIT(3) -#define GS_CAN_FEATURE_HW_TIMESTAMP BIT(4) -#define GS_CAN_FEATURE_IDENTIFY BIT(5) +#define GS_CAN_FEATURE_LISTEN_ONLY BIT(0) +#define GS_CAN_FEATURE_LOOP_BACK BIT(1) +#define GS_CAN_FEATURE_TRIPLE_SAMPLE BIT(2) +#define GS_CAN_FEATURE_ONE_SHOT BIT(3) +#define GS_CAN_FEATURE_HW_TIMESTAMP BIT(4) +#define GS_CAN_FEATURE_IDENTIFY BIT(5) +#define GS_CAN_FEATURE_USER_ID BIT(6) +#define GS_CAN_FEATURE_PAD_PKTS_TO_MAX_PKT_SIZE BIT(7) +#define GS_CAN_FEATURE_FD BIT(8) +#define GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX BIT(9) +#define GS_CAN_FEATURE_BT_CONST_EXT BIT(10) +#define GS_CAN_FEATURE_TERMINATION BIT(11) +#define GS_CAN_FEATURE_BERR_REPORTING BIT(12) +#define GS_CAN_FEATURE_GET_STATE BIT(13) +#define GS_CAN_FEATURE_MASK GENMASK(13, 0) + +/* internal quirks - keep in GS_CAN_FEATURE space for now */ + +/* CANtact Pro original firmware: + * BREQ DATA_BITTIMING overlaps with GET_USER_ID + */ +#define GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO BIT(31) struct gs_device_bt_const { - u32 feature; - u32 fclk_can; - u32 tseg1_min; - u32 tseg1_max; - u32 tseg2_min; - u32 tseg2_max; - u32 sjw_max; - u32 brp_min; - u32 brp_max; - u32 brp_inc; + __le32 feature; + __le32 fclk_can; + __le32 tseg1_min; + __le32 tseg1_max; + __le32 tseg2_min; + __le32 tseg2_max; + __le32 sjw_max; + __le32 brp_min; + __le32 brp_max; + __le32 brp_inc; } __packed; -#define GS_CAN_FLAG_OVERFLOW 1 +struct gs_device_bt_const_extended { + __le32 feature; + __le32 fclk_can; + __le32 tseg1_min; + __le32 tseg1_max; + __le32 tseg2_min; + __le32 tseg2_max; + __le32 sjw_max; + __le32 brp_min; + __le32 brp_max; + __le32 brp_inc; + + __le32 dtseg1_min; + __le32 dtseg1_max; + __le32 dtseg2_min; + __le32 dtseg2_max; + __le32 dsjw_max; + __le32 dbrp_min; + __le32 dbrp_max; + __le32 dbrp_inc; +} __packed; -struct gs_host_frame { - u32 echo_id; - u32 can_id; +#define GS_CAN_FLAG_OVERFLOW 
BIT(0) +#define GS_CAN_FLAG_FD BIT(1) +#define GS_CAN_FLAG_BRS BIT(2) +#define GS_CAN_FLAG_ESI BIT(3) + +struct classic_can { + u8 data[8]; +} __packed; - u8 can_dlc; - u8 channel; - u8 flags; - u8 reserved; +struct classic_can_ts { + u8 data[8]; + __le32 timestamp_us; +} __packed; +struct classic_can_quirk { u8 data[8]; + u8 quirk; +} __packed; + +struct canfd { + u8 data[64]; +} __packed; + +struct canfd_ts { + u8 data[64]; + __le32 timestamp_us; +} __packed; + +struct canfd_quirk { + u8 data[64]; + u8 quirk; +} __packed; + +/* struct gs_host_frame::echo_id == GS_HOST_FRAME_ECHO_ID_RX indicates + * a regular RX'ed CAN frame + */ +#define GS_HOST_FRAME_ECHO_ID_RX 0xffffffff + +struct gs_host_frame { + struct_group(header, + u32 echo_id; + __le32 can_id; + + u8 can_dlc; + u8 channel; + u8 flags; + u8 reserved; + ); + + union { + DECLARE_FLEX_ARRAY(struct classic_can, classic_can); + DECLARE_FLEX_ARRAY(struct classic_can_ts, classic_can_ts); + DECLARE_FLEX_ARRAY(struct classic_can_quirk, classic_can_quirk); + DECLARE_FLEX_ARRAY(struct canfd, canfd); + DECLARE_FLEX_ARRAY(struct canfd_ts, canfd_ts); + DECLARE_FLEX_ARRAY(struct canfd_quirk, canfd_quirk); + }; } __packed; /* The GS USB devices make use of the same flags and masks as in * linux/can.h and linux/can/error.h, and no additional mapping is necessary. @@ -158,10 +294,7 @@ struct gs_host_frame { #define GS_MAX_TX_URBS 10 /* Only launch a max of GS_MAX_RX_URBS usb requests at a time. */ #define GS_MAX_RX_URBS 30 -/* Maximum number of interfaces the driver supports per device. - * Current hardware only supports 2 interfaces. The future may vary. - */ -#define GS_MAX_INTF 2 +#define GS_NAPI_WEIGHT 32 struct gs_tx_context { struct gs_can *dev; @@ -171,15 +304,18 @@ struct gs_tx_context { struct gs_can { struct can_priv can; /* must be the first member */ + struct can_rx_offload offload; struct gs_usb *parent; struct net_device *netdev; struct usb_device *udev; - struct usb_interface *iface; - struct can_bittiming_const bt_const; + struct can_bittiming_const bt_const, data_bt_const; unsigned int channel; /* channel number */ + u32 feature; + unsigned int hf_size_tx; + /* This lock prevents a race condition between xmit and receive. */ spinlock_t tx_ctx_lock; struct gs_tx_context tx_context[GS_MAX_TX_URBS]; @@ -190,10 +326,22 @@ struct gs_can { /* usb interface struct */ struct gs_usb { - struct gs_can *canch[GS_MAX_INTF]; struct usb_anchor rx_submitted; - atomic_t active_channels; struct usb_device *udev; + + /* time counter for hardware timestamps */ + struct cyclecounter cc; + struct timecounter tc; + spinlock_t tc_lock; /* spinlock to guard access tc->cycle_last */ + struct delayed_work timestamp; + + unsigned int hf_size_rx; + u8 active_channels; + u8 channel_cnt; + + unsigned int pipe_in; + unsigned int pipe_out; + struct gs_can *canch[] __counted_by(channel_cnt); }; /* 'allocate' a tx context. 
@@ -243,31 +391,108 @@ static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev, return NULL; } -static int gs_cmd_reset(struct gs_can *gsdev) +static int gs_cmd_reset(struct gs_can *dev) { - struct gs_device_mode *dm; - struct usb_interface *intf = gsdev->iface; + struct gs_device_mode dm = { + .mode = cpu_to_le32(GS_CAN_MODE_RESET), + }; + + return usb_control_msg_send(dev->udev, 0, GS_USB_BREQ_MODE, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, + dev->channel, 0, &dm, sizeof(dm), 1000, + GFP_KERNEL); +} + +static inline int gs_usb_get_timestamp(const struct gs_usb *parent, + u32 *timestamp_p) +{ + __le32 timestamp; int rc; - dm = kzalloc(sizeof(*dm), GFP_KERNEL); - if (!dm) - return -ENOMEM; + rc = usb_control_msg_recv(parent->udev, 0, GS_USB_BREQ_TIMESTAMP, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, + 0, 0, + &timestamp, sizeof(timestamp), + USB_CTRL_GET_TIMEOUT, + GFP_KERNEL); + if (rc) + return rc; - dm->mode = GS_CAN_MODE_RESET; + *timestamp_p = le32_to_cpu(timestamp); - rc = usb_control_msg(interface_to_usbdev(intf), - usb_sndctrlpipe(interface_to_usbdev(intf), 0), - GS_USB_BREQ_MODE, - USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, - gsdev->channel, - 0, - dm, - sizeof(*dm), - 1000); + return 0; +} - kfree(dm); +static u64 gs_usb_timestamp_read(struct cyclecounter *cc) __must_hold(&dev->tc_lock) +{ + struct gs_usb *parent = container_of(cc, struct gs_usb, cc); + u32 timestamp = 0; + int err; + + lockdep_assert_held(&parent->tc_lock); + + /* drop lock for synchronous USB transfer */ + spin_unlock_bh(&parent->tc_lock); + err = gs_usb_get_timestamp(parent, &timestamp); + spin_lock_bh(&parent->tc_lock); + if (err) + dev_err(&parent->udev->dev, + "Error %d while reading timestamp. HW timestamps may be inaccurate.", + err); + + return timestamp; +} - return rc; +static void gs_usb_timestamp_work(struct work_struct *work) +{ + struct delayed_work *delayed_work = to_delayed_work(work); + struct gs_usb *parent; + + parent = container_of(delayed_work, struct gs_usb, timestamp); + spin_lock_bh(&parent->tc_lock); + timecounter_read(&parent->tc); + spin_unlock_bh(&parent->tc_lock); + + schedule_delayed_work(&parent->timestamp, + GS_USB_TIMESTAMP_WORK_DELAY_SEC * HZ); +} + +static void gs_usb_skb_set_timestamp(struct gs_can *dev, + struct sk_buff *skb, u32 timestamp) +{ + struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb); + struct gs_usb *parent = dev->parent; + u64 ns; + + spin_lock_bh(&parent->tc_lock); + ns = timecounter_cyc2time(&parent->tc, timestamp); + spin_unlock_bh(&parent->tc_lock); + + hwtstamps->hwtstamp = ns_to_ktime(ns); +} + +static void gs_usb_timestamp_init(struct gs_usb *parent) +{ + struct cyclecounter *cc = &parent->cc; + + cc->read = gs_usb_timestamp_read; + cc->mask = CYCLECOUNTER_MASK(32); + cc->shift = 32 - bits_per(NSEC_PER_SEC / GS_USB_TIMESTAMP_TIMER_HZ); + cc->mult = clocksource_hz2mult(GS_USB_TIMESTAMP_TIMER_HZ, cc->shift); + + spin_lock_init(&parent->tc_lock); + spin_lock_bh(&parent->tc_lock); + timecounter_init(&parent->tc, &parent->cc, ktime_get_real_ns()); + spin_unlock_bh(&parent->tc_lock); + + INIT_DELAYED_WORK(&parent->timestamp, gs_usb_timestamp_work); + schedule_delayed_work(&parent->timestamp, + GS_USB_TIMESTAMP_WORK_DELAY_SEC * HZ); +} + +static void gs_usb_timestamp_stop(struct gs_usb *parent) +{ + cancel_delayed_work_sync(&parent->timestamp); } static void gs_update_state(struct gs_can *dev, struct can_frame *cf) @@ -295,19 +520,107 @@ static void gs_update_state(struct gs_can *dev, struct can_frame *cf) } } +static 
u32 gs_usb_set_timestamp(struct gs_can *dev, struct sk_buff *skb, + const struct gs_host_frame *hf) +{ + u32 timestamp; + + if (hf->flags & GS_CAN_FLAG_FD) + timestamp = le32_to_cpu(hf->canfd_ts->timestamp_us); + else + timestamp = le32_to_cpu(hf->classic_can_ts->timestamp_us); + + if (skb) + gs_usb_skb_set_timestamp(dev, skb, timestamp); + + return timestamp; +} + +static void gs_usb_rx_offload(struct gs_can *dev, struct sk_buff *skb, + const struct gs_host_frame *hf) +{ + struct can_rx_offload *offload = &dev->offload; + int rc; + + if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP) { + const u32 ts = gs_usb_set_timestamp(dev, skb, hf); + + rc = can_rx_offload_queue_timestamp(offload, skb, ts); + } else { + rc = can_rx_offload_queue_tail(offload, skb); + } + + if (rc) + dev->netdev->stats.rx_fifo_errors++; +} + +static unsigned int +gs_usb_get_echo_skb(struct gs_can *dev, struct sk_buff *skb, + const struct gs_host_frame *hf) +{ + struct can_rx_offload *offload = &dev->offload; + const u32 echo_id = hf->echo_id; + unsigned int len; + + if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP) { + const u32 ts = gs_usb_set_timestamp(dev, skb, hf); + + len = can_rx_offload_get_echo_skb_queue_timestamp(offload, echo_id, + ts, NULL); + } else { + len = can_rx_offload_get_echo_skb_queue_tail(offload, echo_id, + NULL); + } + + return len; +} + +static unsigned int +gs_usb_get_minimum_rx_length(const struct gs_can *dev, const struct gs_host_frame *hf, + unsigned int *data_length_p) +{ + unsigned int minimum_length, data_length = 0; + + if (hf->flags & GS_CAN_FLAG_FD) { + if (hf->echo_id == GS_HOST_FRAME_ECHO_ID_RX) + data_length = can_fd_dlc2len(hf->can_dlc); + + if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP) + /* timestamp follows data field of max size */ + minimum_length = struct_size(hf, canfd_ts, 1); + else + minimum_length = sizeof(hf->header) + data_length; + } else { + if (hf->echo_id == GS_HOST_FRAME_ECHO_ID_RX && + !(hf->can_id & cpu_to_le32(CAN_RTR_FLAG))) + data_length = can_cc_dlc2len(hf->can_dlc); + + if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP) + /* timestamp follows data field of max size */ + minimum_length = struct_size(hf, classic_can_ts, 1); + else + minimum_length = sizeof(hf->header) + data_length; + } + + *data_length_p = data_length; + return minimum_length; +} + static void gs_usb_receive_bulk_callback(struct urb *urb) { - struct gs_usb *usbcan = urb->context; + struct gs_usb *parent = urb->context; struct gs_can *dev; struct net_device *netdev; int rc; struct net_device_stats *stats; struct gs_host_frame *hf = urb->transfer_buffer; + unsigned int minimum_length, data_length; struct gs_tx_context *txc; struct can_frame *cf; + struct canfd_frame *cfd; struct sk_buff *skb; - BUG_ON(!usbcan); + BUG_ON(!parent); switch (urb->status) { case 0: /* success */ @@ -320,11 +633,20 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) return; } - /* device reports out of range channel id */ - if (hf->channel >= GS_MAX_INTF) + minimum_length = sizeof(hf->header); + if (urb->actual_length < minimum_length) { + dev_err_ratelimited(&parent->udev->dev, + "short read (actual_length=%u, minimum_length=%u)\n", + urb->actual_length, minimum_length); + goto resubmit_urb; + } + + /* device reports out of range channel id */ + if (hf->channel >= parent->channel_cnt) + goto device_detach; - dev = usbcan->canch[hf->channel]; + dev = parent->canch[hf->channel]; netdev = dev->netdev; stats = &netdev->stats; @@ -332,47 +654,73 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) if 
(!netif_device_present(netdev)) return; - if (hf->echo_id == -1) { /* normal rx */ - skb = alloc_can_skb(dev->netdev, &cf); - if (!skb) - return; + if (!netif_running(netdev)) + goto resubmit_urb; + + minimum_length = gs_usb_get_minimum_rx_length(dev, hf, &data_length); + if (urb->actual_length < minimum_length) { + stats->rx_errors++; + stats->rx_length_errors++; + + if (net_ratelimit()) + netdev_err(netdev, + "short read (actual_length=%u, minimum_length=%u)\n", + urb->actual_length, minimum_length); - cf->can_id = hf->can_id; + goto resubmit_urb; + } + + if (hf->echo_id == GS_HOST_FRAME_ECHO_ID_RX) { /* normal rx */ + if (hf->flags & GS_CAN_FLAG_FD) { + skb = alloc_canfd_skb(netdev, &cfd); + if (!skb) + return; - cf->can_dlc = get_can_dlc(hf->can_dlc); - memcpy(cf->data, hf->data, 8); + cfd->can_id = le32_to_cpu(hf->can_id); + cfd->len = data_length; + if (hf->flags & GS_CAN_FLAG_BRS) + cfd->flags |= CANFD_BRS; + if (hf->flags & GS_CAN_FLAG_ESI) + cfd->flags |= CANFD_ESI; + + memcpy(cfd->data, hf->canfd->data, data_length); + } else { + skb = alloc_can_skb(netdev, &cf); + if (!skb) + return; - /* ERROR frames tell us information about the controller */ - if (hf->can_id & CAN_ERR_FLAG) - gs_update_state(dev, cf); + cf->can_id = le32_to_cpu(hf->can_id); + can_frame_set_cc_len(cf, hf->can_dlc, dev->can.ctrlmode); - netdev->stats.rx_packets++; - netdev->stats.rx_bytes += hf->can_dlc; + memcpy(cf->data, hf->classic_can->data, data_length); - netif_rx(skb); + /* ERROR frames tell us information about the controller */ + if (le32_to_cpu(hf->can_id) & CAN_ERR_FLAG) + gs_update_state(dev, cf); + } + + gs_usb_rx_offload(dev, skb, hf); } else { /* echo_id == hf->echo_id */ if (hf->echo_id >= GS_MAX_TX_URBS) { netdev_err(netdev, - "Unexpected out of range echo id %d\n", + "Unexpected out of range echo id %u\n", hf->echo_id); goto resubmit_urb; } - netdev->stats.tx_packets++; - netdev->stats.tx_bytes += hf->can_dlc; - txc = gs_get_tx_context(dev, hf->echo_id); /* bad devices send bad echo_ids. 
*/ if (!txc) { netdev_err(netdev, - "Unexpected unused echo id %d\n", + "Unexpected unused echo id %u\n", hf->echo_id); goto resubmit_urb; } - can_get_echo_skb(netdev, hf->echo_id); - + skb = dev->can.echo_skb[hf->echo_id]; + stats->tx_packets++; + stats->tx_bytes += gs_usb_get_echo_skb(dev, skb, hf); gs_free_tx_context(txc); atomic_dec(&dev->active_tx_urbs); @@ -381,35 +729,36 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) } if (hf->flags & GS_CAN_FLAG_OVERFLOW) { + stats->rx_over_errors++; + stats->rx_errors++; + skb = alloc_can_err_skb(netdev, &cf); if (!skb) goto resubmit_urb; cf->can_id |= CAN_ERR_CRTL; - cf->can_dlc = CAN_ERR_DLC; + cf->len = CAN_ERR_DLC; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; - stats->rx_over_errors++; - stats->rx_errors++; - netif_rx(skb); + + gs_usb_rx_offload(dev, skb, hf); } - resubmit_urb: - usb_fill_bulk_urb(urb, - usbcan->udev, - usb_rcvbulkpipe(usbcan->udev, GSUSB_ENDPOINT_IN), - hf, - sizeof(struct gs_host_frame), - gs_usb_receive_bulk_callback, - usbcan - ); + can_rx_offload_irq_finish(&dev->offload); + +resubmit_urb: + usb_fill_bulk_urb(urb, parent->udev, + parent->pipe_in, + hf, parent->hf_size_rx, + gs_usb_receive_bulk_callback, parent); rc = usb_submit_urb(urb, GFP_ATOMIC); /* USB failure take down all interfaces */ if (rc == -ENODEV) { - for (rc = 0; rc < GS_MAX_INTF; rc++) { - if (usbcan->canch[rc]) - netif_device_detach(usbcan->canch[rc]->netdev); +device_detach: + for (rc = 0; rc < parent->channel_cnt; rc++) { + if (parent->canch[rc]) + netif_device_detach(parent->canch[rc]->netdev); } } } @@ -418,38 +767,42 @@ static int gs_usb_set_bittiming(struct net_device *netdev) { struct gs_can *dev = netdev_priv(netdev); struct can_bittiming *bt = &dev->can.bittiming; - struct usb_interface *intf = dev->iface; - int rc; - struct gs_device_bittiming *dbt; - - dbt = kmalloc(sizeof(*dbt), GFP_KERNEL); - if (!dbt) - return -ENOMEM; - - dbt->prop_seg = bt->prop_seg; - dbt->phase_seg1 = bt->phase_seg1; - dbt->phase_seg2 = bt->phase_seg2; - dbt->sjw = bt->sjw; - dbt->brp = bt->brp; + struct gs_device_bittiming dbt = { + .prop_seg = cpu_to_le32(bt->prop_seg), + .phase_seg1 = cpu_to_le32(bt->phase_seg1), + .phase_seg2 = cpu_to_le32(bt->phase_seg2), + .sjw = cpu_to_le32(bt->sjw), + .brp = cpu_to_le32(bt->brp), + }; /* request bit timings */ - rc = usb_control_msg(interface_to_usbdev(intf), - usb_sndctrlpipe(interface_to_usbdev(intf), 0), - GS_USB_BREQ_BITTIMING, - USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, - dev->channel, - 0, - dbt, - sizeof(*dbt), - 1000); - - kfree(dbt); - - if (rc < 0) - dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)", - rc); + return usb_control_msg_send(dev->udev, 0, GS_USB_BREQ_BITTIMING, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, + dev->channel, 0, &dbt, sizeof(dbt), 1000, + GFP_KERNEL); +} - return (rc > 0) ? 
0 : rc; +static int gs_usb_set_data_bittiming(struct net_device *netdev) +{ + struct gs_can *dev = netdev_priv(netdev); + struct can_bittiming *bt = &dev->can.fd.data_bittiming; + struct gs_device_bittiming dbt = { + .prop_seg = cpu_to_le32(bt->prop_seg), + .phase_seg1 = cpu_to_le32(bt->phase_seg1), + .phase_seg2 = cpu_to_le32(bt->phase_seg2), + .sjw = cpu_to_le32(bt->sjw), + .brp = cpu_to_le32(bt->brp), + }; + u8 request = GS_USB_BREQ_DATA_BITTIMING; + + if (dev->feature & GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO) + request = GS_USB_BREQ_QUIRK_CANTACT_PRO_DATA_BITTIMING; + + /* request data bit timings */ + return usb_control_msg_send(dev->udev, 0, request, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, + dev->channel, 0, &dbt, sizeof(dbt), 1000, + GFP_KERNEL); } static void gs_usb_xmit_callback(struct urb *urb) @@ -458,13 +811,21 @@ static void gs_usb_xmit_callback(struct urb *urb) struct gs_can *dev = txc->dev; struct net_device *netdev = dev->netdev; - if (urb->status) - netdev_info(netdev, "usb xmit fail %d\n", txc->echo_id); + if (!urb->status) + return; + + if (urb->status != -ESHUTDOWN && net_ratelimit()) + netdev_info(netdev, "failed to xmit URB %u: %pe\n", + txc->echo_id, ERR_PTR(urb->status)); - usb_free_coherent(urb->dev, - urb->transfer_buffer_length, - urb->transfer_buffer, - urb->transfer_dma); + netdev->stats.tx_dropped++; + netdev->stats.tx_errors++; + + can_free_echo_skb(netdev, txc->echo_id, NULL); + gs_free_tx_context(txc); + atomic_dec(&dev->active_tx_urbs); + + netif_wake_queue(netdev); } static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, @@ -475,11 +836,12 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, struct urb *urb; struct gs_host_frame *hf; struct can_frame *cf; + struct canfd_frame *cfd; int rc; unsigned int idx; struct gs_tx_context *txc; - if (can_dropped_invalid_skb(netdev, skb)) + if (can_dev_dropped_skb(netdev, skb)) return NETDEV_TX_OK; /* find an empty context to keep track of transmission */ @@ -492,40 +854,52 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, if (!urb) goto nomem_urb; - hf = usb_alloc_coherent(dev->udev, sizeof(*hf), GFP_ATOMIC, - &urb->transfer_dma); - if (!hf) { - netdev_err(netdev, "No memory left for USB buffer\n"); + hf = kmalloc(dev->hf_size_tx, GFP_ATOMIC); + if (!hf) goto nomem_hf; - } idx = txc->echo_id; if (idx >= GS_MAX_TX_URBS) { - netdev_err(netdev, "Invalid tx context %d\n", idx); + netdev_err(netdev, "Invalid tx context %u\n", idx); goto badidx; } hf->echo_id = idx; hf->channel = dev->channel; + hf->flags = 0; + hf->reserved = 0; + + if (can_is_canfd_skb(skb)) { + cfd = (struct canfd_frame *)skb->data; + + hf->can_id = cpu_to_le32(cfd->can_id); + hf->can_dlc = can_fd_len2dlc(cfd->len); + hf->flags |= GS_CAN_FLAG_FD; + if (cfd->flags & CANFD_BRS) + hf->flags |= GS_CAN_FLAG_BRS; + if (cfd->flags & CANFD_ESI) + hf->flags |= GS_CAN_FLAG_ESI; - cf = (struct can_frame *)skb->data; + memcpy(hf->canfd->data, cfd->data, cfd->len); + } else { + cf = (struct can_frame *)skb->data; + + hf->can_id = cpu_to_le32(cf->can_id); + hf->can_dlc = can_get_cc_dlc(cf, dev->can.ctrlmode); - hf->can_id = cf->can_id; - hf->can_dlc = cf->can_dlc; - memcpy(hf->data, cf->data, cf->can_dlc); + memcpy(hf->classic_can->data, cf->data, cf->len); + } usb_fill_bulk_urb(urb, dev->udev, - usb_sndbulkpipe(dev->udev, GSUSB_ENDPOINT_OUT), - hf, - sizeof(*hf), - gs_usb_xmit_callback, - txc); + dev->parent->pipe_out, + hf, dev->hf_size_tx, + gs_usb_xmit_callback, txc); - urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + 
urb->transfer_flags |= URB_FREE_BUFFER; usb_anchor_urb(urb, &dev->tx_submitted); - can_put_echo_skb(skb, netdev, idx); + can_put_echo_skb(skb, netdev, idx, 0); atomic_inc(&dev->active_tx_urbs); @@ -533,14 +907,10 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, if (unlikely(rc)) { /* usb send failed */ atomic_dec(&dev->active_tx_urbs); - can_free_echo_skb(netdev, idx); + can_free_echo_skb(netdev, idx, NULL); gs_free_tx_context(txc); usb_unanchor_urb(urb); - usb_free_coherent(dev->udev, - sizeof(*hf), - hf, - urb->transfer_dma); if (rc == -ENODEV) { netif_device_detach(netdev); @@ -559,15 +929,12 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; - badidx: - usb_free_coherent(dev->udev, - sizeof(*hf), - hf, - urb->transfer_dma); - nomem_hf: +badidx: + kfree(hf); +nomem_hf: usb_free_urb(urb); - nomem_urb: +nomem_urb: gs_free_tx_context(txc); dev_kfree_skb(skb); stats->tx_dropped++; @@ -578,46 +945,64 @@ static int gs_can_open(struct net_device *netdev) { struct gs_can *dev = netdev_priv(netdev); struct gs_usb *parent = dev->parent; - int rc, i; - struct gs_device_mode *dm; + struct gs_device_mode dm = { + .mode = cpu_to_le32(GS_CAN_MODE_START), + }; + struct gs_host_frame *hf; + struct urb *urb = NULL; u32 ctrlmode; + u32 flags = 0; + int rc, i; rc = open_candev(netdev); if (rc) return rc; - if (atomic_add_return(1, &parent->active_channels) == 1) { + ctrlmode = dev->can.ctrlmode; + if (ctrlmode & CAN_CTRLMODE_FD) { + if (dev->feature & GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX) + dev->hf_size_tx = struct_size(hf, canfd_quirk, 1); + else + dev->hf_size_tx = struct_size(hf, canfd, 1); + } else { + if (dev->feature & GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX) + dev->hf_size_tx = struct_size(hf, classic_can_quirk, 1); + else + dev->hf_size_tx = struct_size(hf, classic_can, 1); + } + + can_rx_offload_enable(&dev->offload); + + if (!parent->active_channels) { + if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP) + gs_usb_timestamp_init(parent); + for (i = 0; i < GS_MAX_RX_URBS; i++) { - struct urb *urb; u8 *buf; /* alloc rx urb */ urb = usb_alloc_urb(0, GFP_KERNEL); - if (!urb) - return -ENOMEM; + if (!urb) { + rc = -ENOMEM; + goto out_usb_kill_anchored_urbs; + } /* alloc rx buffer */ - buf = usb_alloc_coherent(dev->udev, - sizeof(struct gs_host_frame), - GFP_KERNEL, - &urb->transfer_dma); + buf = kmalloc(dev->parent->hf_size_rx, + GFP_KERNEL); if (!buf) { - netdev_err(netdev, - "No memory left for USB buffer\n"); - usb_free_urb(urb); - return -ENOMEM; + rc = -ENOMEM; + goto out_usb_free_urb; } /* fill, anchor, and submit rx urb */ usb_fill_bulk_urb(urb, dev->udev, - usb_rcvbulkpipe(dev->udev, - GSUSB_ENDPOINT_IN), + dev->parent->pipe_in, buf, - sizeof(struct gs_host_frame), - gs_usb_receive_bulk_callback, - parent); - urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + dev->parent->hf_size_rx, + gs_usb_receive_bulk_callback, parent); + urb->transfer_flags |= URB_FREE_BUFFER; usb_anchor_urb(urb, &parent->rx_submitted); @@ -627,11 +1012,10 @@ static int gs_can_open(struct net_device *netdev) netif_device_detach(dev->netdev); netdev_err(netdev, - "usb_submit failed (err=%d)\n", - rc); + "usb_submit_urb() failed, error %pe\n", + ERR_PTR(rc)); - usb_unanchor_urb(urb); - break; + goto out_usb_unanchor_urb; } /* Drop reference, @@ -641,55 +1025,100 @@ static int gs_can_open(struct net_device *netdev) } } - dm = kmalloc(sizeof(*dm), GFP_KERNEL); - if (!dm) - return -ENOMEM; - /* flags */ - ctrlmode = dev->can.ctrlmode; - dm->flags = 0; - if (ctrlmode & CAN_CTRLMODE_LOOPBACK) 
- dm->flags |= GS_CAN_MODE_LOOP_BACK; - else if (ctrlmode & CAN_CTRLMODE_LISTENONLY) - dm->flags |= GS_CAN_MODE_LISTEN_ONLY; + flags |= GS_CAN_MODE_LOOP_BACK; - /* Controller is not allowed to retry TX - * this mode is unavailable on atmels uc3c hardware - */ - if (ctrlmode & CAN_CTRLMODE_ONE_SHOT) - dm->flags |= GS_CAN_MODE_ONE_SHOT; + if (ctrlmode & CAN_CTRLMODE_LISTENONLY) + flags |= GS_CAN_MODE_LISTEN_ONLY; if (ctrlmode & CAN_CTRLMODE_3_SAMPLES) - dm->flags |= GS_CAN_MODE_TRIPLE_SAMPLE; + flags |= GS_CAN_MODE_TRIPLE_SAMPLE; - /* finally start device */ - dm->mode = GS_CAN_MODE_START; - rc = usb_control_msg(interface_to_usbdev(dev->iface), - usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0), - GS_USB_BREQ_MODE, - USB_DIR_OUT | USB_TYPE_VENDOR | - USB_RECIP_INTERFACE, - dev->channel, - 0, - dm, - sizeof(*dm), - 1000); - - if (rc < 0) { - netdev_err(netdev, "Couldn't start device (err=%d)\n", rc); - kfree(dm); - return rc; - } + if (ctrlmode & CAN_CTRLMODE_ONE_SHOT) + flags |= GS_CAN_MODE_ONE_SHOT; + + if (ctrlmode & CAN_CTRLMODE_BERR_REPORTING) + flags |= GS_CAN_MODE_BERR_REPORTING; + + if (ctrlmode & CAN_CTRLMODE_FD) + flags |= GS_CAN_MODE_FD; - kfree(dm); + /* if hardware supports timestamps, enable it */ + if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP) + flags |= GS_CAN_MODE_HW_TIMESTAMP; + /* finally start device */ dev->can.state = CAN_STATE_ERROR_ACTIVE; + dm.flags = cpu_to_le32(flags); + rc = usb_control_msg_send(dev->udev, 0, GS_USB_BREQ_MODE, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, + dev->channel, 0, &dm, sizeof(dm), 1000, + GFP_KERNEL); + if (rc) { + netdev_err(netdev, "Couldn't start device (err=%d)\n", rc); + dev->can.state = CAN_STATE_STOPPED; + + goto out_usb_kill_anchored_urbs; + } + parent->active_channels++; if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)) netif_start_queue(netdev); return 0; + +out_usb_unanchor_urb: + usb_unanchor_urb(urb); +out_usb_free_urb: + usb_free_urb(urb); +out_usb_kill_anchored_urbs: + if (!parent->active_channels) { + usb_kill_anchored_urbs(&dev->tx_submitted); + + if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP) + gs_usb_timestamp_stop(parent); + } + + can_rx_offload_disable(&dev->offload); + close_candev(netdev); + + return rc; +} + +static int gs_usb_get_state(const struct net_device *netdev, + struct can_berr_counter *bec, + enum can_state *state) +{ + struct gs_can *dev = netdev_priv(netdev); + struct gs_device_state ds; + int rc; + + rc = usb_control_msg_recv(dev->udev, 0, GS_USB_BREQ_GET_STATE, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, + dev->channel, 0, + &ds, sizeof(ds), + USB_CTRL_GET_TIMEOUT, + GFP_KERNEL); + if (rc) + return rc; + + if (le32_to_cpu(ds.state) >= CAN_STATE_MAX) + return -EOPNOTSUPP; + + *state = le32_to_cpu(ds.state); + bec->txerr = le32_to_cpu(ds.txerr); + bec->rxerr = le32_to_cpu(ds.rxerr); + + return 0; +} + +static int gs_usb_can_get_berr_counter(const struct net_device *netdev, + struct can_berr_counter *bec) +{ + enum can_state state; + + return gs_usb_get_state(netdev, bec, &state); } static int gs_can_close(struct net_device *netdev) @@ -701,17 +1130,22 @@ static int gs_can_close(struct net_device *netdev) netif_stop_queue(netdev); /* Stop polling */ - if (atomic_dec_and_test(&parent->active_channels)) + parent->active_channels--; + if (!parent->active_channels) { usb_kill_anchored_urbs(&parent->rx_submitted); + if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP) + gs_usb_timestamp_stop(parent); + } + /* Stop sending URBs */ usb_kill_anchored_urbs(&dev->tx_submitted); 
atomic_set(&dev->active_tx_urbs, 0); + dev->can.state = CAN_STATE_STOPPED; + /* reset the device */ - rc = gs_cmd_reset(dev); - if (rc < 0) - netdev_warn(netdev, "Couldn't shutdown device (err=%d)", rc); + gs_cmd_reset(dev); /* reset tx contexts */ for (rc = 0; rc < GS_MAX_TX_URBS; rc++) { @@ -719,64 +1153,77 @@ static int gs_can_close(struct net_device *netdev) dev->tx_context[rc].echo_id = GS_MAX_TX_URBS; } + can_rx_offload_disable(&dev->offload); + /* close the netdev */ close_candev(netdev); return 0; } +static int gs_can_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *cfg) +{ + const struct gs_can *dev = netdev_priv(netdev); + + if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP) + return can_hwtstamp_get(netdev, cfg); + + return -EOPNOTSUPP; +} + +static int gs_can_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack) +{ + const struct gs_can *dev = netdev_priv(netdev); + + if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP) + return can_hwtstamp_set(netdev, cfg, extack); + + return -EOPNOTSUPP; +} + static const struct net_device_ops gs_usb_netdev_ops = { .ndo_open = gs_can_open, .ndo_stop = gs_can_close, .ndo_start_xmit = gs_can_start_xmit, - .ndo_change_mtu = can_change_mtu, + .ndo_hwtstamp_get = gs_can_hwtstamp_get, + .ndo_hwtstamp_set = gs_can_hwtstamp_set, }; static int gs_usb_set_identify(struct net_device *netdev, bool do_identify) { struct gs_can *dev = netdev_priv(netdev); - struct gs_identify_mode *imode; - int rc; - - imode = kmalloc(sizeof(*imode), GFP_KERNEL); - - if (!imode) - return -ENOMEM; + struct gs_identify_mode imode; if (do_identify) - imode->mode = GS_CAN_IDENTIFY_ON; + imode.mode = cpu_to_le32(GS_CAN_IDENTIFY_ON); else - imode->mode = GS_CAN_IDENTIFY_OFF; - - rc = usb_control_msg(interface_to_usbdev(dev->iface), - usb_sndctrlpipe(interface_to_usbdev(dev->iface), - 0), - GS_USB_BREQ_IDENTIFY, - USB_DIR_OUT | USB_TYPE_VENDOR | - USB_RECIP_INTERFACE, - dev->channel, - 0, - imode, - sizeof(*imode), - 100); - - kfree(imode); - - return (rc > 0) ? 
0 : rc; + imode.mode = cpu_to_le32(GS_CAN_IDENTIFY_OFF); + + return usb_control_msg_send(dev->udev, 0, GS_USB_BREQ_IDENTIFY, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, + dev->channel, 0, &imode, sizeof(imode), 100, + GFP_KERNEL); } /* blink LED's for finding the this interface */ -static int gs_usb_set_phys_id(struct net_device *dev, +static int gs_usb_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) { + const struct gs_can *dev = netdev_priv(netdev); int rc = 0; + if (!(dev->feature & GS_CAN_FEATURE_IDENTIFY)) + return -EOPNOTSUPP; + switch (state) { case ETHTOOL_ID_ACTIVE: - rc = gs_usb_set_identify(dev, GS_CAN_IDENTIFY_ON); + rc = gs_usb_set_identify(netdev, GS_CAN_IDENTIFY_ON); break; case ETHTOOL_ID_INACTIVE: - rc = gs_usb_set_identify(dev, GS_CAN_IDENTIFY_OFF); + rc = gs_usb_set_identify(netdev, GS_CAN_IDENTIFY_OFF); break; default: break; @@ -785,8 +1232,65 @@ static int gs_usb_set_phys_id(struct net_device *dev, return rc; } +static int gs_usb_get_ts_info(struct net_device *netdev, + struct kernel_ethtool_ts_info *info) +{ + struct gs_can *dev = netdev_priv(netdev); + + /* report if device supports HW timestamps */ + if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP) + return can_ethtool_op_get_ts_info_hwts(netdev, info); + + return ethtool_op_get_ts_info(netdev, info); +} + static const struct ethtool_ops gs_usb_ethtool_ops = { .set_phys_id = gs_usb_set_phys_id, + .get_ts_info = gs_usb_get_ts_info, +}; + +static int gs_usb_get_termination(struct net_device *netdev, u16 *term) +{ + struct gs_can *dev = netdev_priv(netdev); + struct gs_device_termination_state term_state; + int rc; + + rc = usb_control_msg_recv(dev->udev, 0, GS_USB_BREQ_GET_TERMINATION, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, + dev->channel, 0, + &term_state, sizeof(term_state), 1000, + GFP_KERNEL); + if (rc) + return rc; + + if (term_state.state == cpu_to_le32(GS_CAN_TERMINATION_STATE_ON)) + *term = GS_USB_TERMINATION_ENABLED; + else + *term = GS_USB_TERMINATION_DISABLED; + + return 0; +} + +static int gs_usb_set_termination(struct net_device *netdev, u16 term) +{ + struct gs_can *dev = netdev_priv(netdev); + struct gs_device_termination_state term_state; + + if (term == GS_USB_TERMINATION_ENABLED) + term_state.state = cpu_to_le32(GS_CAN_TERMINATION_STATE_ON); + else + term_state.state = cpu_to_le32(GS_CAN_TERMINATION_STATE_OFF); + + return usb_control_msg_send(dev->udev, 0, GS_USB_BREQ_SET_TERMINATION, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, + dev->channel, 0, + &term_state, sizeof(term_state), 1000, + GFP_KERNEL); +} + +static const u16 gs_usb_termination_const[] = { + GS_USB_TERMINATION_DISABLED, + GS_USB_TERMINATION_ENABLED }; static struct gs_can *gs_make_candev(unsigned int channel, @@ -796,28 +1300,21 @@ static struct gs_can *gs_make_candev(unsigned int channel, struct gs_can *dev; struct net_device *netdev; int rc; - struct gs_device_bt_const *bt_const; - - bt_const = kmalloc(sizeof(*bt_const), GFP_KERNEL); - if (!bt_const) - return ERR_PTR(-ENOMEM); + struct gs_device_bt_const_extended bt_const_extended; + struct gs_device_bt_const bt_const; + u32 feature; /* fetch bit timing constants */ - rc = usb_control_msg(interface_to_usbdev(intf), - usb_rcvctrlpipe(interface_to_usbdev(intf), 0), - GS_USB_BREQ_BT_CONST, - USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, - channel, - 0, - bt_const, - sizeof(*bt_const), - 1000); - - if (rc < 0) { + rc = usb_control_msg_recv(interface_to_usbdev(intf), 0, + GS_USB_BREQ_BT_CONST, + USB_DIR_IN | 
USB_TYPE_VENDOR | USB_RECIP_INTERFACE, + channel, 0, &bt_const, sizeof(bt_const), 1000, + GFP_KERNEL); + + if (rc) { dev_err(&intf->dev, - "Couldn't get bit timing const for channel (err=%d)\n", - rc); - kfree(bt_const); + "Couldn't get bit timing const for channel %d (%pe)\n", + channel, ERR_PTR(rc)); return ERR_PTR(rc); } @@ -825,29 +1322,30 @@ static struct gs_can *gs_make_candev(unsigned int channel, netdev = alloc_candev(sizeof(struct gs_can), GS_MAX_TX_URBS); if (!netdev) { dev_err(&intf->dev, "Couldn't allocate candev\n"); - kfree(bt_const); return ERR_PTR(-ENOMEM); } dev = netdev_priv(netdev); netdev->netdev_ops = &gs_usb_netdev_ops; + netdev->ethtool_ops = &gs_usb_ethtool_ops; netdev->flags |= IFF_ECHO; /* we support full roundtrip echo */ - - /* dev settup */ - strcpy(dev->bt_const.name, "gs_usb"); - dev->bt_const.tseg1_min = bt_const->tseg1_min; - dev->bt_const.tseg1_max = bt_const->tseg1_max; - dev->bt_const.tseg2_min = bt_const->tseg2_min; - dev->bt_const.tseg2_max = bt_const->tseg2_max; - dev->bt_const.sjw_max = bt_const->sjw_max; - dev->bt_const.brp_min = bt_const->brp_min; - dev->bt_const.brp_max = bt_const->brp_max; - dev->bt_const.brp_inc = bt_const->brp_inc; + netdev->dev_id = channel; + netdev->dev_port = channel; + + /* dev setup */ + strcpy(dev->bt_const.name, KBUILD_MODNAME); + dev->bt_const.tseg1_min = le32_to_cpu(bt_const.tseg1_min); + dev->bt_const.tseg1_max = le32_to_cpu(bt_const.tseg1_max); + dev->bt_const.tseg2_min = le32_to_cpu(bt_const.tseg2_min); + dev->bt_const.tseg2_max = le32_to_cpu(bt_const.tseg2_max); + dev->bt_const.sjw_max = le32_to_cpu(bt_const.sjw_max); + dev->bt_const.brp_min = le32_to_cpu(bt_const.brp_min); + dev->bt_const.brp_max = le32_to_cpu(bt_const.brp_max); + dev->bt_const.brp_inc = le32_to_cpu(bt_const.brp_inc); dev->udev = interface_to_usbdev(intf); - dev->iface = intf; dev->netdev = netdev; dev->channel = channel; @@ -859,188 +1357,296 @@ static struct gs_can *gs_make_candev(unsigned int channel, dev->tx_context[rc].echo_id = GS_MAX_TX_URBS; } - /* can settup */ + /* can setup */ dev->can.state = CAN_STATE_STOPPED; - dev->can.clock.freq = bt_const->fclk_can; + dev->can.clock.freq = le32_to_cpu(bt_const.fclk_can); dev->can.bittiming_const = &dev->bt_const; dev->can.do_set_bittiming = gs_usb_set_bittiming; - dev->can.ctrlmode_supported = 0; + dev->can.ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC; - if (bt_const->feature & GS_CAN_FEATURE_LISTEN_ONLY) + feature = le32_to_cpu(bt_const.feature); + dev->feature = FIELD_GET(GS_CAN_FEATURE_MASK, feature); + if (feature & GS_CAN_FEATURE_LISTEN_ONLY) dev->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY; - if (bt_const->feature & GS_CAN_FEATURE_LOOP_BACK) + if (feature & GS_CAN_FEATURE_LOOP_BACK) dev->can.ctrlmode_supported |= CAN_CTRLMODE_LOOPBACK; - if (bt_const->feature & GS_CAN_FEATURE_TRIPLE_SAMPLE) + if (feature & GS_CAN_FEATURE_TRIPLE_SAMPLE) dev->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; - if (bt_const->feature & GS_CAN_FEATURE_ONE_SHOT) + if (feature & GS_CAN_FEATURE_ONE_SHOT) dev->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT; - SET_NETDEV_DEV(netdev, &intf->dev); + if (feature & GS_CAN_FEATURE_FD) { + dev->can.ctrlmode_supported |= CAN_CTRLMODE_FD; + /* The data bit timing will be overwritten, if + * GS_CAN_FEATURE_BT_CONST_EXT is set. 
+ */ + dev->can.fd.data_bittiming_const = &dev->bt_const; + dev->can.fd.do_set_data_bittiming = gs_usb_set_data_bittiming; + } - if (dconf->sw_version > 1) - if (bt_const->feature & GS_CAN_FEATURE_IDENTIFY) - netdev->ethtool_ops = &gs_usb_ethtool_ops; + if (feature & GS_CAN_FEATURE_TERMINATION) { + rc = gs_usb_get_termination(netdev, &dev->can.termination); + if (rc) { + dev->feature &= ~GS_CAN_FEATURE_TERMINATION; - kfree(bt_const); + dev_info(&intf->dev, + "Disabling termination support for channel %d (%pe)\n", + channel, ERR_PTR(rc)); + } else { + dev->can.termination_const = gs_usb_termination_const; + dev->can.termination_const_cnt = ARRAY_SIZE(gs_usb_termination_const); + dev->can.do_set_termination = gs_usb_set_termination; + } + } + + if (feature & GS_CAN_FEATURE_BERR_REPORTING) + dev->can.ctrlmode_supported |= CAN_CTRLMODE_BERR_REPORTING; + + if (feature & GS_CAN_FEATURE_GET_STATE) + dev->can.do_get_berr_counter = gs_usb_can_get_berr_counter; + + /* The CANtact Pro from LinkLayer Labs is based on the + * LPC54616 µC, which is affected by the NXP LPC USB transfer + * erratum. However, the current firmware (version 2) doesn't + * set the GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX bit. Set the + * feature GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX to workaround + * this issue. + * + * For the GS_USB_BREQ_DATA_BITTIMING USB control message the + * CANtact Pro firmware uses a request value, which is already + * used by the candleLight firmware for a different purpose + * (GS_USB_BREQ_GET_USER_ID). Set the feature + * GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO to workaround this + * issue. + */ + if (dev->udev->descriptor.idVendor == cpu_to_le16(USB_GS_USB_1_VENDOR_ID) && + dev->udev->descriptor.idProduct == cpu_to_le16(USB_GS_USB_1_PRODUCT_ID) && + dev->udev->manufacturer && dev->udev->product && + !strcmp(dev->udev->manufacturer, "LinkLayer Labs") && + !strcmp(dev->udev->product, "CANtact Pro") && + (le32_to_cpu(dconf->sw_version) <= 2)) + dev->feature |= GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX | + GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO; + + /* GS_CAN_FEATURE_IDENTIFY is only supported for sw_version > 1 */ + if (!(le32_to_cpu(dconf->sw_version) > 1 && + feature & GS_CAN_FEATURE_IDENTIFY)) + dev->feature &= ~GS_CAN_FEATURE_IDENTIFY; + + /* fetch extended bit timing constants if device has feature + * GS_CAN_FEATURE_FD and GS_CAN_FEATURE_BT_CONST_EXT + */ + if (feature & GS_CAN_FEATURE_FD && + feature & GS_CAN_FEATURE_BT_CONST_EXT) { + rc = usb_control_msg_recv(interface_to_usbdev(intf), 0, + GS_USB_BREQ_BT_CONST_EXT, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, + channel, 0, &bt_const_extended, + sizeof(bt_const_extended), + 1000, GFP_KERNEL); + if (rc) { + dev_err(&intf->dev, + "Couldn't get extended bit timing const for channel %d (%pe)\n", + channel, ERR_PTR(rc)); + goto out_free_candev; + } + + strcpy(dev->data_bt_const.name, KBUILD_MODNAME); + dev->data_bt_const.tseg1_min = le32_to_cpu(bt_const_extended.dtseg1_min); + dev->data_bt_const.tseg1_max = le32_to_cpu(bt_const_extended.dtseg1_max); + dev->data_bt_const.tseg2_min = le32_to_cpu(bt_const_extended.dtseg2_min); + dev->data_bt_const.tseg2_max = le32_to_cpu(bt_const_extended.dtseg2_max); + dev->data_bt_const.sjw_max = le32_to_cpu(bt_const_extended.dsjw_max); + dev->data_bt_const.brp_min = le32_to_cpu(bt_const_extended.dbrp_min); + dev->data_bt_const.brp_max = le32_to_cpu(bt_const_extended.dbrp_max); + dev->data_bt_const.brp_inc = le32_to_cpu(bt_const_extended.dbrp_inc); + + dev->can.fd.data_bittiming_const = &dev->data_bt_const; + } + + 
can_rx_offload_add_manual(netdev, &dev->offload, GS_NAPI_WEIGHT); + SET_NETDEV_DEV(netdev, &intf->dev); rc = register_candev(dev->netdev); if (rc) { - free_candev(dev->netdev); - dev_err(&intf->dev, "Couldn't register candev (err=%d)\n", rc); - return ERR_PTR(rc); + dev_err(&intf->dev, + "Couldn't register candev for channel %d (%pe)\n", + channel, ERR_PTR(rc)); + goto out_can_rx_offload_del; } return dev; + +out_can_rx_offload_del: + can_rx_offload_del(&dev->offload); +out_free_candev: + free_candev(dev->netdev); + return ERR_PTR(rc); } static void gs_destroy_candev(struct gs_can *dev) { unregister_candev(dev->netdev); - usb_kill_anchored_urbs(&dev->tx_submitted); + can_rx_offload_del(&dev->offload); free_candev(dev->netdev); } static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { - struct gs_usb *dev; - int rc = -ENOMEM; + struct usb_device *udev = interface_to_usbdev(intf); + struct usb_endpoint_descriptor *ep_in, *ep_out; + struct gs_host_frame *hf; + struct gs_usb *parent; + struct gs_host_config hconf = { + .byte_order = cpu_to_le32(0x0000beef), + }; + struct gs_device_config dconf; unsigned int icount, i; - struct gs_host_config *hconf; - struct gs_device_config *dconf; - - hconf = kmalloc(sizeof(*hconf), GFP_KERNEL); - if (!hconf) - return -ENOMEM; + int rc; - hconf->byte_order = 0x0000beef; + rc = usb_find_common_endpoints(intf->cur_altsetting, + &ep_in, &ep_out, NULL, NULL); + if (rc) { + dev_err(&intf->dev, "Required endpoints not found\n"); + return rc; + } /* send host config */ - rc = usb_control_msg(interface_to_usbdev(intf), - usb_sndctrlpipe(interface_to_usbdev(intf), 0), - GS_USB_BREQ_HOST_FORMAT, - USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, - 1, - intf->altsetting[0].desc.bInterfaceNumber, - hconf, - sizeof(*hconf), - 1000); - - kfree(hconf); - - if (rc < 0) { - dev_err(&intf->dev, "Couldn't send data format (err=%d)\n", - rc); + rc = usb_control_msg_send(udev, 0, + GS_USB_BREQ_HOST_FORMAT, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, + 1, intf->cur_altsetting->desc.bInterfaceNumber, + &hconf, sizeof(hconf), 1000, + GFP_KERNEL); + if (rc) { + dev_err(&intf->dev, "Couldn't send data format (err=%d)\n", rc); return rc; } - dconf = kmalloc(sizeof(*dconf), GFP_KERNEL); - if (!dconf) - return -ENOMEM; - /* read device config */ - rc = usb_control_msg(interface_to_usbdev(intf), - usb_rcvctrlpipe(interface_to_usbdev(intf), 0), - GS_USB_BREQ_DEVICE_CONFIG, - USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, - 1, - intf->altsetting[0].desc.bInterfaceNumber, - dconf, - sizeof(*dconf), - 1000); - if (rc < 0) { + rc = usb_control_msg_recv(udev, 0, + GS_USB_BREQ_DEVICE_CONFIG, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, + 1, intf->cur_altsetting->desc.bInterfaceNumber, + &dconf, sizeof(dconf), 1000, + GFP_KERNEL); + if (rc) { dev_err(&intf->dev, "Couldn't get device config: (err=%d)\n", rc); - kfree(dconf); return rc; } - icount = dconf->icount + 1; - dev_info(&intf->dev, "Configuring for %d interfaces\n", icount); + icount = dconf.icount + 1; + dev_info(&intf->dev, "Configuring for %u interfaces\n", icount); - if (icount > GS_MAX_INTF) { + if (icount > type_max(parent->channel_cnt)) { dev_err(&intf->dev, - "Driver cannot handle more that %d CAN interfaces\n", - GS_MAX_INTF); - kfree(dconf); + "Driver cannot handle more that %u CAN interfaces\n", + type_max(parent->channel_cnt)); return -EINVAL; } - dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (!dev) { - kfree(dconf); + parent = kzalloc(struct_size(parent, canch, 
icount), GFP_KERNEL); + if (!parent) return -ENOMEM; - } - init_usb_anchor(&dev->rx_submitted); + parent->channel_cnt = icount; - atomic_set(&dev->active_channels, 0); + init_usb_anchor(&parent->rx_submitted); - usb_set_intfdata(intf, dev); - dev->udev = interface_to_usbdev(intf); + usb_set_intfdata(intf, parent); + parent->udev = udev; + + /* store the detected endpoints */ + parent->pipe_in = usb_rcvbulkpipe(parent->udev, ep_in->bEndpointAddress); + parent->pipe_out = usb_sndbulkpipe(parent->udev, ep_out->bEndpointAddress); for (i = 0; i < icount; i++) { - dev->canch[i] = gs_make_candev(i, intf, dconf); - if (IS_ERR_OR_NULL(dev->canch[i])) { + unsigned int hf_size_rx = 0; + + parent->canch[i] = gs_make_candev(i, intf, &dconf); + if (IS_ERR_OR_NULL(parent->canch[i])) { /* save error code to return later */ - rc = PTR_ERR(dev->canch[i]); + rc = PTR_ERR(parent->canch[i]); /* on failure destroy previously created candevs */ icount = i; for (i = 0; i < icount; i++) - gs_destroy_candev(dev->canch[i]); + gs_destroy_candev(parent->canch[i]); - usb_kill_anchored_urbs(&dev->rx_submitted); - kfree(dconf); - kfree(dev); + usb_kill_anchored_urbs(&parent->rx_submitted); + kfree(parent); return rc; } - dev->canch[i]->parent = dev; + parent->canch[i]->parent = parent; + + /* set RX packet size based on FD and if hardware + * timestamps are supported. + */ + if (parent->canch[i]->can.ctrlmode_supported & CAN_CTRLMODE_FD) { + if (parent->canch[i]->feature & GS_CAN_FEATURE_HW_TIMESTAMP) + hf_size_rx = struct_size(hf, canfd_ts, 1); + else + hf_size_rx = struct_size(hf, canfd, 1); + } else { + if (parent->canch[i]->feature & GS_CAN_FEATURE_HW_TIMESTAMP) + hf_size_rx = struct_size(hf, classic_can_ts, 1); + else + hf_size_rx = struct_size(hf, classic_can, 1); + } + parent->hf_size_rx = max(parent->hf_size_rx, hf_size_rx); } - kfree(dconf); - return 0; } static void gs_usb_disconnect(struct usb_interface *intf) { - unsigned i; - struct gs_usb *dev = usb_get_intfdata(intf); + struct gs_usb *parent = usb_get_intfdata(intf); + unsigned int i; + usb_set_intfdata(intf, NULL); - if (!dev) { + if (!parent) { dev_err(&intf->dev, "Disconnect (nodata)\n"); return; } - for (i = 0; i < GS_MAX_INTF; i++) - if (dev->canch[i]) - gs_destroy_candev(dev->canch[i]); + for (i = 0; i < parent->channel_cnt; i++) + if (parent->canch[i]) + gs_destroy_candev(parent->canch[i]); - usb_kill_anchored_urbs(&dev->rx_submitted); - kfree(dev); + kfree(parent); } static const struct usb_device_id gs_usb_table[] = { - { USB_DEVICE_INTERFACE_NUMBER(USB_GSUSB_1_VENDOR_ID, - USB_GSUSB_1_PRODUCT_ID, 0) }, + { USB_DEVICE_INTERFACE_NUMBER(USB_GS_USB_1_VENDOR_ID, + USB_GS_USB_1_PRODUCT_ID, 0) }, { USB_DEVICE_INTERFACE_NUMBER(USB_CANDLELIGHT_VENDOR_ID, USB_CANDLELIGHT_PRODUCT_ID, 0) }, + { USB_DEVICE_INTERFACE_NUMBER(USB_CES_CANEXT_FD_VENDOR_ID, + USB_CES_CANEXT_FD_PRODUCT_ID, 0) }, + { USB_DEVICE_INTERFACE_NUMBER(USB_ABE_CANDEBUGGER_FD_VENDOR_ID, + USB_ABE_CANDEBUGGER_FD_PRODUCT_ID, 0) }, + { USB_DEVICE_INTERFACE_NUMBER(USB_XYLANTA_SAINT3_VENDOR_ID, + USB_XYLANTA_SAINT3_PRODUCT_ID, 0) }, + { USB_DEVICE_INTERFACE_NUMBER(USB_CANNECTIVITY_VENDOR_ID, + USB_CANNECTIVITY_PRODUCT_ID, 0) }, {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, gs_usb_table); static struct usb_driver gs_usb_driver = { - .name = "gs_usb", - .probe = gs_usb_probe, + .name = KBUILD_MODNAME, + .probe = gs_usb_probe, .disconnect = gs_usb_disconnect, - .id_table = gs_usb_table, + .id_table = gs_usb_table, }; module_usb_driver(gs_usb_driver); diff --git 
a/drivers/net/can/usb/kvaser_usb/Makefile b/drivers/net/can/usb/kvaser_usb/Makefile index 9f41ddab6a5a..41b4a11555aa 100644 --- a/drivers/net/can/usb/kvaser_usb/Makefile +++ b/drivers/net/can/usb/kvaser_usb/Makefile @@ -1,2 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o -kvaser_usb-y = kvaser_usb_core.o kvaser_usb_leaf.o kvaser_usb_hydra.o +kvaser_usb-y = kvaser_usb_core.o kvaser_usb_devlink.o kvaser_usb_leaf.o kvaser_usb_hydra.o diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h index 390b6bde883c..46a1b6907a50 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h @@ -22,9 +22,12 @@ */ #include <linux/completion.h> +#include <linux/ktime.h> +#include <linux/math64.h> #include <linux/spinlock.h> #include <linux/types.h> #include <linux/usb.h> +#include <net/devlink.h> #include <linux/can.h> #include <linux/can/dev.h> @@ -35,15 +38,20 @@ #define KVASER_USB_RX_BUFFER_SIZE 3072 #define KVASER_USB_MAX_NET_DEVICES 5 -/* USB devices features */ -#define KVASER_USB_HAS_SILENT_MODE BIT(0) -#define KVASER_USB_HAS_TXRX_ERRORS BIT(1) +/* Kvaser USB device quirks */ +#define KVASER_USB_QUIRK_HAS_SILENT_MODE BIT(0) +#define KVASER_USB_QUIRK_HAS_TXRX_ERRORS BIT(1) +#define KVASER_USB_QUIRK_IGNORE_CLK_FREQ BIT(2) /* Device capabilities */ #define KVASER_USB_CAP_BERR_CAP 0x01 #define KVASER_USB_CAP_EXT_CAP 0x02 #define KVASER_USB_HYDRA_CAP_EXT_CMD 0x04 +#define KVASER_USB_SW_VERSION_MAJOR_MASK GENMASK(31, 24) +#define KVASER_USB_SW_VERSION_MINOR_MASK GENMASK(23, 16) +#define KVASER_USB_SW_VERSION_BUILD_MASK GENMASK(15, 0) + struct kvaser_usb_dev_cfg; enum kvaser_usb_leaf_family { @@ -51,6 +59,11 @@ enum kvaser_usb_leaf_family { KVASER_USBCAN, }; +enum kvaser_usb_led_state { + KVASER_USB_LED_ON = 0, + KVASER_USB_LED_OFF = 1, +}; + #define KVASER_USB_HYDRA_MAX_CMD_LEN 128 struct kvaser_usb_dev_card_data_hydra { u8 channel_to_he[KVASER_USB_MAX_NET_DEVICES]; @@ -65,37 +78,49 @@ struct kvaser_usb_dev_card_data_hydra { struct kvaser_usb_dev_card_data { u32 ctrlmode_supported; u32 capabilities; - union { - struct { - enum kvaser_usb_leaf_family family; - } leaf; - struct kvaser_usb_dev_card_data_hydra hydra; - }; + struct kvaser_usb_dev_card_data_hydra hydra; + u32 usbcan_timestamp_msb; }; /* Context for an outstanding, not yet ACKed, transmission */ struct kvaser_usb_tx_urb_context { struct kvaser_usb_net_priv *priv; u32 echo_index; - int dlc; }; +struct kvaser_usb_fw_version { + u8 major; + u8 minor; + u16 build; +}; + +struct kvaser_usb_busparams { + __le32 bitrate; + u8 tseg1; + u8 tseg2; + u8 sjw; + u8 nsamples; +} __packed; + struct kvaser_usb { struct usb_device *udev; struct usb_interface *intf; struct kvaser_usb_net_priv *nets[KVASER_USB_MAX_NET_DEVICES]; - const struct kvaser_usb_dev_ops *ops; + const struct kvaser_usb_driver_info *driver_info; const struct kvaser_usb_dev_cfg *cfg; struct usb_endpoint_descriptor *bulk_in, *bulk_out; struct usb_anchor rx_submitted; + u32 ean[2]; + u32 serial_number; + struct kvaser_usb_fw_version fw_version; + u8 hw_revision; + unsigned int nchannels; /* @max_tx_urbs: Firmware-reported maximum number of outstanding, * not yet ACKed, transmissions on this device. This value is * also used as a sentinel for marking free tx contexts. 
*/ - u32 fw_version; - unsigned int nchannels; unsigned int max_tx_urbs; struct kvaser_usb_dev_card_data card_data; @@ -106,15 +131,22 @@ struct kvaser_usb { struct kvaser_usb_net_priv { struct can_priv can; + struct devlink_port devlink_port; struct can_berr_counter bec; + /* subdriver-specific data */ + void *sub_priv; + struct kvaser_usb *dev; struct net_device *netdev; int channel; - struct completion start_comp, stop_comp, flush_comp; + struct completion start_comp, stop_comp, flush_comp, + get_busparams_comp; struct usb_anchor tx_submitted; + struct kvaser_usb_busparams busparams_nominal, busparams_data; + spinlock_t tx_contexts_lock; /* lock for active_tx_contexts */ int active_tx_contexts; struct kvaser_usb_tx_urb_context tx_contexts[]; @@ -124,15 +156,20 @@ struct kvaser_usb_net_priv { * struct kvaser_usb_dev_ops - Device specific functions * @dev_set_mode: used for can.do_set_mode * @dev_set_bittiming: used for can.do_set_bittiming - * @dev_set_data_bittiming: used for can.do_set_data_bittiming + * @dev_get_busparams: readback arbitration busparams + * @dev_set_data_bittiming: used for can.fd.do_set_data_bittiming + * @dev_get_data_busparams: readback data busparams * @dev_get_berr_counter: used for can.do_get_berr_counter * * @dev_setup_endpoints: setup USB in and out endpoints * @dev_init_card: initialize card + * @dev_init_channel: initialize channel + * @dev_remove_channel: uninitialize channel * @dev_get_software_info: get software info * @dev_get_software_details: get software details * @dev_get_card_info: get card info * @dev_get_capabilities: discover device capabilities + * @dev_set_led: turn on/off device LED * * @dev_set_opt_mode: set ctrlmod * @dev_start_chip: start the CAN controller @@ -144,16 +181,25 @@ struct kvaser_usb_net_priv { */ struct kvaser_usb_dev_ops { int (*dev_set_mode)(struct net_device *netdev, enum can_mode mode); - int (*dev_set_bittiming)(struct net_device *netdev); - int (*dev_set_data_bittiming)(struct net_device *netdev); + int (*dev_set_bittiming)(const struct net_device *netdev, + const struct kvaser_usb_busparams *busparams); + int (*dev_get_busparams)(struct kvaser_usb_net_priv *priv); + int (*dev_set_data_bittiming)(const struct net_device *netdev, + const struct kvaser_usb_busparams *busparams); + int (*dev_get_data_busparams)(struct kvaser_usb_net_priv *priv); int (*dev_get_berr_counter)(const struct net_device *netdev, struct can_berr_counter *bec); int (*dev_setup_endpoints)(struct kvaser_usb *dev); int (*dev_init_card)(struct kvaser_usb *dev); + int (*dev_init_channel)(struct kvaser_usb_net_priv *priv); + void (*dev_remove_channel)(struct kvaser_usb_net_priv *priv); int (*dev_get_software_info)(struct kvaser_usb *dev); int (*dev_get_software_details)(struct kvaser_usb *dev); int (*dev_get_card_info)(struct kvaser_usb *dev); int (*dev_get_capabilities)(struct kvaser_usb *dev); + int (*dev_set_led)(struct kvaser_usb_net_priv *priv, + enum kvaser_usb_led_state state, + u16 duration_ms); int (*dev_set_opt_mode)(const struct kvaser_usb_net_priv *priv); int (*dev_start_chip)(struct kvaser_usb_net_priv *priv); int (*dev_stop_chip)(struct kvaser_usb_net_priv *priv); @@ -162,8 +208,14 @@ struct kvaser_usb_dev_ops { void (*dev_read_bulk_callback)(struct kvaser_usb *dev, void *buf, int len); void *(*dev_frame_to_cmd)(const struct kvaser_usb_net_priv *priv, - const struct sk_buff *skb, int *frame_len, - int *cmd_len, u16 transid); + const struct sk_buff *skb, int *cmd_len, + u16 transid); +}; + +struct kvaser_usb_driver_info { + u32 quirks; + enum 
kvaser_usb_leaf_family family; + const struct kvaser_usb_dev_ops *ops; }; struct kvaser_usb_dev_cfg { @@ -176,6 +228,13 @@ struct kvaser_usb_dev_cfg { extern const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops; extern const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops; +extern const struct devlink_ops kvaser_usb_devlink_ops; + +int kvaser_usb_devlink_port_register(struct kvaser_usb_net_priv *priv); +void kvaser_usb_devlink_port_unregister(struct kvaser_usb_net_priv *priv); + +void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv); + int kvaser_usb_recv_cmd(const struct kvaser_usb *dev, void *cmd, int len, int *actual_len); @@ -185,4 +244,29 @@ int kvaser_usb_send_cmd_async(struct kvaser_usb_net_priv *priv, void *cmd, int len); int kvaser_usb_can_rx_over_error(struct net_device *netdev); + +extern const struct can_bittiming_const kvaser_usb_flexc_bittiming_const; + +static inline ktime_t kvaser_usb_ticks_to_ktime(const struct kvaser_usb_dev_cfg *cfg, + u64 ticks) +{ + return ns_to_ktime(div_u64(ticks * 1000, cfg->timestamp_freq)); +} + +static inline ktime_t kvaser_usb_timestamp48_to_ktime(const struct kvaser_usb_dev_cfg *cfg, + const __le16 *timestamp) +{ + u64 ticks = le16_to_cpu(timestamp[0]) | + (u64)(le16_to_cpu(timestamp[1])) << 16 | + (u64)(le16_to_cpu(timestamp[2])) << 32; + + return kvaser_usb_ticks_to_ktime(cfg, ticks); +} + +static inline ktime_t kvaser_usb_timestamp64_to_ktime(const struct kvaser_usb_dev_cfg *cfg, + __le64 timestamp) +{ + return kvaser_usb_ticks_to_ktime(cfg, le64_to_cpu(timestamp)); +} + #endif /* KVASER_USB_H */ diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c index c89c7d4900d7..62701ec34272 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c @@ -13,6 +13,7 @@ #include <linux/completion.h> #include <linux/device.h> +#include <linux/ethtool.h> #include <linux/gfp.h> #include <linux/if.h> #include <linux/kernel.h> @@ -30,165 +31,233 @@ #include "kvaser_usb.h" /* Kvaser USB vendor id. 
*/ -#define KVASER_VENDOR_ID 0x0bfd +#define KVASER_VENDOR_ID 0x0bfd /* Kvaser Leaf USB devices product ids */ -#define USB_LEAF_DEVEL_PRODUCT_ID 10 -#define USB_LEAF_LITE_PRODUCT_ID 11 -#define USB_LEAF_PRO_PRODUCT_ID 12 -#define USB_LEAF_SPRO_PRODUCT_ID 14 -#define USB_LEAF_PRO_LS_PRODUCT_ID 15 -#define USB_LEAF_PRO_SWC_PRODUCT_ID 16 -#define USB_LEAF_PRO_LIN_PRODUCT_ID 17 -#define USB_LEAF_SPRO_LS_PRODUCT_ID 18 -#define USB_LEAF_SPRO_SWC_PRODUCT_ID 19 -#define USB_MEMO2_DEVEL_PRODUCT_ID 22 -#define USB_MEMO2_HSHS_PRODUCT_ID 23 -#define USB_UPRO_HSHS_PRODUCT_ID 24 -#define USB_LEAF_LITE_GI_PRODUCT_ID 25 -#define USB_LEAF_PRO_OBDII_PRODUCT_ID 26 -#define USB_MEMO2_HSLS_PRODUCT_ID 27 -#define USB_LEAF_LITE_CH_PRODUCT_ID 28 -#define USB_BLACKBIRD_SPRO_PRODUCT_ID 29 -#define USB_OEM_MERCURY_PRODUCT_ID 34 -#define USB_OEM_LEAF_PRODUCT_ID 35 -#define USB_CAN_R_PRODUCT_ID 39 -#define USB_LEAF_LITE_V2_PRODUCT_ID 288 -#define USB_MINI_PCIE_HS_PRODUCT_ID 289 -#define USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID 290 -#define USB_USBCAN_LIGHT_2HS_PRODUCT_ID 291 -#define USB_MINI_PCIE_2HS_PRODUCT_ID 292 +#define USB_LEAF_DEVEL_PRODUCT_ID 0x000a +#define USB_LEAF_LITE_PRODUCT_ID 0x000b +#define USB_LEAF_PRO_PRODUCT_ID 0x000c +#define USB_LEAF_SPRO_PRODUCT_ID 0x000e +#define USB_LEAF_PRO_LS_PRODUCT_ID 0x000f +#define USB_LEAF_PRO_SWC_PRODUCT_ID 0x0010 +#define USB_LEAF_PRO_LIN_PRODUCT_ID 0x0011 +#define USB_LEAF_SPRO_LS_PRODUCT_ID 0x0012 +#define USB_LEAF_SPRO_SWC_PRODUCT_ID 0x0013 +#define USB_MEMO2_DEVEL_PRODUCT_ID 0x0016 +#define USB_MEMO2_HSHS_PRODUCT_ID 0x0017 +#define USB_UPRO_HSHS_PRODUCT_ID 0x0018 +#define USB_LEAF_LITE_GI_PRODUCT_ID 0x0019 +#define USB_LEAF_PRO_OBDII_PRODUCT_ID 0x001a +#define USB_MEMO2_HSLS_PRODUCT_ID 0x001b +#define USB_LEAF_LITE_CH_PRODUCT_ID 0x001c +#define USB_BLACKBIRD_SPRO_PRODUCT_ID 0x001d +#define USB_OEM_MERCURY_PRODUCT_ID 0x0022 +#define USB_OEM_LEAF_PRODUCT_ID 0x0023 +#define USB_CAN_R_PRODUCT_ID 0x0027 +#define USB_LEAF_LITE_V2_PRODUCT_ID 0x0120 +#define USB_MINI_PCIE_HS_PRODUCT_ID 0x0121 +#define USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID 0x0122 +#define USB_USBCAN_LIGHT_2HS_PRODUCT_ID 0x0123 +#define USB_MINI_PCIE_2HS_PRODUCT_ID 0x0124 +#define USB_USBCAN_R_V2_PRODUCT_ID 0x0126 +#define USB_LEAF_LIGHT_R_V2_PRODUCT_ID 0x0127 +#define USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID 0x0128 /* Kvaser USBCan-II devices product ids */ -#define USB_USBCAN_REVB_PRODUCT_ID 2 -#define USB_VCI2_PRODUCT_ID 3 -#define USB_USBCAN2_PRODUCT_ID 4 -#define USB_MEMORATOR_PRODUCT_ID 5 +#define USB_USBCAN_REVB_PRODUCT_ID 0x0002 +#define USB_VCI2_PRODUCT_ID 0x0003 +#define USB_USBCAN2_PRODUCT_ID 0x0004 +#define USB_MEMORATOR_PRODUCT_ID 0x0005 /* Kvaser Minihydra USB devices product ids */ -#define USB_BLACKBIRD_V2_PRODUCT_ID 258 -#define USB_MEMO_PRO_5HS_PRODUCT_ID 260 -#define USB_USBCAN_PRO_5HS_PRODUCT_ID 261 -#define USB_USBCAN_LIGHT_4HS_PRODUCT_ID 262 -#define USB_LEAF_PRO_HS_V2_PRODUCT_ID 263 -#define USB_USBCAN_PRO_2HS_V2_PRODUCT_ID 264 -#define USB_MEMO_2HS_PRODUCT_ID 265 -#define USB_MEMO_PRO_2HS_V2_PRODUCT_ID 266 -#define USB_HYBRID_CANLIN_PRODUCT_ID 267 -#define USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID 268 -#define USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID 269 -#define USB_HYBRID_PRO_CANLIN_PRODUCT_ID 270 - -static inline bool kvaser_is_leaf(const struct usb_device_id *id) -{ - return (id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID && - id->idProduct <= USB_CAN_R_PRODUCT_ID) || - (id->idProduct >= USB_LEAF_LITE_V2_PRODUCT_ID && - id->idProduct <= USB_MINI_PCIE_2HS_PRODUCT_ID); -} +#define 
USB_BLACKBIRD_V2_PRODUCT_ID 0x0102 +#define USB_MEMO_PRO_5HS_PRODUCT_ID 0x0104 +#define USB_USBCAN_PRO_5HS_PRODUCT_ID 0x0105 +#define USB_USBCAN_LIGHT_4HS_PRODUCT_ID 0x0106 +#define USB_LEAF_PRO_HS_V2_PRODUCT_ID 0x0107 +#define USB_USBCAN_PRO_2HS_V2_PRODUCT_ID 0x0108 +#define USB_MEMO_2HS_PRODUCT_ID 0x0109 +#define USB_MEMO_PRO_2HS_V2_PRODUCT_ID 0x010a +#define USB_HYBRID_2CANLIN_PRODUCT_ID 0x010b +#define USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID 0x010c +#define USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID 0x010d +#define USB_HYBRID_PRO_2CANLIN_PRODUCT_ID 0x010e +#define USB_U100_PRODUCT_ID 0x0111 +#define USB_U100P_PRODUCT_ID 0x0112 +#define USB_U100S_PRODUCT_ID 0x0113 +#define USB_USBCAN_PRO_4HS_PRODUCT_ID 0x0114 +#define USB_HYBRID_CANLIN_PRODUCT_ID 0x0115 +#define USB_HYBRID_PRO_CANLIN_PRODUCT_ID 0x0116 +#define USB_LEAF_V3_PRODUCT_ID 0x0117 +#define USB_VINING_800_PRODUCT_ID 0x0119 +#define USB_USBCAN_PRO_5XCAN_PRODUCT_ID 0x011A +#define USB_MINI_PCIE_1XCAN_PRODUCT_ID 0x011B + +static const struct kvaser_usb_driver_info kvaser_usb_driver_info_hydra = { + .quirks = 0, + .ops = &kvaser_usb_hydra_dev_ops, +}; -static inline bool kvaser_is_usbcan(const struct usb_device_id *id) -{ - return id->idProduct >= USB_USBCAN_REVB_PRODUCT_ID && - id->idProduct <= USB_MEMORATOR_PRODUCT_ID; -} +static const struct kvaser_usb_driver_info kvaser_usb_driver_info_usbcan = { + .quirks = KVASER_USB_QUIRK_HAS_TXRX_ERRORS | + KVASER_USB_QUIRK_HAS_SILENT_MODE, + .family = KVASER_USBCAN, + .ops = &kvaser_usb_leaf_dev_ops, +}; -static inline bool kvaser_is_hydra(const struct usb_device_id *id) -{ - return id->idProduct >= USB_BLACKBIRD_V2_PRODUCT_ID && - id->idProduct <= USB_HYBRID_PRO_CANLIN_PRODUCT_ID; -} +static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf = { + .quirks = KVASER_USB_QUIRK_IGNORE_CLK_FREQ, + .family = KVASER_LEAF, + .ops = &kvaser_usb_leaf_dev_ops, +}; + +static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf_err = { + .quirks = KVASER_USB_QUIRK_HAS_TXRX_ERRORS | + KVASER_USB_QUIRK_IGNORE_CLK_FREQ, + .family = KVASER_LEAF, + .ops = &kvaser_usb_leaf_dev_ops, +}; + +static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf_err_listen = { + .quirks = KVASER_USB_QUIRK_HAS_TXRX_ERRORS | + KVASER_USB_QUIRK_HAS_SILENT_MODE | + KVASER_USB_QUIRK_IGNORE_CLK_FREQ, + .family = KVASER_LEAF, + .ops = &kvaser_usb_leaf_dev_ops, +}; + +static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leafimx = { + .quirks = 0, + .family = KVASER_LEAF, + .ops = &kvaser_usb_leaf_dev_ops, +}; static const struct usb_device_id kvaser_usb_table[] = { - /* Leaf USB product IDs */ - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID) }, + /* Leaf M32C USB product IDs */ + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS | - KVASER_USB_HAS_SILENT_MODE }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS | - KVASER_USB_HAS_SILENT_MODE }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LS_PRODUCT_ID), - .driver_info = 
KVASER_USB_HAS_TXRX_ERRORS | - KVASER_USB_HAS_SILENT_MODE }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_SWC_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS | - KVASER_USB_HAS_SILENT_MODE }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LIN_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS | - KVASER_USB_HAS_SILENT_MODE }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_LS_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS | - KVASER_USB_HAS_SILENT_MODE }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_SWC_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS | - KVASER_USB_HAS_SILENT_MODE }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_DEVEL_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS | - KVASER_USB_HAS_SILENT_MODE }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSHS_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS | - KVASER_USB_HAS_SILENT_MODE }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_UPRO_HSHS_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID) }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_OBDII_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS | - KVASER_USB_HAS_SILENT_MODE }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSLS_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_CH_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err }, { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_SPRO_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err }, { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_MERCURY_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err }, { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_LEAF_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err }, { USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID) }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err }, + + /* Leaf i.MX28 USB product IDs */ + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID), + .driver_info = 
(kernel_ulong_t)&kvaser_usb_driver_info_leafimx }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_R_V2_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_R_V2_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx }, /* USBCANII USB product IDs */ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan }, { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_REVB_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan }, { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMORATOR_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan }, { USB_DEVICE(KVASER_VENDOR_ID, USB_VCI2_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan }, /* Minihydra USB product IDs */ - { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_V2_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_5HS_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_5HS_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_4HS_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_HS_V2_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_2HS_V2_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_2HS_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_2HS_V2_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_CANLIN_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_CANLIN_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_V2_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_5HS_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_5HS_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_4HS_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_HS_V2_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_2HS_V2_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_2HS_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_2HS_V2_PRODUCT_ID), + .driver_info = 
(kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_2CANLIN_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_2CANLIN_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_U100_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_U100P_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_U100S_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_4HS_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_CANLIN_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_CANLIN_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_V3_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_VINING_800_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_5XCAN_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_1XCAN_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, { } }; MODULE_DEVICE_TABLE(usb, kvaser_usb_table); int kvaser_usb_send_cmd(const struct kvaser_usb *dev, void *cmd, int len) { - int actual_len; /* Not used */ - return usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, dev->bulk_out->bEndpointAddress), - cmd, len, &actual_len, KVASER_USB_TIMEOUT); + cmd, len, NULL, KVASER_USB_TIMEOUT); } int kvaser_usb_recv_cmd(const struct kvaser_usb *dev, void *cmd, int len, @@ -235,7 +304,7 @@ int kvaser_usb_send_cmd_async(struct kvaser_usb_net_priv *priv, void *cmd, } usb_free_urb(urb); - return 0; + return err; } int kvaser_usb_can_rx_over_error(struct net_device *netdev) @@ -257,8 +326,6 @@ int kvaser_usb_can_rx_over_error(struct net_device *netdev) cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; netif_rx(skb); return 0; @@ -267,6 +334,7 @@ int kvaser_usb_can_rx_over_error(struct net_device *netdev) static void kvaser_usb_read_bulk_callback(struct urb *urb) { struct kvaser_usb *dev = urb->context; + const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops; int err; unsigned int i; @@ -283,8 +351,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb) goto resubmit_urb; } - dev->ops->dev_read_bulk_callback(dev, urb->transfer_buffer, - urb->actual_length); + ops->dev_read_bulk_callback(dev, urb->transfer_buffer, + urb->actual_length); resubmit_urb: usb_fill_bulk_urb(urb, dev->udev, @@ -296,10 +364,13 @@ resubmit_urb: err = usb_submit_urb(urb, GFP_ATOMIC); if (err == -ENODEV) { for (i = 0; i < dev->nchannels; i++) { - if (!dev->nets[i]) + struct kvaser_usb_net_priv *priv; + + priv = dev->nets[i]; + if (!priv) continue; - 
netif_device_detach(dev->nets[i]->netdev); + netif_device_detach(priv->netdev); } } else if (err) { dev_err(&dev->intf->dev, @@ -378,21 +449,18 @@ static int kvaser_usb_open(struct net_device *netdev) { struct kvaser_usb_net_priv *priv = netdev_priv(netdev); struct kvaser_usb *dev = priv->dev; + const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops; int err; err = open_candev(netdev); if (err) return err; - err = kvaser_usb_setup_rx_urbs(dev); - if (err) - goto error; - - err = dev->ops->dev_set_opt_mode(priv); + err = ops->dev_set_opt_mode(priv); if (err) goto error; - err = dev->ops->dev_start_chip(priv); + err = ops->dev_start_chip(priv); if (err) { netdev_warn(netdev, "Cannot start device, error %d\n", err); goto error; @@ -421,7 +489,7 @@ static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv) /* This method might sleep. Do not call it in the atomic context * of URB completions. */ -static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv) +void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv) { usb_kill_anchored_urbs(&priv->tx_submitted); kvaser_usb_reset_tx_urb_contexts(priv); @@ -449,22 +517,23 @@ static int kvaser_usb_close(struct net_device *netdev) { struct kvaser_usb_net_priv *priv = netdev_priv(netdev); struct kvaser_usb *dev = priv->dev; + const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops; int err; netif_stop_queue(netdev); - err = dev->ops->dev_flush_queue(priv); + err = ops->dev_flush_queue(priv); if (err) netdev_warn(netdev, "Cannot flush queue, error %d\n", err); - if (dev->ops->dev_reset_chip) { - err = dev->ops->dev_reset_chip(dev, priv->channel); + if (ops->dev_reset_chip) { + err = ops->dev_reset_chip(dev, priv->channel); if (err) netdev_warn(netdev, "Cannot reset card, error %d\n", err); } - err = dev->ops->dev_stop_chip(priv); + err = ops->dev_stop_chip(priv); if (err) netdev_warn(netdev, "Cannot stop device, error %d\n", err); @@ -477,6 +546,91 @@ static int kvaser_usb_close(struct net_device *netdev) return 0; } +static int kvaser_usb_set_bittiming(struct net_device *netdev) +{ + struct kvaser_usb_net_priv *priv = netdev_priv(netdev); + struct kvaser_usb *dev = priv->dev; + const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops; + struct can_bittiming *bt = &priv->can.bittiming; + struct kvaser_usb_busparams busparams; + int tseg1 = bt->prop_seg + bt->phase_seg1; + int tseg2 = bt->phase_seg2; + int sjw = bt->sjw; + int err; + + busparams.bitrate = cpu_to_le32(bt->bitrate); + busparams.sjw = (u8)sjw; + busparams.tseg1 = (u8)tseg1; + busparams.tseg2 = (u8)tseg2; + if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) + busparams.nsamples = 3; + else + busparams.nsamples = 1; + + err = ops->dev_set_bittiming(netdev, &busparams); + if (err) + return err; + + err = kvaser_usb_setup_rx_urbs(priv->dev); + if (err) + return err; + + err = ops->dev_get_busparams(priv); + if (err) { + /* Treat EOPNOTSUPP as success */ + if (err == -EOPNOTSUPP) + err = 0; + return err; + } + + if (memcmp(&busparams, &priv->busparams_nominal, + sizeof(priv->busparams_nominal)) != 0) + err = -EINVAL; + + return err; +} + +static int kvaser_usb_set_data_bittiming(struct net_device *netdev) +{ + struct kvaser_usb_net_priv *priv = netdev_priv(netdev); + struct kvaser_usb *dev = priv->dev; + const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops; + struct can_bittiming *dbt = &priv->can.fd.data_bittiming; + struct kvaser_usb_busparams busparams; + int tseg1 = dbt->prop_seg + dbt->phase_seg1; + int tseg2 = dbt->phase_seg2; + 
int sjw = dbt->sjw; + int err; + + if (!ops->dev_set_data_bittiming || + !ops->dev_get_data_busparams) + return -EOPNOTSUPP; + + busparams.bitrate = cpu_to_le32(dbt->bitrate); + busparams.sjw = (u8)sjw; + busparams.tseg1 = (u8)tseg1; + busparams.tseg2 = (u8)tseg2; + busparams.nsamples = 1; + + err = ops->dev_set_data_bittiming(netdev, &busparams); + if (err) + return err; + + err = kvaser_usb_setup_rx_urbs(priv->dev); + if (err) + return err; + + err = ops->dev_get_data_busparams(priv); + if (err) + return err; + + if (memcmp(&busparams, &priv->busparams_data, + sizeof(priv->busparams_data)) != 0) + err = -EINVAL; + + return err; +} + static void kvaser_usb_write_bulk_callback(struct urb *urb) { struct kvaser_usb_tx_urb_context *context = urb->context; @@ -503,6 +657,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, { struct kvaser_usb_net_priv *priv = netdev_priv(netdev); struct kvaser_usb *dev = priv->dev; + const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops; struct net_device_stats *stats = &netdev->stats; struct kvaser_usb_tx_urb_context *context = NULL; struct urb *urb; @@ -512,7 +667,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, unsigned int i; unsigned long flags; - if (can_dropped_invalid_skb(netdev, skb)) + if (can_dev_dropped_skb(netdev, skb)) return NETDEV_TX_OK; urb = usb_alloc_urb(0, GFP_ATOMIC); @@ -545,8 +700,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, goto freeurb; } - buf = dev->ops->dev_frame_to_cmd(priv, skb, &context->dlc, &cmd_len, - context->echo_index); + buf = ops->dev_frame_to_cmd(priv, skb, &cmd_len, context->echo_index); if (!buf) { stats->tx_dropped++; dev_kfree_skb(skb); @@ -562,7 +716,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, context->priv = priv; - can_put_echo_skb(skb, netdev, context->echo_index); + can_put_echo_skb(skb, netdev, context->echo_index, 0); usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, @@ -575,7 +729,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, if (unlikely(err)) { spin_lock_irqsave(&priv->tx_contexts_lock, flags); - can_free_echo_skb(netdev, context->echo_index); + can_free_echo_skb(netdev, context->echo_index, NULL); context->echo_index = dev->max_tx_urbs; --priv->active_tx_contexts; netif_wake_queue(netdev); @@ -602,49 +756,88 @@ freeurb: return ret; } +static int kvaser_usb_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct kvaser_usb_net_priv *priv = netdev_priv(netdev); + const struct kvaser_usb_dev_ops *ops = priv->dev->driver_info->ops; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 3; /* 3 On/Off cycles per second */ + + case ETHTOOL_ID_ON: + return ops->dev_set_led(priv, KVASER_USB_LED_ON, 1000); + + case ETHTOOL_ID_OFF: + return ops->dev_set_led(priv, KVASER_USB_LED_OFF, 1000); + + case ETHTOOL_ID_INACTIVE: + /* Turn LED off and restore standard function after 1ms */ + return ops->dev_set_led(priv, KVASER_USB_LED_OFF, 1); + + default: + return -EINVAL; + } +} + static const struct net_device_ops kvaser_usb_netdev_ops = { .ndo_open = kvaser_usb_open, .ndo_stop = kvaser_usb_close, .ndo_start_xmit = kvaser_usb_start_xmit, - .ndo_change_mtu = can_change_mtu, + .ndo_hwtstamp_get = can_hwtstamp_get, + .ndo_hwtstamp_set = can_hwtstamp_set, +}; + +static const struct ethtool_ops kvaser_usb_ethtool_ops = { + .get_ts_info = can_ethtool_op_get_ts_info_hwts, + .set_phys_id = kvaser_usb_set_phys_id, }; static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev) { + 
const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops; int i; + struct kvaser_usb_net_priv *priv; for (i = 0; i < dev->nchannels; i++) { - if (!dev->nets[i]) + priv = dev->nets[i]; + if (!priv) continue; - unregister_candev(dev->nets[i]->netdev); + unregister_candev(priv->netdev); } kvaser_usb_unlink_all_urbs(dev); for (i = 0; i < dev->nchannels; i++) { - if (!dev->nets[i]) + priv = dev->nets[i]; + if (!priv) continue; - free_candev(dev->nets[i]->netdev); + if (ops->dev_remove_channel) + ops->dev_remove_channel(priv); + + kvaser_usb_devlink_port_unregister(priv); + free_candev(priv->netdev); } } -static int kvaser_usb_init_one(struct kvaser_usb *dev, - const struct usb_device_id *id, int channel) +static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel) { struct net_device *netdev; struct kvaser_usb_net_priv *priv; + const struct kvaser_usb_driver_info *driver_info = dev->driver_info; + const struct kvaser_usb_dev_ops *ops = driver_info->ops; int err; - if (dev->ops->dev_reset_chip) { - err = dev->ops->dev_reset_chip(dev, channel); + if (ops->dev_reset_chip) { + err = ops->dev_reset_chip(dev, channel); if (err) return err; } - netdev = alloc_candev(sizeof(*priv) + - dev->max_tx_urbs * sizeof(*priv->tx_contexts), + netdev = alloc_candev(struct_size(priv, tx_contexts, dev->max_tx_urbs), dev->max_tx_urbs); if (!netdev) { dev_err(&dev->intf->dev, "Cannot alloc candev\n"); @@ -656,7 +849,10 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, init_usb_anchor(&priv->tx_submitted); init_completion(&priv->start_comp); init_completion(&priv->stop_comp); - priv->can.ctrlmode_supported = 0; + init_completion(&priv->flush_comp); + init_completion(&priv->get_busparams_comp); + priv->can.ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC | + CAN_CTRLMODE_BERR_REPORTING; priv->dev = dev; priv->netdev = netdev; @@ -668,76 +864,88 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, priv->can.state = CAN_STATE_STOPPED; priv->can.clock.freq = dev->cfg->clock.freq; priv->can.bittiming_const = dev->cfg->bittiming_const; - priv->can.do_set_bittiming = dev->ops->dev_set_bittiming; - priv->can.do_set_mode = dev->ops->dev_set_mode; - if ((id->driver_info & KVASER_USB_HAS_TXRX_ERRORS) || + priv->can.do_set_bittiming = kvaser_usb_set_bittiming; + priv->can.do_set_mode = ops->dev_set_mode; + if ((driver_info->quirks & KVASER_USB_QUIRK_HAS_TXRX_ERRORS) || (priv->dev->card_data.capabilities & KVASER_USB_CAP_BERR_CAP)) - priv->can.do_get_berr_counter = dev->ops->dev_get_berr_counter; - if (id->driver_info & KVASER_USB_HAS_SILENT_MODE) + priv->can.do_get_berr_counter = ops->dev_get_berr_counter; + if (driver_info->quirks & KVASER_USB_QUIRK_HAS_SILENT_MODE) priv->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY; priv->can.ctrlmode_supported |= dev->card_data.ctrlmode_supported; if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) { - priv->can.data_bittiming_const = dev->cfg->data_bittiming_const; - priv->can.do_set_data_bittiming = - dev->ops->dev_set_data_bittiming; + priv->can.fd.data_bittiming_const = dev->cfg->data_bittiming_const; + priv->can.fd.do_set_data_bittiming = kvaser_usb_set_data_bittiming; } netdev->flags |= IFF_ECHO; netdev->netdev_ops = &kvaser_usb_netdev_ops; - + netdev->ethtool_ops = &kvaser_usb_ethtool_ops; SET_NETDEV_DEV(netdev, &dev->intf->dev); netdev->dev_id = channel; + netdev->dev_port = channel; dev->nets[channel] = priv; + if (ops->dev_init_channel) { + err = ops->dev_init_channel(priv); + if (err) + goto candev_free; + } + + err = 
kvaser_usb_devlink_port_register(priv); + if (err) { + dev_err(&dev->intf->dev, "Failed to register devlink port\n"); + goto candev_free; + } + err = register_candev(netdev); if (err) { dev_err(&dev->intf->dev, "Failed to register CAN device\n"); - free_candev(netdev); - dev->nets[channel] = NULL; - return err; + goto unregister_devlink_port; } netdev_dbg(netdev, "device registered\n"); return 0; + +unregister_devlink_port: + kvaser_usb_devlink_port_unregister(priv); +candev_free: + free_candev(netdev); + dev->nets[channel] = NULL; + return err; } static int kvaser_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct kvaser_usb *dev; + struct devlink *devlink; int err; int i; + const struct kvaser_usb_driver_info *driver_info; + const struct kvaser_usb_dev_ops *ops; - dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL); - if (!dev) - return -ENOMEM; - - if (kvaser_is_leaf(id)) { - dev->card_data.leaf.family = KVASER_LEAF; - dev->ops = &kvaser_usb_leaf_dev_ops; - } else if (kvaser_is_usbcan(id)) { - dev->card_data.leaf.family = KVASER_USBCAN; - dev->ops = &kvaser_usb_leaf_dev_ops; - } else if (kvaser_is_hydra(id)) { - dev->ops = &kvaser_usb_hydra_dev_ops; - } else { - dev_err(&intf->dev, - "Product ID (%d) is not a supported Kvaser USB device\n", - id->idProduct); + driver_info = (const struct kvaser_usb_driver_info *)id->driver_info; + if (!driver_info) return -ENODEV; - } + devlink = devlink_alloc(&kvaser_usb_devlink_ops, sizeof(*dev), &intf->dev); + if (!devlink) + return -ENOMEM; + + dev = devlink_priv(devlink); dev->intf = intf; + dev->driver_info = driver_info; + ops = driver_info->ops; - err = dev->ops->dev_setup_endpoints(dev); + err = ops->dev_setup_endpoints(dev); if (err) { - dev_err(&intf->dev, "Cannot get usb endpoint(s)"); - return err; + dev_err_probe(&intf->dev, err, "Cannot get usb endpoint(s)"); + goto free_devlink; } dev->udev = interface_to_usbdev(intf); @@ -748,64 +956,67 @@ static int kvaser_usb_probe(struct usb_interface *intf, dev->card_data.ctrlmode_supported = 0; dev->card_data.capabilities = 0; - err = dev->ops->dev_init_card(dev); + err = ops->dev_init_card(dev); if (err) { - dev_err(&intf->dev, - "Failed to initialize card, error %d\n", err); - return err; + dev_err_probe(&intf->dev, err, + "Failed to initialize card\n"); + goto free_devlink; } - err = dev->ops->dev_get_software_info(dev); + err = ops->dev_get_software_info(dev); if (err) { - dev_err(&intf->dev, - "Cannot get software info, error %d\n", err); - return err; + dev_err_probe(&intf->dev, err, + "Cannot get software info\n"); + goto free_devlink; } - if (dev->ops->dev_get_software_details) { - err = dev->ops->dev_get_software_details(dev); + if (ops->dev_get_software_details) { + err = ops->dev_get_software_details(dev); if (err) { - dev_err(&intf->dev, - "Cannot get software details, error %d\n", err); - return err; + dev_err_probe(&intf->dev, err, + "Cannot get software details\n"); + goto free_devlink; } } - if (WARN_ON(!dev->cfg)) - return -ENODEV; - - dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n", - ((dev->fw_version >> 24) & 0xff), - ((dev->fw_version >> 16) & 0xff), - (dev->fw_version & 0xffff)); + if (WARN_ON(!dev->cfg)) { + err = -ENODEV; + goto free_devlink; + } dev_dbg(&intf->dev, "Max outstanding tx = %d URBs\n", dev->max_tx_urbs); - err = dev->ops->dev_get_card_info(dev); + err = ops->dev_get_card_info(dev); if (err) { - dev_err(&intf->dev, "Cannot get card info, error %d\n", err); - return err; + dev_err_probe(&intf->dev, err, + "Cannot get card 
info\n"); + goto free_devlink; } - if (dev->ops->dev_get_capabilities) { - err = dev->ops->dev_get_capabilities(dev); + if (ops->dev_get_capabilities) { + err = ops->dev_get_capabilities(dev); if (err) { - dev_err(&intf->dev, - "Cannot get capabilities, error %d\n", err); - kvaser_usb_remove_interfaces(dev); - return err; + dev_err_probe(&intf->dev, err, + "Cannot get capabilities\n"); + goto remove_interfaces; } } for (i = 0; i < dev->nchannels; i++) { - err = kvaser_usb_init_one(dev, id, i); - if (err) { - kvaser_usb_remove_interfaces(dev); - return err; - } + err = kvaser_usb_init_one(dev, i); + if (err) + goto remove_interfaces; } + devlink_register(devlink); return 0; + +remove_interfaces: + kvaser_usb_remove_interfaces(dev); +free_devlink: + devlink_free(devlink); + + return err; } static void kvaser_usb_disconnect(struct usb_interface *intf) @@ -818,10 +1029,12 @@ static void kvaser_usb_disconnect(struct usb_interface *intf) return; kvaser_usb_remove_interfaces(dev); + devlink_unregister(priv_to_devlink(dev)); + devlink_free(priv_to_devlink(dev)); } static struct usb_driver kvaser_usb_driver = { - .name = "kvaser_usb", + .name = KBUILD_MODNAME, .probe = kvaser_usb_probe, .disconnect = kvaser_usb_disconnect, .id_table = kvaser_usb_table, diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_devlink.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_devlink.c new file mode 100644 index 000000000000..e838b82298ae --- /dev/null +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_devlink.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0 +/* kvaser_usb devlink functions + * + * Copyright (C) 2025 KVASER AB, Sweden. All rights reserved. + */ +#include "kvaser_usb.h" + +#include <linux/netdevice.h> +#include <net/devlink.h> + +#define KVASER_USB_EAN_MSB 0x00073301 + +static int kvaser_usb_devlink_info_get(struct devlink *devlink, + struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct kvaser_usb *dev = devlink_priv(devlink); + char buf[] = "73301XXXXXXXXXX"; + int ret; + + if (dev->serial_number) { + snprintf(buf, sizeof(buf), "%u", dev->serial_number); + ret = devlink_info_serial_number_put(req, buf); + if (ret) + return ret; + } + + if (dev->fw_version.major) { + snprintf(buf, sizeof(buf), "%u.%u.%u", + dev->fw_version.major, + dev->fw_version.minor, + dev->fw_version.build); + ret = devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW, + buf); + if (ret) + return ret; + } + + if (dev->hw_revision) { + snprintf(buf, sizeof(buf), "%u", dev->hw_revision); + ret = devlink_info_version_fixed_put(req, + DEVLINK_INFO_VERSION_GENERIC_BOARD_REV, + buf); + if (ret) + return ret; + } + + if (dev->ean[1] == KVASER_USB_EAN_MSB) { + snprintf(buf, sizeof(buf), "%x%08x", dev->ean[1], dev->ean[0]); + ret = devlink_info_version_fixed_put(req, + DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, + buf); + if (ret) + return ret; + } + + return 0; +} + +const struct devlink_ops kvaser_usb_devlink_ops = { + .info_get = kvaser_usb_devlink_info_get, +}; + +int kvaser_usb_devlink_port_register(struct kvaser_usb_net_priv *priv) +{ + int ret; + struct devlink_port_attrs attrs = { + .flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL, + .phys.port_number = priv->channel, + }; + devlink_port_attrs_set(&priv->devlink_port, &attrs); + + ret = devlink_port_register(priv_to_devlink(priv->dev), + &priv->devlink_port, priv->channel); + if (ret) + return ret; + + SET_NETDEV_DEVLINK_PORT(priv->netdev, &priv->devlink_port); + + return 0; +} + +void kvaser_usb_devlink_port_unregister(struct kvaser_usb_net_priv 
*priv) +{ + devlink_port_unregister(&priv->devlink_port); +} diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c index 5fc0be564274..a59f20dad692 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c @@ -10,9 +10,9 @@ * - Transition from CAN_STATE_ERROR_WARNING to CAN_STATE_ERROR_ACTIVE is only * reported after a call to do_get_berr_counter(), since firmware does not * distinguish between ERROR_WARNING and ERROR_ACTIVE. - * - Hardware timestamps are not set for CAN Tx frames. */ +#include <linux/bitfield.h> #include <linux/completion.h> #include <linux/device.h> #include <linux/gfp.h> @@ -22,6 +22,7 @@ #include <linux/spinlock.h> #include <linux/string.h> #include <linux/types.h> +#include <linux/units.h> #include <linux/usb.h> #include <linux/can.h> @@ -34,6 +35,7 @@ /* Forward declarations */ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_kcan; static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc; +static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_rt; #define KVASER_USB_HYDRA_BULK_EP_IN_ADDR 0x82 #define KVASER_USB_HYDRA_BULK_EP_OUT_ADDR 0x02 @@ -43,6 +45,8 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc; /* Minihydra command IDs */ #define CMD_SET_BUSPARAMS_REQ 16 +#define CMD_GET_BUSPARAMS_REQ 17 +#define CMD_GET_BUSPARAMS_RESP 18 #define CMD_GET_CHIP_STATE_REQ 19 #define CMD_CHIP_STATE_EVENT 20 #define CMD_SET_DRIVERMODE_REQ 21 @@ -64,6 +68,8 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc; #define CMD_SET_BUSPARAMS_RESP 85 #define CMD_GET_CAPABILITIES_REQ 95 #define CMD_GET_CAPABILITIES_RESP 96 +#define CMD_LED_ACTION_REQ 101 +#define CMD_LED_ACTION_RESP 102 #define CMD_RX_MESSAGE 106 #define CMD_MAP_CHANNEL_REQ 200 #define CMD_MAP_CHANNEL_RESP 201 @@ -108,7 +114,7 @@ struct kvaser_cmd_card_info { __le32 clock_res; __le32 mfg_date; __le32 ean[2]; - u8 hw_version; + u8 hw_revision; u8 usb_mode; u8 hw_type; u8 reserved0; @@ -135,6 +141,7 @@ struct kvaser_cmd_sw_detail_req { #define KVASER_USB_HYDRA_SW_FLAG_CANFD BIT(10) #define KVASER_USB_HYDRA_SW_FLAG_NONISO BIT(11) #define KVASER_USB_HYDRA_SW_FLAG_EXT_CAP BIT(12) +#define KVASER_USB_HYDRA_SW_FLAG_CAN_FREQ_80M BIT(13) struct kvaser_cmd_sw_detail_res { __le32 sw_flags; __le32 sw_version; @@ -193,21 +200,42 @@ struct kvaser_cmd_chip_state_event { #define KVASER_USB_HYDRA_BUS_MODE_CANFD_ISO 0x01 #define KVASER_USB_HYDRA_BUS_MODE_NONISO 0x02 struct kvaser_cmd_set_busparams { - __le32 bitrate; - u8 tseg1; - u8 tseg2; - u8 sjw; - u8 nsamples; + struct kvaser_usb_busparams busparams_nominal; u8 reserved0[4]; - __le32 bitrate_d; - u8 tseg1_d; - u8 tseg2_d; - u8 sjw_d; - u8 nsamples_d; + struct kvaser_usb_busparams busparams_data; u8 canfd_mode; u8 reserved1[7]; } __packed; +/* Busparam type */ +#define KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN 0x00 +#define KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD 0x01 +struct kvaser_cmd_get_busparams_req { + u8 type; + u8 reserved[27]; +} __packed; + +struct kvaser_cmd_get_busparams_res { + struct kvaser_usb_busparams busparams; + u8 reserved[20]; +} __packed; + +/* The device has two LEDs per CAN channel + * The LSB of action field controls the state: + * 0 = ON + * 1 = OFF + * The remaining bits of action field is the LED index + */ +#define KVASER_USB_HYDRA_LED_IDX_MASK GENMASK(31, 1) +#define KVASER_USB_HYDRA_LED_YELLOW_CH0_IDX 3 +#define KVASER_USB_HYDRA_LEDS_PER_CHANNEL 2 +struct 
kvaser_cmd_led_action_req { + u8 action; + u8 padding; + __le16 duration_ms; + u8 reserved[24]; +} __packed; + /* Ctrl modes */ #define KVASER_USB_HYDRA_CTRLMODE_NORMAL 0x01 #define KVASER_USB_HYDRA_CTRLMODE_LISTEN 0x02 @@ -251,6 +279,15 @@ struct kvaser_cmd_tx_can { u8 reserved[11]; } __packed; +struct kvaser_cmd_tx_ack { + __le32 id; + u8 data[8]; + u8 dlc; + u8 flags; + __le16 timestamp[3]; + u8 reserved0[8]; +} __packed; + struct kvaser_cmd_header { u8 cmd_no; /* The destination HE address is stored in 0..5 of he_addr. @@ -278,6 +315,10 @@ struct kvaser_cmd { struct kvaser_cmd_error_event error_event; struct kvaser_cmd_set_busparams set_busparams_req; + struct kvaser_cmd_get_busparams_req get_busparams_req; + struct kvaser_cmd_get_busparams_res get_busparams_res; + + struct kvaser_cmd_led_action_req led_action_req; struct kvaser_cmd_chip_state_event chip_state_event; @@ -285,6 +326,7 @@ struct kvaser_cmd { struct kvaser_cmd_rx_can rx_can; struct kvaser_cmd_tx_can tx_can; + struct kvaser_cmd_tx_ack tx_ack; } __packed; } __packed; @@ -293,6 +335,7 @@ struct kvaser_cmd { #define KVASER_USB_HYDRA_CF_FLAG_OVERRUN BIT(1) #define KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME BIT(4) #define KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID BIT(5) +#define KVASER_USB_HYDRA_CF_FLAG_TX_ACK BIT(6) /* CAN frame flags. Used in ext_rx_can and ext_tx_can */ #define KVASER_USB_HYDRA_CF_FLAG_OSM_NACK BIT(12) #define KVASER_USB_HYDRA_CF_FLAG_ABL BIT(13) @@ -359,6 +402,10 @@ struct kvaser_cmd_ext { } __packed; } __packed; +struct kvaser_usb_net_hydra_priv { + int pending_get_busparams_type; +}; + static const struct can_bittiming_const kvaser_usb_hydra_kcan_bittiming_c = { .name = "kvaser_usb_kcan", .tseg1_min = 1, @@ -367,11 +414,11 @@ static const struct can_bittiming_const kvaser_usb_hydra_kcan_bittiming_c = { .tseg2_max = 32, .sjw_max = 16, .brp_min = 1, - .brp_max = 4096, + .brp_max = 8192, .brp_inc = 1, }; -static const struct can_bittiming_const kvaser_usb_hydra_flexc_bittiming_c = { +const struct can_bittiming_const kvaser_usb_flexc_bittiming_const = { .name = "kvaser_usb_flex", .tseg1_min = 4, .tseg1_max = 16, @@ -383,6 +430,30 @@ static const struct can_bittiming_const kvaser_usb_hydra_flexc_bittiming_c = { .brp_inc = 1, }; +static const struct can_bittiming_const kvaser_usb_hydra_rt_bittiming_c = { + .name = "kvaser_usb_rt", + .tseg1_min = 2, + .tseg1_max = 96, + .tseg2_min = 2, + .tseg2_max = 32, + .sjw_max = 32, + .brp_min = 1, + .brp_max = 1024, + .brp_inc = 1, +}; + +static const struct can_bittiming_const kvaser_usb_hydra_rtd_bittiming_c = { + .name = "kvaser_usb_rt", + .tseg1_min = 2, + .tseg1_max = 39, + .tseg2_min = 2, + .tseg2_max = 8, + .sjw_max = 8, + .brp_min = 1, + .brp_max = 1024, + .brp_inc = 1, +}; + #define KVASER_USB_HYDRA_TRANSID_BITS 12 #define KVASER_USB_HYDRA_TRANSID_MASK \ GENMASK(KVASER_USB_HYDRA_TRANSID_BITS - 1, 0) @@ -481,36 +552,40 @@ kvaser_usb_hydra_net_priv_from_cmd(const struct kvaser_usb *dev, return priv; } -static ktime_t -kvaser_usb_hydra_ktime_from_rx_cmd(const struct kvaser_usb_dev_cfg *cfg, - const struct kvaser_cmd *cmd) +static ktime_t kvaser_usb_hydra_ktime_from_cmd(const struct kvaser_usb_dev_cfg *cfg, + const struct kvaser_cmd *cmd) { - u64 ticks; + ktime_t hwtstamp = 0; if (cmd->header.cmd_no == CMD_EXTENDED) { struct kvaser_cmd_ext *cmd_ext = (struct kvaser_cmd_ext *)cmd; - ticks = le64_to_cpu(cmd_ext->rx_can.timestamp); - } else { - ticks = le16_to_cpu(cmd->rx_can.timestamp[0]); - ticks += (u64)(le16_to_cpu(cmd->rx_can.timestamp[1])) << 16; - ticks += 
(u64)(le16_to_cpu(cmd->rx_can.timestamp[2])) << 32; + if (cmd_ext->cmd_no_ext == CMD_RX_MESSAGE_FD) + hwtstamp = kvaser_usb_timestamp64_to_ktime(cfg, cmd_ext->rx_can.timestamp); + else if (cmd_ext->cmd_no_ext == CMD_TX_ACKNOWLEDGE_FD) + hwtstamp = kvaser_usb_timestamp64_to_ktime(cfg, cmd_ext->tx_ack.timestamp); + } else if (cmd->header.cmd_no == CMD_RX_MESSAGE) { + hwtstamp = kvaser_usb_timestamp48_to_ktime(cfg, cmd->rx_can.timestamp); + } else if (cmd->header.cmd_no == CMD_TX_ACKNOWLEDGE) { + hwtstamp = kvaser_usb_timestamp48_to_ktime(cfg, cmd->tx_ack.timestamp); } - return ns_to_ktime(div_u64(ticks * 1000, cfg->timestamp_freq)); + return hwtstamp; } static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev, u8 cmd_no, int channel) { struct kvaser_cmd *cmd; + size_t cmd_len; int err; - cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; cmd->header.cmd_no = cmd_no; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); if (channel < 0) { kvaser_usb_hydra_set_cmd_dest_he (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL); @@ -527,7 +602,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev, kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); - err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + err = kvaser_usb_send_cmd(dev, cmd, cmd_len); if (err) goto end; @@ -543,21 +618,22 @@ kvaser_usb_hydra_send_simple_cmd_async(struct kvaser_usb_net_priv *priv, { struct kvaser_cmd *cmd; struct kvaser_usb *dev = priv->dev; + size_t cmd_len; int err; - cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC); + cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); if (!cmd) return -ENOMEM; cmd->header.cmd_no = cmd_no; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); kvaser_usb_hydra_set_cmd_dest_he (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); - err = kvaser_usb_send_cmd_async(priv, cmd, - kvaser_usb_hydra_cmd_size(cmd)); + err = kvaser_usb_send_cmd_async(priv, cmd, cmd_len); if (err) kfree(cmd); @@ -666,7 +742,7 @@ static int kvaser_usb_hydra_map_channel(struct kvaser_usb *dev, u16 transid, struct kvaser_cmd *cmd; int err; - cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; @@ -701,24 +777,26 @@ static int kvaser_usb_hydra_get_single_capability(struct kvaser_usb *dev, { struct kvaser_usb_dev_card_data *card_data = &dev->card_data; struct kvaser_cmd *cmd; + size_t cmd_len; u32 value = 0; u32 mask = 0; u16 cap_cmd_res; int err; int i; - cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; cmd->header.cmd_no = CMD_GET_CAPABILITIES_REQ; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); cmd->cap_req.cap_cmd = cpu_to_le16(cap_cmd_req); kvaser_usb_hydra_set_cmd_dest_he(cmd, card_data->hydra.sysdbg_he); kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); - err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + err = kvaser_usb_send_cmd(dev, cmd, cmd_len); if (err) goto end; @@ -812,6 +890,39 @@ static void kvaser_usb_hydra_flush_queue_reply(const struct kvaser_usb *dev, complete(&priv->flush_comp); } +static void kvaser_usb_hydra_get_busparams_reply(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + struct kvaser_usb_net_priv *priv; + struct kvaser_usb_net_hydra_priv *hydra; + + priv = 
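The open-coded conversion removed above now lives in shared helpers (kvaser_usb_timestamp48_to_ktime() and kvaser_usb_timestamp64_to_ktime(), defined outside this hunk). A sketch of what the 48-bit variant is assumed to do, reconstructed from the deleted lines; the prototype here is illustrative:

	/* three little-endian 16-bit words form a 48-bit tick counter;
	 * cfg->timestamp_freq is the tick rate in MHz, so one tick = 1000/freq ns
	 */
	static ktime_t timestamp48_to_ktime_sketch(const struct kvaser_usb_dev_cfg *cfg,
						   const __le16 *timestamp)
	{
		u64 ticks = le16_to_cpu(timestamp[0]) |
			    (u64)le16_to_cpu(timestamp[1]) << 16 |
			    (u64)le16_to_cpu(timestamp[2]) << 32;

		return ns_to_ktime(div_u64(ticks * 1000, cfg->timestamp_freq));
	}

With the kcan configuration further down (timestamp_freq = 80) one tick corresponds to 12.5 ns.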
kvaser_usb_hydra_net_priv_from_cmd(dev, cmd); + if (!priv) + return; + + hydra = priv->sub_priv; + if (!hydra) + return; + + switch (hydra->pending_get_busparams_type) { + case KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN: + memcpy(&priv->busparams_nominal, &cmd->get_busparams_res.busparams, + sizeof(priv->busparams_nominal)); + break; + case KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD: + memcpy(&priv->busparams_data, &cmd->get_busparams_res.busparams, + sizeof(priv->busparams_nominal)); + break; + default: + dev_warn(&dev->intf->dev, "Unknown get_busparams_type %d\n", + hydra->pending_get_busparams_type); + break; + } + hydra->pending_get_busparams_type = -1; + + complete(&priv->get_busparams_comp); +} + static void kvaser_usb_hydra_bus_status_to_can_state(const struct kvaser_usb_net_priv *priv, u8 bus_status, @@ -836,6 +947,42 @@ kvaser_usb_hydra_bus_status_to_can_state(const struct kvaser_usb_net_priv *priv, } } +static void kvaser_usb_hydra_change_state(struct kvaser_usb_net_priv *priv, + const struct can_berr_counter *bec, + struct can_frame *cf, + enum can_state new_state) +{ + struct net_device *netdev = priv->netdev; + enum can_state old_state = priv->can.state; + enum can_state tx_state, rx_state; + + tx_state = (bec->txerr >= bec->rxerr) ? + new_state : CAN_STATE_ERROR_ACTIVE; + rx_state = (bec->txerr <= bec->rxerr) ? + new_state : CAN_STATE_ERROR_ACTIVE; + can_change_state(netdev, cf, tx_state, rx_state); + + if (new_state == CAN_STATE_BUS_OFF && old_state < CAN_STATE_BUS_OFF) { + if (priv->can.restart_ms == 0) + kvaser_usb_hydra_send_simple_cmd_async(priv, CMD_STOP_CHIP_REQ); + + can_bus_off(netdev); + } + + if (priv->can.restart_ms && + old_state >= CAN_STATE_BUS_OFF && + new_state < CAN_STATE_BUS_OFF) { + priv->can.can_stats.restarts++; + if (cf) + cf->can_id |= CAN_ERR_RESTARTED; + } + if (cf && new_state != CAN_STATE_BUS_OFF) { + cf->can_id |= CAN_ERR_CNT; + cf->data[6] = bec->txerr; + cf->data[7] = bec->rxerr; + } +} + static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv, u8 bus_status, const struct can_berr_counter *bec) @@ -843,7 +990,6 @@ static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv, struct net_device *netdev = priv->netdev; struct can_frame *cf; struct sk_buff *skb; - struct net_device_stats *stats; enum can_state new_state, old_state; old_state = priv->can.state; @@ -862,41 +1008,11 @@ static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv, return; skb = alloc_can_err_skb(netdev, &cf); - if (skb) { - enum can_state tx_state, rx_state; - - tx_state = (bec->txerr >= bec->rxerr) ? - new_state : CAN_STATE_ERROR_ACTIVE; - rx_state = (bec->txerr <= bec->rxerr) ? 
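A worked example for the kvaser_usb_hydra_change_state() helper above, using made-up counter values:

	/* bec->txerr = 140, bec->rxerr = 20, new_state = CAN_STATE_ERROR_PASSIVE:
	 *   tx_state = CAN_STATE_ERROR_PASSIVE  (txerr >= rxerr)
	 *   rx_state = CAN_STATE_ERROR_ACTIVE   (txerr <= rxerr is false)
	 * and, since new_state != CAN_STATE_BUS_OFF and cf is valid, the error
	 * frame carries CAN_ERR_CNT with data[6] = 140 and data[7] = 20
	 */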
- new_state : CAN_STATE_ERROR_ACTIVE; - can_change_state(netdev, cf, tx_state, rx_state); - } - - if (new_state == CAN_STATE_BUS_OFF && old_state < CAN_STATE_BUS_OFF) { - if (!priv->can.restart_ms) - kvaser_usb_hydra_send_simple_cmd_async - (priv, CMD_STOP_CHIP_REQ); - - can_bus_off(netdev); - } - - if (!skb) { + kvaser_usb_hydra_change_state(priv, bec, cf, new_state); + if (skb) + netif_rx(skb); + else netdev_warn(netdev, "No memory left for err_skb\n"); - return; - } - - if (priv->can.restart_ms && - old_state >= CAN_STATE_BUS_OFF && - new_state < CAN_STATE_BUS_OFF) - priv->can.can_stats.restarts++; - - cf->data[6] = bec->txerr; - cf->data[7] = bec->rxerr; - - stats = &netdev->stats; - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; - netif_rx(skb); } static void kvaser_usb_hydra_state_event(const struct kvaser_usb *dev, @@ -989,9 +1105,8 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv, { struct net_device *netdev = priv->netdev; struct net_device_stats *stats = &netdev->stats; - struct can_frame *cf; - struct sk_buff *skb; - struct skb_shared_hwtstamps *shhwtstamps; + struct can_frame *cf = NULL; + struct sk_buff *skb = NULL; struct can_berr_counter bec; enum can_state new_state, old_state; u8 bus_status; @@ -1007,51 +1122,26 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv, kvaser_usb_hydra_bus_status_to_can_state(priv, bus_status, &bec, &new_state); - skb = alloc_can_err_skb(netdev, &cf); + if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) + skb = alloc_can_err_skb(netdev, &cf); + if (new_state != old_state) + kvaser_usb_hydra_change_state(priv, &bec, cf, new_state); - if (new_state != old_state) { + if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) { if (skb) { - enum can_state tx_state, rx_state; - - tx_state = (bec.txerr >= bec.rxerr) ? - new_state : CAN_STATE_ERROR_ACTIVE; - rx_state = (bec.txerr <= bec.rxerr) ? 
- new_state : CAN_STATE_ERROR_ACTIVE; - - can_change_state(netdev, cf, tx_state, rx_state); - - if (priv->can.restart_ms && - old_state >= CAN_STATE_BUS_OFF && - new_state < CAN_STATE_BUS_OFF) - cf->can_id |= CAN_ERR_RESTARTED; - } + struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); - if (new_state == CAN_STATE_BUS_OFF) { - if (!priv->can.restart_ms) - kvaser_usb_hydra_send_simple_cmd_async - (priv, CMD_STOP_CHIP_REQ); - - can_bus_off(netdev); + shhwtstamps->hwtstamp = hwtstamp; + cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + netif_rx(skb); + } else { + stats->rx_dropped++; + netdev_warn(netdev, "No memory left for err_skb\n"); } } - if (!skb) { - stats->rx_dropped++; - netdev_warn(netdev, "No memory left for err_skb\n"); - return; - } - - shhwtstamps = skb_hwtstamps(skb); - shhwtstamps->hwtstamp = hwtstamp; - - cf->can_id |= CAN_ERR_BUSERROR; - cf->data[6] = bec.txerr; - cf->data[7] = bec.rxerr; - - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; - netif_rx(skb); - priv->bec.txerr = bec.txerr; priv->bec.rxerr = bec.rxerr; } @@ -1083,8 +1173,6 @@ static void kvaser_usb_hydra_one_shot_fail(struct kvaser_usb_net_priv *priv, } stats->tx_errors++; - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; netif_rx(skb); } @@ -1094,8 +1182,11 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev, struct kvaser_usb_tx_urb_context *context; struct kvaser_usb_net_priv *priv; unsigned long irq_flags; + unsigned int len; bool one_shot_fail = false; + bool is_err_frame = false; u16 transid = kvaser_usb_hydra_get_cmd_transid(cmd); + struct sk_buff *skb; priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd); if (!priv) @@ -1113,24 +1204,31 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev, kvaser_usb_hydra_one_shot_fail(priv, cmd_ext); one_shot_fail = true; } + + is_err_frame = flags & KVASER_USB_HYDRA_CF_FLAG_TX_ACK && + flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME; } context = &priv->tx_contexts[transid % dev->max_tx_urbs]; - if (!one_shot_fail) { - struct net_device_stats *stats = &priv->netdev->stats; - - stats->tx_packets++; - stats->tx_bytes += can_dlc2len(context->dlc); - } spin_lock_irqsave(&priv->tx_contexts_lock, irq_flags); - can_get_echo_skb(priv->netdev, context->echo_index); + skb = priv->can.echo_skb[context->echo_index]; + if (skb) + skb_hwtstamps(skb)->hwtstamp = kvaser_usb_hydra_ktime_from_cmd(dev->cfg, cmd); + len = can_get_echo_skb(priv->netdev, context->echo_index, NULL); context->echo_index = dev->max_tx_urbs; --priv->active_tx_contexts; netif_wake_queue(priv->netdev); spin_unlock_irqrestore(&priv->tx_contexts_lock, irq_flags); + + if (!one_shot_fail && !is_err_frame) { + struct net_device_stats *stats = &priv->netdev->stats; + + stats->tx_packets++; + stats->tx_bytes += len; + } } static void kvaser_usb_hydra_rx_msg_std(const struct kvaser_usb *dev, @@ -1151,7 +1249,7 @@ static void kvaser_usb_hydra_rx_msg_std(const struct kvaser_usb *dev, stats = &priv->netdev->stats; flags = cmd->rx_can.flags; - hwtstamp = kvaser_usb_hydra_ktime_from_rx_cmd(dev->cfg, cmd); + hwtstamp = kvaser_usb_hydra_ktime_from_cmd(dev->cfg, cmd); if (flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME) { kvaser_usb_hydra_error_frame(priv, &cmd->rx_can.err_frame_data, @@ -1180,15 +1278,17 @@ static void kvaser_usb_hydra_rx_msg_std(const struct kvaser_usb *dev, if (flags & KVASER_USB_HYDRA_CF_FLAG_OVERRUN) kvaser_usb_can_rx_over_error(priv->netdev); - cf->can_dlc = get_can_dlc(cmd->rx_can.dlc); + 
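Since the TX acknowledge path above now stores a hardware timestamp in the echo skb, applications can retrieve it through the regular SO_TIMESTAMPING machinery; an illustrative userspace fragment (s is an already set up CAN_RAW socket, and the timestamp comes back on the socket error queue in an SCM_TIMESTAMPING control message):

	#include <sys/socket.h>
	#include <linux/net_tstamp.h>

	int ts_flags = SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE;
	setsockopt(s, SOL_SOCKET, SO_TIMESTAMPING, &ts_flags, sizeof(ts_flags));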
can_frame_set_cc_len((struct can_frame *)cf, cmd->rx_can.dlc, priv->can.ctrlmode); - if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME) + if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME) { cf->can_id |= CAN_RTR_FLAG; - else - memcpy(cf->data, cmd->rx_can.data, cf->can_dlc); + } else { + memcpy(cf->data, cmd->rx_can.data, cf->len); + stats->rx_bytes += cf->len; + } stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + netif_rx(skb); } @@ -1217,7 +1317,7 @@ static void kvaser_usb_hydra_rx_msg_ext(const struct kvaser_usb *dev, KVASER_USB_KCAN_DATA_DLC_SHIFT; flags = le32_to_cpu(cmd->rx_can.flags); - hwtstamp = kvaser_usb_hydra_ktime_from_rx_cmd(dev->cfg, std_cmd); + hwtstamp = kvaser_usb_hydra_ktime_from_cmd(dev->cfg, std_cmd); if (flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME) { kvaser_usb_hydra_error_frame(priv, &cmd->rx_can.err_frame_data, @@ -1251,22 +1351,24 @@ static void kvaser_usb_hydra_rx_msg_ext(const struct kvaser_usb *dev, kvaser_usb_can_rx_over_error(priv->netdev); if (flags & KVASER_USB_HYDRA_CF_FLAG_FDF) { - cf->len = can_dlc2len(get_canfd_dlc(dlc)); + cf->len = can_fd_dlc2len(dlc); if (flags & KVASER_USB_HYDRA_CF_FLAG_BRS) cf->flags |= CANFD_BRS; if (flags & KVASER_USB_HYDRA_CF_FLAG_ESI) cf->flags |= CANFD_ESI; } else { - cf->len = get_can_dlc(dlc); + can_frame_set_cc_len((struct can_frame *)cf, dlc, priv->can.ctrlmode); } - if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME) + if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME) { cf->can_id |= CAN_RTR_FLAG; - else + } else { memcpy(cf->data, cmd->rx_can.kcan_payload, cf->len); + stats->rx_bytes += cf->len; + } stats->rx_packets++; - stats->rx_bytes += cf->len; + netif_rx(skb); } @@ -1290,6 +1392,10 @@ static void kvaser_usb_hydra_handle_cmd_std(const struct kvaser_usb *dev, kvaser_usb_hydra_state_event(dev, cmd); break; + case CMD_GET_BUSPARAMS_RESP: + kvaser_usb_hydra_get_busparams_reply(dev, cmd); + break; + case CMD_ERROR_EVENT: kvaser_usb_hydra_error_event(dev, cmd); break; @@ -1305,6 +1411,7 @@ static void kvaser_usb_hydra_handle_cmd_std(const struct kvaser_usb *dev, /* Ignored commands */ case CMD_SET_BUSPARAMS_RESP: case CMD_SET_BUSPARAMS_FD_RESP: + case CMD_LED_ACTION_RESP: break; default: @@ -1345,22 +1452,20 @@ static void kvaser_usb_hydra_handle_cmd(const struct kvaser_usb *dev, static void * kvaser_usb_hydra_frame_to_cmd_ext(const struct kvaser_usb_net_priv *priv, - const struct sk_buff *skb, int *frame_len, - int *cmd_len, u16 transid) + const struct sk_buff *skb, int *cmd_len, + u16 transid) { struct kvaser_usb *dev = priv->dev; struct kvaser_cmd_ext *cmd; struct canfd_frame *cf = (struct canfd_frame *)skb->data; - u8 dlc = can_len2dlc(cf->len); + u8 dlc; u8 nbr_of_bytes = cf->len; u32 flags; u32 id; u32 kcan_id; u32 kcan_header; - *frame_len = nbr_of_bytes; - - cmd = kcalloc(1, sizeof(struct kvaser_cmd_ext), GFP_ATOMIC); + cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); if (!cmd) return NULL; @@ -1378,6 +1483,11 @@ kvaser_usb_hydra_frame_to_cmd_ext(const struct kvaser_usb_net_priv *priv, cmd->len = cpu_to_le16(*cmd_len); + if (can_is_canfd_skb(skb)) + dlc = can_fd_len2dlc(cf->len); + else + dlc = can_get_cc_dlc((struct can_frame *)cf, priv->can.ctrlmode); + cmd->tx_can.databytes = nbr_of_bytes; cmd->tx_can.dlc = dlc; @@ -1425,8 +1535,8 @@ kvaser_usb_hydra_frame_to_cmd_ext(const struct kvaser_usb_net_priv *priv, static void * kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv, - const struct sk_buff *skb, int *frame_len, - int *cmd_len, u16 transid) + const struct sk_buff *skb, int *cmd_len, + 
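The DLC helpers used above implement the standard CAN FD DLC table rather than anything driver specific; as a reminder:

	/* can_fd_dlc2len(): DLC 0..8 map 1:1 to the payload length,
	 * DLC 9..15 map to 12, 16, 20, 24, 32, 48 and 64 bytes.
	 * e.g. a received FD frame with dlc = 13 gives cf->len = 32,
	 * and can_fd_len2dlc(64) returns 15 on the transmit side.
	 */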
u16 transid) { struct kvaser_usb *dev = priv->dev; struct kvaser_cmd *cmd; @@ -1434,9 +1544,7 @@ kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv, u32 flags; u32 id; - *frame_len = cf->can_dlc; - - cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC); + cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); if (!cmd) return NULL; @@ -1455,7 +1563,7 @@ kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv, id = cf->can_id & CAN_SFF_MASK; } - cmd->tx_can.dlc = cf->can_dlc; + cmd->tx_can.dlc = can_get_cc_dlc(cf, priv->can.ctrlmode); flags = (cf->can_id & CAN_EFF_FLAG ? KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID : 0); @@ -1469,7 +1577,7 @@ kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv, cmd->tx_can.id = cpu_to_le32(id); cmd->tx_can.flags = flags; - memcpy(cmd->tx_can.data, cf->data, *frame_len); + memcpy(cmd->tx_can.data, cf->data, cf->len); return cmd; } @@ -1490,61 +1598,101 @@ static int kvaser_usb_hydra_set_mode(struct net_device *netdev, return err; } -static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev) +static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv, + int busparams_type) +{ + struct kvaser_usb *dev = priv->dev; + struct kvaser_usb_net_hydra_priv *hydra = priv->sub_priv; + struct kvaser_cmd *cmd; + size_t cmd_len; + int err; + + if (!hydra) + return -EINVAL; + + cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->header.cmd_no = CMD_GET_BUSPARAMS_REQ; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); + kvaser_usb_hydra_set_cmd_dest_he + (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); + kvaser_usb_hydra_set_cmd_transid + (cmd, kvaser_usb_hydra_get_next_transid(dev)); + cmd->get_busparams_req.type = busparams_type; + hydra->pending_get_busparams_type = busparams_type; + + reinit_completion(&priv->get_busparams_comp); + + err = kvaser_usb_send_cmd(dev, cmd, cmd_len); + if (err) + return err; + + if (!wait_for_completion_timeout(&priv->get_busparams_comp, + msecs_to_jiffies(KVASER_USB_TIMEOUT))) + return -ETIMEDOUT; + + return err; +} + +static int kvaser_usb_hydra_get_nominal_busparams(struct kvaser_usb_net_priv *priv) +{ + return kvaser_usb_hydra_get_busparams(priv, KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN); +} + +static int kvaser_usb_hydra_get_data_busparams(struct kvaser_usb_net_priv *priv) +{ + return kvaser_usb_hydra_get_busparams(priv, KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD); +} + +static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev, + const struct kvaser_usb_busparams *busparams) { struct kvaser_cmd *cmd; struct kvaser_usb_net_priv *priv = netdev_priv(netdev); - struct can_bittiming *bt = &priv->can.bittiming; struct kvaser_usb *dev = priv->dev; - int tseg1 = bt->prop_seg + bt->phase_seg1; - int tseg2 = bt->phase_seg2; - int sjw = bt->sjw; + size_t cmd_len; int err; - cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; cmd->header.cmd_no = CMD_SET_BUSPARAMS_REQ; - cmd->set_busparams_req.bitrate = cpu_to_le32(bt->bitrate); - cmd->set_busparams_req.sjw = (u8)sjw; - cmd->set_busparams_req.tseg1 = (u8)tseg1; - cmd->set_busparams_req.tseg2 = (u8)tseg2; - cmd->set_busparams_req.nsamples = 1; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); + memcpy(&cmd->set_busparams_req.busparams_nominal, busparams, + sizeof(cmd->set_busparams_req.busparams_nominal)); kvaser_usb_hydra_set_cmd_dest_he (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); 
kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); - err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + err = kvaser_usb_send_cmd(dev, cmd, cmd_len); kfree(cmd); return err; } -static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev) +static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev, + const struct kvaser_usb_busparams *busparams) { struct kvaser_cmd *cmd; struct kvaser_usb_net_priv *priv = netdev_priv(netdev); - struct can_bittiming *dbt = &priv->can.data_bittiming; struct kvaser_usb *dev = priv->dev; - int tseg1 = dbt->prop_seg + dbt->phase_seg1; - int tseg2 = dbt->phase_seg2; - int sjw = dbt->sjw; + size_t cmd_len; int err; - cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; cmd->header.cmd_no = CMD_SET_BUSPARAMS_FD_REQ; - cmd->set_busparams_req.bitrate_d = cpu_to_le32(dbt->bitrate); - cmd->set_busparams_req.sjw_d = (u8)sjw; - cmd->set_busparams_req.tseg1_d = (u8)tseg1; - cmd->set_busparams_req.tseg2_d = (u8)tseg2; - cmd->set_busparams_req.nsamples_d = 1; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); + memcpy(&cmd->set_busparams_req.busparams_data, busparams, + sizeof(cmd->set_busparams_req.busparams_data)); if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) @@ -1560,7 +1708,7 @@ static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev) kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); - err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + err = kvaser_usb_send_cmd(dev, cmd, cmd_len); kfree(cmd); @@ -1590,7 +1738,7 @@ static int kvaser_usb_hydra_setup_endpoints(struct kvaser_usb *dev) struct usb_endpoint_descriptor *ep; int i; - iface_desc = &dev->intf->altsetting[0]; + iface_desc = dev->intf->cur_altsetting; for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { ep = &iface_desc->endpoint[i].desc; @@ -1651,6 +1799,19 @@ static int kvaser_usb_hydra_init_card(struct kvaser_usb *dev) return 0; } +static int kvaser_usb_hydra_init_channel(struct kvaser_usb_net_priv *priv) +{ + struct kvaser_usb_net_hydra_priv *hydra; + + hydra = devm_kzalloc(&priv->dev->intf->dev, sizeof(*hydra), GFP_KERNEL); + if (!hydra) + return -ENOMEM; + + priv->sub_priv = hydra; + + return 0; +} + static int kvaser_usb_hydra_get_software_info(struct kvaser_usb *dev) { struct kvaser_cmd cmd; @@ -1675,15 +1836,18 @@ static int kvaser_usb_hydra_get_software_info(struct kvaser_usb *dev) static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev) { struct kvaser_cmd *cmd; + size_t cmd_len; int err; u32 flags; + u32 fw_version; struct kvaser_usb_dev_card_data *card_data = &dev->card_data; - cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; cmd->header.cmd_no = CMD_GET_SOFTWARE_DETAILS_REQ; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); cmd->sw_detail_req.use_ext_cmd = 1; kvaser_usb_hydra_set_cmd_dest_he (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL); @@ -1691,7 +1855,7 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev) kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); - err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + err = kvaser_usb_send_cmd(dev, cmd, cmd_len); if (err) goto end; @@ -1700,7 +1864,10 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev) if (err) goto end; - 
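The new dev_get_busparams()/dev_set_bittiming() pairing suggests a read-back check in the common code; that core helper is not part of this hunk, so the following is only a plausible sketch of how the two callbacks fit together:

	err = ops->dev_set_bittiming(netdev, &busparams);
	if (!err)
		err = ops->dev_get_busparams(priv);	/* fills priv->busparams_nominal */
	if (!err && memcmp(&busparams, &priv->busparams_nominal, sizeof(busparams)))
		err = -EINVAL;	/* device silently altered the parameters */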
dev->fw_version = le32_to_cpu(cmd->sw_detail_res.sw_version); + fw_version = le32_to_cpu(cmd->sw_detail_res.sw_version); + dev->fw_version.major = FIELD_GET(KVASER_USB_SW_VERSION_MAJOR_MASK, fw_version); + dev->fw_version.minor = FIELD_GET(KVASER_USB_SW_VERSION_MINOR_MASK, fw_version); + dev->fw_version.build = FIELD_GET(KVASER_USB_SW_VERSION_BUILD_MASK, fw_version); flags = le32_to_cpu(cmd->sw_detail_res.sw_flags); if (flags & KVASER_USB_HYDRA_SW_FLAG_FW_BAD) { @@ -1727,6 +1894,8 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev) if (flags & KVASER_USB_HYDRA_SW_FLAG_FREQ_80M) dev->cfg = &kvaser_usb_hydra_dev_cfg_kcan; + else if (flags & KVASER_USB_HYDRA_SW_FLAG_CAN_FREQ_80M) + dev->cfg = &kvaser_usb_hydra_dev_cfg_rt; else dev->cfg = &kvaser_usb_hydra_dev_cfg_flexc; @@ -1749,6 +1918,10 @@ static int kvaser_usb_hydra_get_card_info(struct kvaser_usb *dev) err = kvaser_usb_hydra_wait_cmd(dev, CMD_GET_CARD_INFO_RESP, &cmd); if (err) return err; + dev->ean[1] = le32_to_cpu(cmd.card_info.ean[1]); + dev->ean[0] = le32_to_cpu(cmd.card_info.ean[0]); + dev->serial_number = le32_to_cpu(cmd.card_info.serial_number); + dev->hw_revision = cmd.card_info.hw_revision; dev->nchannels = cmd.card_info.nchannels; if (dev->nchannels > KVASER_USB_MAX_NET_DEVICES) @@ -1803,10 +1976,41 @@ static int kvaser_usb_hydra_get_capabilities(struct kvaser_usb *dev) return 0; } +static int kvaser_usb_hydra_set_led(struct kvaser_usb_net_priv *priv, + enum kvaser_usb_led_state state, + u16 duration_ms) +{ + struct kvaser_usb *dev = priv->dev; + struct kvaser_cmd *cmd; + size_t cmd_len; + int ret; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->header.cmd_no = CMD_LED_ACTION_REQ; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); + kvaser_usb_hydra_set_cmd_dest_he(cmd, dev->card_data.hydra.sysdbg_he); + kvaser_usb_hydra_set_cmd_transid(cmd, kvaser_usb_hydra_get_next_transid(dev)); + + cmd->led_action_req.duration_ms = cpu_to_le16(duration_ms); + cmd->led_action_req.action = state | + FIELD_PREP(KVASER_USB_HYDRA_LED_IDX_MASK, + KVASER_USB_HYDRA_LED_YELLOW_CH0_IDX + + KVASER_USB_HYDRA_LEDS_PER_CHANNEL * priv->channel); + + ret = kvaser_usb_send_cmd(dev, cmd, cmd_len); + kfree(cmd); + + return ret; +} + static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv) { struct kvaser_usb *dev = priv->dev; struct kvaser_cmd *cmd; + size_t cmd_len; int err; if ((priv->can.ctrlmode & @@ -1817,11 +2021,12 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv) return -EINVAL; } - cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; cmd->header.cmd_no = CMD_SET_DRIVERMODE_REQ; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); kvaser_usb_hydra_set_cmd_dest_he (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); kvaser_usb_hydra_set_cmd_transid @@ -1831,7 +2036,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv) else cmd->set_ctrlmode.mode = KVASER_USB_HYDRA_CTRLMODE_NORMAL; - err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + err = kvaser_usb_send_cmd(dev, cmd, cmd_len); kfree(cmd); return err; @@ -1841,7 +2046,7 @@ static int kvaser_usb_hydra_start_chip(struct kvaser_usb_net_priv *priv) { int err; - init_completion(&priv->start_comp); + reinit_completion(&priv->start_comp); err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_START_CHIP_REQ, priv->channel); @@ -1859,7 +2064,7 @@ static int 
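A worked example of the LED action encoding used in kvaser_usb_hydra_set_led() above, assuming (per the earlier comment) that the ON state is encoded as 0:

	/* yellow LED of channel 1: index = 3 + 2 * 1 = 5 */
	u8 action_on = 0 /* ON */ |
		       FIELD_PREP(KVASER_USB_HYDRA_LED_IDX_MASK, 5);	/* = 0x0a */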
kvaser_usb_hydra_stop_chip(struct kvaser_usb_net_priv *priv) { int err; - init_completion(&priv->stop_comp); + reinit_completion(&priv->stop_comp); /* Make sure we do not report invalid BUS_OFF from CMD_CHIP_STATE_EVENT * see comment in kvaser_usb_hydra_update_state() @@ -1882,7 +2087,7 @@ static int kvaser_usb_hydra_flush_queue(struct kvaser_usb_net_priv *priv) { int err; - init_completion(&priv->flush_comp); + reinit_completion(&priv->flush_comp); err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_FLUSH_QUEUE, priv->channel); @@ -1975,17 +2180,17 @@ static void kvaser_usb_hydra_read_bulk_callback(struct kvaser_usb *dev, static void * kvaser_usb_hydra_frame_to_cmd(const struct kvaser_usb_net_priv *priv, - const struct sk_buff *skb, int *frame_len, - int *cmd_len, u16 transid) + const struct sk_buff *skb, int *cmd_len, + u16 transid) { void *buf; if (priv->dev->card_data.capabilities & KVASER_USB_HYDRA_CAP_EXT_CMD) - buf = kvaser_usb_hydra_frame_to_cmd_ext(priv, skb, frame_len, - cmd_len, transid); + buf = kvaser_usb_hydra_frame_to_cmd_ext(priv, skb, cmd_len, + transid); else - buf = kvaser_usb_hydra_frame_to_cmd_std(priv, skb, frame_len, - cmd_len, transid); + buf = kvaser_usb_hydra_frame_to_cmd_std(priv, skb, cmd_len, + transid); return buf; } @@ -1993,14 +2198,18 @@ kvaser_usb_hydra_frame_to_cmd(const struct kvaser_usb_net_priv *priv, const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops = { .dev_set_mode = kvaser_usb_hydra_set_mode, .dev_set_bittiming = kvaser_usb_hydra_set_bittiming, + .dev_get_busparams = kvaser_usb_hydra_get_nominal_busparams, .dev_set_data_bittiming = kvaser_usb_hydra_set_data_bittiming, + .dev_get_data_busparams = kvaser_usb_hydra_get_data_busparams, .dev_get_berr_counter = kvaser_usb_hydra_get_berr_counter, .dev_setup_endpoints = kvaser_usb_hydra_setup_endpoints, .dev_init_card = kvaser_usb_hydra_init_card, + .dev_init_channel = kvaser_usb_hydra_init_channel, .dev_get_software_info = kvaser_usb_hydra_get_software_info, .dev_get_software_details = kvaser_usb_hydra_get_software_details, .dev_get_card_info = kvaser_usb_hydra_get_card_info, .dev_get_capabilities = kvaser_usb_hydra_get_capabilities, + .dev_set_led = kvaser_usb_hydra_set_led, .dev_set_opt_mode = kvaser_usb_hydra_set_opt_mode, .dev_start_chip = kvaser_usb_hydra_start_chip, .dev_stop_chip = kvaser_usb_hydra_stop_chip, @@ -2012,7 +2221,7 @@ const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops = { static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_kcan = { .clock = { - .freq = 80000000, + .freq = 80 * MEGA /* Hz */, }, .timestamp_freq = 80, .bittiming_const = &kvaser_usb_hydra_kcan_bittiming_c, @@ -2021,8 +2230,17 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_kcan = { static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc = { .clock = { - .freq = 24000000, + .freq = 24 * MEGA /* Hz */, }, .timestamp_freq = 1, - .bittiming_const = &kvaser_usb_hydra_flexc_bittiming_c, + .bittiming_const = &kvaser_usb_flexc_bittiming_const, +}; + +static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_rt = { + .clock = { + .freq = 80 * MEGA /* Hz */, + }, + .timestamp_freq = 24, + .bittiming_const = &kvaser_usb_hydra_rt_bittiming_c, + .data_bittiming_const = &kvaser_usb_hydra_rtd_bittiming_c, }; diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c index 07d2f3aa2c02..1167d38344f1 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c @@ 
-10,6 +10,7 @@ * Copyright (C) 2015 Valeo S.A. */ +#include <linux/bitfield.h> #include <linux/completion.h> #include <linux/device.h> #include <linux/gfp.h> @@ -19,7 +20,9 @@ #include <linux/spinlock.h> #include <linux/string.h> #include <linux/types.h> +#include <linux/units.h> #include <linux/usb.h> +#include <linux/workqueue.h> #include <linux/can.h> #include <linux/can/dev.h> @@ -28,10 +31,6 @@ #include "kvaser_usb.h" -/* Forward declaration */ -static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg; - -#define CAN_USB_CLOCK 8000000 #define MAX_USBCAN_NET_DEVICES 2 /* Command header size */ @@ -59,6 +58,9 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg; #define CMD_RX_EXT_MESSAGE 14 #define CMD_TX_EXT_MESSAGE 15 #define CMD_SET_BUS_PARAMS 16 +#define CMD_GET_BUS_PARAMS 17 +#define CMD_GET_BUS_PARAMS_REPLY 18 +#define CMD_GET_CHIP_STATE 19 #define CMD_CHIP_STATE_EVENT 20 #define CMD_SET_CTRL_MODE 21 #define CMD_RESET_CHIP 24 @@ -73,13 +75,26 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg; #define CMD_GET_CARD_INFO_REPLY 35 #define CMD_GET_SOFTWARE_INFO 38 #define CMD_GET_SOFTWARE_INFO_REPLY 39 +#define CMD_ERROR_EVENT 45 #define CMD_FLUSH_QUEUE 48 #define CMD_TX_ACKNOWLEDGE 50 #define CMD_CAN_ERROR_EVENT 51 #define CMD_FLUSH_QUEUE_REPLY 68 +#define CMD_GET_CAPABILITIES_REQ 95 +#define CMD_GET_CAPABILITIES_RESP 96 +#define CMD_LED_ACTION_REQ 101 +#define CMD_LED_ACTION_RESP 102 #define CMD_LEAF_LOG_MESSAGE 106 +/* Leaf frequency options */ +#define KVASER_USB_LEAF_SWOPTION_FREQ_MASK 0x60 +#define KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK 0 +#define KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK BIT(5) +#define KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK BIT(6) + +#define KVASER_USB_LEAF_SWOPTION_EXT_CAP BIT(12) + /* error factors */ #define M16C_EF_ACKE BIT(0) #define M16C_EF_CRCE BIT(1) @@ -98,16 +113,6 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg; #define USBCAN_ERROR_STATE_RX_ERROR BIT(1) #define USBCAN_ERROR_STATE_BUSERROR BIT(2) -/* bittiming parameters */ -#define KVASER_USB_TSEG1_MIN 1 -#define KVASER_USB_TSEG1_MAX 16 -#define KVASER_USB_TSEG2_MIN 1 -#define KVASER_USB_TSEG2_MAX 8 -#define KVASER_USB_SJW_MAX 4 -#define KVASER_USB_BRP_MIN 1 -#define KVASER_USB_BRP_MAX 64 -#define KVASER_USB_BRP_INC 1 - /* ctrl modes */ #define KVASER_CTRL_MODE_NORMAL 1 #define KVASER_CTRL_MODE_SILENT 2 @@ -117,6 +122,10 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg; /* Extended CAN identifier flag */ #define KVASER_EXTENDED_FRAME BIT(31) +/* USBCanII timestamp */ +#define KVASER_USB_USBCAN_CLK_OVERFLOW_MASK GENMASK(31, 16) +#define KVASER_USB_USBCAN_TIMESTAMP_FACTOR 10 + struct kvaser_cmd_simple { u8 tid; u8 channel; @@ -129,7 +138,7 @@ struct kvaser_cmd_cardinfo { __le32 padding0; __le32 clock_resolution; __le32 mfgdate; - u8 ean[8]; + __le32 ean[2]; u8 hw_revision; union { struct { @@ -164,11 +173,22 @@ struct usbcan_cmd_softinfo { struct kvaser_cmd_busparams { u8 tid; u8 channel; - __le32 bitrate; - u8 tseg1; - u8 tseg2; - u8 sjw; - u8 no_samp; + struct kvaser_usb_busparams busparams; +} __packed; + +/* The device has one LED per CAN channel + * The LSB of action field controls the state: + * 0 = ON + * 1 = OFF + * The remaining bits of action field is the LED index + */ +#define KVASER_USB_LEAF_LED_IDX_MASK GENMASK(31, 1) +#define KVASER_USB_LEAF_LED_YELLOW_CH0_IDX 2 +struct kvaser_cmd_led_action_req { + u8 tid; + u8 action; + __le16 duration_ms; + u8 padding[24]; } __packed; struct kvaser_cmd_tx_can { @@ -237,7 
+257,21 @@ struct kvaser_cmd_tx_acknowledge_header { u8 tid; } __packed; -struct leaf_cmd_error_event { +struct leaf_cmd_tx_acknowledge { + u8 channel; + u8 tid; + __le16 time[3]; + u8 padding[2]; +} __packed; + +struct usbcan_cmd_tx_acknowledge { + u8 channel; + u8 tid; + __le16 time; + u8 padding[2]; +} __packed; + +struct leaf_cmd_can_error_event { u8 tid; u8 flags; __le16 time[3]; @@ -249,7 +283,7 @@ struct leaf_cmd_error_event { u8 error_factor; } __packed; -struct usbcan_cmd_error_event { +struct usbcan_cmd_can_error_event { u8 tid; u8 padding; u8 tx_errors_count_ch0; @@ -261,6 +295,34 @@ struct usbcan_cmd_error_event { __le16 time; } __packed; +/* CMD_ERROR_EVENT error codes */ +#define KVASER_USB_LEAF_ERROR_EVENT_TX_QUEUE_FULL 0x8 +#define KVASER_USB_LEAF_ERROR_EVENT_PARAM 0x9 + +struct leaf_cmd_error_event { + u8 tid; + u8 error_code; + __le16 timestamp[3]; + __le16 padding; + __le16 info1; + __le16 info2; +} __packed; + +struct usbcan_cmd_error_event { + u8 tid; + u8 error_code; + __le16 info1; + __le16 info2; + __le16 timestamp; + __le16 padding; +} __packed; + +struct usbcan_cmd_clk_overflow_event { + u8 tid; + u8 padding; + __le32 time; +} __packed; + struct kvaser_cmd_ctrl_mode { u8 tid; u8 channel; @@ -285,6 +347,28 @@ struct leaf_cmd_log_message { u8 data[8]; } __packed; +/* Sub commands for cap_req and cap_res */ +#define KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE 0x02 +#define KVASER_USB_LEAF_CAP_CMD_ERR_REPORT 0x05 +struct kvaser_cmd_cap_req { + __le16 padding0; + __le16 cap_cmd; + __le16 padding1; + __le16 channel; +} __packed; + +/* Status codes for cap_res */ +#define KVASER_USB_LEAF_CAP_STAT_OK 0x00 +#define KVASER_USB_LEAF_CAP_STAT_NOT_IMPL 0x01 +#define KVASER_USB_LEAF_CAP_STAT_UNAVAIL 0x02 +struct kvaser_cmd_cap_res { + __le16 padding; + __le16 cap_cmd; + __le16 status; + __le32 mask; + __le32 value; +} __packed; + struct kvaser_cmd { u8 len; u8 id; @@ -293,6 +377,8 @@ struct kvaser_cmd { struct kvaser_cmd_cardinfo cardinfo; struct kvaser_cmd_busparams busparams; + struct kvaser_cmd_led_action_req led_action_req; + struct kvaser_cmd_rx_can_header rx_can_header; struct kvaser_cmd_tx_acknowledge_header tx_acknowledge_header; @@ -300,15 +386,22 @@ struct kvaser_cmd { struct leaf_cmd_softinfo softinfo; struct leaf_cmd_rx_can rx_can; struct leaf_cmd_chip_state_event chip_state_event; - struct leaf_cmd_error_event error_event; + struct leaf_cmd_can_error_event can_error_event; struct leaf_cmd_log_message log_message; + struct leaf_cmd_error_event error_event; + struct kvaser_cmd_cap_req cap_req; + struct kvaser_cmd_cap_res cap_res; + struct leaf_cmd_tx_acknowledge tx_ack; } __packed leaf; union { struct usbcan_cmd_softinfo softinfo; struct usbcan_cmd_rx_can rx_can; struct usbcan_cmd_chip_state_event chip_state_event; + struct usbcan_cmd_can_error_event can_error_event; struct usbcan_cmd_error_event error_event; + struct usbcan_cmd_tx_acknowledge tx_ack; + struct usbcan_cmd_clk_overflow_event clk_overflow_event; } __packed usbcan; struct kvaser_cmd_tx_can tx_can; @@ -317,6 +410,44 @@ struct kvaser_cmd { } u; } __packed; +#define CMD_SIZE_ANY 0xff +#define kvaser_fsize(field) sizeof_field(struct kvaser_cmd, field) + +static const u8 kvaser_usb_leaf_cmd_sizes_leaf[] = { + [CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple), + [CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple), + [CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo), + [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.leaf.tx_ack), + [CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.leaf.softinfo), + [CMD_RX_STD_MESSAGE] = 
kvaser_fsize(u.leaf.rx_can), + [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.leaf.rx_can), + [CMD_LEAF_LOG_MESSAGE] = kvaser_fsize(u.leaf.log_message), + [CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.leaf.chip_state_event), + [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.leaf.can_error_event), + [CMD_GET_CAPABILITIES_RESP] = kvaser_fsize(u.leaf.cap_res), + [CMD_GET_BUS_PARAMS_REPLY] = kvaser_fsize(u.busparams), + [CMD_ERROR_EVENT] = kvaser_fsize(u.leaf.error_event), + /* ignored events: */ + [CMD_FLUSH_QUEUE_REPLY] = CMD_SIZE_ANY, + [CMD_LED_ACTION_RESP] = CMD_SIZE_ANY, +}; + +static const u8 kvaser_usb_leaf_cmd_sizes_usbcan[] = { + [CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple), + [CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple), + [CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo), + [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.usbcan.tx_ack), + [CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.usbcan.softinfo), + [CMD_RX_STD_MESSAGE] = kvaser_fsize(u.usbcan.rx_can), + [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.usbcan.rx_can), + [CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.usbcan.chip_state_event), + [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.usbcan.can_error_event), + [CMD_ERROR_EVENT] = kvaser_fsize(u.usbcan.error_event), + [CMD_USBCAN_CLOCK_OVERFLOW_EVENT] = kvaser_fsize(u.usbcan.clk_overflow_event), + /* ignored events: */ + [CMD_LED_ACTION_RESP] = CMD_SIZE_ANY, +}; + /* Summary of a kvaser error event, for a unified Leaf/Usbcan error * handling. Some discrepancies between the two families exist: * @@ -340,18 +471,151 @@ struct kvaser_usb_err_summary { }; }; +struct kvaser_usb_net_leaf_priv { + struct kvaser_usb_net_priv *net; + + struct delayed_work chip_state_req_work; + + /* started but not reported as bus-on yet */ + bool joining_bus; +}; + +static const struct can_bittiming_const kvaser_usb_leaf_m16c_bittiming_const = { + .name = "kvaser_usb_ucii", + .tseg1_min = 4, + .tseg1_max = 16, + .tseg2_min = 2, + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 16, + .brp_inc = 1, +}; + +static const struct can_bittiming_const kvaser_usb_leaf_m32c_bittiming_const = { + .name = "kvaser_usb_leaf", + .tseg1_min = 3, + .tseg1_max = 16, + .tseg2_min = 2, + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 2, + .brp_max = 128, + .brp_inc = 2, +}; + +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_usbcan_dev_cfg = { + .clock = { + .freq = 8 * MEGA /* Hz */, + }, + .timestamp_freq = 1, + .bittiming_const = &kvaser_usb_leaf_m16c_bittiming_const, +}; + +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg_16mhz = { + .clock = { + .freq = 16 * MEGA /* Hz */, + }, + .timestamp_freq = 16, + .bittiming_const = &kvaser_usb_leaf_m32c_bittiming_const, +}; + +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg_24mhz = { + .clock = { + .freq = 16 * MEGA /* Hz */, + }, + .timestamp_freq = 24, + .bittiming_const = &kvaser_usb_leaf_m32c_bittiming_const, +}; + +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg_32mhz = { + .clock = { + .freq = 16 * MEGA /* Hz */, + }, + .timestamp_freq = 32, + .bittiming_const = &kvaser_usb_leaf_m32c_bittiming_const, +}; + +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_16mhz = { + .clock = { + .freq = 16 * MEGA /* Hz */, + }, + .timestamp_freq = 16, + .bittiming_const = &kvaser_usb_flexc_bittiming_const, +}; + +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_24mhz = { + .clock = { + .freq = 24 * MEGA /* Hz */, + }, + .timestamp_freq = 24, + .bittiming_const = &kvaser_usb_flexc_bittiming_const, +}; + +static const struct 
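The two tables above record a minimum payload size per incoming command id, generated with sizeof_field(); kvaser_usb_leaf_verify_size() just below rejects anything shorter, and CMD_SIZE_ANY opts a command out of the check. For example:

	/* a Leaf CMD_GET_CARD_INFO_REPLY is only accepted if
	 *   cmd->len >= CMD_HEADER_LEN + sizeof_field(struct kvaser_cmd, u.cardinfo)
	 * while CMD_FLUSH_QUEUE_REPLY (CMD_SIZE_ANY) passes at any length
	 */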
kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_32mhz = { + .clock = { + .freq = 32 * MEGA /* Hz */, + }, + .timestamp_freq = 32, + .bittiming_const = &kvaser_usb_flexc_bittiming_const, +}; + +static inline ktime_t kvaser_usb_usbcan_timestamp_to_ktime(const struct kvaser_usb *dev, + __le16 timestamp) +{ + u64 ticks = le16_to_cpu(timestamp) | + dev->card_data.usbcan_timestamp_msb; + + return kvaser_usb_ticks_to_ktime(dev->cfg, ticks * KVASER_USB_USBCAN_TIMESTAMP_FACTOR); +} + +static int kvaser_usb_leaf_verify_size(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + /* buffer size >= cmd->len ensured by caller */ + u8 min_size = 0; + + switch (dev->driver_info->family) { + case KVASER_LEAF: + if (cmd->id < ARRAY_SIZE(kvaser_usb_leaf_cmd_sizes_leaf)) + min_size = kvaser_usb_leaf_cmd_sizes_leaf[cmd->id]; + break; + case KVASER_USBCAN: + if (cmd->id < ARRAY_SIZE(kvaser_usb_leaf_cmd_sizes_usbcan)) + min_size = kvaser_usb_leaf_cmd_sizes_usbcan[cmd->id]; + break; + } + + if (min_size == CMD_SIZE_ANY) + return 0; + + if (min_size) { + min_size += CMD_HEADER_LEN; + if (cmd->len >= min_size) + return 0; + + dev_err_ratelimited(&dev->intf->dev, + "Received command %u too short (size %u, needed %u)", + cmd->id, cmd->len, min_size); + return -EIO; + } + + dev_warn_ratelimited(&dev->intf->dev, + "Unhandled command (%d, size %d)\n", + cmd->id, cmd->len); + return -EINVAL; +} + static void * kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv, - const struct sk_buff *skb, int *frame_len, - int *cmd_len, u16 transid) + const struct sk_buff *skb, int *cmd_len, + u16 transid) { struct kvaser_usb *dev = priv->dev; struct kvaser_cmd *cmd; u8 *cmd_tx_can_flags = NULL; /* GCC */ struct can_frame *cf = (struct can_frame *)skb->data; - *frame_len = cf->can_dlc; - cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC); if (cmd) { cmd->u.tx_can.tid = transid & 0xff; @@ -359,7 +623,7 @@ kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv, sizeof(struct kvaser_cmd_tx_can); cmd->u.tx_can.channel = priv->channel; - switch (dev->card_data.leaf.family) { + switch (dev->driver_info->family) { case KVASER_LEAF: cmd_tx_can_flags = &cmd->u.tx_can.leaf.flags; break; @@ -383,8 +647,8 @@ kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv, cmd->u.tx_can.data[1] = cf->can_id & 0x3f; } - cmd->u.tx_can.data[5] = cf->can_dlc; - memcpy(&cmd->u.tx_can.data[6], cf->data, cf->can_dlc); + cmd->u.tx_can.data[5] = can_get_cc_dlc(cf, priv->can.ctrlmode); + memcpy(&cmd->u.tx_can.data[6], cf->data, cf->len); if (cf->can_id & CAN_RTR_FLAG) *cmd_tx_can_flags |= MSG_FLAG_REMOTE_FRAME; @@ -421,7 +685,7 @@ static int kvaser_usb_leaf_wait_cmd(const struct kvaser_usb *dev, u8 id, * for further details. 
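USBCan II devices report only the low 16 timestamp bits per frame; kvaser_usb_usbcan_timestamp_to_ktime() above ORs in dev->card_data.usbcan_timestamp_msb, which is presumably kept up to date from CMD_USBCAN_CLOCK_OVERFLOW_EVENT messages handled outside this excerpt (see KVASER_USB_USBCAN_CLK_OVERFLOW_MASK). A numeric sketch:

	/* timestamp = 0x0042, cached msb = 0x00010000:
	 *   ticks = 0x00010042, then scaled by KVASER_USB_USBCAN_TIMESTAMP_FACTOR (10)
	 *   before the generic ticks-to-ktime conversion against dev->cfg
	 */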
*/ if (tmp->len == 0) { - pos = round_up(pos, + pos = round_up(pos + 1, le16_to_cpu (dev->bulk_in->wMaxPacketSize)); continue; @@ -447,6 +711,9 @@ static int kvaser_usb_leaf_wait_cmd(const struct kvaser_usb *dev, u8 id, end: kfree(buf); + if (err == 0) + err = kvaser_usb_leaf_verify_size(dev, cmd); + return err; } @@ -471,10 +738,57 @@ static int kvaser_usb_leaf_send_simple_cmd(const struct kvaser_usb *dev, return rc; } +static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev, + const struct leaf_cmd_softinfo *softinfo) +{ + u32 fw_version; + u32 sw_options = le32_to_cpu(softinfo->sw_options); + + fw_version = le32_to_cpu(softinfo->fw_version); + dev->fw_version.major = FIELD_GET(KVASER_USB_SW_VERSION_MAJOR_MASK, fw_version); + dev->fw_version.minor = FIELD_GET(KVASER_USB_SW_VERSION_MINOR_MASK, fw_version); + dev->fw_version.build = FIELD_GET(KVASER_USB_SW_VERSION_BUILD_MASK, fw_version); + dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx); + + if (sw_options & KVASER_USB_LEAF_SWOPTION_EXT_CAP) + dev->card_data.capabilities |= KVASER_USB_CAP_EXT_CAP; + + if (dev->driver_info->quirks & KVASER_USB_QUIRK_IGNORE_CLK_FREQ) { + /* Firmware expects bittiming parameters calculated for 16MHz + * clock, regardless of the actual clock + * Though, the reported freq is used for timestamps + */ + switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) { + case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK: + dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg_16mhz; + break; + case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK: + dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg_24mhz; + break; + case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK: + dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg_32mhz; + break; + } + } else { + switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) { + case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK: + dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_16mhz; + break; + case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK: + dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_24mhz; + break; + case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK: + dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_32mhz; + break; + } + } +} + static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev) { struct kvaser_cmd cmd; int err; + u32 fw_version; err = kvaser_usb_leaf_send_simple_cmd(dev, CMD_GET_SOFTWARE_INFO, 0); if (err) @@ -484,16 +798,21 @@ static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev) if (err) return err; - switch (dev->card_data.leaf.family) { + switch (dev->driver_info->family) { case KVASER_LEAF: - dev->fw_version = le32_to_cpu(cmd.u.leaf.softinfo.fw_version); - dev->max_tx_urbs = - le16_to_cpu(cmd.u.leaf.softinfo.max_outstanding_tx); + kvaser_usb_leaf_get_software_info_leaf(dev, &cmd.u.leaf.softinfo); break; case KVASER_USBCAN: - dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version); + fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version); + dev->fw_version.major = FIELD_GET(KVASER_USB_SW_VERSION_MAJOR_MASK, + fw_version); + dev->fw_version.minor = FIELD_GET(KVASER_USB_SW_VERSION_MINOR_MASK, + fw_version); + dev->fw_version.build = FIELD_GET(KVASER_USB_SW_VERSION_BUILD_MASK, + fw_version); dev->max_tx_urbs = le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx); + dev->cfg = &kvaser_usb_leaf_usbcan_dev_cfg; break; } @@ -532,13 +851,155 @@ static int kvaser_usb_leaf_get_card_info(struct kvaser_usb *dev) dev->nchannels = cmd.u.cardinfo.nchannels; if (dev->nchannels > KVASER_USB_MAX_NET_DEVICES || - (dev->card_data.leaf.family == KVASER_USBCAN && + 
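A concrete reading of the clock quirk handled above: the M32C-based Leaf firmware wants bit-timing parameters computed for a 16 MHz clock no matter what the hardware actually runs at, while timestamps tick at the real rate. For instance:

	/* 24 MHz M32C-based Leaf (KVASER_USB_QUIRK_IGNORE_CLK_FREQ set):
	 *   dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg_24mhz
	 *   -> clock.freq = 16 MHz for bit-timing, timestamp_freq = 24
	 * 24 MHz iMX-based Leaf:
	 *   dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_24mhz
	 *   -> 24 MHz for both
	 */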
(dev->driver_info->family == KVASER_USBCAN && dev->nchannels > MAX_USBCAN_NET_DEVICES)) return -EINVAL; + dev->ean[1] = le32_to_cpu(cmd.u.cardinfo.ean[1]); + dev->ean[0] = le32_to_cpu(cmd.u.cardinfo.ean[0]); + dev->serial_number = le32_to_cpu(cmd.u.cardinfo.serial_number); + dev->hw_revision = cmd.u.cardinfo.hw_revision; return 0; } +static int kvaser_usb_leaf_get_single_capability(struct kvaser_usb *dev, + u16 cap_cmd_req, u16 *status) +{ + struct kvaser_usb_dev_card_data *card_data = &dev->card_data; + struct kvaser_cmd *cmd; + u32 value = 0; + u32 mask = 0; + u16 cap_cmd_res; + int err; + int i; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->id = CMD_GET_CAPABILITIES_REQ; + cmd->u.leaf.cap_req.cap_cmd = cpu_to_le16(cap_cmd_req); + cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_cap_req); + + err = kvaser_usb_send_cmd(dev, cmd, cmd->len); + if (err) + goto end; + + err = kvaser_usb_leaf_wait_cmd(dev, CMD_GET_CAPABILITIES_RESP, cmd); + if (err) + goto end; + + *status = le16_to_cpu(cmd->u.leaf.cap_res.status); + + if (*status != KVASER_USB_LEAF_CAP_STAT_OK) + goto end; + + cap_cmd_res = le16_to_cpu(cmd->u.leaf.cap_res.cap_cmd); + switch (cap_cmd_res) { + case KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE: + case KVASER_USB_LEAF_CAP_CMD_ERR_REPORT: + value = le32_to_cpu(cmd->u.leaf.cap_res.value); + mask = le32_to_cpu(cmd->u.leaf.cap_res.mask); + break; + default: + dev_warn(&dev->intf->dev, "Unknown capability command %u\n", + cap_cmd_res); + break; + } + + for (i = 0; i < dev->nchannels; i++) { + if (BIT(i) & (value & mask)) { + switch (cap_cmd_res) { + case KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE: + card_data->ctrlmode_supported |= + CAN_CTRLMODE_LISTENONLY; + break; + case KVASER_USB_LEAF_CAP_CMD_ERR_REPORT: + card_data->capabilities |= + KVASER_USB_CAP_BERR_CAP; + break; + } + } + } + +end: + kfree(cmd); + + return err; +} + +static int kvaser_usb_leaf_get_capabilities_leaf(struct kvaser_usb *dev) +{ + int err; + u16 status; + + if (!(dev->card_data.capabilities & KVASER_USB_CAP_EXT_CAP)) { + dev_info(&dev->intf->dev, + "No extended capability support. 
Upgrade device firmware.\n"); + return 0; + } + + err = kvaser_usb_leaf_get_single_capability(dev, + KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE, + &status); + if (err) + return err; + if (status) + dev_info(&dev->intf->dev, + "KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE failed %u\n", + status); + + err = kvaser_usb_leaf_get_single_capability(dev, + KVASER_USB_LEAF_CAP_CMD_ERR_REPORT, + &status); + if (err) + return err; + if (status) + dev_info(&dev->intf->dev, + "KVASER_USB_LEAF_CAP_CMD_ERR_REPORT failed %u\n", + status); + + return 0; +} + +static int kvaser_usb_leaf_set_led(struct kvaser_usb_net_priv *priv, + enum kvaser_usb_led_state state, + u16 duration_ms) +{ + struct kvaser_usb *dev = priv->dev; + struct kvaser_cmd *cmd; + int ret; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->id = CMD_LED_ACTION_REQ; + cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_led_action_req); + cmd->u.led_action_req.tid = 0xff; + + cmd->u.led_action_req.duration_ms = cpu_to_le16(duration_ms); + cmd->u.led_action_req.action = state | + FIELD_PREP(KVASER_USB_LEAF_LED_IDX_MASK, + KVASER_USB_LEAF_LED_YELLOW_CH0_IDX + + priv->channel); + + ret = kvaser_usb_send_cmd(dev, cmd, cmd->len); + kfree(cmd); + + return ret; +} + +static int kvaser_usb_leaf_get_capabilities(struct kvaser_usb *dev) +{ + int err = 0; + + if (dev->driver_info->family == KVASER_LEAF) + err = kvaser_usb_leaf_get_capabilities_leaf(dev); + + return err; +} + static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { @@ -547,6 +1008,8 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev, struct kvaser_usb_net_priv *priv; unsigned long flags; u8 channel, tid; + struct sk_buff *skb; + ktime_t hwtstamp = 0; channel = cmd->u.tx_acknowledge_header.channel; tid = cmd->u.tx_acknowledge_header.tid; @@ -567,17 +1030,15 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev, context = &priv->tx_contexts[tid % dev->max_tx_urbs]; /* Sometimes the state change doesn't come after a bus-off event */ - if (priv->can.restart_ms && priv->can.state >= CAN_STATE_BUS_OFF) { - struct sk_buff *skb; + if (priv->can.restart_ms && priv->can.state == CAN_STATE_BUS_OFF) { + struct sk_buff *err_skb; struct can_frame *cf; - skb = alloc_can_err_skb(priv->netdev, &cf); - if (skb) { + err_skb = alloc_can_err_skb(priv->netdev, &cf); + if (err_skb) { cf->can_id |= CAN_ERR_RESTARTED; - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; - netif_rx(skb); + netif_rx(err_skb); } else { netdev_err(priv->netdev, "No memory left for err_skb\n"); @@ -588,13 +1049,23 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev, priv->can.state = CAN_STATE_ERROR_ACTIVE; } - - stats->tx_packets++; - stats->tx_bytes += context->dlc; + switch (dev->driver_info->family) { + case KVASER_LEAF: + hwtstamp = kvaser_usb_timestamp48_to_ktime(dev->cfg, cmd->u.leaf.tx_ack.time); + break; + case KVASER_USBCAN: + hwtstamp = kvaser_usb_usbcan_timestamp_to_ktime(dev, cmd->u.usbcan.tx_ack.time); + break; + } spin_lock_irqsave(&priv->tx_contexts_lock, flags); - can_get_echo_skb(priv->netdev, context->echo_index); + skb = priv->can.echo_skb[context->echo_index]; + if (skb) + skb_hwtstamps(skb)->hwtstamp = hwtstamp; + stats->tx_packets++; + stats->tx_bytes += can_get_echo_skb(priv->netdev, + context->echo_index, NULL); context->echo_index = dev->max_tx_urbs; --priv->active_tx_contexts; netif_wake_queue(priv->netdev); @@ -608,7 +1079,7 @@ static int 
kvaser_usb_leaf_simple_cmd_async(struct kvaser_usb_net_priv *priv, struct kvaser_cmd *cmd; int err; - cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC); + cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); if (!cmd) return -ENOMEM; @@ -623,11 +1094,22 @@ static int kvaser_usb_leaf_simple_cmd_async(struct kvaser_usb_net_priv *priv, return err; } +static void kvaser_usb_leaf_chip_state_req_work(struct work_struct *work) +{ + struct kvaser_usb_net_leaf_priv *leaf = + container_of(work, struct kvaser_usb_net_leaf_priv, + chip_state_req_work.work); + struct kvaser_usb_net_priv *priv = leaf->net; + + kvaser_usb_leaf_simple_cmd_async(priv, CMD_GET_CHIP_STATE); +} + static void kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv, const struct kvaser_usb_err_summary *es, struct can_frame *cf) { + struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv; struct kvaser_usb *dev = priv->dev; struct net_device_stats *stats = &priv->netdev->stats; enum can_state cur_state, new_state, tx_state, rx_state; @@ -641,20 +1123,32 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv, new_state = CAN_STATE_BUS_OFF; } else if (es->status & M16C_STATE_BUS_PASSIVE) { new_state = CAN_STATE_ERROR_PASSIVE; - } else if (es->status & M16C_STATE_BUS_ERROR) { + } else if ((es->status & M16C_STATE_BUS_ERROR) && + cur_state >= CAN_STATE_BUS_OFF) { /* Guard against spurious error events after a busoff */ - if (cur_state < CAN_STATE_BUS_OFF) { - if (es->txerr >= 128 || es->rxerr >= 128) - new_state = CAN_STATE_ERROR_PASSIVE; - else if (es->txerr >= 96 || es->rxerr >= 96) - new_state = CAN_STATE_ERROR_WARNING; - else if (cur_state > CAN_STATE_ERROR_ACTIVE) - new_state = CAN_STATE_ERROR_ACTIVE; - } + } else if (es->txerr >= 128 || es->rxerr >= 128) { + new_state = CAN_STATE_ERROR_PASSIVE; + } else if (es->txerr >= 96 || es->rxerr >= 96) { + new_state = CAN_STATE_ERROR_WARNING; + } else { + new_state = CAN_STATE_ERROR_ACTIVE; } - if (!es->status) - new_state = CAN_STATE_ERROR_ACTIVE; + /* 0bfd:0124 FW 4.18.778 was observed to send the initial + * CMD_CHIP_STATE_EVENT after CMD_START_CHIP with M16C_STATE_BUS_OFF + * bit set if the channel was bus-off when it was last stopped (even + * across chip resets). This bit will clear shortly afterwards, without + * triggering a second unsolicited chip state event. + * Ignore this initial bus-off. + */ + if (leaf->joining_bus) { + if (new_state == CAN_STATE_BUS_OFF) { + netdev_dbg(priv->netdev, "ignoring bus-off during startup"); + new_state = cur_state; + } else { + leaf->joining_bus = false; + } + } if (new_state != cur_state) { tx_state = (es->txerr >= es->rxerr) ? 
new_state : 0; @@ -664,11 +1158,11 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv, } if (priv->can.restart_ms && - cur_state >= CAN_STATE_BUS_OFF && + cur_state == CAN_STATE_BUS_OFF && new_state < CAN_STATE_BUS_OFF) priv->can.can_stats.restarts++; - switch (dev->card_data.leaf.family) { + switch (dev->driver_info->family) { case KVASER_LEAF: if (es->leaf.error_factor) { priv->can.can_stats.bus_error++; @@ -692,12 +1186,11 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv, static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev, const struct kvaser_usb_err_summary *es) { - struct can_frame *cf; - struct can_frame tmp_cf = { .can_id = CAN_ERR_FLAG, - .can_dlc = CAN_ERR_DLC }; - struct sk_buff *skb; + struct can_frame *cf = NULL; + struct sk_buff *skb = NULL; struct net_device_stats *stats; struct kvaser_usb_net_priv *priv; + struct kvaser_usb_net_leaf_priv *leaf; enum can_state old_state, new_state; if (es->channel >= dev->nchannels) { @@ -707,28 +1200,29 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev, } priv = dev->nets[es->channel]; + leaf = priv->sub_priv; stats = &priv->netdev->stats; - /* Update all of the CAN interface's state and error counters before - * trying any memory allocation that can actually fail with -ENOMEM. - * - * We send a temporary stack-allocated error CAN frame to - * can_change_state() for the very same reason. - * - * TODO: Split can_change_state() responsibility between updating the - * CAN interface's state and counters, and the setting up of CAN error - * frame ID and data to userspace. Remove stack allocation afterwards. - */ + /* Ignore e.g. state change to bus-off reported just after stopping */ + if (!netif_running(priv->netdev)) + return; + old_state = priv->can.state; - kvaser_usb_leaf_rx_error_update_can_state(priv, es, &tmp_cf); + if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) + skb = alloc_can_err_skb(priv->netdev, &cf); + kvaser_usb_leaf_rx_error_update_can_state(priv, es, cf); new_state = priv->can.state; - skb = alloc_can_err_skb(priv->netdev, &cf); - if (!skb) { - stats->rx_dropped++; - return; - } - memcpy(cf, &tmp_cf, sizeof(*cf)); + /* If there are errors, request status updates periodically as we do + * not get automatic notifications of improved state. + * Also request updates if we saw a stale BUS_OFF during startup + * (joining_bus). 
+ */ + if (new_state < CAN_STATE_BUS_OFF && + (es->rxerr || es->txerr || new_state == CAN_STATE_ERROR_PASSIVE || + leaf->joining_bus)) + schedule_delayed_work(&leaf->chip_state_req_work, + msecs_to_jiffies(500)); if (new_state != old_state) { if (es->status & @@ -740,14 +1234,23 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev, } if (priv->can.restart_ms && - old_state >= CAN_STATE_BUS_OFF && + old_state == CAN_STATE_BUS_OFF && new_state < CAN_STATE_BUS_OFF) { - cf->can_id |= CAN_ERR_RESTARTED; + if (cf) + cf->can_id |= CAN_ERR_RESTARTED; netif_carrier_on(priv->netdev); } } - switch (dev->card_data.leaf.family) { + if (!skb) { + if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) { + stats->rx_dropped++; + netdev_warn(priv->netdev, "No memory left for err_skb\n"); + } + return; + } + + switch (dev->driver_info->family) { case KVASER_LEAF: if (es->leaf.error_factor) { cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; @@ -774,11 +1277,12 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev, break; } - cf->data[6] = es->txerr; - cf->data[7] = es->rxerr; + if (new_state != CAN_STATE_BUS_OFF) { + cf->can_id |= CAN_ERR_CNT; + cf->data[6] = es->txerr; + cf->data[7] = es->rxerr; + } - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; netif_rx(skb); } @@ -838,11 +1342,11 @@ static void kvaser_usb_leaf_usbcan_rx_error(const struct kvaser_usb *dev, case CMD_CAN_ERROR_EVENT: es.channel = 0; - es.status = cmd->u.usbcan.error_event.status_ch0; - es.txerr = cmd->u.usbcan.error_event.tx_errors_count_ch0; - es.rxerr = cmd->u.usbcan.error_event.rx_errors_count_ch0; + es.status = cmd->u.usbcan.can_error_event.status_ch0; + es.txerr = cmd->u.usbcan.can_error_event.tx_errors_count_ch0; + es.rxerr = cmd->u.usbcan.can_error_event.rx_errors_count_ch0; es.usbcan.other_ch_status = - cmd->u.usbcan.error_event.status_ch1; + cmd->u.usbcan.can_error_event.status_ch1; kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es); /* The USBCAN firmware supports up to 2 channels. 
@@ -850,13 +1354,13 @@ static void kvaser_usb_leaf_usbcan_rx_error(const struct kvaser_usb *dev, */ if (dev->nchannels == MAX_USBCAN_NET_DEVICES) { es.channel = 1; - es.status = cmd->u.usbcan.error_event.status_ch1; + es.status = cmd->u.usbcan.can_error_event.status_ch1; es.txerr = - cmd->u.usbcan.error_event.tx_errors_count_ch1; + cmd->u.usbcan.can_error_event.tx_errors_count_ch1; es.rxerr = - cmd->u.usbcan.error_event.rx_errors_count_ch1; + cmd->u.usbcan.can_error_event.rx_errors_count_ch1; es.usbcan.other_ch_status = - cmd->u.usbcan.error_event.status_ch0; + cmd->u.usbcan.can_error_event.status_ch0; kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es); } break; @@ -873,11 +1377,11 @@ static void kvaser_usb_leaf_leaf_rx_error(const struct kvaser_usb *dev, switch (cmd->id) { case CMD_CAN_ERROR_EVENT: - es.channel = cmd->u.leaf.error_event.channel; - es.status = cmd->u.leaf.error_event.status; - es.txerr = cmd->u.leaf.error_event.tx_errors_count; - es.rxerr = cmd->u.leaf.error_event.rx_errors_count; - es.leaf.error_factor = cmd->u.leaf.error_event.error_factor; + es.channel = cmd->u.leaf.can_error_event.channel; + es.status = cmd->u.leaf.can_error_event.status; + es.txerr = cmd->u.leaf.can_error_event.tx_errors_count; + es.rxerr = cmd->u.leaf.can_error_event.rx_errors_count; + es.leaf.error_factor = cmd->u.leaf.can_error_event.error_factor; break; case CMD_LEAF_LOG_MESSAGE: es.channel = cmd->u.leaf.log_message.channel; @@ -928,6 +1432,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev, struct net_device_stats *stats; u8 channel = cmd->u.rx_can_header.channel; const u8 *rx_data = NULL; /* GCC */ + ktime_t hwtstamp = 0; if (channel >= dev->nchannels) { dev_err(&dev->intf->dev, @@ -939,7 +1444,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev, stats = &priv->netdev->stats; if ((cmd->u.rx_can_header.flag & MSG_FLAG_ERROR_FRAME) && - (dev->card_data.leaf.family == KVASER_LEAF && + (dev->driver_info->family == KVASER_LEAF && cmd->id == CMD_LEAF_LOG_MESSAGE)) { kvaser_usb_leaf_leaf_rx_error(dev, cmd); return; @@ -955,12 +1460,14 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev, return; } - switch (dev->card_data.leaf.family) { + switch (dev->driver_info->family) { case KVASER_LEAF: rx_data = cmd->u.leaf.rx_can.data; + hwtstamp = kvaser_usb_timestamp48_to_ktime(dev->cfg, cmd->u.leaf.rx_can.time); break; case KVASER_USBCAN: rx_data = cmd->u.usbcan.rx_can.data; + hwtstamp = kvaser_usb_usbcan_timestamp_to_ktime(dev, cmd->u.usbcan.rx_can.time); break; } @@ -970,7 +1477,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev, return; } - if (dev->card_data.leaf.family == KVASER_LEAF && cmd->id == + if (dev->driver_info->family == KVASER_LEAF && cmd->id == CMD_LEAF_LOG_MESSAGE) { cf->can_id = le32_to_cpu(cmd->u.leaf.log_message.id); if (cf->can_id & KVASER_EXTENDED_FRAME) @@ -978,13 +1485,13 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev, else cf->can_id &= CAN_SFF_MASK; - cf->can_dlc = get_can_dlc(cmd->u.leaf.log_message.dlc); + can_frame_set_cc_len(cf, cmd->u.leaf.log_message.dlc & 0xF, priv->can.ctrlmode); if (cmd->u.leaf.log_message.flags & MSG_FLAG_REMOTE_FRAME) cf->can_id |= CAN_RTR_FLAG; else memcpy(cf->data, &cmd->u.leaf.log_message.data, - cf->can_dlc); + cf->len); } else { cf->can_id = ((rx_data[0] & 0x1f) << 6) | (rx_data[1] & 0x3f); @@ -996,19 +1503,89 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev, cf->can_id |= CAN_EFF_FLAG; } - cf->can_dlc = 
get_can_dlc(rx_data[5]); + can_frame_set_cc_len(cf, rx_data[5] & 0xF, priv->can.ctrlmode); if (cmd->u.rx_can_header.flag & MSG_FLAG_REMOTE_FRAME) cf->can_id |= CAN_RTR_FLAG; else - memcpy(cf->data, &rx_data[6], cf->can_dlc); + memcpy(cf->data, &rx_data[6], cf->len); } + skb_hwtstamps(skb)->hwtstamp = hwtstamp; stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + if (!(cf->can_id & CAN_RTR_FLAG)) + stats->rx_bytes += cf->len; netif_rx(skb); } +static void kvaser_usb_leaf_error_event_parameter(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + u16 info1 = 0; + + switch (dev->driver_info->family) { + case KVASER_LEAF: + info1 = le16_to_cpu(cmd->u.leaf.error_event.info1); + break; + case KVASER_USBCAN: + info1 = le16_to_cpu(cmd->u.usbcan.error_event.info1); + break; + } + + /* info1 will contain the offending cmd_no */ + switch (info1) { + case CMD_SET_CTRL_MODE: + dev_warn(&dev->intf->dev, + "CMD_SET_CTRL_MODE error in parameter\n"); + break; + + case CMD_SET_BUS_PARAMS: + dev_warn(&dev->intf->dev, + "CMD_SET_BUS_PARAMS error in parameter\n"); + break; + + default: + dev_warn(&dev->intf->dev, + "Unhandled parameter error event cmd_no (%u)\n", + info1); + break; + } +} + +static void kvaser_usb_leaf_error_event(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + u8 error_code = 0; + + switch (dev->driver_info->family) { + case KVASER_LEAF: + error_code = cmd->u.leaf.error_event.error_code; + break; + case KVASER_USBCAN: + error_code = cmd->u.usbcan.error_event.error_code; + break; + } + + switch (error_code) { + case KVASER_USB_LEAF_ERROR_EVENT_TX_QUEUE_FULL: + /* Received additional CAN message, when firmware TX queue is + * already full. Something is wrong with the driver. + * This should never happen! + */ + dev_err(&dev->intf->dev, + "Received error event TX_QUEUE_FULL\n"); + break; + case KVASER_USB_LEAF_ERROR_EVENT_PARAM: + kvaser_usb_leaf_error_event_parameter(dev, cmd); + break; + + default: + dev_warn(&dev->intf->dev, + "Unhandled error event (%d)\n", error_code); + break; + } +} + static void kvaser_usb_leaf_start_chip_reply(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { @@ -1049,9 +1626,31 @@ static void kvaser_usb_leaf_stop_chip_reply(const struct kvaser_usb *dev, complete(&priv->stop_comp); } -static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev, +static void kvaser_usb_leaf_get_busparams_reply(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + struct kvaser_usb_net_priv *priv; + u8 channel = cmd->u.busparams.channel; + + if (channel >= dev->nchannels) { + dev_err(&dev->intf->dev, + "Invalid channel number (%d)\n", channel); + return; + } + + priv = dev->nets[channel]; + memcpy(&priv->busparams_nominal, &cmd->u.busparams.busparams, + sizeof(priv->busparams_nominal)); + + complete(&priv->get_busparams_comp); +} + +static void kvaser_usb_leaf_handle_command(struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { + if (kvaser_usb_leaf_verify_size(dev, cmd) < 0) + return; + switch (cmd->id) { case CMD_START_CHIP_REPLY: kvaser_usb_leaf_start_chip_reply(dev, cmd); @@ -1067,14 +1666,14 @@ static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev, break; case CMD_LEAF_LOG_MESSAGE: - if (dev->card_data.leaf.family != KVASER_LEAF) + if (dev->driver_info->family != KVASER_LEAF) goto warn; kvaser_usb_leaf_rx_can_msg(dev, cmd); break; case CMD_CHIP_STATE_EVENT: case CMD_CAN_ERROR_EVENT: - if (dev->card_data.leaf.family == KVASER_LEAF) + if (dev->driver_info->family == KVASER_LEAF) 
kvaser_usb_leaf_leaf_rx_error(dev, cmd); else kvaser_usb_leaf_usbcan_rx_error(dev, cmd); @@ -1084,16 +1683,29 @@ static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev, kvaser_usb_leaf_tx_acknowledge(dev, cmd); break; - /* Ignored commands */ + case CMD_ERROR_EVENT: + kvaser_usb_leaf_error_event(dev, cmd); + break; + + case CMD_GET_BUS_PARAMS_REPLY: + kvaser_usb_leaf_get_busparams_reply(dev, cmd); + break; + case CMD_USBCAN_CLOCK_OVERFLOW_EVENT: - if (dev->card_data.leaf.family != KVASER_USBCAN) + if (dev->driver_info->family != KVASER_USBCAN) goto warn; + dev->card_data.usbcan_timestamp_msb = + le32_to_cpu(cmd->u.usbcan.clk_overflow_event.time) & + KVASER_USB_USBCAN_CLK_OVERFLOW_MASK; break; + /* Ignored commands */ case CMD_FLUSH_QUEUE_REPLY: - if (dev->card_data.leaf.family != KVASER_LEAF) + if (dev->driver_info->family != KVASER_LEAF) goto warn; break; + case CMD_LED_ACTION_RESP: + break; default: warn: dev_warn(&dev->intf->dev, "Unhandled command (%d)\n", cmd->id); @@ -1120,7 +1732,7 @@ static void kvaser_usb_leaf_read_bulk_callback(struct kvaser_usb *dev, * number of events in case of a heavy rx load on the bus. */ if (cmd->len == 0) { - pos = round_up(pos, le16_to_cpu + pos = round_up(pos + 1, le16_to_cpu (dev->bulk_in->wMaxPacketSize)); continue; } @@ -1140,7 +1752,7 @@ static int kvaser_usb_leaf_set_opt_mode(const struct kvaser_usb_net_priv *priv) struct kvaser_cmd *cmd; int rc; - cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; @@ -1162,9 +1774,12 @@ static int kvaser_usb_leaf_set_opt_mode(const struct kvaser_usb_net_priv *priv) static int kvaser_usb_leaf_start_chip(struct kvaser_usb_net_priv *priv) { + struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv; int err; - init_completion(&priv->start_comp); + leaf->joining_bus = true; + + reinit_completion(&priv->start_comp); err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_START_CHIP, priv->channel); @@ -1180,9 +1795,12 @@ static int kvaser_usb_leaf_start_chip(struct kvaser_usb_net_priv *priv) static int kvaser_usb_leaf_stop_chip(struct kvaser_usb_net_priv *priv) { + struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv; int err; - init_completion(&priv->stop_comp); + reinit_completion(&priv->stop_comp); + + cancel_delayed_work(&leaf->chip_state_req_work); err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_STOP_CHIP, priv->channel); @@ -1206,7 +1824,7 @@ static int kvaser_usb_leaf_flush_queue(struct kvaser_usb_net_priv *priv) struct kvaser_cmd *cmd; int rc; - cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; @@ -1225,28 +1843,40 @@ static int kvaser_usb_leaf_init_card(struct kvaser_usb *dev) { struct kvaser_usb_dev_card_data *card_data = &dev->card_data; - dev->cfg = &kvaser_usb_leaf_dev_cfg; card_data->ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; return 0; } -static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = { - .name = "kvaser_usb", - .tseg1_min = KVASER_USB_TSEG1_MIN, - .tseg1_max = KVASER_USB_TSEG1_MAX, - .tseg2_min = KVASER_USB_TSEG2_MIN, - .tseg2_max = KVASER_USB_TSEG2_MAX, - .sjw_max = KVASER_USB_SJW_MAX, - .brp_min = KVASER_USB_BRP_MIN, - .brp_max = KVASER_USB_BRP_MAX, - .brp_inc = KVASER_USB_BRP_INC, -}; +static int kvaser_usb_leaf_init_channel(struct kvaser_usb_net_priv *priv) +{ + struct kvaser_usb_net_leaf_priv *leaf; + + leaf = devm_kzalloc(&priv->dev->intf->dev, sizeof(*leaf), GFP_KERNEL); + if (!leaf) + return -ENOMEM; + + leaf->net = priv; + 
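The per-channel private data allocated here mostly exists to carry the delayed work initialised just below: the error path schedules it with a 500 ms delay so the driver keeps polling CMD_GET_CHIP_STATE while error counters are non-zero, since the firmware sends no event when the bus recovers. As an aside, a minimal sketch of that delayed-work polling pattern, with hypothetical names and the re-arm folded into the work function for brevity (the driver itself re-schedules from its error-event handler):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct chan_poll {
	struct delayed_work poll_work;
	bool keep_polling;		/* true while errors are outstanding */
};

static void chan_poll_fn(struct work_struct *work)
{
	struct chan_poll *cp = container_of(work, struct chan_poll,
					    poll_work.work);

	/* query the device state here, e.g. send a "get chip state" command */

	if (cp->keep_polling)
		schedule_delayed_work(&cp->poll_work, msecs_to_jiffies(500));
}

static void chan_poll_start(struct chan_poll *cp)
{
	INIT_DELAYED_WORK(&cp->poll_work, chan_poll_fn);
	cp->keep_polling = true;
	schedule_delayed_work(&cp->poll_work, msecs_to_jiffies(500));
}

static void chan_poll_stop(struct chan_poll *cp)
{
	cp->keep_polling = false;
	cancel_delayed_work_sync(&cp->poll_work);
}

The synchronous cancel in the teardown helper matches what kvaser_usb_leaf_remove_channel() does below.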
INIT_DELAYED_WORK(&leaf->chip_state_req_work, + kvaser_usb_leaf_chip_state_req_work); -static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev) + priv->sub_priv = leaf; + + return 0; +} + +static void kvaser_usb_leaf_remove_channel(struct kvaser_usb_net_priv *priv) +{ + struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv; + + if (leaf) + cancel_delayed_work_sync(&leaf->chip_state_req_work); +} + +static int kvaser_usb_leaf_set_bittiming(const struct net_device *netdev, + const struct kvaser_usb_busparams *busparams) { struct kvaser_usb_net_priv *priv = netdev_priv(netdev); - struct can_bittiming *bt = &priv->can.bittiming; struct kvaser_usb *dev = priv->dev; struct kvaser_cmd *cmd; int rc; @@ -1259,15 +1889,8 @@ static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev) cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_busparams); cmd->u.busparams.channel = priv->channel; cmd->u.busparams.tid = 0xff; - cmd->u.busparams.bitrate = cpu_to_le32(bt->bitrate); - cmd->u.busparams.sjw = bt->sjw; - cmd->u.busparams.tseg1 = bt->prop_seg + bt->phase_seg1; - cmd->u.busparams.tseg2 = bt->phase_seg2; - - if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) - cmd->u.busparams.no_samp = 3; - else - cmd->u.busparams.no_samp = 1; + memcpy(&cmd->u.busparams.busparams, busparams, + sizeof(cmd->u.busparams.busparams)); rc = kvaser_usb_send_cmd(dev, cmd, cmd->len); @@ -1275,17 +1898,45 @@ static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev) return rc; } +static int kvaser_usb_leaf_get_busparams(struct kvaser_usb_net_priv *priv) +{ + int err; + + if (priv->dev->driver_info->family == KVASER_USBCAN) + return -EOPNOTSUPP; + + reinit_completion(&priv->get_busparams_comp); + + err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_GET_BUS_PARAMS, + priv->channel); + if (err) + return err; + + if (!wait_for_completion_timeout(&priv->get_busparams_comp, + msecs_to_jiffies(KVASER_USB_TIMEOUT))) + return -ETIMEDOUT; + + return 0; +} + static int kvaser_usb_leaf_set_mode(struct net_device *netdev, enum can_mode mode) { struct kvaser_usb_net_priv *priv = netdev_priv(netdev); + struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv; int err; switch (mode) { case CAN_MODE_START: + kvaser_usb_unlink_tx_urbs(priv); + + leaf->joining_bus = true; + err = kvaser_usb_leaf_simple_cmd_async(priv, CMD_START_CHIP); if (err) return err; + + priv->can.state = CAN_STATE_ERROR_ACTIVE; break; default: return -EOPNOTSUPP; @@ -1310,7 +1961,7 @@ static int kvaser_usb_leaf_setup_endpoints(struct kvaser_usb *dev) struct usb_endpoint_descriptor *endpoint; int i; - iface_desc = &dev->intf->altsetting[0]; + iface_desc = dev->intf->cur_altsetting; for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; @@ -1332,14 +1983,19 @@ static int kvaser_usb_leaf_setup_endpoints(struct kvaser_usb *dev) const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = { .dev_set_mode = kvaser_usb_leaf_set_mode, .dev_set_bittiming = kvaser_usb_leaf_set_bittiming, + .dev_get_busparams = kvaser_usb_leaf_get_busparams, .dev_set_data_bittiming = NULL, + .dev_get_data_busparams = NULL, .dev_get_berr_counter = kvaser_usb_leaf_get_berr_counter, .dev_setup_endpoints = kvaser_usb_leaf_setup_endpoints, .dev_init_card = kvaser_usb_leaf_init_card, + .dev_init_channel = kvaser_usb_leaf_init_channel, + .dev_remove_channel = kvaser_usb_leaf_remove_channel, .dev_get_software_info = kvaser_usb_leaf_get_software_info, .dev_get_software_details = NULL, .dev_get_card_info = kvaser_usb_leaf_get_card_info, - 
.dev_get_capabilities = NULL, + .dev_get_capabilities = kvaser_usb_leaf_get_capabilities, + .dev_set_led = kvaser_usb_leaf_set_led, .dev_set_opt_mode = kvaser_usb_leaf_set_opt_mode, .dev_start_chip = kvaser_usb_leaf_start_chip, .dev_stop_chip = kvaser_usb_leaf_stop_chip, @@ -1348,11 +2004,3 @@ const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = { .dev_read_bulk_callback = kvaser_usb_leaf_read_bulk_callback, .dev_frame_to_cmd = kvaser_usb_leaf_frame_to_cmd, }; - -static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg = { - .clock = { - .freq = CAN_USB_CLOCK, - }, - .timestamp_freq = 1, - .bittiming_const = &kvaser_usb_leaf_bittiming_const, -}; diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c index 8d8c2086424d..41c0a1c399bf 100644 --- a/drivers/net/can/usb/mcba_usb.c +++ b/drivers/net/can/usb/mcba_usb.c @@ -1,27 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0-only /* SocketCAN driver for Microchip CAN BUS Analyzer Tool * * Copyright (C) 2017 Mobica Limited * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published - * by the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. - * * This driver is inspired by the 4.6.2 version of net/can/usb/usb_8dev.c */ -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> -#include <linux/can/led.h> +#include <linux/ethtool.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/signal.h> @@ -39,15 +28,11 @@ #define MCBA_CTX_FREE MCBA_MAX_TX_URBS /* RX buffer must be bigger than msg size since at the - * beggining USB messages are stacked. + * beginning USB messages are stacked. 
*/ #define MCBA_USB_RX_BUFF_SIZE 64 #define MCBA_USB_TX_BUFF_SIZE (sizeof(struct mcba_usb_msg)) -/* MCBA endpoint numbers */ -#define MCBA_USB_EP_IN 1 -#define MCBA_USB_EP_OUT 1 - /* Microchip command id */ #define MBCA_CMD_RECEIVE_MESSAGE 0xE3 #define MBCA_CMD_I_AM_ALIVE_FROM_CAN 0xF5 @@ -62,6 +47,10 @@ #define MCBA_VER_REQ_USB 1 #define MCBA_VER_REQ_CAN 2 +/* Drive the CAN_RES signal LOW "0" to activate R24 and R25 */ +#define MCBA_VER_TERMINATION_ON 0 +#define MCBA_VER_TERMINATION_OFF 1 + #define MCBA_SIDL_EXID_MASK 0x8 #define MCBA_DLC_MASK 0xf #define MCBA_DLC_RTR_MASK 0x40 @@ -75,7 +64,6 @@ struct mcba_usb_ctx { struct mcba_priv *priv; u32 ndx; - u8 dlc; bool can; }; @@ -93,6 +81,10 @@ struct mcba_priv { bool can_ka_first_pass; bool can_speed_check; atomic_t free_ctx_cnt; + void *rxbuf[MCBA_MAX_RX_URBS]; + dma_addr_t rxbuf_dma[MCBA_MAX_RX_URBS]; + int rx_pipe; + int tx_pipe; }; /* CAN frame */ @@ -193,13 +185,10 @@ static inline struct mcba_usb_ctx *mcba_usb_get_free_ctx(struct mcba_priv *priv, ctx = &priv->tx_context[i]; ctx->ndx = i; - if (cf) { + if (cf) ctx->can = true; - ctx->dlc = cf->can_dlc; - } else { + else ctx->can = false; - ctx->dlc = 0; - } atomic_dec(&priv->free_ctx_cnt); break; @@ -245,10 +234,8 @@ static void mcba_usb_write_bulk_callback(struct urb *urb) return; netdev->stats.tx_packets++; - netdev->stats.tx_bytes += ctx->dlc; - - can_led_event(netdev, CAN_LED_EVENT_TX); - can_get_echo_skb(netdev, ctx->ndx); + netdev->stats.tx_bytes += can_get_echo_skb(netdev, ctx->ndx, + NULL); } if (urb->status) @@ -281,10 +268,8 @@ static netdev_tx_t mcba_usb_xmit(struct mcba_priv *priv, memcpy(buf, usb_msg, MCBA_USB_TX_BUFF_SIZE); - usb_fill_bulk_urb(urb, priv->udev, - usb_sndbulkpipe(priv->udev, MCBA_USB_EP_OUT), buf, - MCBA_USB_TX_BUFF_SIZE, mcba_usb_write_bulk_callback, - ctx); + usb_fill_bulk_urb(urb, priv->udev, priv->tx_pipe, buf, MCBA_USB_TX_BUFF_SIZE, + mcba_usb_write_bulk_callback, ctx); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; usb_anchor_urb(urb, &priv->tx_submitted); @@ -330,15 +315,13 @@ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb, .cmd_id = MBCA_CMD_TRANSMIT_MESSAGE_EV }; - if (can_dropped_invalid_skb(netdev, skb)) + if (can_dev_dropped_skb(netdev, skb)) return NETDEV_TX_OK; ctx = mcba_usb_get_free_ctx(priv, cf); if (!ctx) return NETDEV_TX_BUSY; - can_put_echo_skb(skb, priv->netdev, ctx->ndx); - if (cf->can_id & CAN_EFF_FLAG) { /* SIDH | SIDL | EIDH | EIDL * 28 - 21 | 20 19 18 x x x 17 16 | 15 - 8 | 7 - 0 @@ -361,13 +344,15 @@ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb, usb_msg.eid = 0; } - usb_msg.dlc = cf->can_dlc; + usb_msg.dlc = cf->len; memcpy(usb_msg.data, cf->data, usb_msg.dlc); if (cf->can_id & CAN_RTR_FLAG) usb_msg.dlc |= MCBA_DLC_RTR_MASK; + can_put_echo_skb(skb, priv->netdev, ctx->ndx, 0); + err = mcba_usb_xmit(priv, (struct mcba_usb_msg *)&usb_msg, ctx); if (err) goto xmit_failed; @@ -375,9 +360,8 @@ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; xmit_failed: - can_free_echo_skb(priv->netdev, ctx->ndx); + can_free_echo_skb(priv->netdev, ctx->ndx, NULL); mcba_usb_free_ctx(ctx); - dev_kfree_skb(skb); stats->tx_dropped++; return NETDEV_TX_OK; @@ -459,17 +443,17 @@ static void mcba_usb_process_can(struct mcba_priv *priv, cf->can_id = (sid & 0xffe0) >> 5; } - if (msg->dlc & MCBA_DLC_RTR_MASK) - cf->can_id |= CAN_RTR_FLAG; - - cf->can_dlc = get_can_dlc(msg->dlc & MCBA_DLC_MASK); + cf->len = can_cc_dlc2len(msg->dlc & MCBA_DLC_MASK); - memcpy(cf->data, msg->data, cf->can_dlc); + if (msg->dlc 
& MCBA_DLC_RTR_MASK) { + cf->can_id |= CAN_RTR_FLAG; + } else { + memcpy(cf->data, msg->data, cf->len); + stats->rx_bytes += cf->len; + } stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; - can_led_event(priv->netdev, CAN_LED_EVENT_RX); netif_rx(skb); } @@ -477,13 +461,13 @@ static void mcba_usb_process_ka_usb(struct mcba_priv *priv, struct mcba_usb_msg_ka_usb *msg) { if (unlikely(priv->usb_ka_first_pass)) { - netdev_info(priv->netdev, "PIC USB version %hhu.%hhu\n", + netdev_info(priv->netdev, "PIC USB version %u.%u\n", msg->soft_ver_major, msg->soft_ver_minor); priv->usb_ka_first_pass = false; } - if (msg->termination_state) + if (msg->termination_state == MCBA_VER_TERMINATION_ON) priv->can.termination = MCBA_TERMINATION_ENABLED; else priv->can.termination = MCBA_TERMINATION_DISABLED; @@ -503,7 +487,7 @@ static void mcba_usb_process_ka_can(struct mcba_priv *priv, struct mcba_usb_msg_ka_can *msg) { if (unlikely(priv->can_ka_first_pass)) { - netdev_info(priv->netdev, "PIC CAN version %hhu.%hhu\n", + netdev_info(priv->netdev, "PIC CAN version %u.%u\n", msg->soft_ver_major, msg->soft_ver_minor); priv->can_ka_first_pass = false; @@ -565,7 +549,7 @@ static void mcba_usb_process_rx(struct mcba_priv *priv, break; default: - netdev_warn(priv->netdev, "Unsupported msg (0x%hhX)", + netdev_warn(priv->netdev, "Unsupported msg (0x%X)", msg->cmd_id); break; } @@ -620,7 +604,7 @@ static void mcba_usb_read_bulk_callback(struct urb *urb) resubmit_urb: usb_fill_bulk_urb(urb, priv->udev, - usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_OUT), + priv->rx_pipe, urb->transfer_buffer, MCBA_USB_RX_BUFF_SIZE, mcba_usb_read_bulk_callback, priv); @@ -644,6 +628,7 @@ static int mcba_usb_start(struct mcba_priv *priv) for (i = 0; i < MCBA_MAX_RX_URBS; i++) { struct urb *urb = NULL; u8 *buf; + dma_addr_t buf_dma; /* create a URB, and a buffer for it */ urb = usb_alloc_urb(0, GFP_KERNEL); @@ -653,7 +638,7 @@ static int mcba_usb_start(struct mcba_priv *priv) } buf = usb_alloc_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE, - GFP_KERNEL, &urb->transfer_dma); + GFP_KERNEL, &buf_dma); if (!buf) { netdev_err(netdev, "No memory left for USB buffer\n"); usb_free_urb(urb); @@ -661,8 +646,10 @@ static int mcba_usb_start(struct mcba_priv *priv) break; } + urb->transfer_dma = buf_dma; + usb_fill_bulk_urb(urb, priv->udev, - usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_IN), + priv->rx_pipe, buf, MCBA_USB_RX_BUFF_SIZE, mcba_usb_read_bulk_callback, priv); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; @@ -672,11 +659,14 @@ static int mcba_usb_start(struct mcba_priv *priv) if (err) { usb_unanchor_urb(urb); usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE, - buf, urb->transfer_dma); + buf, buf_dma); usb_free_urb(urb); break; } + priv->rxbuf[i] = buf; + priv->rxbuf_dma[i] = buf_dma; + /* Drop reference, USB core will take care of freeing it */ usb_free_urb(urb); } @@ -711,7 +701,6 @@ static int mcba_usb_open(struct net_device *netdev) priv->can_speed_check = true; priv->can.state = CAN_STATE_ERROR_ACTIVE; - can_led_event(netdev, CAN_LED_EVENT_OPEN); netif_start_queue(netdev); return 0; @@ -719,7 +708,14 @@ static int mcba_usb_open(struct net_device *netdev) static void mcba_urb_unlink(struct mcba_priv *priv) { + int i; + usb_kill_anchored_urbs(&priv->rx_submitted); + + for (i = 0; i < MCBA_MAX_RX_URBS; ++i) + usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE, + priv->rxbuf[i], priv->rxbuf_dma[i]); + usb_kill_anchored_urbs(&priv->tx_submitted); } @@ -736,7 +732,6 @@ static int mcba_usb_close(struct net_device *netdev) mcba_urb_unlink(priv); 
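The new rxbuf[]/rxbuf_dma[] arrays filled in by mcba_usb_start() are what allow mcba_urb_unlink(), called just above, to hand the coherent DMA buffers back; previously they were simply leaked once the RX URBs were killed. A reduced sketch of the pattern for a single buffer, with hypothetical names and error handling trimmed:

#include <linux/usb.h>

struct rx_buf {
	void *cpu;		/* kernel virtual address */
	dma_addr_t dma;		/* bus address handed to the URB */
};

static int rx_buf_setup(struct usb_device *udev, struct urb *urb,
			struct rx_buf *b, size_t len)
{
	b->cpu = usb_alloc_coherent(udev, len, GFP_KERNEL, &b->dma);
	if (!b->cpu)
		return -ENOMEM;

	urb->transfer_dma = b->dma;
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	return 0;
}

static void rx_buf_teardown(struct usb_device *udev, struct rx_buf *b,
			    size_t len)
{
	/* only safe once the URB has been killed/unanchored */
	usb_free_coherent(udev, len, b->cpu, b->dma);
}

URB_NO_TRANSFER_DMA_MAP tells the USB core the buffer is already DMA-mapped, which is why the driver has to remember the addresses and free them explicitly.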
close_candev(netdev); - can_led_event(netdev, CAN_LED_EVENT_STOP); return 0; } @@ -768,6 +763,10 @@ static const struct net_device_ops mcba_netdev_ops = { .ndo_start_xmit = mcba_usb_start_xmit, }; +static const struct ethtool_ops mcba_ethtool_ops = { + .get_ts_info = ethtool_op_get_ts_info, +}; + /* Microchip CANBUS has hardcoded bittiming values by default. * This function sends request via USB to change the speed and align bittiming * values for presentation purposes only @@ -790,9 +789,9 @@ static int mcba_set_termination(struct net_device *netdev, u16 term) }; if (term == MCBA_TERMINATION_ENABLED) - usb_msg.termination = 1; + usb_msg.termination = MCBA_VER_TERMINATION_ON; else - usb_msg.termination = 0; + usb_msg.termination = MCBA_VER_TERMINATION_OFF; mcba_usb_xmit_cmd(priv, (struct mcba_usb_msg *)&usb_msg); @@ -804,8 +803,15 @@ static int mcba_usb_probe(struct usb_interface *intf, { struct net_device *netdev; struct mcba_priv *priv; - int err = -ENOMEM; + int err; struct usb_device *usbdev = interface_to_usbdev(intf); + struct usb_endpoint_descriptor *in, *out; + + err = usb_find_common_endpoints(intf->cur_altsetting, &in, &out, NULL, NULL); + if (err) { + dev_err(&intf->dev, "Can't find endpoints\n"); + return err; + } netdev = alloc_candev(sizeof(struct mcba_priv), MCBA_MAX_TX_URBS); if (!netdev) { @@ -839,6 +845,7 @@ static int mcba_usb_probe(struct usb_interface *intf, priv->can.do_set_bittiming = mcba_net_set_bittiming; netdev->netdev_ops = &mcba_netdev_ops; + netdev->ethtool_ops = &mcba_ethtool_ops; netdev->flags |= IFF_ECHO; /* we support local echo */ @@ -851,7 +858,8 @@ static int mcba_usb_probe(struct usb_interface *intf, goto cleanup_free_candev; } - devm_can_led_init(netdev); + priv->rx_pipe = usb_rcvbulkpipe(priv->udev, in->bEndpointAddress); + priv->tx_pipe = usb_sndbulkpipe(priv->udev, out->bEndpointAddress); /* Start USB dev only if we have successfully registered CAN device */ err = mcba_usb_start(priv); @@ -887,9 +895,8 @@ static void mcba_usb_disconnect(struct usb_interface *intf) netdev_info(priv->netdev, "device disconnected\n"); unregister_candev(priv->netdev); - free_candev(priv->netdev); - mcba_urb_unlink(priv); + free_candev(priv->netdev); } static struct usb_driver mcba_usb_driver = { diff --git a/drivers/net/can/usb/nct6694_canfd.c b/drivers/net/can/usb/nct6694_canfd.c new file mode 100644 index 000000000000..dd6df2ec3742 --- /dev/null +++ b/drivers/net/can/usb/nct6694_canfd.c @@ -0,0 +1,831 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Nuvoton NCT6694 Socket CANfd driver based on USB interface. + * + * Copyright (C) 2025 Nuvoton Technology Corp. + */ + +#include <linux/bitfield.h> +#include <linux/can/dev.h> +#include <linux/can/rx-offload.h> +#include <linux/ethtool.h> +#include <linux/idr.h> +#include <linux/irqdomain.h> +#include <linux/kernel.h> +#include <linux/mfd/nct6694.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/platform_device.h> + +#define DEVICE_NAME "nct6694-canfd" + +/* USB command module type for NCT6694 CANfd controller. + * This defines the module type used for communication with the NCT6694 + * CANfd controller over the USB interface. 
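As an aside on the transport: every operation in this driver is a nct6694_read_msg()/nct6694_write_msg() exchange keyed by a small command header carrying the module id (0x05 for CAN FD), a command number, a selector byte and a payload length. The header type and the two helpers come from the NCT6694 MFD core (linux/mfd/nct6694.h), which is not part of this diff; the sketch below only uses the fields visible further down in this file and is illustrative, not the driver's own code:

/* hypothetical: fetch one received frame from channel 0, mirroring the
 * cmd_hd initialisers used by nct6694_canfd_handle_rx() below
 */
static int example_read_one_frame(struct nct6694 *chip,
				  struct nct6694_canfd_frame *frame)
{
	const struct nct6694_cmd_header cmd_hd = {
		.mod = NCT6694_CANFD_MOD,		/* 0x05, CAN FD module */
		.cmd = NCT6694_CANFD_RECEIVE,		/* command 11h */
		.sel = NCT6694_CANFD_RECEIVE_SEL(0, 1),	/* channel 0, one buffer */
		.len = cpu_to_le16(sizeof(*frame)),
	};

	return nct6694_read_msg(chip, &cmd_hd, frame);
}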
+ */ +#define NCT6694_CANFD_MOD 0x05 + +/* Command 00h - CAN Setting and Initialization */ +#define NCT6694_CANFD_SETTING 0x00 +#define NCT6694_CANFD_SETTING_ACTIVE_CTRL1 BIT(0) +#define NCT6694_CANFD_SETTING_ACTIVE_CTRL2 BIT(1) +#define NCT6694_CANFD_SETTING_ACTIVE_NBTP_DBTP BIT(2) +#define NCT6694_CANFD_SETTING_CTRL1_MON BIT(0) +#define NCT6694_CANFD_SETTING_CTRL1_NISO BIT(1) +#define NCT6694_CANFD_SETTING_CTRL1_LBCK BIT(2) +#define NCT6694_CANFD_SETTING_NBTP_NTSEG2 GENMASK(6, 0) +#define NCT6694_CANFD_SETTING_NBTP_NTSEG1 GENMASK(15, 8) +#define NCT6694_CANFD_SETTING_NBTP_NBRP GENMASK(24, 16) +#define NCT6694_CANFD_SETTING_NBTP_NSJW GENMASK(31, 25) +#define NCT6694_CANFD_SETTING_DBTP_DSJW GENMASK(3, 0) +#define NCT6694_CANFD_SETTING_DBTP_DTSEG2 GENMASK(7, 4) +#define NCT6694_CANFD_SETTING_DBTP_DTSEG1 GENMASK(12, 8) +#define NCT6694_CANFD_SETTING_DBTP_DBRP GENMASK(20, 16) +#define NCT6694_CANFD_SETTING_DBTP_TDC BIT(23) + +/* Command 01h - CAN Information */ +#define NCT6694_CANFD_INFORMATION 0x01 +#define NCT6694_CANFD_INFORMATION_SEL 0x00 + +/* Command 02h - CAN Event */ +#define NCT6694_CANFD_EVENT 0x02 +#define NCT6694_CANFD_EVENT_SEL(idx, mask) \ + ((idx ? 0x80 : 0x00) | ((mask) & 0x7F)) + +#define NCT6694_CANFD_EVENT_MASK GENMASK(5, 0) +#define NCT6694_CANFD_EVT_TX_FIFO_EMPTY BIT(7) /* Read-clear */ +#define NCT6694_CANFD_EVT_RX_DATA_LOST BIT(5) /* Read-clear */ +#define NCT6694_CANFD_EVT_RX_DATA_IN BIT(7) /* Read-clear */ + +/* Command 10h - CAN Deliver */ +#define NCT6694_CANFD_DELIVER 0x10 +#define NCT6694_CANFD_DELIVER_SEL(buf_cnt) \ + ((buf_cnt) & 0xFF) + +/* Command 11h - CAN Receive */ +#define NCT6694_CANFD_RECEIVE 0x11 +#define NCT6694_CANFD_RECEIVE_SEL(idx, buf_cnt) \ + ((idx ? 0x80 : 0x00) | ((buf_cnt) & 0x7F)) + +#define NCT6694_CANFD_FRAME_TAG(idx) (0xC0 | (idx)) +#define NCT6694_CANFD_FRAME_FLAG_EFF BIT(0) +#define NCT6694_CANFD_FRAME_FLAG_RTR BIT(1) +#define NCT6694_CANFD_FRAME_FLAG_FD BIT(2) +#define NCT6694_CANFD_FRAME_FLAG_BRS BIT(3) +#define NCT6694_CANFD_FRAME_FLAG_ERR BIT(4) + +#define NCT6694_NAPI_WEIGHT 32 + +enum nct6694_event_err { + NCT6694_CANFD_EVT_ERR_NO_ERROR = 0, + NCT6694_CANFD_EVT_ERR_CRC_ERROR, + NCT6694_CANFD_EVT_ERR_STUFF_ERROR, + NCT6694_CANFD_EVT_ERR_ACK_ERROR, + NCT6694_CANFD_EVT_ERR_FORM_ERROR, + NCT6694_CANFD_EVT_ERR_BIT_ERROR, + NCT6694_CANFD_EVT_ERR_TIMEOUT_ERROR, + NCT6694_CANFD_EVT_ERR_UNKNOWN_ERROR, +}; + +enum nct6694_event_status { + NCT6694_CANFD_EVT_STS_ERROR_ACTIVE = 0, + NCT6694_CANFD_EVT_STS_ERROR_PASSIVE, + NCT6694_CANFD_EVT_STS_BUS_OFF, + NCT6694_CANFD_EVT_STS_WARNING, +}; + +struct __packed nct6694_canfd_setting { + __le32 nbr; + __le32 dbr; + u8 active; + u8 reserved[3]; + __le16 ctrl1; + __le16 ctrl2; + __le32 nbtp; + __le32 dbtp; +}; + +struct __packed nct6694_canfd_information { + u8 tx_fifo_cnt; + u8 rx_fifo_cnt; + u8 reserved[2]; + __le32 can_clk; +}; + +struct __packed nct6694_canfd_event { + u8 err; + u8 status; + u8 tx_evt; + u8 rx_evt; + u8 rec; + u8 tec; + u8 reserved[2]; +}; + +struct __packed nct6694_canfd_frame { + u8 tag; + u8 flag; + u8 reserved; + u8 length; + __le32 id; + u8 data[CANFD_MAX_DLEN]; +}; + +struct nct6694_canfd_priv { + struct can_priv can; /* must be the first member */ + struct can_rx_offload offload; + struct net_device *ndev; + struct nct6694 *nct6694; + struct workqueue_struct *wq; + struct work_struct tx_work; + struct nct6694_canfd_frame tx; + struct nct6694_canfd_frame rx; + struct nct6694_canfd_event event[2]; + struct can_berr_counter bec; +}; + +static inline struct nct6694_canfd_priv 
*rx_offload_to_priv(struct can_rx_offload *offload) +{ + return container_of(offload, struct nct6694_canfd_priv, offload); +} + +static const struct can_bittiming_const nct6694_canfd_bittiming_nominal_const = { + .name = DEVICE_NAME, + .tseg1_min = 1, + .tseg1_max = 256, + .tseg2_min = 1, + .tseg2_max = 128, + .sjw_max = 128, + .brp_min = 1, + .brp_max = 512, + .brp_inc = 1, +}; + +static const struct can_bittiming_const nct6694_canfd_bittiming_data_const = { + .name = DEVICE_NAME, + .tseg1_min = 1, + .tseg1_max = 32, + .tseg2_min = 1, + .tseg2_max = 16, + .sjw_max = 16, + .brp_min = 1, + .brp_max = 32, + .brp_inc = 1, +}; + +static void nct6694_canfd_rx_offload(struct can_rx_offload *offload, + struct sk_buff *skb) +{ + struct nct6694_canfd_priv *priv = rx_offload_to_priv(offload); + int ret; + + ret = can_rx_offload_queue_tail(offload, skb); + if (ret) + priv->ndev->stats.rx_fifo_errors++; +} + +static void nct6694_canfd_handle_lost_msg(struct net_device *ndev) +{ + struct nct6694_canfd_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + struct can_frame *cf; + struct sk_buff *skb; + + netdev_dbg(ndev, "RX FIFO overflow, message(s) lost.\n"); + + stats->rx_errors++; + stats->rx_over_errors++; + + skb = alloc_can_err_skb(ndev, &cf); + if (!skb) + return; + + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + + nct6694_canfd_rx_offload(&priv->offload, skb); +} + +static void nct6694_canfd_handle_rx(struct net_device *ndev, u8 rx_evt) +{ + struct net_device_stats *stats = &ndev->stats; + struct nct6694_canfd_priv *priv = netdev_priv(ndev); + struct nct6694_canfd_frame *frame = &priv->rx; + const struct nct6694_cmd_header cmd_hd = { + .mod = NCT6694_CANFD_MOD, + .cmd = NCT6694_CANFD_RECEIVE, + .sel = NCT6694_CANFD_RECEIVE_SEL(ndev->dev_port, 1), + .len = cpu_to_le16(sizeof(*frame)) + }; + struct sk_buff *skb; + int ret; + + ret = nct6694_read_msg(priv->nct6694, &cmd_hd, frame); + if (ret) + return; + + if (frame->flag & NCT6694_CANFD_FRAME_FLAG_FD) { + struct canfd_frame *cfd; + + skb = alloc_canfd_skb(priv->ndev, &cfd); + if (!skb) { + stats->rx_dropped++; + return; + } + + cfd->can_id = le32_to_cpu(frame->id); + cfd->len = canfd_sanitize_len(frame->length); + if (frame->flag & NCT6694_CANFD_FRAME_FLAG_EFF) + cfd->can_id |= CAN_EFF_FLAG; + if (frame->flag & NCT6694_CANFD_FRAME_FLAG_BRS) + cfd->flags |= CANFD_BRS; + if (frame->flag & NCT6694_CANFD_FRAME_FLAG_ERR) + cfd->flags |= CANFD_ESI; + + memcpy(cfd->data, frame->data, cfd->len); + } else { + struct can_frame *cf; + + skb = alloc_can_skb(priv->ndev, &cf); + if (!skb) { + stats->rx_dropped++; + return; + } + + cf->can_id = le32_to_cpu(frame->id); + cf->len = can_cc_dlc2len(frame->length); + if (frame->flag & NCT6694_CANFD_FRAME_FLAG_EFF) + cf->can_id |= CAN_EFF_FLAG; + + if (frame->flag & NCT6694_CANFD_FRAME_FLAG_RTR) + cf->can_id |= CAN_RTR_FLAG; + else + memcpy(cf->data, frame->data, cf->len); + } + + nct6694_canfd_rx_offload(&priv->offload, skb); +} + +static int nct6694_canfd_get_berr_counter(const struct net_device *ndev, + struct can_berr_counter *bec) +{ + struct nct6694_canfd_priv *priv = netdev_priv(ndev); + + *bec = priv->bec; + + return 0; +} + +static void nct6694_canfd_handle_state_change(struct net_device *ndev, u8 status) +{ + struct nct6694_canfd_priv *priv = netdev_priv(ndev); + enum can_state new_state, rx_state, tx_state; + struct can_berr_counter bec; + struct can_frame *cf; + struct sk_buff *skb; + + nct6694_canfd_get_berr_counter(ndev, &bec); + 
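can_state_get_by_berr_counter(), called right after this, maps the raw TX/RX error counters onto the CAN fault-confinement states so the handler does not have to trust the firmware's status byte alone. The thresholds involved are the classic ones from the CAN specification; a simplified per-counter sketch (not the in-kernel implementation) is:

#include <linux/can/dev.h>

/* classic fault-confinement thresholds; real controllers signal bus-off
 * themselves once the TX error counter exceeds 255
 */
static enum can_state berr_counter_to_state(u16 err_counter)
{
	if (err_counter >= 256)
		return CAN_STATE_BUS_OFF;
	if (err_counter >= 128)
		return CAN_STATE_ERROR_PASSIVE;
	if (err_counter >= 96)
		return CAN_STATE_ERROR_WARNING;
	return CAN_STATE_ERROR_ACTIVE;
}

The worse of the two per-direction states is then taken as the new channel state, which is what the max(tx_state, rx_state) just below relies on.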
can_state_get_by_berr_counter(ndev, &bec, &tx_state, &rx_state); + + new_state = max(tx_state, rx_state); + + /* state hasn't changed */ + if (new_state == priv->can.state) + return; + + skb = alloc_can_err_skb(ndev, &cf); + + can_change_state(ndev, cf, tx_state, rx_state); + + if (new_state == CAN_STATE_BUS_OFF) { + can_bus_off(ndev); + } else if (cf) { + cf->can_id |= CAN_ERR_CNT; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + } + + if (skb) + nct6694_canfd_rx_offload(&priv->offload, skb); +} + +static void nct6694_canfd_handle_bus_err(struct net_device *ndev, u8 bus_err) +{ + struct nct6694_canfd_priv *priv = netdev_priv(ndev); + struct can_frame *cf; + struct sk_buff *skb; + + priv->can.can_stats.bus_error++; + + skb = alloc_can_err_skb(ndev, &cf); + if (cf) + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + + switch (bus_err) { + case NCT6694_CANFD_EVT_ERR_CRC_ERROR: + netdev_dbg(ndev, "CRC error\n"); + ndev->stats.rx_errors++; + if (cf) + cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; + break; + + case NCT6694_CANFD_EVT_ERR_STUFF_ERROR: + netdev_dbg(ndev, "Stuff error\n"); + ndev->stats.rx_errors++; + if (cf) + cf->data[2] |= CAN_ERR_PROT_STUFF; + break; + + case NCT6694_CANFD_EVT_ERR_ACK_ERROR: + netdev_dbg(ndev, "Ack error\n"); + ndev->stats.tx_errors++; + if (cf) { + cf->can_id |= CAN_ERR_ACK; + cf->data[2] |= CAN_ERR_PROT_TX; + } + break; + + case NCT6694_CANFD_EVT_ERR_FORM_ERROR: + netdev_dbg(ndev, "Form error\n"); + ndev->stats.rx_errors++; + if (cf) + cf->data[2] |= CAN_ERR_PROT_FORM; + break; + + case NCT6694_CANFD_EVT_ERR_BIT_ERROR: + netdev_dbg(ndev, "Bit error\n"); + ndev->stats.tx_errors++; + if (cf) + cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT; + break; + + default: + break; + } + + if (skb) + nct6694_canfd_rx_offload(&priv->offload, skb); +} + +static void nct6694_canfd_handle_tx(struct net_device *ndev) +{ + struct nct6694_canfd_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + + stats->tx_bytes += can_rx_offload_get_echo_skb_queue_tail(&priv->offload, + 0, NULL); + stats->tx_packets++; + netif_wake_queue(ndev); +} + +static irqreturn_t nct6694_canfd_irq(int irq, void *data) +{ + struct net_device *ndev = data; + struct nct6694_canfd_priv *priv = netdev_priv(ndev); + struct nct6694_canfd_event *event = &priv->event[ndev->dev_port]; + const struct nct6694_cmd_header cmd_hd = { + .mod = NCT6694_CANFD_MOD, + .cmd = NCT6694_CANFD_EVENT, + .sel = NCT6694_CANFD_EVENT_SEL(ndev->dev_port, NCT6694_CANFD_EVENT_MASK), + .len = cpu_to_le16(sizeof(priv->event)) + }; + irqreturn_t handled = IRQ_NONE; + int ret; + + ret = nct6694_read_msg(priv->nct6694, &cmd_hd, priv->event); + if (ret < 0) + return handled; + + if (event->rx_evt & NCT6694_CANFD_EVT_RX_DATA_IN) { + nct6694_canfd_handle_rx(ndev, event->rx_evt); + handled = IRQ_HANDLED; + } + + if (event->rx_evt & NCT6694_CANFD_EVT_RX_DATA_LOST) { + nct6694_canfd_handle_lost_msg(ndev); + handled = IRQ_HANDLED; + } + + if (event->status) { + nct6694_canfd_handle_state_change(ndev, event->status); + handled = IRQ_HANDLED; + } + + if (event->err != NCT6694_CANFD_EVT_ERR_NO_ERROR) { + if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) + nct6694_canfd_handle_bus_err(ndev, event->err); + handled = IRQ_HANDLED; + } + + if (event->tx_evt & NCT6694_CANFD_EVT_TX_FIFO_EMPTY) { + nct6694_canfd_handle_tx(ndev); + handled = IRQ_HANDLED; + } + + if (handled) + can_rx_offload_threaded_irq_finish(&priv->offload); + + priv->bec.rxerr = event->rec; + priv->bec.txerr = event->tec; + + return handled; +} + 
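All reception above goes through the rx-offload layer instead of calling netif_rx() directly, so frames queued from the threaded IRQ are handed to the stack in order from NAPI context. A condensed sketch of that wiring, using the same kernel helpers this driver calls but with hypothetical demo_* names (can_rx_offload_threaded_irq_finish() is shown per delivery here for brevity; the driver calls it once per interrupt):

#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>
#include <linux/netdevice.h>

struct demo_priv {
	struct can_priv can;			/* must stay first for candev */
	struct can_rx_offload offload;
};

/* probe time: attach a manually fed rx_offload instance (no mailboxes) */
static int demo_attach(struct net_device *ndev, unsigned int napi_weight)
{
	struct demo_priv *priv = netdev_priv(ndev);

	return can_rx_offload_add_manual(ndev, &priv->offload, napi_weight);
}

/* threaded IRQ: queue one skb in order, then let NAPI drain the queue */
static void demo_deliver(struct net_device *ndev, struct sk_buff *skb)
{
	struct demo_priv *priv = netdev_priv(ndev);

	if (can_rx_offload_queue_tail(&priv->offload, skb))
		ndev->stats.rx_fifo_errors++;

	can_rx_offload_threaded_irq_finish(&priv->offload);
}

ndo_open()/ndo_stop() bracket this with can_rx_offload_enable()/can_rx_offload_disable(), and the remove path finishes with can_rx_offload_del(), matching what nct6694_canfd_open(), nct6694_canfd_close() and nct6694_canfd_remove() do below.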
+static void nct6694_canfd_tx_work(struct work_struct *work) +{ + struct nct6694_canfd_priv *priv = container_of(work, + struct nct6694_canfd_priv, + tx_work); + struct nct6694_canfd_frame *frame = &priv->tx; + struct net_device *ndev = priv->ndev; + struct net_device_stats *stats = &ndev->stats; + struct sk_buff *skb = priv->can.echo_skb[0]; + static const struct nct6694_cmd_header cmd_hd = { + .mod = NCT6694_CANFD_MOD, + .cmd = NCT6694_CANFD_DELIVER, + .sel = NCT6694_CANFD_DELIVER_SEL(1), + .len = cpu_to_le16(sizeof(*frame)) + }; + u32 txid; + int err; + + memset(frame, 0, sizeof(*frame)); + + frame->tag = NCT6694_CANFD_FRAME_TAG(ndev->dev_port); + + if (can_is_canfd_skb(skb)) { + struct canfd_frame *cfd = (struct canfd_frame *)skb->data; + + if (cfd->flags & CANFD_BRS) + frame->flag |= NCT6694_CANFD_FRAME_FLAG_BRS; + + if (cfd->can_id & CAN_EFF_FLAG) { + txid = cfd->can_id & CAN_EFF_MASK; + frame->flag |= NCT6694_CANFD_FRAME_FLAG_EFF; + } else { + txid = cfd->can_id & CAN_SFF_MASK; + } + frame->flag |= NCT6694_CANFD_FRAME_FLAG_FD; + frame->id = cpu_to_le32(txid); + frame->length = canfd_sanitize_len(cfd->len); + + memcpy(frame->data, cfd->data, frame->length); + } else { + struct can_frame *cf = (struct can_frame *)skb->data; + + if (cf->can_id & CAN_EFF_FLAG) { + txid = cf->can_id & CAN_EFF_MASK; + frame->flag |= NCT6694_CANFD_FRAME_FLAG_EFF; + } else { + txid = cf->can_id & CAN_SFF_MASK; + } + + if (cf->can_id & CAN_RTR_FLAG) + frame->flag |= NCT6694_CANFD_FRAME_FLAG_RTR; + else + memcpy(frame->data, cf->data, cf->len); + + frame->id = cpu_to_le32(txid); + frame->length = cf->len; + } + + err = nct6694_write_msg(priv->nct6694, &cmd_hd, frame); + if (err) { + can_free_echo_skb(ndev, 0, NULL); + stats->tx_dropped++; + stats->tx_errors++; + netif_wake_queue(ndev); + } +} + +static netdev_tx_t nct6694_canfd_start_xmit(struct sk_buff *skb, + struct net_device *ndev) +{ + struct nct6694_canfd_priv *priv = netdev_priv(ndev); + + if (can_dev_dropped_skb(ndev, skb)) + return NETDEV_TX_OK; + + netif_stop_queue(ndev); + can_put_echo_skb(skb, ndev, 0, 0); + queue_work(priv->wq, &priv->tx_work); + + return NETDEV_TX_OK; +} + +static int nct6694_canfd_start(struct net_device *ndev) +{ + struct nct6694_canfd_priv *priv = netdev_priv(ndev); + const struct can_bittiming *n_bt = &priv->can.bittiming; + const struct can_bittiming *d_bt = &priv->can.fd.data_bittiming; + struct nct6694_canfd_setting *setting __free(kfree) = NULL; + const struct nct6694_cmd_header cmd_hd = { + .mod = NCT6694_CANFD_MOD, + .cmd = NCT6694_CANFD_SETTING, + .sel = ndev->dev_port, + .len = cpu_to_le16(sizeof(*setting)) + }; + u32 en_tdc; + int ret; + + setting = kzalloc(sizeof(*setting), GFP_KERNEL); + if (!setting) + return -ENOMEM; + + if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) + setting->ctrl1 |= cpu_to_le16(NCT6694_CANFD_SETTING_CTRL1_MON); + + if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) + setting->ctrl1 |= cpu_to_le16(NCT6694_CANFD_SETTING_CTRL1_NISO); + + if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) + setting->ctrl1 |= cpu_to_le16(NCT6694_CANFD_SETTING_CTRL1_LBCK); + + /* Disable clock divider */ + setting->ctrl2 = 0; + + setting->nbtp = cpu_to_le32(FIELD_PREP(NCT6694_CANFD_SETTING_NBTP_NSJW, + n_bt->sjw - 1) | + FIELD_PREP(NCT6694_CANFD_SETTING_NBTP_NBRP, + n_bt->brp - 1) | + FIELD_PREP(NCT6694_CANFD_SETTING_NBTP_NTSEG2, + n_bt->phase_seg2 - 1) | + FIELD_PREP(NCT6694_CANFD_SETTING_NBTP_NTSEG1, + n_bt->prop_seg + n_bt->phase_seg1 - 1)); + + if (d_bt->brp <= 2) + en_tdc = NCT6694_CANFD_SETTING_DBTP_TDC; + 
else + en_tdc = 0; + + setting->dbtp = cpu_to_le32(FIELD_PREP(NCT6694_CANFD_SETTING_DBTP_DSJW, + d_bt->sjw - 1) | + FIELD_PREP(NCT6694_CANFD_SETTING_DBTP_DBRP, + d_bt->brp - 1) | + FIELD_PREP(NCT6694_CANFD_SETTING_DBTP_DTSEG2, + d_bt->phase_seg2 - 1) | + FIELD_PREP(NCT6694_CANFD_SETTING_DBTP_DTSEG1, + d_bt->prop_seg + d_bt->phase_seg1 - 1) | + en_tdc); + + setting->active = NCT6694_CANFD_SETTING_ACTIVE_CTRL1 | + NCT6694_CANFD_SETTING_ACTIVE_CTRL2 | + NCT6694_CANFD_SETTING_ACTIVE_NBTP_DBTP; + + ret = nct6694_write_msg(priv->nct6694, &cmd_hd, setting); + if (ret) + return ret; + + priv->can.state = CAN_STATE_ERROR_ACTIVE; + + return 0; +} + +static void nct6694_canfd_stop(struct net_device *ndev) +{ + struct nct6694_canfd_priv *priv = netdev_priv(ndev); + struct nct6694_canfd_setting *setting __free(kfree) = NULL; + const struct nct6694_cmd_header cmd_hd = { + .mod = NCT6694_CANFD_MOD, + .cmd = NCT6694_CANFD_SETTING, + .sel = ndev->dev_port, + .len = cpu_to_le16(sizeof(*setting)) + }; + + /* The NCT6694 cannot be stopped. To ensure safe operation and avoid + * interference, the control mode is set to Listen-Only mode. This + * mode allows the device to monitor bus activity without actively + * participating in communication. + */ + setting = kzalloc(sizeof(*setting), GFP_KERNEL); + if (!setting) + return; + + nct6694_read_msg(priv->nct6694, &cmd_hd, setting); + setting->ctrl1 = cpu_to_le16(NCT6694_CANFD_SETTING_CTRL1_MON); + setting->active = NCT6694_CANFD_SETTING_ACTIVE_CTRL1; + nct6694_write_msg(priv->nct6694, &cmd_hd, setting); + + priv->can.state = CAN_STATE_STOPPED; +} + +static int nct6694_canfd_close(struct net_device *ndev) +{ + struct nct6694_canfd_priv *priv = netdev_priv(ndev); + + netif_stop_queue(ndev); + nct6694_canfd_stop(ndev); + destroy_workqueue(priv->wq); + free_irq(ndev->irq, ndev); + can_rx_offload_disable(&priv->offload); + close_candev(ndev); + return 0; +} + +static int nct6694_canfd_set_mode(struct net_device *ndev, enum can_mode mode) +{ + int ret; + + switch (mode) { + case CAN_MODE_START: + ret = nct6694_canfd_start(ndev); + if (ret) + return ret; + + netif_wake_queue(ndev); + break; + + default: + return -EOPNOTSUPP; + } + + return ret; +} + +static int nct6694_canfd_open(struct net_device *ndev) +{ + struct nct6694_canfd_priv *priv = netdev_priv(ndev); + int ret; + + ret = open_candev(ndev); + if (ret) + return ret; + + can_rx_offload_enable(&priv->offload); + + ret = request_threaded_irq(ndev->irq, NULL, + nct6694_canfd_irq, IRQF_ONESHOT, + "nct6694_canfd", ndev); + if (ret) { + netdev_err(ndev, "Failed to request IRQ\n"); + goto can_rx_offload_disable; + } + + priv->wq = alloc_ordered_workqueue("%s-nct6694_wq", + WQ_FREEZABLE | WQ_MEM_RECLAIM, + ndev->name); + if (!priv->wq) { + ret = -ENOMEM; + goto free_irq; + } + + ret = nct6694_canfd_start(ndev); + if (ret) + goto destroy_wq; + + netif_start_queue(ndev); + + return 0; + +destroy_wq: + destroy_workqueue(priv->wq); +free_irq: + free_irq(ndev->irq, ndev); +can_rx_offload_disable: + can_rx_offload_disable(&priv->offload); + close_candev(ndev); + return ret; +} + +static const struct net_device_ops nct6694_canfd_netdev_ops = { + .ndo_open = nct6694_canfd_open, + .ndo_stop = nct6694_canfd_close, + .ndo_start_xmit = nct6694_canfd_start_xmit, +}; + +static const struct ethtool_ops nct6694_canfd_ethtool_ops = { + .get_ts_info = ethtool_op_get_ts_info, +}; + +static int nct6694_canfd_get_clock(struct nct6694_canfd_priv *priv) +{ + struct nct6694_canfd_information *info __free(kfree) = NULL; + static const struct 
nct6694_cmd_header cmd_hd = { + .mod = NCT6694_CANFD_MOD, + .cmd = NCT6694_CANFD_INFORMATION, + .sel = NCT6694_CANFD_INFORMATION_SEL, + .len = cpu_to_le16(sizeof(*info)) + }; + int ret; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + ret = nct6694_read_msg(priv->nct6694, &cmd_hd, info); + if (ret) + return ret; + + return le32_to_cpu(info->can_clk); +} + +static int nct6694_canfd_probe(struct platform_device *pdev) +{ + struct nct6694 *nct6694 = dev_get_drvdata(pdev->dev.parent); + struct nct6694_canfd_priv *priv; + struct net_device *ndev; + int port, irq, ret, can_clk; + + port = ida_alloc(&nct6694->canfd_ida, GFP_KERNEL); + if (port < 0) + return port; + + irq = irq_create_mapping(nct6694->domain, + NCT6694_IRQ_CAN0 + port); + if (!irq) { + ret = -EINVAL; + goto free_ida; + } + + ndev = alloc_candev(sizeof(struct nct6694_canfd_priv), 1); + if (!ndev) { + ret = -ENOMEM; + goto dispose_irq; + } + + ndev->irq = irq; + ndev->flags |= IFF_ECHO; + ndev->dev_port = port; + ndev->netdev_ops = &nct6694_canfd_netdev_ops; + ndev->ethtool_ops = &nct6694_canfd_ethtool_ops; + + priv = netdev_priv(ndev); + priv->nct6694 = nct6694; + priv->ndev = ndev; + + can_clk = nct6694_canfd_get_clock(priv); + if (can_clk < 0) { + ret = dev_err_probe(&pdev->dev, can_clk, + "Failed to get clock\n"); + goto free_candev; + } + + INIT_WORK(&priv->tx_work, nct6694_canfd_tx_work); + + priv->can.clock.freq = can_clk; + priv->can.bittiming_const = &nct6694_canfd_bittiming_nominal_const; + priv->can.fd.data_bittiming_const = &nct6694_canfd_bittiming_data_const; + priv->can.do_set_mode = nct6694_canfd_set_mode; + priv->can.do_get_berr_counter = nct6694_canfd_get_berr_counter; + priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | + CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING | + CAN_CTRLMODE_FD_NON_ISO; + + ret = can_set_static_ctrlmode(ndev, CAN_CTRLMODE_FD); + if (ret) + goto free_candev; + + ret = can_rx_offload_add_manual(ndev, &priv->offload, + NCT6694_NAPI_WEIGHT); + if (ret) { + dev_err_probe(&pdev->dev, ret, "Failed to add rx_offload\n"); + goto free_candev; + } + + platform_set_drvdata(pdev, priv); + SET_NETDEV_DEV(priv->ndev, &pdev->dev); + + ret = register_candev(priv->ndev); + if (ret) + goto rx_offload_del; + + return 0; + +rx_offload_del: + can_rx_offload_del(&priv->offload); +free_candev: + free_candev(ndev); +dispose_irq: + irq_dispose_mapping(irq); +free_ida: + ida_free(&nct6694->canfd_ida, port); + return ret; +} + +static void nct6694_canfd_remove(struct platform_device *pdev) +{ + struct nct6694_canfd_priv *priv = platform_get_drvdata(pdev); + struct nct6694 *nct6694 = priv->nct6694; + struct net_device *ndev = priv->ndev; + int port = ndev->dev_port; + int irq = ndev->irq; + + unregister_candev(ndev); + can_rx_offload_del(&priv->offload); + free_candev(ndev); + irq_dispose_mapping(irq); + ida_free(&nct6694->canfd_ida, port); +} + +static struct platform_driver nct6694_canfd_driver = { + .driver = { + .name = DEVICE_NAME, + }, + .probe = nct6694_canfd_probe, + .remove = nct6694_canfd_remove, +}; + +module_platform_driver(nct6694_canfd_driver); + +MODULE_DESCRIPTION("USB-CAN FD driver for NCT6694"); +MODULE_AUTHOR("Ming Yu <tmyu0@nuvoton.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/can/usb/peak_usb/Makefile b/drivers/net/can/usb/peak_usb/Makefile index 1839e9ca62e7..80789f91e300 100644 --- a/drivers/net/can/usb/peak_usb/Makefile +++ b/drivers/net/can/usb/peak_usb/Makefile @@ -1,2 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only 
obj-$(CONFIG_CAN_PEAK_USB) += peak_usb.o peak_usb-y = pcan_usb_core.o pcan_usb.o pcan_usb_pro.o pcan_usb_fd.o diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index 13238a72a338..9278a1522aae 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c @@ -1,24 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * CAN driver for PEAK System PCAN-USB adapter * Derived from the PCAN project file driver/src/pcan_usb.c * - * Copyright (C) 2003-2010 PEAK System-Technik GmbH - * Copyright (C) 2011-2012 Stephane Grosjean <s.grosjean@peak-system.com> + * Copyright (C) 2003-2025 PEAK System-Technik GmbH + * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com> * * Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published - * by the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. */ +#include <linux/unaligned.h> + +#include <linux/ethtool.h> +#include <linux/module.h> #include <linux/netdevice.h> #include <linux/usb.h> -#include <linux/module.h> #include <linux/can.h> #include <linux/can/dev.h> @@ -26,8 +21,6 @@ #include "pcan_usb_core.h" -MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB adapter"); - /* PCAN-USB Endpoints */ #define PCAN_USB_EP_CMDOUT 1 #define PCAN_USB_EP_CMDIN (PCAN_USB_EP_CMDOUT | USB_DIR_IN) @@ -42,6 +35,24 @@ MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB adapter"); #define PCAN_USB_CMD_LEN (PCAN_USB_CMD_ARGS + \ PCAN_USB_CMD_ARGS_LEN) +/* PCAN-USB commands */ +#define PCAN_USB_CMD_BITRATE 1 +#define PCAN_USB_CMD_SET_BUS 3 +#define PCAN_USB_CMD_DEVID 4 +#define PCAN_USB_CMD_SN 6 +#define PCAN_USB_CMD_REGISTER 9 +#define PCAN_USB_CMD_EXT_VCC 10 +#define PCAN_USB_CMD_ERR_FR 11 +#define PCAN_USB_CMD_LED 12 + +/* PCAN_USB_CMD_SET_BUS number arg */ +#define PCAN_USB_BUS_XCVER 2 +#define PCAN_USB_BUS_SILENT_MODE 3 + +/* PCAN_USB_CMD_xxx functions */ +#define PCAN_USB_GET 1 +#define PCAN_USB_SET 2 + /* PCAN-USB command timeout (ms.) 
*/ #define PCAN_USB_COMMAND_TIMEOUT 1000 @@ -54,6 +65,8 @@ MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB adapter"); #define PCAN_USB_MSG_HEADER_LEN 2 +#define PCAN_USB_MSG_TX_CAN 2 /* Tx msg is a CAN frame */ + /* PCAN-USB adapter internal clock (MHz) */ #define PCAN_USB_CRYSTAL_HZ 16000000 @@ -64,6 +77,10 @@ MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB adapter"); #define PCAN_USB_STATUSLEN_RTR (1 << 4) #define PCAN_USB_STATUSLEN_DLC (0xf) +/* PCAN-USB 4.1 CAN Id tx extended flags */ +#define PCAN_USB_TX_SRR 0x01 /* SJA1000 SRR command */ +#define PCAN_USB_TX_AT 0x02 /* SJA1000 AT command */ + /* PCAN-USB error flags */ #define PCAN_USB_ERROR_TXFULL 0x01 #define PCAN_USB_ERROR_RXQOVR 0x02 @@ -74,6 +91,10 @@ MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB adapter"); #define PCAN_USB_ERROR_QOVR 0x40 #define PCAN_USB_ERROR_TXQFULL 0x80 +#define PCAN_USB_ERROR_BUS (PCAN_USB_ERROR_BUS_LIGHT | \ + PCAN_USB_ERROR_BUS_HEAVY | \ + PCAN_USB_ERROR_BUS_OFF) + /* SJA1000 modes */ #define SJA1000_MODE_NORMAL 0x00 #define SJA1000_MODE_INIT 0x01 @@ -93,11 +114,26 @@ MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB adapter"); #define PCAN_USB_REC_TS 4 #define PCAN_USB_REC_BUSEVT 5 +/* CAN bus events notifications selection mask */ +#define PCAN_USB_ERR_RXERR 0x02 /* ask for rxerr counter */ +#define PCAN_USB_ERR_TXERR 0x04 /* ask for txerr counter */ + +/* This mask generates an usb packet each time the state of the bus changes. + * In other words, its interest is to know which side among rx and tx is + * responsible of the change of the bus state. + */ +#define PCAN_USB_BERR_MASK (PCAN_USB_ERR_RXERR | PCAN_USB_ERR_TXERR) + +/* identify bus event packets with rx/tx error counters */ +#define PCAN_USB_ERR_CNT_DEC 0x00 /* counters are decreasing */ +#define PCAN_USB_ERR_CNT_INC 0x80 /* counters are increasing */ + /* private to PCAN-USB adapter */ struct pcan_usb { struct peak_usb_device dev; struct peak_time_ref time_ref; struct timer_list restart_timer; + struct can_berr_counter bec; }; /* incoming message context for decoding */ @@ -108,7 +144,7 @@ struct pcan_usb_msg_context { u8 *end; u8 rec_cnt; u8 rec_idx; - u8 rec_data_idx; + u8 rec_ts_idx; struct net_device *netdev; struct pcan_usb *pdev; }; @@ -180,7 +216,8 @@ static int pcan_usb_set_sja1000(struct peak_usb_device *dev, u8 mode) [1] = mode, }; - return pcan_usb_send_cmd(dev, 9, 2, args); + return pcan_usb_send_cmd(dev, PCAN_USB_CMD_REGISTER, PCAN_USB_SET, + args); } static int pcan_usb_set_bus(struct peak_usb_device *dev, u8 onoff) @@ -189,7 +226,8 @@ static int pcan_usb_set_bus(struct peak_usb_device *dev, u8 onoff) [0] = !!onoff, }; - return pcan_usb_send_cmd(dev, 3, 2, args); + return pcan_usb_send_cmd(dev, PCAN_USB_CMD_SET_BUS, PCAN_USB_BUS_XCVER, + args); } static int pcan_usb_set_silent(struct peak_usb_device *dev, u8 onoff) @@ -198,7 +236,18 @@ static int pcan_usb_set_silent(struct peak_usb_device *dev, u8 onoff) [0] = !!onoff, }; - return pcan_usb_send_cmd(dev, 3, 3, args); + return pcan_usb_send_cmd(dev, PCAN_USB_CMD_SET_BUS, + PCAN_USB_BUS_SILENT_MODE, args); +} + +/* send the cmd to be notified from bus errors */ +static int pcan_usb_set_err_frame(struct peak_usb_device *dev, u8 err_mask) +{ + u8 args[PCAN_USB_CMD_ARGS_LEN] = { + [0] = err_mask, + }; + + return pcan_usb_send_cmd(dev, PCAN_USB_CMD_ERR_FR, PCAN_USB_SET, args); } static int pcan_usb_set_ext_vcc(struct peak_usb_device *dev, u8 onoff) @@ -207,7 +256,16 @@ static int pcan_usb_set_ext_vcc(struct peak_usb_device *dev, u8 onoff) [0] = !!onoff, }; - return pcan_usb_send_cmd(dev, 10, 
2, args); + return pcan_usb_send_cmd(dev, PCAN_USB_CMD_EXT_VCC, PCAN_USB_SET, args); +} + +static int pcan_usb_set_led(struct peak_usb_device *dev, u8 onoff) +{ + u8 args[PCAN_USB_CMD_ARGS_LEN] = { + [0] = !!onoff, + }; + + return pcan_usb_send_cmd(dev, PCAN_USB_CMD_LED, PCAN_USB_SET, args); } /* @@ -231,7 +289,7 @@ static int pcan_usb_set_bittiming(struct peak_usb_device *dev, args[0] = btr1; args[1] = btr0; - return pcan_usb_send_cmd(dev, 1, 2, args); + return pcan_usb_send_cmd(dev, PCAN_USB_CMD_BITRATE, PCAN_USB_SET, args); } /* @@ -261,7 +319,7 @@ static int pcan_usb_write_mode(struct peak_usb_device *dev, u8 onoff) */ static void pcan_usb_restart(struct timer_list *t) { - struct pcan_usb *pdev = from_timer(pdev, t, restart_timer); + struct pcan_usb *pdev = timer_container_of(pdev, t, restart_timer); struct peak_usb_device *dev = &pdev->dev; /* notify candev and netdev */ @@ -315,49 +373,60 @@ static int pcan_usb_get_serial(struct peak_usb_device *dev, u32 *serial_number) u8 args[PCAN_USB_CMD_ARGS_LEN]; int err; - err = pcan_usb_wait_rsp(dev, 6, 1, args); - if (err) { - netdev_err(dev->netdev, "getting serial failure: %d\n", err); - } else if (serial_number) { - __le32 tmp32; - - memcpy(&tmp32, args, 4); - *serial_number = le32_to_cpu(tmp32); - } + err = pcan_usb_wait_rsp(dev, PCAN_USB_CMD_SN, PCAN_USB_GET, args); + if (err) + return err; + *serial_number = le32_to_cpup((__le32 *)args); - return err; + return 0; } /* - * read device id from device + * read can channel id from device */ -static int pcan_usb_get_device_id(struct peak_usb_device *dev, u32 *device_id) +static int pcan_usb_get_can_channel_id(struct peak_usb_device *dev, u32 *can_ch_id) { u8 args[PCAN_USB_CMD_ARGS_LEN]; int err; - err = pcan_usb_wait_rsp(dev, 4, 1, args); + err = pcan_usb_wait_rsp(dev, PCAN_USB_CMD_DEVID, PCAN_USB_GET, args); if (err) - netdev_err(dev->netdev, "getting device id failure: %d\n", err); - else if (device_id) - *device_id = args[0]; + netdev_err(dev->netdev, "getting can channel id failure: %d\n", err); + + else + *can_ch_id = args[0]; return err; } +/* set a new CAN channel id in the flash memory of the device */ +static int pcan_usb_set_can_channel_id(struct peak_usb_device *dev, u32 can_ch_id) +{ + u8 args[PCAN_USB_CMD_ARGS_LEN]; + + /* this kind of device supports 8-bit values only */ + if (can_ch_id > U8_MAX) + return -EINVAL; + + /* during the flash process the device disconnects during ~1.25 s.: + * prohibit access when interface is UP + */ + if (dev->netdev->flags & IFF_UP) + return -EBUSY; + + args[0] = can_ch_id; + return pcan_usb_send_cmd(dev, PCAN_USB_CMD_DEVID, PCAN_USB_SET, args); +} + /* * update current time ref with received timestamp */ static int pcan_usb_update_ts(struct pcan_usb_msg_context *mc) { - __le16 tmp16; - - if ((mc->ptr+2) > mc->end) + if ((mc->ptr + 2) > mc->end) return -EINVAL; - memcpy(&tmp16, mc->ptr, 2); - - mc->ts16 = le16_to_cpu(tmp16); + mc->ts16 = get_unaligned_le16(mc->ptr); if (mc->rec_idx > 0) peak_usb_update_ts_now(&mc->pdev->time_ref, mc->ts16); @@ -374,16 +443,13 @@ static int pcan_usb_decode_ts(struct pcan_usb_msg_context *mc, u8 first_packet) { /* only 1st packet supplies a word timestamp */ if (first_packet) { - __le16 tmp16; - if ((mc->ptr + 2) > mc->end) return -EINVAL; - memcpy(&tmp16, mc->ptr, 2); - mc->ptr += 2; - - mc->ts16 = le16_to_cpu(tmp16); + mc->ts16 = get_unaligned_le16(mc->ptr); mc->prev_ts8 = mc->ts16 & 0x00ff; + + mc->ptr += 2; } else { u8 ts8; @@ -408,120 +474,67 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, 
u8 n, { struct sk_buff *skb; struct can_frame *cf; - enum can_state new_state; + enum can_state new_state = CAN_STATE_ERROR_ACTIVE; /* ignore this error until 1st ts received */ if (n == PCAN_USB_ERROR_QOVR) if (!mc->pdev->time_ref.tick_count) return 0; - new_state = mc->pdev->dev.can.state; + /* allocate an skb to store the error frame */ + skb = alloc_can_err_skb(mc->netdev, &cf); - switch (mc->pdev->dev.can.state) { - case CAN_STATE_ERROR_ACTIVE: - if (n & PCAN_USB_ERROR_BUS_LIGHT) { - new_state = CAN_STATE_ERROR_WARNING; - break; + if (n & PCAN_USB_ERROR_RXQOVR) { + /* data overrun interrupt */ + netdev_dbg(mc->netdev, "data overrun interrupt\n"); + mc->netdev->stats.rx_over_errors++; + mc->netdev->stats.rx_errors++; + if (cf) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; } - /* else: fall through */ + } - case CAN_STATE_ERROR_WARNING: - if (n & PCAN_USB_ERROR_BUS_HEAVY) { - new_state = CAN_STATE_ERROR_PASSIVE; - break; - } - if (n & PCAN_USB_ERROR_BUS_OFF) { - new_state = CAN_STATE_BUS_OFF; - break; - } - if (n & (PCAN_USB_ERROR_RXQOVR | PCAN_USB_ERROR_QOVR)) { - /* - * trick to bypass next comparison and process other - * errors - */ - new_state = CAN_STATE_MAX; - break; - } - if ((n & PCAN_USB_ERROR_BUS_LIGHT) == 0) { - /* no error (back to active state) */ - mc->pdev->dev.can.state = CAN_STATE_ERROR_ACTIVE; - return 0; - } - break; + if (n & PCAN_USB_ERROR_TXQFULL) + netdev_dbg(mc->netdev, "device Tx queue full)\n"); - case CAN_STATE_ERROR_PASSIVE: - if (n & PCAN_USB_ERROR_BUS_OFF) { - new_state = CAN_STATE_BUS_OFF; - break; - } - if (n & PCAN_USB_ERROR_BUS_LIGHT) { - new_state = CAN_STATE_ERROR_WARNING; - break; - } - if (n & (PCAN_USB_ERROR_RXQOVR | PCAN_USB_ERROR_QOVR)) { - /* - * trick to bypass next comparison and process other - * errors - */ - new_state = CAN_STATE_MAX; - break; - } + if (n & PCAN_USB_ERROR_BUS_OFF) { + new_state = CAN_STATE_BUS_OFF; + } else if (n & PCAN_USB_ERROR_BUS_HEAVY) { + new_state = ((mc->pdev->bec.txerr >= 128) || + (mc->pdev->bec.rxerr >= 128)) ? + CAN_STATE_ERROR_PASSIVE : + CAN_STATE_ERROR_WARNING; + } else { + new_state = CAN_STATE_ERROR_ACTIVE; + } - if ((n & PCAN_USB_ERROR_BUS_HEAVY) == 0) { - /* no error (back to active state) */ - mc->pdev->dev.can.state = CAN_STATE_ERROR_ACTIVE; - return 0; + /* handle change of state */ + if (new_state != mc->pdev->dev.can.state) { + enum can_state tx_state = + (mc->pdev->bec.txerr >= mc->pdev->bec.rxerr) ? + new_state : 0; + enum can_state rx_state = + (mc->pdev->bec.txerr <= mc->pdev->bec.rxerr) ? + new_state : 0; + + can_change_state(mc->netdev, cf, tx_state, rx_state); + + if (new_state == CAN_STATE_BUS_OFF) { + can_bus_off(mc->netdev); + } else if (cf && (cf->can_id & CAN_ERR_CRTL)) { + /* Supply TX/RX error counters in case of + * controller error. 
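An aside on the hunk above: the rewritten pcan_usb_decode_error() no longer walks a per-state switch; it derives the new CAN state directly from the adapter's error flags plus the rx/tx error counters cached from the new bus-event packets, 128 being the usual error-passive threshold. Below is a minimal standalone sketch of that mapping only, with illustrative flag values and a simplified state enum instead of the kernel's definitions (not driver code):

#include <stdio.h>

#define ERR_BUS_HEAVY 0x08      /* illustrative flag values, not the driver's */
#define ERR_BUS_OFF   0x10

enum state { ACTIVE, WARNING, PASSIVE, BUS_OFF };

static enum state map_bus_state(unsigned char flags,
                                unsigned char txerr, unsigned char rxerr)
{
        if (flags & ERR_BUS_OFF)
                return BUS_OFF;
        if (flags & ERR_BUS_HEAVY)
                /* the cached counters decide warning vs. passive */
                return (txerr >= 128 || rxerr >= 128) ? PASSIVE : WARNING;
        return ACTIVE;
}

int main(void)
{
        /* BUS_HEAVY with txerr=96 -> WARNING (1); with txerr=136 -> PASSIVE (2) */
        printf("%d %d\n", map_bus_state(ERR_BUS_HEAVY, 96, 0),
               map_bus_state(ERR_BUS_HEAVY, 136, 0));
        return 0;
}

Under this rule the device's "bus heavy" flag alone is ambiguous; only the counters distinguish warning from passive, which is why pcan_usb_start() now always asks the device for BERR reporting.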
+ */ + cf->can_id = CAN_ERR_CNT; + cf->data[6] = mc->pdev->bec.txerr; + cf->data[7] = mc->pdev->bec.rxerr; } - break; - - default: - /* do nothing waiting for restart */ - return 0; } - /* donot post any error if current state didn't change */ - if (mc->pdev->dev.can.state == new_state) - return 0; - - /* allocate an skb to store the error frame */ - skb = alloc_can_err_skb(mc->netdev, &cf); if (!skb) return -ENOMEM; - switch (new_state) { - case CAN_STATE_BUS_OFF: - cf->can_id |= CAN_ERR_BUSOFF; - mc->pdev->dev.can.can_stats.bus_off++; - can_bus_off(mc->netdev); - break; - - case CAN_STATE_ERROR_PASSIVE: - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE | - CAN_ERR_CRTL_RX_PASSIVE; - mc->pdev->dev.can.can_stats.error_passive++; - break; - - case CAN_STATE_ERROR_WARNING: - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] |= CAN_ERR_CRTL_TX_WARNING | - CAN_ERR_CRTL_RX_WARNING; - mc->pdev->dev.can.can_stats.error_warning++; - break; - - default: - /* CAN_STATE_MAX (trick to handle other errors) */ - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; - mc->netdev->stats.rx_over_errors++; - mc->netdev->stats.rx_errors++; - - new_state = mc->pdev->dev.can.state; - break; - } - - mc->pdev->dev.can.state = new_state; - if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) { struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb); @@ -529,13 +542,36 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n, &hwts->hwtstamp); } - mc->netdev->stats.rx_packets++; - mc->netdev->stats.rx_bytes += cf->can_dlc; netif_rx(skb); return 0; } +/* decode bus event usb packet: first byte contains rxerr while 2nd one contains + * txerr. + */ +static int pcan_usb_handle_bus_evt(struct pcan_usb_msg_context *mc, u8 ir) +{ + struct pcan_usb *pdev = mc->pdev; + + /* according to the content of the packet */ + switch (ir) { + case PCAN_USB_ERR_CNT_DEC: + case PCAN_USB_ERR_CNT_INC: + + /* save rx/tx error counters from in the device context */ + pdev->bec.rxerr = mc->ptr[1]; + pdev->bec.txerr = mc->ptr[2]; + break; + + default: + /* reserved */ + break; + } + + return 0; +} + /* * decode non-data usb message */ @@ -555,10 +591,15 @@ static int pcan_usb_decode_status(struct pcan_usb_msg_context *mc, mc->ptr += PCAN_USB_CMD_ARGS; if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) { - int err = pcan_usb_decode_ts(mc, !mc->rec_idx); + int err = pcan_usb_decode_ts(mc, !mc->rec_ts_idx); if (err) return err; + + /* Next packet in the buffer will have a timestamp on a single + * byte + */ + mc->rec_ts_idx++; } switch (f) { @@ -585,9 +626,10 @@ static int pcan_usb_decode_status(struct pcan_usb_msg_context *mc, break; case PCAN_USB_REC_BUSEVT: - /* error frame/bus event */ - if (n & PCAN_USB_ERROR_TXQFULL) - netdev_dbg(mc->netdev, "device Tx queue full)\n"); + /* bus event notifications (get rxerr/txerr) */ + err = pcan_usb_handle_bus_evt(mc, n); + if (err) + return err; break; default: netdev_err(mc->netdev, "unexpected function %u\n", f); @@ -611,39 +653,37 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) struct sk_buff *skb; struct can_frame *cf; struct skb_shared_hwtstamps *hwts; + u32 can_id_flags; skb = alloc_can_skb(mc->netdev, &cf); if (!skb) return -ENOMEM; if (status_len & PCAN_USB_STATUSLEN_EXT_ID) { - __le32 tmp32; - if ((mc->ptr + 4) > mc->end) goto decode_failed; - memcpy(&tmp32, mc->ptr, 4); + can_id_flags = get_unaligned_le32(mc->ptr); + cf->can_id = can_id_flags >> 3 | CAN_EFF_FLAG; mc->ptr += 4; - - cf->can_id = (le32_to_cpu(tmp32) >> 3) | 
CAN_EFF_FLAG; } else { - __le16 tmp16; - if ((mc->ptr + 2) > mc->end) goto decode_failed; - memcpy(&tmp16, mc->ptr, 2); + can_id_flags = get_unaligned_le16(mc->ptr); + cf->can_id = can_id_flags >> 5; mc->ptr += 2; - - cf->can_id = le16_to_cpu(tmp16) >> 5; } - cf->can_dlc = get_can_dlc(rec_len); + can_frame_set_cc_len(cf, rec_len, mc->pdev->dev.can.ctrlmode); - /* first data packet timestamp is a word */ - if (pcan_usb_decode_ts(mc, !mc->rec_data_idx)) + /* Only first packet timestamp is a word */ + if (pcan_usb_decode_ts(mc, !mc->rec_ts_idx)) goto decode_failed; + /* Next packet in the buffer will have a timestamp on a single byte */ + mc->rec_ts_idx++; + /* read data */ memset(cf->data, 0x0, sizeof(cf->data)); if (status_len & PCAN_USB_STATUSLEN_RTR) { @@ -652,17 +692,22 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) if ((mc->ptr + rec_len) > mc->end) goto decode_failed; - memcpy(cf->data, mc->ptr, cf->can_dlc); + memcpy(cf->data, mc->ptr, cf->len); mc->ptr += rec_len; + + /* Ignore next byte (client private id) if SRR bit is set */ + if (can_id_flags & PCAN_USB_TX_SRR) + mc->ptr++; + + /* update statistics */ + mc->netdev->stats.rx_bytes += cf->len; } + mc->netdev->stats.rx_packets++; /* convert timestamp into kernel time */ hwts = skb_hwtstamps(skb); peak_usb_get_ts_time(&mc->pdev->time_ref, mc->ts16, &hwts->hwtstamp); - /* update statistics */ - mc->netdev->stats.rx_packets++; - mc->netdev->stats.rx_bytes += cf->can_dlc; /* push the skb */ netif_rx(skb); @@ -696,7 +741,6 @@ static int pcan_usb_decode_msg(struct peak_usb_device *dev, u8 *ibuf, u32 lbuf) /* handle normal can frames here */ } else { err = pcan_usb_decode_data(&mc, sl); - mc.rec_data_idx++; } } @@ -732,57 +776,103 @@ static int pcan_usb_encode_msg(struct peak_usb_device *dev, struct sk_buff *skb, struct net_device *netdev = dev->netdev; struct net_device_stats *stats = &netdev->stats; struct can_frame *cf = (struct can_frame *)skb->data; + u32 can_id_flags = cf->can_id & CAN_ERR_MASK; u8 *pc; - obuf[0] = 2; - obuf[1] = 1; + obuf[0] = PCAN_USB_MSG_TX_CAN; + obuf[1] = 1; /* only one CAN frame is stored in the packet */ pc = obuf + PCAN_USB_MSG_HEADER_LEN; /* status/len byte */ - *pc = cf->can_dlc; + *pc = can_get_cc_dlc(cf, dev->can.ctrlmode); + if (cf->can_id & CAN_RTR_FLAG) *pc |= PCAN_USB_STATUSLEN_RTR; /* can id */ if (cf->can_id & CAN_EFF_FLAG) { - __le32 tmp32 = cpu_to_le32((cf->can_id & CAN_ERR_MASK) << 3); - *pc |= PCAN_USB_STATUSLEN_EXT_ID; - memcpy(++pc, &tmp32, 4); + pc++; + + can_id_flags <<= 3; + + if (dev->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) + can_id_flags |= PCAN_USB_TX_SRR; + + if (dev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) + can_id_flags |= PCAN_USB_TX_AT; + + put_unaligned_le32(can_id_flags, pc); pc += 4; } else { - __le16 tmp16 = cpu_to_le16((cf->can_id & CAN_ERR_MASK) << 5); + pc++; + + can_id_flags <<= 5; + + if (dev->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) + can_id_flags |= PCAN_USB_TX_SRR; - memcpy(++pc, &tmp16, 2); + if (dev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) + can_id_flags |= PCAN_USB_TX_AT; + + put_unaligned_le16(can_id_flags, pc); pc += 2; } /* can data */ if (!(cf->can_id & CAN_RTR_FLAG)) { - memcpy(pc, cf->data, cf->can_dlc); - pc += cf->can_dlc; + memcpy(pc, cf->data, cf->len); + pc += cf->len; } + /* SRR bit needs a writer id (useless here) */ + if (can_id_flags & PCAN_USB_TX_SRR) + *pc++ = 0x80; + obuf[(*size)-1] = (u8)(stats->tx_packets & 0xff); return 0; } +/* socket callback used to copy berr counters values received through USB */ +static int 
pcan_usb_get_berr_counter(const struct net_device *netdev, + struct can_berr_counter *bec) +{ + struct peak_usb_device *dev = netdev_priv(netdev); + struct pcan_usb *pdev = container_of(dev, struct pcan_usb, dev); + + *bec = pdev->bec; + + /* must return 0 */ + return 0; +} + /* * start interface */ static int pcan_usb_start(struct peak_usb_device *dev) { struct pcan_usb *pdev = container_of(dev, struct pcan_usb, dev); + int err; /* number of bits used in timestamps read from adapter struct */ peak_usb_init_time_ref(&pdev->time_ref, &pcan_usb); + pdev->bec.rxerr = 0; + pdev->bec.txerr = 0; + + /* always ask the device for BERR reporting, to be able to switch from + * WARNING to PASSIVE state + */ + err = pcan_usb_set_err_frame(dev, PCAN_USB_BERR_MASK); + if (err) + netdev_warn(dev->netdev, + "Asking for BERR reporting error %u\n", + err); + /* if revision greater than 3, can put silent mode on/off */ if (dev->device_rev > 3) { - int err; - err = pcan_usb_set_silent(dev, dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY); if (err) @@ -819,6 +909,19 @@ static int pcan_usb_init(struct peak_usb_device *dev) pcan_usb.name, dev->device_rev, serial_number, pcan_usb.ctrl_count); + /* Since rev 4.1, PCAN-USB is able to make single-shot as well as + * looped back frames. + */ + if (dev->device_rev >= 41) { + struct can_priv *priv = netdev_priv(dev->netdev); + + priv->ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT | + CAN_CTRLMODE_LOOPBACK; + } else { + dev_info(dev->netdev->dev.parent, + "Firmware update available. Please contact support.peak@hms-networks.com\n"); + } + return 0; } @@ -850,6 +953,50 @@ static int pcan_usb_probe(struct usb_interface *intf) return 0; } +static int pcan_usb_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct peak_usb_device *dev = netdev_priv(netdev); + int err = 0; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + /* call ON/OFF twice a second */ + return 2; + + case ETHTOOL_ID_OFF: + err = pcan_usb_set_led(dev, 0); + break; + + case ETHTOOL_ID_ON: + fallthrough; + + case ETHTOOL_ID_INACTIVE: + /* restore LED default */ + err = pcan_usb_set_led(dev, 1); + break; + + default: + break; + } + + return err; +} + +/* This device only handles 8-bit CAN channel id. */ +static int pcan_usb_get_eeprom_len(struct net_device *netdev) +{ + return sizeof(u8); +} + +static const struct ethtool_ops pcan_usb_ethtool_ops = { + .set_phys_id = pcan_usb_set_phys_id, + .get_ts_info = pcan_get_ts_info, + .get_eeprom_len = pcan_usb_get_eeprom_len, + .get_eeprom = peak_usb_get_eeprom, + .set_eeprom = peak_usb_set_eeprom, +}; + /* * describe the PCAN-USB adapter */ @@ -869,18 +1016,20 @@ const struct peak_usb_adapter pcan_usb = { .name = "PCAN-USB", .device_id = PCAN_USB_PRODUCT_ID, .ctrl_count = 1, - .ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY, + .ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY | + CAN_CTRLMODE_CC_LEN8_DLC, .clock = { - .freq = PCAN_USB_CRYSTAL_HZ / 2 , + .freq = PCAN_USB_CRYSTAL_HZ / 2, }, .bittiming_const = &pcan_usb_const, /* size of device private data */ .sizeof_dev_private = sizeof(struct pcan_usb), + .ethtool_ops = &pcan_usb_ethtool_ops, + /* timestamps usage */ .ts_used_bits = 16, - .ts_period = 24575, /* calibration period in ts. 
*/ .us_per_ts_scale = PCAN_USB_TS_US_PER_TICK, /* us=(ts*scale) */ .us_per_ts_shift = PCAN_USB_TS_DIV_SHIFTER, /* >> shift */ @@ -897,9 +1046,11 @@ const struct peak_usb_adapter pcan_usb = { .dev_init = pcan_usb_init, .dev_set_bus = pcan_usb_write_mode, .dev_set_bittiming = pcan_usb_set_bittiming, - .dev_get_device_id = pcan_usb_get_device_id, + .dev_get_can_channel_id = pcan_usb_get_can_channel_id, + .dev_set_can_channel_id = pcan_usb_set_can_channel_id, .dev_decode_buf = pcan_usb_decode_buf, .dev_encode_msg = pcan_usb_encode_msg, .dev_start = pcan_usb_start, .dev_restart_async = pcan_usb_restart_async, + .do_get_berr_counter = pcan_usb_get_berr_counter, }; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index 611f9d31be5d..cf48bb26d46d 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -1,26 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * CAN driver for PEAK System USB adapters * Derived from the PCAN project file driver/src/pcan_usb_core.c * - * Copyright (C) 2003-2010 PEAK System-Technik GmbH - * Copyright (C) 2010-2012 Stephane Grosjean <s.grosjean@peak-system.com> + * Copyright (C) 2003-2025 PEAK System-Technik GmbH + * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com> * * Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published - * by the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
*/ +#include <linux/device.h> +#include <linux/ethtool.h> #include <linux/init.h> -#include <linux/signal.h> -#include <linux/slab.h> #include <linux/module.h> #include <linux/netdevice.h> +#include <linux/signal.h> +#include <linux/slab.h> +#include <linux/sysfs.h> #include <linux/usb.h> #include <linux/can.h> @@ -29,38 +24,62 @@ #include "pcan_usb_core.h" -MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>"); +MODULE_AUTHOR("Stéphane Grosjean <stephane.grosjean@hms-networks.com>"); MODULE_DESCRIPTION("CAN driver for PEAK-System USB adapters"); MODULE_LICENSE("GPL v2"); /* Table of devices that work with this driver */ -static struct usb_device_id peak_usb_table[] = { - {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USB_PRODUCT_ID)}, - {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPRO_PRODUCT_ID)}, - {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBFD_PRODUCT_ID)}, - {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPROFD_PRODUCT_ID)}, - {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBCHIP_PRODUCT_ID)}, - {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBX6_PRODUCT_ID)}, - {} /* Terminating entry */ +static const struct usb_device_id peak_usb_table[] = { + { + USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USB_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&pcan_usb, + }, { + USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPRO_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&pcan_usb_pro, + }, { + USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBFD_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&pcan_usb_fd, + }, { + USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPROFD_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&pcan_usb_pro_fd, + }, { + USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBCHIP_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&pcan_usb_chip, + }, { + USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBX6_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&pcan_usb_x6, + }, { + /* Terminating entry */ + } }; MODULE_DEVICE_TABLE(usb, peak_usb_table); -/* List of supported PCAN-USB adapters (NULL terminated list) */ -static const struct peak_usb_adapter *const peak_usb_adapters_list[] = { - &pcan_usb, - &pcan_usb_pro, - &pcan_usb_fd, - &pcan_usb_pro_fd, - &pcan_usb_chip, - &pcan_usb_x6, +static ssize_t can_channel_id_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_dev(dev); + struct peak_usb_device *peak_dev = netdev_priv(netdev); + + return sysfs_emit(buf, "%08X\n", peak_dev->can_channel_id); +} +static DEVICE_ATTR_RO(can_channel_id); + +/* mutable to avoid cast in attribute_group */ +static struct attribute *peak_usb_sysfs_attrs[] = { + &dev_attr_can_channel_id.attr, + NULL, +}; + +static const struct attribute_group peak_usb_sysfs_group = { + .name = "peak_usb", + .attrs = peak_usb_sysfs_attrs, }; /* * dump memory */ #define DUMP_WIDTH 16 -void pcan_dump_mem(char *prompt, void *p, int l) +void pcan_dump_mem(const char *prompt, const void *p, int l) { pr_info("%s dumping %s (%d bytes):\n", PCAN_USB_DRIVER_NAME, prompt ? 
prompt : "memory", l); @@ -92,7 +111,7 @@ void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now) u32 delta_ts = time_ref->ts_dev_2 - time_ref->ts_dev_1; if (time_ref->ts_dev_2 < time_ref->ts_dev_1) - delta_ts &= (1 << time_ref->adapter->ts_used_bits) - 1; + delta_ts &= (1ULL << time_ref->adapter->ts_used_bits) - 1; time_ref->ts_total += delta_ts; } @@ -138,14 +157,55 @@ void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *time) /* protect from getting time before setting now */ if (ktime_to_ns(time_ref->tv_host)) { u64 delta_us; + s64 delta_ts = 0; + + /* General case: dev_ts_1 < dev_ts_2 < ts, with: + * + * - dev_ts_1 = previous sync timestamp + * - dev_ts_2 = last sync timestamp + * - ts = event timestamp + * - ts_period = known sync period (theoretical) + * ~ dev_ts2 - dev_ts1 + * *but*: + * + * - time counters wrap (see adapter->ts_used_bits) + * - sometimes, dev_ts_1 < ts < dev_ts2 + * + * "normal" case (sync time counters increase): + * must take into account case when ts wraps (tsw) + * + * < ts_period > < > + * | | | + * ---+--------+----+-------0-+--+--> + * ts_dev_1 | ts_dev_2 | + * ts tsw + */ + if (time_ref->ts_dev_1 < time_ref->ts_dev_2) { + /* case when event time (tsw) wraps */ + if (ts < time_ref->ts_dev_1) + delta_ts = BIT_ULL(time_ref->adapter->ts_used_bits); + + /* Otherwise, sync time counter (ts_dev_2) has wrapped: + * handle case when event time (tsn) hasn't. + * + * < ts_period > < > + * | | | + * ---+--------+--0-+---------+--+--> + * ts_dev_1 | ts_dev_2 | + * tsn ts + */ + } else if (time_ref->ts_dev_1 < ts) { + delta_ts = -BIT_ULL(time_ref->adapter->ts_used_bits); + } - delta_us = ts - time_ref->ts_dev_2; - if (ts < time_ref->ts_dev_2) - delta_us &= (1 << time_ref->adapter->ts_used_bits) - 1; + /* add delay between last sync and event timestamps */ + delta_ts += (signed int)(ts - time_ref->ts_dev_2); - delta_us += time_ref->ts_total; + /* add time from beginning to last sync */ + delta_ts += time_ref->ts_total; - delta_us *= time_ref->adapter->us_per_ts_scale; + /* convert ticks number into microseconds */ + delta_us = delta_ts * time_ref->adapter->us_per_ts_scale; delta_us >>= time_ref->adapter->us_per_ts_shift; *time = ktime_add_us(time_ref->tv_host_0, delta_us); @@ -154,15 +214,15 @@ void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *time) } } -/* - * post received skb after having set any hw timestamp - */ -int peak_usb_netif_rx(struct sk_buff *skb, - struct peak_time_ref *time_ref, u32 ts_low) +/* post received skb with native 64-bit hw timestamp */ +int peak_usb_netif_rx_64(struct sk_buff *skb, u32 ts_low, u32 ts_high) { struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb); + u64 ns_ts; - peak_usb_get_ts_time(time_ref, ts_low, &hwts->hwtstamp); + ns_ts = (u64)ts_high << 32 | ts_low; + ns_ts *= NSEC_PER_USEC; + hwts->hwtstamp = ns_to_ktime(ns_ts); return netif_rx(skb); } @@ -240,6 +300,7 @@ static void peak_usb_write_bulk_callback(struct urb *urb) struct peak_tx_urb_context *context = urb->context; struct peak_usb_device *dev; struct net_device *netdev; + int tx_bytes; BUG_ON(!context); @@ -254,33 +315,35 @@ static void peak_usb_write_bulk_callback(struct urb *urb) /* check tx status */ switch (urb->status) { case 0: - /* transmission complete */ - netdev->stats.tx_packets++; - netdev->stats.tx_bytes += context->data_len; - /* prevent tx timeout */ netif_trans_update(netdev); break; - default: - if (net_ratelimit()) - netdev_err(netdev, "Tx urb aborted (%d)\n", - urb->status); case -EPROTO: case 
-ENOENT: case -ECONNRESET: case -ESHUTDOWN: + break; + default: + if (net_ratelimit()) + netdev_err(netdev, "Tx urb aborted (%d)\n", + urb->status); break; } /* should always release echo skb and corresponding context */ - can_get_echo_skb(netdev, context->echo_index); + tx_bytes = can_get_echo_skb(netdev, context->echo_index, NULL); context->echo_index = PCAN_USB_MAX_TX_URBS; - /* do wakeup tx queue in case of success only */ - if (!urb->status) + if (!urb->status) { + /* transmission complete */ + netdev->stats.tx_packets++; + netdev->stats.tx_bytes += tx_bytes; + + /* do wakeup tx queue in case of success only */ netif_wake_queue(netdev); + } } /* @@ -292,13 +355,12 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb, struct peak_usb_device *dev = netdev_priv(netdev); struct peak_tx_urb_context *context = NULL; struct net_device_stats *stats = &netdev->stats; - struct canfd_frame *cfd = (struct canfd_frame *)skb->data; struct urb *urb; u8 *obuf; int i, err; size_t size = dev->adapter->tx_buffer_size; - if (can_dropped_invalid_skb(netdev, skb)) + if (can_dev_dropped_skb(netdev, skb)) return NETDEV_TX_OK; for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++) @@ -326,18 +388,15 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb, context->echo_index = i; - /* Note: this works with CANFD frames too */ - context->data_len = cfd->len; - usb_anchor_urb(urb, &dev->tx_submitted); - can_put_echo_skb(skb, netdev, context->echo_index); + can_put_echo_skb(skb, netdev, context->echo_index, 0); atomic_inc(&dev->active_tx_urbs); err = usb_submit_urb(urb, GFP_ATOMIC); if (err) { - can_free_echo_skb(netdev, context->echo_index); + can_free_echo_skb(netdev, context->echo_index, NULL); usb_unanchor_urb(urb); @@ -353,7 +412,7 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb, default: netdev_warn(netdev, "tx urb submitting failed err=%d\n", err); - /* fall through */ + fallthrough; case -ENOENT: /* cable unplugged */ stats->tx_dropped++; @@ -576,19 +635,20 @@ static int peak_usb_ndo_stop(struct net_device *netdev) dev->state &= ~PCAN_USB_STATE_STARTED; netif_stop_queue(netdev); + close_candev(netdev); + + dev->can.state = CAN_STATE_STOPPED; + /* unlink all pending urbs and free used memory */ peak_usb_unlink_all_urbs(dev); if (dev->adapter->dev_stop) dev->adapter->dev_stop(dev); - close_candev(netdev); - - dev->can.state = CAN_STATE_STOPPED; - /* can set bus off now */ if (dev->adapter->dev_set_bus) { int err = dev->adapter->dev_set_bus(dev, 0); + if (err) return err; } @@ -710,7 +770,7 @@ static int peak_usb_set_data_bittiming(struct net_device *netdev) const struct peak_usb_adapter *pa = dev->adapter; if (pa->dev_set_data_bittiming) { - struct can_bittiming *bt = &dev->can.data_bittiming; + struct can_bittiming *bt = &dev->can.fd.data_bittiming; int err = pa->dev_set_data_bittiming(dev, bt); if (err) @@ -724,13 +784,127 @@ static int peak_usb_set_data_bittiming(struct net_device *netdev) return 0; } +static int peak_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config) +{ + config->tx_type = HWTSTAMP_TX_OFF; + config->rx_filter = HWTSTAMP_FILTER_ALL; + + return 0; +} + +static int peak_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + if (config->tx_type == HWTSTAMP_TX_OFF && + config->rx_filter == HWTSTAMP_FILTER_ALL) + return 0; + + NL_SET_ERR_MSG_MOD(extack, "Only RX HWTSTAMP_FILTER_ALL is supported"); + return -ERANGE; +} + static const struct net_device_ops 
peak_usb_netdev_ops = { .ndo_open = peak_usb_ndo_open, .ndo_stop = peak_usb_ndo_stop, .ndo_start_xmit = peak_usb_ndo_start_xmit, - .ndo_change_mtu = can_change_mtu, + .ndo_hwtstamp_get = peak_hwtstamp_get, + .ndo_hwtstamp_set = peak_hwtstamp_set, }; +/* CAN-USB devices generally handle 32-bit CAN channel IDs. + * In case one doesn't, then it have to overload this function. + */ +int peak_usb_get_eeprom_len(struct net_device *netdev) +{ + return sizeof(u32); +} + +/* Every CAN-USB device exports the dev_get_can_channel_id() operation. It is used + * here to fill the data buffer with the user defined CAN channel ID. + */ +int peak_usb_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct peak_usb_device *dev = netdev_priv(netdev); + u32 ch_id; + __le32 ch_id_le; + int err; + + err = dev->adapter->dev_get_can_channel_id(dev, &ch_id); + if (err) + return err; + + /* ethtool operates on individual bytes. The byte order of the CAN + * channel id in memory depends on the kernel architecture. We + * convert the CAN channel id back to the native byte order of the PEAK + * device itself to ensure that the order is consistent for all + * host architectures. + */ + ch_id_le = cpu_to_le32(ch_id); + memcpy(data, (u8 *)&ch_id_le + eeprom->offset, eeprom->len); + + /* update cached value */ + dev->can_channel_id = ch_id; + return err; +} + +/* Every CAN-USB device exports the dev_get_can_channel_id()/dev_set_can_channel_id() + * operations. They are used here to set the new user defined CAN channel ID. + */ +int peak_usb_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct peak_usb_device *dev = netdev_priv(netdev); + u32 ch_id; + __le32 ch_id_le; + int err; + + /* first, read the current user defined CAN channel ID */ + err = dev->adapter->dev_get_can_channel_id(dev, &ch_id); + if (err) { + netdev_err(netdev, "Failed to init CAN channel id (err %d)\n", err); + return err; + } + + /* do update the value with user given bytes. + * ethtool operates on individual bytes. The byte order of the CAN + * channel ID in memory depends on the kernel architecture. We + * convert the CAN channel ID back to the native byte order of the PEAK + * device itself to ensure that the order is consistent for all + * host architectures. + */ + ch_id_le = cpu_to_le32(ch_id); + memcpy((u8 *)&ch_id_le + eeprom->offset, data, eeprom->len); + ch_id = le32_to_cpu(ch_id_le); + + /* flash the new value now */ + err = dev->adapter->dev_set_can_channel_id(dev, ch_id); + if (err) { + netdev_err(netdev, "Failed to write new CAN channel id (err %d)\n", + err); + return err; + } + + /* update cached value with the new one */ + dev->can_channel_id = ch_id; + + return 0; +} + +int pcan_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info) +{ + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + info->tx_types = BIT(HWTSTAMP_TX_OFF); + info->rx_filters = BIT(HWTSTAMP_FILTER_ALL); + + return 0; +} + /* * create one device which is attached to CAN controller #ctrl_idx of the * usb adapter. 
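The get/set_eeprom helpers above expose the 32-bit CAN channel ID through ethtool's EEPROM interface, and the byte-order comment is the subtle part: the ID is always presented in the device's little-endian layout so that per-byte reads and writes mean the same thing on any host architecture. A standalone sketch of that round trip, with htole32()/le32toh() from <endian.h> standing in for the kernel's cpu_to_le32()/le32_to_cpu() and no bounds checking (sketch only, not driver code):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t patch_channel_id(uint32_t ch_id, unsigned int offset,
                                 const uint8_t *data, unsigned int len)
{
        uint32_t le = htole32(ch_id);           /* device byte order */

        memcpy((uint8_t *)&le + offset, data, len);     /* ethtool patches bytes */
        return le32toh(le);                     /* back to host order for the cmd */
}

int main(void)
{
        uint8_t new_low = 0x2a;

        /* patching byte 0 of channel id 0x11223344 yields 0x1122332a everywhere */
        printf("0x%08x\n", (unsigned int)patch_channel_id(0x11223344, 0, &new_low, 1));
        return 0;
}

In practice this is what lets a userspace tool such as ethtool -e can0 / ethtool -E can0 read and patch individual bytes of the channel ID without caring about host endianness.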
@@ -758,7 +932,7 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter, dev = netdev_priv(netdev); /* allocate a buffer large enough to send commands */ - dev->cmd_buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL); + dev->cmd_buf = kzalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL); if (!dev->cmd_buf) { err = -ENOMEM; goto lbl_free_candev; @@ -776,8 +950,8 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter, dev->can.clock = peak_usb_adapter->clock; dev->can.bittiming_const = peak_usb_adapter->bittiming_const; dev->can.do_set_bittiming = peak_usb_set_bittiming; - dev->can.data_bittiming_const = peak_usb_adapter->data_bittiming_const; - dev->can.do_set_data_bittiming = peak_usb_set_data_bittiming; + dev->can.fd.data_bittiming_const = peak_usb_adapter->data_bittiming_const; + dev->can.fd.do_set_data_bittiming = peak_usb_set_data_bittiming; dev->can.do_set_mode = peak_usb_set_mode; dev->can.do_get_berr_counter = peak_usb_adapter->do_get_berr_counter; dev->can.ctrlmode_supported = peak_usb_adapter->ctrlmode_supported; @@ -786,6 +960,12 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter, netdev->flags |= IFF_ECHO; /* we support local echo */ + /* add ethtool support */ + netdev->ethtool_ops = peak_usb_adapter->ethtool_ops; + + /* register peak_usb sysfs files */ + netdev->sysfs_groups[0] = &peak_usb_sysfs_group; + init_usb_anchor(&dev->rx_submitted); init_usb_anchor(&dev->tx_submitted); @@ -823,18 +1003,21 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter, if (dev->adapter->dev_set_bus) { err = dev->adapter->dev_set_bus(dev, 0); if (err) - goto lbl_unregister_candev; + goto adap_dev_free; } - /* get device number early */ - if (dev->adapter->dev_get_device_id) - dev->adapter->dev_get_device_id(dev, &dev->device_number); + /* get CAN channel id early */ + dev->adapter->dev_get_can_channel_id(dev, &dev->can_channel_id); - netdev_info(netdev, "attached to %s channel %u (device %u)\n", - peak_usb_adapter->name, ctrl_idx, dev->device_number); + netdev_info(netdev, "attached to %s channel %u (device 0x%08X)\n", + peak_usb_adapter->name, ctrl_idx, dev->can_channel_id); return 0; +adap_dev_free: + if (dev->adapter->dev_free) + dev->adapter->dev_free(dev); + lbl_unregister_candev: unregister_candev(netdev); @@ -863,9 +1046,9 @@ static void peak_usb_disconnect(struct usb_interface *intf) dev_prev_siblings = dev->prev_siblings; dev->state &= ~PCAN_USB_STATE_CONNECTED; - strncpy(name, netdev->name, IFNAMSIZ); + strscpy(name, netdev->name, IFNAMSIZ); - unregister_netdev(netdev); + unregister_candev(netdev); kfree(dev->cmd_buf); dev->next_siblings = NULL; @@ -885,24 +1068,11 @@ static void peak_usb_disconnect(struct usb_interface *intf) static int peak_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { - struct usb_device *usb_dev = interface_to_usbdev(intf); - const u16 usb_id_product = le16_to_cpu(usb_dev->descriptor.idProduct); - const struct peak_usb_adapter *peak_usb_adapter = NULL; + const struct peak_usb_adapter *peak_usb_adapter; int i, err = -ENOMEM; /* get corresponding PCAN-USB adapter */ - for (i = 0; i < ARRAY_SIZE(peak_usb_adapters_list); i++) - if (peak_usb_adapters_list[i]->device_id == usb_id_product) { - peak_usb_adapter = peak_usb_adapters_list[i]; - break; - } - - if (!peak_usb_adapter) { - /* should never come except device_id bad usage in this file */ - pr_err("%s: didn't find device id. 
0x%x in devices list\n", - PCAN_USB_DRIVER_NAME, usb_id_product); - return -ENODEV; - } + peak_usb_adapter = (const struct peak_usb_adapter *)id->driver_info; /* got corresponding adapter: check if it handles current interface */ if (peak_usb_adapter->intf_probe) { @@ -966,7 +1136,7 @@ static void __exit peak_usb_exit(void) int err; /* last chance do send any synchronous commands here */ - err = driver_for_each_device(&peak_usb_driver.drvwrap.driver, NULL, + err = driver_for_each_device(&peak_usb_driver.driver, NULL, NULL, peak_usb_do_device_exit); if (err) pr_err("%s: failed to stop all can devices (err %d)\n", diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h index fb23489e1cb8..d1c1897d47b9 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h @@ -1,20 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * CAN driver for PEAK System USB adapters * Derived from the PCAN project file driver/src/pcan_usb_core.c * - * Copyright (C) 2003-2010 PEAK System-Technik GmbH - * Copyright (C) 2010-2012 Stephane Grosjean <s.grosjean@peak-system.com> + * Copyright (C) 2003-2025 PEAK System-Technik GmbH + * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com> * * Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published - * by the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
*/ #ifndef PCAN_USB_CORE_H #define PCAN_USB_CORE_H @@ -39,7 +31,7 @@ /* usb adapters maximum channels per usb interface */ #define PCAN_USB_MAX_CHANNEL 2 -/* maximum length of the usb commands sent to/received from the devices */ +/* maximum length of the usb commands sent to/received from the devices */ #define PCAN_USB_MAX_CMD_LEN 32 struct peak_usb_device; @@ -54,6 +46,8 @@ struct peak_usb_adapter { const struct can_bittiming_const * const data_bittiming_const; unsigned int ctrl_count; + const struct ethtool_ops *ethtool_ops; + int (*intf_probe)(struct usb_interface *intf); int (*dev_init)(struct peak_usb_device *dev); @@ -66,7 +60,8 @@ struct peak_usb_adapter { int (*dev_set_data_bittiming)(struct peak_usb_device *dev, struct can_bittiming *bt); int (*dev_set_bus)(struct peak_usb_device *dev, u8 onoff); - int (*dev_get_device_id)(struct peak_usb_device *dev, u32 *device_id); + int (*dev_get_can_channel_id)(struct peak_usb_device *dev, u32 *can_ch_id); + int (*dev_set_can_channel_id)(struct peak_usb_device *dev, u32 can_ch_id); int (*dev_decode_buf)(struct peak_usb_device *dev, struct urb *urb); int (*dev_encode_msg)(struct peak_usb_device *dev, struct sk_buff *skb, u8 *obuf, size_t *size); @@ -79,7 +74,6 @@ struct peak_usb_adapter { u8 ep_msg_in; u8 ep_msg_out[PCAN_USB_MAX_CHANNEL]; u8 ts_used_bits; - u32 ts_period; u8 us_per_ts_shift; u32 us_per_ts_scale; @@ -106,7 +100,6 @@ struct peak_time_ref { struct peak_tx_urb_context { struct peak_usb_device *dev; u32 echo_index; - u8 data_len; struct urb *urb; }; @@ -120,8 +113,6 @@ struct peak_usb_device { unsigned int ctrl_idx; u32 state; - struct sk_buff *echo_skb[PCAN_USB_MAX_TX_URBS]; - struct usb_device *udev; struct net_device *netdev; @@ -132,19 +123,18 @@ struct peak_usb_device { u8 *cmd_buf; struct usb_anchor rx_submitted; - u32 device_number; + /* equivalent to the device ID in the Windows API */ + u32 can_channel_id; u8 device_rev; u8 ep_msg_in; u8 ep_msg_out; - u16 bus_load; - struct peak_usb_device *prev_siblings; struct peak_usb_device *next_siblings; }; -void pcan_dump_mem(char *prompt, void *p, int l); +void pcan_dump_mem(const char *prompt, const void *p, int l); /* common timestamp management */ void peak_usb_init_time_ref(struct peak_time_ref *time_ref, @@ -152,9 +142,15 @@ void peak_usb_init_time_ref(struct peak_time_ref *time_ref, void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now); void peak_usb_set_ts_now(struct peak_time_ref *time_ref, u32 ts_now); void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *tv); -int peak_usb_netif_rx(struct sk_buff *skb, - struct peak_time_ref *time_ref, u32 ts_low); +int peak_usb_netif_rx_64(struct sk_buff *skb, u32 ts_low, u32 ts_high); void peak_usb_async_complete(struct urb *urb); void peak_usb_restart_complete(struct peak_usb_device *dev); - +int pcan_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info); + +/* common 32-bit CAN channel ID ethtool management */ +int peak_usb_get_eeprom_len(struct net_device *netdev); +int peak_usb_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *data); +int peak_usb_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *data); #endif diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c index dd161c5eea8e..be84191cde56 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c @@ -1,20 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * CAN driver for 
PEAK System PCAN-USB FD / PCAN-USB Pro FD adapter * - * Copyright (C) 2013-2014 Stephane Grosjean <s.grosjean@peak-system.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published - * by the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. + * Copyright (C) 2013-2025 PEAK System-Technik GmbH + * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com> */ +#include <linux/ethtool.h> +#include <linux/module.h> #include <linux/netdevice.h> #include <linux/usb.h> -#include <linux/module.h> #include <linux/can.h> #include <linux/can/dev.h> @@ -24,9 +18,6 @@ #include "pcan_usb_core.h" #include "pcan_usb_pro.h" -MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB FD adapter"); -MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB Pro FD adapter"); - #define PCAN_USBPROFD_CHANNEL_COUNT 2 #define PCAN_USBFD_CHANNEL_COUNT 1 @@ -43,7 +34,11 @@ MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB Pro FD adapter"); #define PCAN_UFD_RX_BUFFER_SIZE 2048 #define PCAN_UFD_TX_BUFFER_SIZE 512 -/* read some versions info from the hw devcie */ +/* struct pcan_ufd_fw_info::type */ +#define PCAN_USBFD_TYPE_STD 1 +#define PCAN_USBFD_TYPE_EXT 2 /* includes EP numbers */ + +/* read some versions info from the hw device */ struct __packed pcan_ufd_fw_info { __le16 size_of; /* sizeof this */ __le16 type; /* type of this structure */ @@ -54,6 +49,13 @@ struct __packed pcan_ufd_fw_info { __le32 dev_id[2]; /* "device id" per CAN */ __le32 ser_no; /* S/N */ __le32 flags; /* special functions */ + + /* extended data when type >= PCAN_USBFD_TYPE_EXT */ + u8 cmd_out_ep; /* ep for cmd */ + u8 cmd_in_ep; /* ep for replies */ + u8 data_out_ep[2]; /* ep for CANx TX */ + u8 data_in_ep; /* ep for CAN RX */ + u8 dummy[3]; }; /* handle device specific info used by the netdevices */ @@ -146,6 +148,15 @@ struct __packed pcan_ufd_ovr_msg { u8 unused[3]; }; +#define PCAN_UFD_CMD_DEVID_SET 0x81 + +struct __packed pcan_ufd_device_id { + __le16 opcode_channel; + + u16 unused; + __le32 device_id; +}; + static inline int pufd_omsg_get_channel(struct pcan_ufd_ovr_msg *om) { return om->channel & 0xf; @@ -181,6 +192,9 @@ static inline void *pcan_usb_fd_cmd_buffer(struct peak_usb_device *dev) /* send PCAN-USB Pro FD commands synchronously */ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail) { + struct pcan_usb_fd_device *pdev = + container_of(dev, struct pcan_usb_fd_device, dev); + struct pcan_ufd_fw_info *fw_info = &pdev->usb_if->fw_info; void *cmd_head = pcan_usb_fd_cmd_buffer(dev); int err = 0; u8 *packet_ptr; @@ -210,7 +224,7 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail) do { err = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, - PCAN_USBPRO_EP_CMDOUT), + fw_info->cmd_out_ep), packet_ptr, packet_len, NULL, PCAN_UFD_CMD_TIMEOUT_MS); if (err) { @@ -230,6 +244,15 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail) return err; } +static int pcan_usb_fd_read_fwinfo(struct peak_usb_device *dev, + struct pcan_ufd_fw_info *fw_info) +{ + return pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO, + PCAN_USBPRO_INFO_FW, + fw_info, + sizeof(*fw_info)); +} + /* build the commands list in the given buffer, to enter operational mode */ 
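Worth spelling out from the hunks above: the firmware-info reply now comes in two flavours, and the command path only trusts the endpoint numbers carried in the reply when the device reports the extended layout (type >= PCAN_USBFD_TYPE_EXT); otherwise the historical fixed endpoints keep being used. A compilable sketch of that decision, with the middle fields of the reply elided, the wire little-endian conversion skipped, and the fallback value made up for illustration (not driver code):

#include <stdint.h>
#include <stdio.h>

#define TYPE_EXT           2            /* extended reply carries EP numbers */
#define DEFAULT_CMD_OUT_EP 0x01         /* illustrative fixed fallback value */

struct fw_info_ext {
        uint16_t size_of;
        uint16_t type;  /* little-endian on the wire; host order here for brevity */
        /* ... version, serial and per-channel device-id fields elided ... */
        uint8_t cmd_out_ep;
        uint8_t cmd_in_ep;
} __attribute__((packed));

static uint8_t pick_cmd_out_ep(const struct fw_info_ext *fi)
{
        if (fi->type >= TYPE_EXT)
                return fi->cmd_out_ep;  /* endpoint reported by the firmware */
        return DEFAULT_CMD_OUT_EP;      /* older firmware: fixed endpoint */
}

int main(void)
{
        struct fw_info_ext fi = { .size_of = sizeof(fi), .type = TYPE_EXT,
                                  .cmd_out_ep = 0x02, .cmd_in_ep = 0x82 };

        printf("cmd endpoint: 0x%02x\n", pick_cmd_out_ep(&fi));
        return 0;
}

The same pattern is applied again later in pcan_usb_fd_init() for the per-channel data endpoints.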
static int pcan_usb_fd_build_restart_cmd(struct peak_usb_device *dev, u8 *buf) { @@ -430,12 +453,43 @@ static int pcan_usb_fd_set_bittiming_fast(struct peak_usb_device *dev, return pcan_usb_fd_send_cmd(dev, ++cmd); } +/* read user CAN channel id from device */ +static int pcan_usb_fd_get_can_channel_id(struct peak_usb_device *dev, + u32 *can_ch_id) +{ + int err; + struct pcan_usb_fd_if *usb_if = pcan_usb_fd_dev_if(dev); + + err = pcan_usb_fd_read_fwinfo(dev, &usb_if->fw_info); + if (err) + return err; + + *can_ch_id = le32_to_cpu(usb_if->fw_info.dev_id[dev->ctrl_idx]); + return err; +} + +/* set a new CAN channel id in the flash memory of the device */ +static int pcan_usb_fd_set_can_channel_id(struct peak_usb_device *dev, u32 can_ch_id) +{ + struct pcan_ufd_device_id *cmd = pcan_usb_fd_cmd_buffer(dev); + + cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, + PCAN_UFD_CMD_DEVID_SET); + cmd->device_id = cpu_to_le32(can_ch_id); + + /* send the command */ + return pcan_usb_fd_send_cmd(dev, ++cmd); +} + /* handle restart but in asynchronously way * (uses PCAN-USB Pro code to complete asynchronous request) */ static int pcan_usb_fd_restart_async(struct peak_usb_device *dev, struct urb *urb, u8 *buf) { + struct pcan_usb_fd_device *pdev = + container_of(dev, struct pcan_usb_fd_device, dev); + struct pcan_ufd_fw_info *fw_info = &pdev->usb_if->fw_info; u8 *pc = buf; /* build the entire cmds list in the provided buffer, to go back into @@ -449,7 +503,7 @@ static int pcan_usb_fd_restart_async(struct peak_usb_device *dev, /* complete the URB */ usb_fill_bulk_urb(urb, dev->udev, - usb_sndbulkpipe(dev->udev, PCAN_USBPRO_EP_CMDOUT), + usb_sndbulkpipe(dev->udev, fw_info->cmd_out_ep), buf, pc - buf, pcan_usb_pro_restart_complete, dev); @@ -476,12 +530,18 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if, struct pucan_msg *rx_msg) { struct pucan_rx_msg *rm = (struct pucan_rx_msg *)rx_msg; - struct peak_usb_device *dev = usb_if->dev[pucan_msg_get_channel(rm)]; - struct net_device *netdev = dev->netdev; + struct peak_usb_device *dev; + struct net_device *netdev; struct canfd_frame *cfd; struct sk_buff *skb; const u16 rx_msg_flags = le16_to_cpu(rm->flags); + if (pucan_msg_get_channel(rm) >= ARRAY_SIZE(usb_if->dev)) + return -ENOMEM; + + dev = usb_if->dev[pucan_msg_get_channel(rm)]; + netdev = dev->netdev; + if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN) { /* CANFD frame case */ skb = alloc_canfd_skb(netdev, &cfd); @@ -494,14 +554,16 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if, if (rx_msg_flags & PUCAN_MSG_ERROR_STATE_IND) cfd->flags |= CANFD_ESI; - cfd->len = can_dlc2len(get_canfd_dlc(pucan_msg_get_dlc(rm))); + cfd->len = can_fd_dlc2len(pucan_msg_get_dlc(rm)); } else { /* CAN 2.0 frame case */ skb = alloc_can_skb(netdev, (struct can_frame **)&cfd); if (!skb) return -ENOMEM; - cfd->len = get_can_dlc(pucan_msg_get_dlc(rm)); + can_frame_set_cc_len((struct can_frame *)cfd, + pucan_msg_get_dlc(rm), + dev->can.ctrlmode); } cfd->can_id = le32_to_cpu(rm->can_id); @@ -509,15 +571,16 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if, if (rx_msg_flags & PUCAN_MSG_EXT_ID) cfd->can_id |= CAN_EFF_FLAG; - if (rx_msg_flags & PUCAN_MSG_RTR) + if (rx_msg_flags & PUCAN_MSG_RTR) { cfd->can_id |= CAN_RTR_FLAG; - else + } else { memcpy(cfd->data, rm->d, cfd->len); - - peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low)); - + netdev->stats.rx_bytes += cfd->len; + } netdev->stats.rx_packets++; - netdev->stats.rx_bytes += cfd->len; + + 
peak_usb_netif_rx_64(skb, le32_to_cpu(rm->ts_low), + le32_to_cpu(rm->ts_high)); return 0; } @@ -527,15 +590,21 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if, struct pucan_msg *rx_msg) { struct pucan_status_msg *sm = (struct pucan_status_msg *)rx_msg; - struct peak_usb_device *dev = usb_if->dev[pucan_stmsg_get_channel(sm)]; - struct pcan_usb_fd_device *pdev = - container_of(dev, struct pcan_usb_fd_device, dev); + struct pcan_usb_fd_device *pdev; enum can_state new_state = CAN_STATE_ERROR_ACTIVE; enum can_state rx_state, tx_state; - struct net_device *netdev = dev->netdev; + struct peak_usb_device *dev; + struct net_device *netdev; struct can_frame *cf; struct sk_buff *skb; + if (pucan_stmsg_get_channel(sm) >= ARRAY_SIZE(usb_if->dev)) + return -ENOMEM; + + dev = usb_if->dev[pucan_stmsg_get_channel(sm)]; + pdev = container_of(dev, struct pcan_usb_fd_device, dev); + netdev = dev->netdev; + /* nothing should be sent while in BUS_OFF state */ if (dev->can.state == CAN_STATE_BUS_OFF) return 0; @@ -547,11 +616,10 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if, } else if (sm->channel_p_w_b & PUCAN_BUS_WARNING) { new_state = CAN_STATE_ERROR_WARNING; } else { - /* no error bit (so, no error skb, back to active state) */ - dev->can.state = CAN_STATE_ERROR_ACTIVE; + /* back to (or still in) ERROR_ACTIVE state */ + new_state = CAN_STATE_ERROR_ACTIVE; pdev->bec.txerr = 0; pdev->bec.rxerr = 0; - return 0; } /* state hasn't changed */ @@ -564,8 +632,7 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if, /* allocate an skb to store the error frame */ skb = alloc_can_err_skb(netdev, &cf); - if (skb) - can_change_state(netdev, cf, tx_state, rx_state); + can_change_state(netdev, cf, tx_state, rx_state); /* things must be done even in case of OOM */ if (new_state == CAN_STATE_BUS_OFF) @@ -574,10 +641,8 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if, if (!skb) return -ENOMEM; - peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low)); - - netdev->stats.rx_packets++; - netdev->stats.rx_bytes += cf->can_dlc; + peak_usb_netif_rx_64(skb, le32_to_cpu(sm->ts_low), + le32_to_cpu(sm->ts_high)); return 0; } @@ -587,9 +652,14 @@ static int pcan_usb_fd_decode_error(struct pcan_usb_fd_if *usb_if, struct pucan_msg *rx_msg) { struct pucan_error_msg *er = (struct pucan_error_msg *)rx_msg; - struct peak_usb_device *dev = usb_if->dev[pucan_ermsg_get_channel(er)]; - struct pcan_usb_fd_device *pdev = - container_of(dev, struct pcan_usb_fd_device, dev); + struct pcan_usb_fd_device *pdev; + struct peak_usb_device *dev; + + if (pucan_ermsg_get_channel(er) >= ARRAY_SIZE(usb_if->dev)) + return -EINVAL; + + dev = usb_if->dev[pucan_ermsg_get_channel(er)]; + pdev = container_of(dev, struct pcan_usb_fd_device, dev); /* keep a trace of tx and rx error counters for later use */ pdev->bec.txerr = er->tx_err_cnt; @@ -603,11 +673,17 @@ static int pcan_usb_fd_decode_overrun(struct pcan_usb_fd_if *usb_if, struct pucan_msg *rx_msg) { struct pcan_ufd_ovr_msg *ov = (struct pcan_ufd_ovr_msg *)rx_msg; - struct peak_usb_device *dev = usb_if->dev[pufd_omsg_get_channel(ov)]; - struct net_device *netdev = dev->netdev; + struct peak_usb_device *dev; + struct net_device *netdev; struct can_frame *cf; struct sk_buff *skb; + if (pufd_omsg_get_channel(ov) >= ARRAY_SIZE(usb_if->dev)) + return -EINVAL; + + dev = usb_if->dev[pufd_omsg_get_channel(ov)]; + netdev = dev->netdev; + /* allocate an skb to store the error frame */ skb = alloc_can_err_skb(netdev, &cf); if 
(!skb) @@ -616,7 +692,8 @@ static int pcan_usb_fd_decode_overrun(struct pcan_usb_fd_if *usb_if, cf->can_id |= CAN_ERR_CRTL; cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; - peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(ov->ts_low)); + peak_usb_netif_rx_64(skb, le32_to_cpu(ov->ts_low), + le32_to_cpu(ov->ts_high)); netdev->stats.rx_over_errors++; netdev->stats.rx_errors++; @@ -722,7 +799,10 @@ static int pcan_usb_fd_encode_msg(struct peak_usb_device *dev, struct pucan_tx_msg *tx_msg = (struct pucan_tx_msg *)obuf; struct canfd_frame *cfd = (struct canfd_frame *)skb->data; u16 tx_msg_size, tx_msg_flags; - u8 can_dlc; + u8 dlc; + + if (cfd->len > CANFD_MAX_DLEN) + return -EINVAL; tx_msg_size = ALIGN(sizeof(struct pucan_tx_msg) + cfd->len, 4); tx_msg->size = cpu_to_le16(tx_msg_size); @@ -738,7 +818,7 @@ static int pcan_usb_fd_encode_msg(struct peak_usb_device *dev, if (can_is_canfd_skb(skb)) { /* considering a CANFD frame */ - can_dlc = can_len2dlc(cfd->len); + dlc = can_fd_len2dlc(cfd->len); tx_msg_flags |= PUCAN_MSG_EXT_DATA_LEN; @@ -749,14 +829,19 @@ static int pcan_usb_fd_encode_msg(struct peak_usb_device *dev, tx_msg_flags |= PUCAN_MSG_ERROR_STATE_IND; } else { /* CAND 2.0 frames */ - can_dlc = cfd->len; + dlc = can_get_cc_dlc((struct can_frame *)cfd, + dev->can.ctrlmode); if (cfd->can_id & CAN_RTR_FLAG) tx_msg_flags |= PUCAN_MSG_RTR; } + /* Single-Shot frame */ + if (dev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) + tx_msg_flags |= PUCAN_MSG_SINGLE_SHOT; + tx_msg->flags = cpu_to_le16(tx_msg_flags); - tx_msg->channel_dlc = PUCAN_MSG_CHANNEL_DLC(dev->ctrl_idx, can_dlc); + tx_msg->channel_dlc = PUCAN_MSG_CHANNEL_DLC(dev->ctrl_idx, dlc); memcpy(tx_msg->d, cfd->data, cfd->len); /* add null size message to tag the end (messages are 32-bits aligned) @@ -804,7 +889,7 @@ static int pcan_usb_fd_start(struct peak_usb_device *dev) return err; } -/* socket callback used to copy berr counters values receieved through USB */ +/* socket callback used to copy berr counters values received through USB */ static int pcan_usb_fd_get_berr_counter(const struct net_device *netdev, struct can_berr_counter *bec) { @@ -818,6 +903,15 @@ static int pcan_usb_fd_get_berr_counter(const struct net_device *netdev, return 0; } +/* probe function for all PCAN-USB FD family usb interfaces */ +static int pcan_usb_fd_probe(struct usb_interface *intf) +{ + struct usb_host_interface *iface_desc = &intf->altsetting[0]; + + /* CAN interface is always interface #0 */ + return iface_desc->desc.bInterfaceNumber; +} + /* stop interface (last chance before set bus off) */ static int pcan_usb_fd_stop(struct peak_usb_device *dev) { @@ -839,6 +933,7 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev) { struct pcan_usb_fd_device *pdev = container_of(dev, struct pcan_usb_fd_device, dev); + struct pcan_ufd_fw_info *fw_info; int i, err = -ENOMEM; /* do this for 1st channel only */ @@ -849,7 +944,7 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev) goto err_out; /* allocate command buffer once for all for the interface */ - pdev->cmd_buffer_addr = kmalloc(PCAN_UFD_CMD_BUFFER_SIZE, + pdev->cmd_buffer_addr = kzalloc(PCAN_UFD_CMD_BUFFER_SIZE, GFP_KERNEL); if (!pdev->cmd_buffer_addr) goto err_out_1; @@ -857,10 +952,9 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev) /* number of ts msgs to ignore before taking one into account */ pdev->usb_if->cm_ignore_count = 5; - err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO, - PCAN_USBPRO_INFO_FW, - &pdev->usb_if->fw_info, - sizeof(pdev->usb_if->fw_info)); + fw_info = 
&pdev->usb_if->fw_info; + + err = pcan_usb_fd_read_fwinfo(dev, fw_info); if (err) { dev_err(dev->netdev->dev.parent, "unable to read %s firmware info (err %d)\n", @@ -874,14 +968,14 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev) */ dev_info(dev->netdev->dev.parent, "PEAK-System %s v%u fw v%u.%u.%u (%u channels)\n", - dev->adapter->name, pdev->usb_if->fw_info.hw_version, - pdev->usb_if->fw_info.fw_version[0], - pdev->usb_if->fw_info.fw_version[1], - pdev->usb_if->fw_info.fw_version[2], + dev->adapter->name, fw_info->hw_version, + fw_info->fw_version[0], + fw_info->fw_version[1], + fw_info->fw_version[2], dev->adapter->ctrl_count); /* check for ability to switch between ISO/non-ISO modes */ - if (pdev->usb_if->fw_info.fw_version[0] >= 2) { + if (fw_info->fw_version[0] >= 2) { /* firmware >= 2.x supports ISO/non-ISO switching */ dev->can.ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO; } else { @@ -889,6 +983,15 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev) dev->can.ctrlmode |= CAN_CTRLMODE_FD_NON_ISO; } + /* if vendor rsp type is greater than or equal to 2, then it + * contains EP numbers to use for cmds pipes. If not, then + * default EP should be used. + */ + if (le16_to_cpu(fw_info->type) < PCAN_USBFD_TYPE_EXT) { + fw_info->cmd_out_ep = PCAN_USBPRO_EP_CMDOUT; + fw_info->cmd_in_ep = PCAN_USBPRO_EP_CMDIN; + } + /* tell the hardware the can driver is running */ err = pcan_usb_fd_drv_loaded(dev, 1); if (err) { @@ -909,12 +1012,23 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev) /* do a copy of the ctrlmode[_supported] too */ dev->can.ctrlmode = ppdev->dev.can.ctrlmode; dev->can.ctrlmode_supported = ppdev->dev.can.ctrlmode_supported; + + fw_info = &pdev->usb_if->fw_info; } pdev->usb_if->dev[dev->ctrl_idx] = dev; - dev->device_number = + dev->can_channel_id = le32_to_cpu(pdev->usb_if->fw_info.dev_id[dev->ctrl_idx]); + /* if vendor rsp type is greater than or equal to 2, then it contains EP + * numbers to use for data pipes. If not, then statically defined EP are + * used (see peak_usb_create_dev()). 
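One more detail in this init path that is easy to miss: the firmware version gates which CAN_CTRLMODE bits userspace may toggle. With firmware major version >= 2 the ISO/non-ISO FD framing becomes switchable; older firmware only implements non-ISO framing, so that mode is forced on instead of being offered. A tiny standalone sketch of the gating, using an illustrative flag value rather than the kernel's CAN_CTRLMODE_FD_NON_ISO (sketch only, not driver code):

#include <stdio.h>

#define FD_NON_ISO 0x1  /* illustrative stand-in */

struct caps {
        unsigned int ctrlmode;          /* modes currently forced/enabled */
        unsigned int ctrlmode_supported;/* modes userspace may switch */
};

static void apply_fw_caps(struct caps *c, unsigned char fw_major)
{
        if (fw_major >= 2)
                c->ctrlmode_supported |= FD_NON_ISO;    /* switchable */
        else
                c->ctrlmode |= FD_NON_ISO;              /* forced on */
}

int main(void)
{
        struct caps old_fw = { 0, 0 }, new_fw = { 0, 0 };

        apply_fw_caps(&old_fw, 1);
        apply_fw_caps(&new_fw, 2);
        printf("old fw: forced=%#x, new fw: switchable=%#x\n",
               old_fw.ctrlmode, new_fw.ctrlmode_supported);
        return 0;
}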
+ */ + if (le16_to_cpu(fw_info->type) >= PCAN_USBFD_TYPE_EXT) { + dev->ep_msg_in = fw_info->data_in_ep; + dev->ep_msg_out = fw_info->data_out_ep[dev->ctrl_idx]; + } + /* set clock domain */ for (i = 0; i < ARRAY_SIZE(pcan_usb_fd_clk_freq); i++) if (dev->adapter->clock.freq == pcan_usb_fd_clk_freq[i]) @@ -988,6 +1102,35 @@ static void pcan_usb_fd_free(struct peak_usb_device *dev) } } +/* blink LED's */ +static int pcan_usb_fd_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct peak_usb_device *dev = netdev_priv(netdev); + int err = 0; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + err = pcan_usb_fd_set_can_led(dev, PCAN_UFD_LED_FAST); + break; + case ETHTOOL_ID_INACTIVE: + err = pcan_usb_fd_set_can_led(dev, PCAN_UFD_LED_DEF); + break; + default: + break; + } + + return err; +} + +static const struct ethtool_ops pcan_usb_fd_ethtool_ops = { + .set_phys_id = pcan_usb_fd_set_phys_id, + .get_ts_info = pcan_get_ts_info, + .get_eeprom_len = peak_usb_get_eeprom_len, + .get_eeprom = peak_usb_get_eeprom, + .set_eeprom = peak_usb_set_eeprom, +}; + /* describes the PCAN-USB FD adapter */ static const struct can_bittiming_const pcan_usb_fd_const = { .name = "pcan_usb_fd", @@ -1018,7 +1161,8 @@ const struct peak_usb_adapter pcan_usb_fd = { .device_id = PCAN_USBFD_PRODUCT_ID, .ctrl_count = PCAN_USBFD_CHANNEL_COUNT, .ctrlmode_supported = CAN_CTRLMODE_FD | - CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY, + CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY | + CAN_CTRLMODE_ONE_SHOT | CAN_CTRLMODE_CC_LEN8_DLC, .clock = { .freq = PCAN_UFD_CRYSTAL_HZ, }, @@ -1028,9 +1172,10 @@ const struct peak_usb_adapter pcan_usb_fd = { /* size of device private data */ .sizeof_dev_private = sizeof(struct pcan_usb_fd_device), + .ethtool_ops = &pcan_usb_fd_ethtool_ops, + /* timestamps usage */ .ts_used_bits = 32, - .ts_period = 1000000, /* calibration period in ts. */ .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */ .us_per_ts_shift = 0, @@ -1043,7 +1188,7 @@ const struct peak_usb_adapter pcan_usb_fd = { .tx_buffer_size = PCAN_UFD_TX_BUFFER_SIZE, /* device callbacks */ - .intf_probe = pcan_usb_pro_probe, /* same as PCAN-USB Pro */ + .intf_probe = pcan_usb_fd_probe, .dev_init = pcan_usb_fd_init, .dev_exit = pcan_usb_fd_exit, @@ -1051,6 +1196,8 @@ const struct peak_usb_adapter pcan_usb_fd = { .dev_set_bus = pcan_usb_fd_set_bus, .dev_set_bittiming = pcan_usb_fd_set_bittiming_slow, .dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast, + .dev_get_can_channel_id = pcan_usb_fd_get_can_channel_id, + .dev_set_can_channel_id = pcan_usb_fd_set_can_channel_id, .dev_decode_buf = pcan_usb_fd_decode_buf, .dev_start = pcan_usb_fd_start, .dev_stop = pcan_usb_fd_stop, @@ -1090,7 +1237,8 @@ const struct peak_usb_adapter pcan_usb_chip = { .device_id = PCAN_USBCHIP_PRODUCT_ID, .ctrl_count = PCAN_USBFD_CHANNEL_COUNT, .ctrlmode_supported = CAN_CTRLMODE_FD | - CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY, + CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY | + CAN_CTRLMODE_ONE_SHOT | CAN_CTRLMODE_CC_LEN8_DLC, .clock = { .freq = PCAN_UFD_CRYSTAL_HZ, }, @@ -1100,9 +1248,10 @@ const struct peak_usb_adapter pcan_usb_chip = { /* size of device private data */ .sizeof_dev_private = sizeof(struct pcan_usb_fd_device), + .ethtool_ops = &pcan_usb_fd_ethtool_ops, + /* timestamps usage */ .ts_used_bits = 32, - .ts_period = 1000000, /* calibration period in ts. 
*/ .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */ .us_per_ts_shift = 0, @@ -1123,6 +1272,8 @@ const struct peak_usb_adapter pcan_usb_chip = { .dev_set_bus = pcan_usb_fd_set_bus, .dev_set_bittiming = pcan_usb_fd_set_bittiming_slow, .dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast, + .dev_get_can_channel_id = pcan_usb_fd_get_can_channel_id, + .dev_set_can_channel_id = pcan_usb_fd_set_can_channel_id, .dev_decode_buf = pcan_usb_fd_decode_buf, .dev_start = pcan_usb_fd_start, .dev_stop = pcan_usb_fd_stop, @@ -1162,7 +1313,8 @@ const struct peak_usb_adapter pcan_usb_pro_fd = { .device_id = PCAN_USBPROFD_PRODUCT_ID, .ctrl_count = PCAN_USBPROFD_CHANNEL_COUNT, .ctrlmode_supported = CAN_CTRLMODE_FD | - CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY, + CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY | + CAN_CTRLMODE_ONE_SHOT | CAN_CTRLMODE_CC_LEN8_DLC, .clock = { .freq = PCAN_UFD_CRYSTAL_HZ, }, @@ -1172,9 +1324,10 @@ const struct peak_usb_adapter pcan_usb_pro_fd = { /* size of device private data */ .sizeof_dev_private = sizeof(struct pcan_usb_fd_device), + .ethtool_ops = &pcan_usb_fd_ethtool_ops, + /* timestamps usage */ .ts_used_bits = 32, - .ts_period = 1000000, /* calibration period in ts. */ .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */ .us_per_ts_shift = 0, @@ -1195,6 +1348,8 @@ const struct peak_usb_adapter pcan_usb_pro_fd = { .dev_set_bus = pcan_usb_fd_set_bus, .dev_set_bittiming = pcan_usb_fd_set_bittiming_slow, .dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast, + .dev_get_can_channel_id = pcan_usb_fd_get_can_channel_id, + .dev_set_can_channel_id = pcan_usb_fd_set_can_channel_id, .dev_decode_buf = pcan_usb_fd_decode_buf, .dev_start = pcan_usb_fd_start, .dev_stop = pcan_usb_fd_stop, @@ -1234,7 +1389,8 @@ const struct peak_usb_adapter pcan_usb_x6 = { .device_id = PCAN_USBX6_PRODUCT_ID, .ctrl_count = PCAN_USBPROFD_CHANNEL_COUNT, .ctrlmode_supported = CAN_CTRLMODE_FD | - CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY, + CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY | + CAN_CTRLMODE_ONE_SHOT | CAN_CTRLMODE_CC_LEN8_DLC, .clock = { .freq = PCAN_UFD_CRYSTAL_HZ, }, @@ -1244,9 +1400,10 @@ const struct peak_usb_adapter pcan_usb_x6 = { /* size of device private data */ .sizeof_dev_private = sizeof(struct pcan_usb_fd_device), + .ethtool_ops = &pcan_usb_fd_ethtool_ops, + /* timestamps usage */ .ts_used_bits = 32, - .ts_period = 1000000, /* calibration period in ts. 
*/ .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */ .us_per_ts_shift = 0, @@ -1267,6 +1424,8 @@ const struct peak_usb_adapter pcan_usb_x6 = { .dev_set_bus = pcan_usb_fd_set_bus, .dev_set_bittiming = pcan_usb_fd_set_bittiming_slow, .dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast, + .dev_get_can_channel_id = pcan_usb_fd_get_can_channel_id, + .dev_set_can_channel_id = pcan_usb_fd_set_can_channel_id, .dev_decode_buf = pcan_usb_fd_decode_buf, .dev_start = pcan_usb_fd_start, .dev_stop = pcan_usb_fd_stop, diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c index d516def846ab..7be286293b1a 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c @@ -1,22 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * CAN driver for PEAK System PCAN-USB Pro adapter * Derived from the PCAN project file driver/src/pcan_usbpro.c * - * Copyright (C) 2003-2011 PEAK System-Technik GmbH - * Copyright (C) 2011-2012 Stephane Grosjean <s.grosjean@peak-system.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published - * by the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. + * Copyright (C) 2003-2025 PEAK System-Technik GmbH + * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com> */ +#include <linux/ethtool.h> +#include <linux/module.h> #include <linux/netdevice.h> #include <linux/usb.h> -#include <linux/module.h> #include <linux/can.h> #include <linux/can/dev.h> @@ -25,8 +18,6 @@ #include "pcan_usb_core.h" #include "pcan_usb_pro.h" -MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB Pro adapter"); - #define PCAN_USBPRO_CHANNEL_COUNT 2 /* PCAN-USB Pro adapter internal clock (MHz) */ @@ -46,6 +37,7 @@ MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB Pro adapter"); #define PCAN_USBPRO_RTR 0x01 #define PCAN_USBPRO_EXT 0x02 +#define PCAN_USBPRO_SS 0x08 #define PCAN_USBPRO_CMD_BUFFER_SIZE 512 @@ -84,6 +76,7 @@ static u16 pcan_usb_pro_sizeof_rec[256] = { [PCAN_USBPRO_SETFILTR] = sizeof(struct pcan_usb_pro_filter), [PCAN_USBPRO_SETTS] = sizeof(struct pcan_usb_pro_setts), [PCAN_USBPRO_GETDEVID] = sizeof(struct pcan_usb_pro_devid), + [PCAN_USBPRO_SETDEVID] = sizeof(struct pcan_usb_pro_devid), [PCAN_USBPRO_SETLED] = sizeof(struct pcan_usb_pro_setled), [PCAN_USBPRO_RXMSG8] = sizeof(struct pcan_usb_pro_rxmsg), [PCAN_USBPRO_RXMSG4] = sizeof(struct pcan_usb_pro_rxmsg) - 4, @@ -127,7 +120,7 @@ static u8 *pcan_msg_init_empty(struct pcan_usb_pro_msg *pm, /* * add one record to a message being built */ -static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, u8 id, ...) +static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, int id, ...) { int len, i; u8 *pc; @@ -141,10 +134,10 @@ static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, u8 id, ...) switch (id) { case PCAN_USBPRO_TXMSG8: i += 4; - /* fall through */ + fallthrough; case PCAN_USBPRO_TXMSG4: i += 4; - /* fall through */ + fallthrough; case PCAN_USBPRO_TXMSG0: *pc++ = va_arg(ap, int); *pc++ = va_arg(ap, int); @@ -157,6 +150,7 @@ static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, u8 id, ...) 
case PCAN_USBPRO_SETBTR: case PCAN_USBPRO_GETDEVID: + case PCAN_USBPRO_SETDEVID: *pc++ = va_arg(ap, int); pc += 2; *(__le32 *)pc = cpu_to_le32(va_arg(ap, u32)); @@ -194,7 +188,7 @@ static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, u8 id, ...) len = pc - pm->rec_ptr; if (len > 0) { - *pm->u.rec_cnt = cpu_to_le32(le32_to_cpu(*pm->u.rec_cnt) + 1); + le32_add_cpu(pm->u.rec_cnt, 1); *pm->rec_ptr = id; pm->rec_ptr = pc; @@ -298,7 +292,7 @@ static int pcan_usb_pro_wait_rsp(struct peak_usb_device *dev, pr->data_type); /* check if channel in response corresponds too */ - else if ((req_channel != 0xff) && \ + else if ((req_channel != 0xff) && (pr->bus_act.channel != req_channel)) netdev_err(dev->netdev, "got rsp %xh but on chan%u: ignored\n", @@ -427,8 +421,8 @@ static int pcan_usb_pro_set_led(struct peak_usb_device *dev, u8 mode, return pcan_usb_pro_send_cmd(dev, &um); } -static int pcan_usb_pro_get_device_id(struct peak_usb_device *dev, - u32 *device_id) +static int pcan_usb_pro_get_can_channel_id(struct peak_usb_device *dev, + u32 *can_ch_id) { struct pcan_usb_pro_devid *pdn; struct pcan_usb_pro_msg um; @@ -447,12 +441,23 @@ static int pcan_usb_pro_get_device_id(struct peak_usb_device *dev, return err; pdn = (struct pcan_usb_pro_devid *)pc; - if (device_id) - *device_id = le32_to_cpu(pdn->serial_num); + *can_ch_id = le32_to_cpu(pdn->dev_num); return err; } +static int pcan_usb_pro_set_can_channel_id(struct peak_usb_device *dev, + u32 can_ch_id) +{ + struct pcan_usb_pro_msg um; + + pcan_msg_init_empty(&um, dev->cmd_buf, PCAN_USB_MAX_CMD_LEN); + pcan_msg_add_rec(&um, PCAN_USBPRO_SETDEVID, dev->ctrl_idx, + can_ch_id); + + return pcan_usb_pro_send_cmd(dev, &um); +} + static int pcan_usb_pro_set_bittiming(struct peak_usb_device *dev, struct can_bittiming *bt) { @@ -502,7 +507,7 @@ static int pcan_usb_pro_drv_loaded(struct peak_usb_device *dev, int loaded) u8 *buffer; int err; - buffer = kmalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL); + buffer = kzalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL); if (!buffer) return -ENOMEM; @@ -540,22 +545,24 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if, return -ENOMEM; can_frame->can_id = le32_to_cpu(rx->id); - can_frame->can_dlc = rx->len & 0x0f; + can_frame->len = rx->len & 0x0f; if (rx->flags & PCAN_USBPRO_EXT) can_frame->can_id |= CAN_EFF_FLAG; - if (rx->flags & PCAN_USBPRO_RTR) + if (rx->flags & PCAN_USBPRO_RTR) { can_frame->can_id |= CAN_RTR_FLAG; - else - memcpy(can_frame->data, rx->data, can_frame->can_dlc); + } else { + memcpy(can_frame->data, rx->data, can_frame->len); + + netdev->stats.rx_bytes += can_frame->len; + } + netdev->stats.rx_packets++; hwts = skb_hwtstamps(skb); peak_usb_get_ts_time(&usb_if->time_ref, le32_to_cpu(rx->ts32), &hwts->hwtstamp); - netdev->stats.rx_packets++; - netdev->stats.rx_bytes += can_frame->can_dlc; netif_rx(skb); return 0; @@ -669,8 +676,6 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if, hwts = skb_hwtstamps(skb); peak_usb_get_ts_time(&usb_if->time_ref, le32_to_cpu(er->ts32), &hwts->hwtstamp); - netdev->stats.rx_packets++; - netdev->stats.rx_bytes += can_frame->can_dlc; netif_rx(skb); return 0; @@ -775,20 +780,24 @@ static int pcan_usb_pro_encode_msg(struct peak_usb_device *dev, pcan_msg_init_empty(&usb_msg, obuf, *size); - if ((cf->can_id & CAN_RTR_FLAG) || (cf->can_dlc == 0)) + if ((cf->can_id & CAN_RTR_FLAG) || (cf->len == 0)) data_type = PCAN_USBPRO_TXMSG0; - else if (cf->can_dlc <= 4) + else if (cf->len <= 4) data_type = PCAN_USBPRO_TXMSG4; else 
data_type = PCAN_USBPRO_TXMSG8; - len = (dev->ctrl_idx << 4) | (cf->can_dlc & 0x0f); + len = (dev->ctrl_idx << 4) | (cf->len & 0x0f); flags = 0; if (cf->can_id & CAN_EFF_FLAG) - flags |= 0x02; + flags |= PCAN_USBPRO_EXT; if (cf->can_id & CAN_RTR_FLAG) - flags |= 0x01; + flags |= PCAN_USBPRO_RTR; + + /* Single-Shot frame */ + if (dev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) + flags |= PCAN_USBPRO_SS; pcan_msg_add_rec(&usb_msg, data_type, 0, flags, len, cf->can_id, cf->data); @@ -916,7 +925,7 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev) usb_if->dev[dev->ctrl_idx] = dev; /* set LED in default state (end of init phase) */ - pcan_usb_pro_set_led(dev, 0, 1); + pcan_usb_pro_set_led(dev, PCAN_USBPRO_LED_DEVICE, 1); kfree(bi); kfree(fi); @@ -981,7 +990,7 @@ int pcan_usb_pro_probe(struct usb_interface *intf) struct usb_endpoint_descriptor *ep = &if_desc->endpoint[i].desc; /* - * below is the list of valid ep addreses. Any other ep address + * below is the list of valid ep addresses. Any other ep address * is considered as not-CAN interface address => no dev created */ switch (ep->bEndpointAddress) { @@ -1000,6 +1009,39 @@ int pcan_usb_pro_probe(struct usb_interface *intf) return 0; } +static int pcan_usb_pro_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct peak_usb_device *dev = netdev_priv(netdev); + int err = 0; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + /* fast blinking forever */ + err = pcan_usb_pro_set_led(dev, PCAN_USBPRO_LED_BLINK_FAST, + 0xffffffff); + break; + + case ETHTOOL_ID_INACTIVE: + /* restore LED default */ + err = pcan_usb_pro_set_led(dev, PCAN_USBPRO_LED_DEVICE, 1); + break; + + default: + break; + } + + return err; +} + +static const struct ethtool_ops pcan_usb_pro_ethtool_ops = { + .set_phys_id = pcan_usb_pro_set_phys_id, + .get_ts_info = pcan_get_ts_info, + .get_eeprom_len = peak_usb_get_eeprom_len, + .get_eeprom = peak_usb_get_eeprom, + .set_eeprom = peak_usb_set_eeprom, +}; + /* * describe the PCAN-USB Pro adapter */ @@ -1019,7 +1061,8 @@ const struct peak_usb_adapter pcan_usb_pro = { .name = "PCAN-USB Pro", .device_id = PCAN_USBPRO_PRODUCT_ID, .ctrl_count = PCAN_USBPRO_CHANNEL_COUNT, - .ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY, + .ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY | + CAN_CTRLMODE_ONE_SHOT, .clock = { .freq = PCAN_USBPRO_CRYSTAL_HZ, }, @@ -1028,9 +1071,10 @@ const struct peak_usb_adapter pcan_usb_pro = { /* size of device private data */ .sizeof_dev_private = sizeof(struct pcan_usb_pro_device), + .ethtool_ops = &pcan_usb_pro_ethtool_ops, + /* timestamps usage */ .ts_used_bits = 32, - .ts_period = 1000000, /* calibration period in ts. 
*/ .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */ .us_per_ts_shift = 0, @@ -1049,7 +1093,8 @@ const struct peak_usb_adapter pcan_usb_pro = { .dev_free = pcan_usb_pro_free, .dev_set_bus = pcan_usb_pro_set_bus, .dev_set_bittiming = pcan_usb_pro_set_bittiming, - .dev_get_device_id = pcan_usb_pro_get_device_id, + .dev_get_can_channel_id = pcan_usb_pro_get_can_channel_id, + .dev_set_can_channel_id = pcan_usb_pro_set_can_channel_id, .dev_decode_buf = pcan_usb_pro_decode_buf, .dev_encode_msg = pcan_usb_pro_encode_msg, .dev_start = pcan_usb_pro_start, diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h index a62f7ab8980f..162c7546d3a8 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h @@ -1,18 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * CAN driver for PEAK System PCAN-USB Pro adapter * Derived from the PCAN project file driver/src/pcan_usbpro_fw.h * - * Copyright (C) 2003-2011 PEAK System-Technik GmbH - * Copyright (C) 2011-2012 Stephane Grosjean <s.grosjean@peak-system.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published - * by the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. + * Copyright (C) 2003-2025 PEAK System-Technik GmbH + * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com> */ #ifndef PCAN_USB_PRO_H #define PCAN_USB_PRO_H @@ -42,11 +34,11 @@ /* PCAN_USBPRO_INFO_BL vendor request record type */ struct __packed pcan_usb_pro_blinfo { __le32 ctrl_type; - u8 version[4]; - u8 day; - u8 month; - u8 year; - u8 dummy; + u8 version[4]; + u8 day; + u8 month; + u8 year; + u8 dummy; __le32 serial_num_hi; __le32 serial_num_lo; __le32 hw_type; @@ -56,11 +48,11 @@ struct __packed pcan_usb_pro_blinfo { /* PCAN_USBPRO_INFO_FW vendor request record type */ struct __packed pcan_usb_pro_fwinfo { __le32 ctrl_type; - u8 version[4]; - u8 day; - u8 month; - u8 year; - u8 dummy; + u8 version[4]; + u8 day; + u8 month; + u8 year; + u8 dummy; __le32 fw_type; }; @@ -70,6 +62,7 @@ struct __packed pcan_usb_pro_fwinfo { #define PCAN_USBPRO_SETBTR 0x02 #define PCAN_USBPRO_SETBUSACT 0x04 #define PCAN_USBPRO_SETSILENT 0x05 +#define PCAN_USBPRO_SETDEVID 0x06 #define PCAN_USBPRO_SETFILTR 0x0a #define PCAN_USBPRO_SETTS 0x10 #define PCAN_USBPRO_GETDEVID 0x12 @@ -86,59 +79,65 @@ struct __packed pcan_usb_pro_fwinfo { /* record structures */ struct __packed pcan_usb_pro_btr { - u8 data_type; - u8 channel; + u8 data_type; + u8 channel; __le16 dummy; __le32 CCBT; }; struct __packed pcan_usb_pro_busact { - u8 data_type; - u8 channel; + u8 data_type; + u8 channel; __le16 onoff; }; struct __packed pcan_usb_pro_silent { - u8 data_type; - u8 channel; + u8 data_type; + u8 channel; __le16 onoff; }; struct __packed pcan_usb_pro_filter { - u8 data_type; - u8 dummy; + u8 data_type; + u8 dummy; __le16 filter_mode; }; struct __packed pcan_usb_pro_setts { - u8 data_type; - u8 dummy; + u8 data_type; + u8 dummy; __le16 mode; }; struct __packed pcan_usb_pro_devid { - u8 data_type; - u8 channel; + u8 data_type; + u8 channel; __le16 dummy; - __le32 serial_num; + __le32 dev_num; }; +#define PCAN_USBPRO_LED_DEVICE 0x00 +#define PCAN_USBPRO_LED_BLINK_FAST 0x01 
+#define PCAN_USBPRO_LED_BLINK_SLOW 0x02 +#define PCAN_USBPRO_LED_ON 0x03 +#define PCAN_USBPRO_LED_OFF 0x04 + struct __packed pcan_usb_pro_setled { - u8 data_type; - u8 channel; + u8 data_type; + u8 channel; __le16 mode; __le32 timeout; }; struct __packed pcan_usb_pro_rxmsg { - u8 data_type; - u8 client; - u8 flags; - u8 len; + u8 data_type; + u8 client; + u8 flags; + u8 len; __le32 ts32; __le32 id; - u8 data[8]; + u8 data[8]; }; #define PCAN_USBPRO_STATUS_ERROR 0x0001 @@ -147,26 +146,26 @@ struct __packed pcan_usb_pro_rxmsg { #define PCAN_USBPRO_STATUS_QOVERRUN 0x0008 struct __packed pcan_usb_pro_rxstatus { - u8 data_type; - u8 channel; + u8 data_type; + u8 channel; __le16 status; __le32 ts32; __le32 err_frm; }; struct __packed pcan_usb_pro_rxts { - u8 data_type; - u8 dummy[3]; + u8 data_type; + u8 dummy[3]; __le32 ts64[2]; }; struct __packed pcan_usb_pro_txmsg { - u8 data_type; - u8 client; - u8 flags; - u8 len; + u8 data_type; + u8 client; + u8 flags; + u8 len; __le32 id; - u8 data[8]; + u8 data[8]; }; union pcan_usb_pro_rec { diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c index 04aac3bb54ef..de61d9da99e3 100644 --- a/drivers/net/can/usb/ucan.c +++ b/drivers/net/can/usb/ucan.c @@ -28,6 +28,7 @@ #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> +#include <linux/ethtool.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/signal.h> @@ -185,7 +186,7 @@ union ucan_ctl_payload { */ struct ucan_ctl_cmd_get_protocol_version cmd_get_protocol_version; - u8 raw[128]; + u8 fw_str[128]; } __packed; enum { @@ -244,9 +245,10 @@ struct ucan_message_in { /* CAN transmission complete * (type == UCAN_IN_TX_COMPLETE) */ - struct ucan_tx_complete_entry_t can_tx_complete_msg[0]; + DECLARE_FLEX_ARRAY(struct ucan_tx_complete_entry_t, + can_tx_complete_msg); } __aligned(0x4) msg; -} __packed; +} __packed __aligned(0x4); /* Macros to calculate message lengths */ #define UCAN_OUT_HDR_SIZE offsetof(struct ucan_message_out, msg) @@ -259,7 +261,6 @@ struct ucan_priv; /* Context Information for transmission URBs */ struct ucan_urb_context { struct ucan_priv *up; - u8 dlc; bool allocated; }; @@ -276,7 +277,6 @@ struct ucan_priv { /* linux USB device structures */ struct usb_device *udev; - struct usb_interface *intf; struct net_device *netdev; /* lock for can->echo_skb (used around @@ -284,7 +284,7 @@ struct ucan_priv { */ spinlock_t echo_skb_lock; - /* usb device information information */ + /* usb device information */ u8 intf_index; u8 in_ep_addr; u8 out_ep_addr; @@ -303,12 +303,12 @@ struct ucan_priv { struct ucan_urb_context *context_array; }; -static u8 ucan_get_can_dlc(struct ucan_can_msg *msg, u16 len) +static u8 ucan_can_cc_dlc2len(struct ucan_can_msg *msg, u16 len) { if (le32_to_cpu(msg->id) & CAN_RTR_FLAG) - return get_can_dlc(msg->dlc); + return can_cc_dlc2len(msg->dlc); else - return get_can_dlc(len - (UCAN_IN_HDR_SIZE + sizeof(msg->id))); + return can_cc_dlc2len(len - (UCAN_IN_HDR_SIZE + sizeof(msg->id))); } static void ucan_release_context_array(struct ucan_priv *up) @@ -424,18 +424,20 @@ static int ucan_ctrl_command_out(struct ucan_priv *up, UCAN_USB_CTL_PIPE_TIMEOUT); } -static int ucan_device_request_in(struct ucan_priv *up, - u8 cmd, u16 subcmd, u16 datalen) +static void ucan_get_fw_str(struct ucan_priv *up, char *fw_str, size_t size) { - return usb_control_msg(up->udev, - usb_rcvctrlpipe(up->udev, 0), - cmd, - USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - subcmd, - 0, - up->ctl_msg_buffer, - datalen, - 
UCAN_USB_CTL_PIPE_TIMEOUT); + int ret; + + ret = usb_control_msg(up->udev, usb_rcvctrlpipe(up->udev, 0), + UCAN_DEVICE_GET_FW_STRING, + USB_DIR_IN | USB_TYPE_VENDOR | + USB_RECIP_DEVICE, + 0, 0, fw_str, size - 1, + UCAN_USB_CTL_PIPE_TIMEOUT); + if (ret > 0) + fw_str[ret] = '\0'; + else + strscpy(fw_str, "unknown", size); } /* Parse the device information structure reported by the device and @@ -614,15 +616,18 @@ static void ucan_rx_can_msg(struct ucan_priv *up, struct ucan_message_in *m) cf->can_id = canid; /* compute DLC taking RTR_FLAG into account */ - cf->can_dlc = ucan_get_can_dlc(&m->msg.can_msg, len); + cf->len = ucan_can_cc_dlc2len(&m->msg.can_msg, len); /* copy the payload of non RTR frames */ if (!(cf->can_id & CAN_RTR_FLAG) || (cf->can_id & CAN_ERR_FLAG)) - memcpy(cf->data, m->msg.can_msg.data, cf->can_dlc); + memcpy(cf->data, m->msg.can_msg.data, cf->len); /* don't count error frames as real packets */ - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + if (!(cf->can_id & CAN_ERR_FLAG)) { + stats->rx_packets++; + if (!(cf->can_id & CAN_RTR_FLAG)) + stats->rx_bytes += cf->len; + } /* pass it to Linux */ netif_rx(skb); @@ -634,7 +639,7 @@ static void ucan_tx_complete_msg(struct ucan_priv *up, { unsigned long flags; u16 count, i; - u8 echo_index, dlc; + u8 echo_index; u16 len = le16_to_cpu(m->len); struct ucan_urb_context *context; @@ -658,7 +663,6 @@ static void ucan_tx_complete_msg(struct ucan_priv *up, /* gather information from the context */ context = &up->context_array[echo_index]; - dlc = READ_ONCE(context->dlc); /* Release context and restart queue if necessary. * Also check if the context was allocated @@ -671,11 +675,11 @@ static void ucan_tx_complete_msg(struct ucan_priv *up, UCAN_TX_COMPLETE_SUCCESS) { /* update statistics */ up->netdev->stats.tx_packets++; - up->netdev->stats.tx_bytes += dlc; - can_get_echo_skb(up->netdev, echo_index); + up->netdev->stats.tx_bytes += + can_get_echo_skb(up->netdev, echo_index, NULL); } else { up->netdev->stats.tx_dropped++; - can_free_echo_skb(up->netdev, echo_index); + can_free_echo_skb(up->netdev, echo_index, NULL); } spin_unlock_irqrestore(&up->echo_skb_lock, flags); } @@ -792,7 +796,7 @@ resubmit: up); usb_anchor_urb(urb, &up->rx_urbs); - ret = usb_submit_urb(urb, GFP_KERNEL); + ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret < 0) { netdev_err(up->netdev, @@ -843,7 +847,7 @@ static void ucan_write_bulk_callback(struct urb *urb) /* update counters an cleanup */ spin_lock_irqsave(&up->echo_skb_lock, flags); - can_free_echo_skb(up->netdev, context - up->context_array); + can_free_echo_skb(up->netdev, context - up->context_array, NULL); spin_unlock_irqrestore(&up->echo_skb_lock, flags); up->netdev->stats.tx_dropped++; @@ -1078,16 +1082,14 @@ static struct urb *ucan_prepare_tx_urb(struct ucan_priv *up, mlen = UCAN_OUT_HDR_SIZE + offsetof(struct ucan_can_msg, dlc) + sizeof(m->msg.can_msg.dlc); - m->msg.can_msg.dlc = cf->can_dlc; + m->msg.can_msg.dlc = cf->len; } else { mlen = UCAN_OUT_HDR_SIZE + - sizeof(m->msg.can_msg.id) + cf->can_dlc; - memcpy(m->msg.can_msg.data, cf->data, cf->can_dlc); + sizeof(m->msg.can_msg.id) + cf->len; + memcpy(m->msg.can_msg.data, cf->data, cf->len); } m->len = cpu_to_le16(mlen); - context->dlc = cf->can_dlc; - m->subtype = echo_index; /* build the urb */ @@ -1120,7 +1122,7 @@ static netdev_tx_t ucan_start_xmit(struct sk_buff *skb, struct can_frame *cf = (struct can_frame *)skb->data; /* check skb */ - if (can_dropped_invalid_skb(netdev, skb)) + if (can_dev_dropped_skb(netdev, skb)) return NETDEV_TX_OK; 
/* allocate a context and slow down tx path, if fifo state is low */ @@ -1137,7 +1139,7 @@ static netdev_tx_t ucan_start_xmit(struct sk_buff *skb, /* put the skb on can loopback stack */ spin_lock_irqsave(&up->echo_skb_lock, flags); - can_put_echo_skb(skb, up->netdev, echo_index); + can_put_echo_skb(skb, up->netdev, echo_index, 0); spin_unlock_irqrestore(&up->echo_skb_lock, flags); /* transmit it */ @@ -1157,7 +1159,7 @@ static netdev_tx_t ucan_start_xmit(struct sk_buff *skb, * frees the skb */ spin_lock_irqsave(&up->echo_skb_lock, flags); - can_free_echo_skb(up->netdev, echo_index); + can_free_echo_skb(up->netdev, echo_index, NULL); spin_unlock_irqrestore(&up->echo_skb_lock, flags); if (ret == -ENODEV) { @@ -1231,7 +1233,10 @@ static const struct net_device_ops ucan_netdev_ops = { .ndo_open = ucan_open, .ndo_stop = ucan_close, .ndo_start_xmit = ucan_start_xmit, - .ndo_change_mtu = can_change_mtu, +}; + +static const struct ethtool_ops ucan_ethtool_ops = { + .get_ts_info = ethtool_op_get_ts_info, }; /* Request to set bittiming @@ -1310,7 +1315,6 @@ static int ucan_probe(struct usb_interface *intf, u8 in_ep_addr; u8 out_ep_addr; union ucan_ctl_payload *ctl_msg_buffer; - char firmware_str[sizeof(union ucan_ctl_payload) + 1]; udev = interface_to_usbdev(intf); @@ -1393,7 +1397,7 @@ static int ucan_probe(struct usb_interface *intf, * Stage 3 for the final driver initialisation. */ - /* Prepare Memory for control transferes */ + /* Prepare Memory for control transfers */ ctl_msg_buffer = devm_kzalloc(&udev->dev, sizeof(union ucan_ctl_payload), GFP_KERNEL); @@ -1445,7 +1449,7 @@ static int ucan_probe(struct usb_interface *intf, /* request the device information and store it in ctl_msg_buffer * - * note: ucan_ctrl_command_* wrappers connot be used yet + * note: ucan_ctrl_command_* wrappers cannot be used yet * because `up` is initialised in Stage 3 */ ret = usb_control_msg(udev, @@ -1494,9 +1498,8 @@ static int ucan_probe(struct usb_interface *intf, up = netdev_priv(netdev); - /* initialze data */ + /* initialize data */ up->udev = udev; - up->intf = intf; up->netdev = netdev; up->intf_index = iface_desc->desc.bInterfaceNumber; up->in_ep_addr = in_ep_addr; @@ -1513,6 +1516,7 @@ static int ucan_probe(struct usb_interface *intf, spin_lock_init(&up->context_lock); spin_lock_init(&up->echo_skb_lock); netdev->netdev_ops = &ucan_netdev_ops; + netdev->ethtool_ops = &ucan_ethtool_ops; usb_set_intfdata(intf, up); SET_NETDEV_DEV(netdev, &intf->dev); @@ -1523,18 +1527,6 @@ static int ucan_probe(struct usb_interface *intf, */ ucan_parse_device_info(up, &ctl_msg_buffer->cmd_get_device_info); - /* just print some device information - if available */ - ret = ucan_device_request_in(up, UCAN_DEVICE_GET_FW_STRING, 0, - sizeof(union ucan_ctl_payload)); - if (ret > 0) { - /* copy string while ensuring zero terminiation */ - strncpy(firmware_str, up->ctl_msg_buffer->raw, - sizeof(union ucan_ctl_payload)); - firmware_str[sizeof(union ucan_ctl_payload)] = '\0'; - } else { - strcpy(firmware_str, "unknown"); - } - /* device is compatible, reset it */ ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0); if (ret < 0) @@ -1552,7 +1544,10 @@ static int ucan_probe(struct usb_interface *intf, /* initialisation complete, log device info */ netdev_info(up->netdev, "registered device\n"); - netdev_info(up->netdev, "firmware string: %s\n", firmware_str); + ucan_get_fw_str(up, up->ctl_msg_buffer->fw_str, + sizeof(up->ctl_msg_buffer->fw_str)); + netdev_info(up->netdev, "firmware string: %s\n", + up->ctl_msg_buffer->fw_str); /* 
success */ return 0; @@ -1576,7 +1571,7 @@ static void ucan_disconnect(struct usb_interface *intf) usb_set_intfdata(intf, NULL); if (up) { - unregister_netdev(up->netdev); + unregister_candev(up->netdev); free_candev(up->netdev); } } diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c index 27861c417c94..7449328f7cd7 100644 --- a/drivers/net/can/usb/usb_8dev.c +++ b/drivers/net/can/usb/usb_8dev.c @@ -1,20 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * CAN driver for "8 devices" USB2CAN converter * * Copyright (C) 2012 Bernd Krumboeck (krumboeck@universalnet.at) * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published - * by the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. - * * This driver is inspired by the 3.2.0 version of drivers/net/can/usb/ems_usb.c * and drivers/net/can/usb/esd_usb2.c * @@ -23,6 +12,7 @@ * who were very cooperative and answered my questions. */ +#include <linux/ethtool.h> #include <linux/signal.h> #include <linux/slab.h> #include <linux/module.h> @@ -32,7 +22,6 @@ #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> -#include <linux/can/led.h> /* driver constants */ #define MAX_RX_URBS 20 @@ -99,7 +88,7 @@ enum usb_8dev_cmd { /* status */ #define USB_8DEV_STATUSMSG_OK 0x00 /* Normal condition. */ -#define USB_8DEV_STATUSMSG_OVERRUN 0x01 /* Overrun occured when sending */ +#define USB_8DEV_STATUSMSG_OVERRUN 0x01 /* Overrun occurred when sending */ #define USB_8DEV_STATUSMSG_BUSLIGHT 0x02 /* Error counter has reached 96 */ #define USB_8DEV_STATUSMSG_BUSHEAVY 0x03 /* Error count. 
has reached 128 */ #define USB_8DEV_STATUSMSG_BUSOFF 0x04 /* Device is in BUSOFF */ @@ -125,15 +114,12 @@ struct usb_8dev_tx_urb_context { struct usb_8dev_priv *priv; u32 echo_index; - u8 dlc; }; /* Structure to hold all of our device specific stuff */ struct usb_8dev_priv { struct can_priv can; /* must be the first member */ - struct sk_buff *echo_skb[MAX_TX_URBS]; - struct usb_device *udev; struct net_device *netdev; @@ -148,7 +134,8 @@ struct usb_8dev_priv { u8 *cmd_msg_buffer; struct mutex usb_8dev_cmd_lock; - + void *rxbuf[MAX_RX_URBS]; + dma_addr_t rxbuf_dma[MAX_RX_URBS]; }; /* tx frame */ @@ -176,7 +163,7 @@ struct __packed usb_8dev_rx_msg { /* command frame */ struct __packed usb_8dev_cmd_msg { u8 begin; - u8 channel; /* unkown - always 0 */ + u8 channel; /* unknown - always 0 */ u8 command; /* command to execute */ u8 opt1; /* optional parameter / return value */ u8 opt2; /* optional parameter 2 */ @@ -452,15 +439,15 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv, if (rx_errors) stats->rx_errors++; - - cf->data[6] = txerr; - cf->data[7] = rxerr; + if (priv->can.state != CAN_STATE_BUS_OFF) { + cf->can_id |= CAN_ERR_CNT; + cf->data[6] = txerr; + cf->data[7] = rxerr; + } priv->bec.txerr = txerr; priv->bec.rxerr = rxerr; - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; netif_rx(skb); } @@ -481,21 +468,20 @@ static void usb_8dev_rx_can_msg(struct usb_8dev_priv *priv, return; cf->can_id = be32_to_cpu(msg->id); - cf->can_dlc = get_can_dlc(msg->dlc & 0xF); + can_frame_set_cc_len(cf, msg->dlc & 0xF, priv->can.ctrlmode); if (msg->flags & USB_8DEV_EXTID) cf->can_id |= CAN_EFF_FLAG; - if (msg->flags & USB_8DEV_RTR) + if (msg->flags & USB_8DEV_RTR) { cf->can_id |= CAN_RTR_FLAG; - else - memcpy(cf->data, msg->data, cf->can_dlc); - + } else { + memcpy(cf->data, msg->data, cf->len); + stats->rx_bytes += cf->len; + } stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; - netif_rx(skb); - can_led_event(priv->netdev, CAN_LED_EVENT_RX); + netif_rx(skb); } else { netdev_warn(priv->netdev, "frame type %d unknown", msg->type); @@ -594,11 +580,7 @@ static void usb_8dev_write_bulk_callback(struct urb *urb) urb->status); netdev->stats.tx_packets++; - netdev->stats.tx_bytes += context->dlc; - - can_get_echo_skb(netdev, context->echo_index); - - can_led_event(netdev, CAN_LED_EVENT_TX); + netdev->stats.tx_bytes += can_get_echo_skb(netdev, context->echo_index, NULL); /* Release context */ context->echo_index = MAX_TX_URBS; @@ -620,7 +602,7 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb, int i, err; size_t size = sizeof(struct usb_8dev_tx_msg); - if (can_dropped_invalid_skb(netdev, skb)) + if (can_dev_dropped_skb(netdev, skb)) return NETDEV_TX_OK; /* create a URB, and a buffer for it, and copy the data to the URB */ @@ -648,8 +630,8 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb, msg->flags |= USB_8DEV_EXTID; msg->id = cpu_to_be32(cf->can_id & CAN_ERR_MASK); - msg->dlc = cf->can_dlc; - memcpy(msg->data, cf->data, cf->can_dlc); + msg->dlc = can_get_cc_dlc(cf, priv->can.ctrlmode); + memcpy(msg->data, cf->data, cf->len); msg->end = USB_8DEV_DATA_END; for (i = 0; i < MAX_TX_URBS; i++) { @@ -667,7 +649,6 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb, context->priv = priv; context->echo_index = i; - context->dlc = cf->can_dlc; usb_fill_bulk_urb(urb, priv->udev, usb_sndbulkpipe(priv->udev, USB_8DEV_ENDP_DATA_TX), @@ -675,14 +656,25 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb, urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; 
usb_anchor_urb(urb, &priv->tx_submitted); - can_put_echo_skb(skb, netdev, context->echo_index); + can_put_echo_skb(skb, netdev, context->echo_index, 0); atomic_inc(&priv->active_tx_urbs); err = usb_submit_urb(urb, GFP_ATOMIC); - if (unlikely(err)) - goto failed; - else if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS) + if (unlikely(err)) { + can_free_echo_skb(netdev, context->echo_index, NULL); + + usb_unanchor_urb(urb); + usb_free_coherent(priv->udev, size, buf, urb->transfer_dma); + + atomic_dec(&priv->active_tx_urbs); + + if (err == -ENODEV) + netif_device_detach(netdev); + else + netdev_warn(netdev, "failed tx_urb %d\n", err); + stats->tx_dropped++; + } else if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS) /* Slow down tx path */ netif_stop_queue(netdev); @@ -701,19 +693,6 @@ nofreecontext: return NETDEV_TX_BUSY; -failed: - can_free_echo_skb(netdev, context->echo_index); - - usb_unanchor_urb(urb); - usb_free_coherent(priv->udev, size, buf, urb->transfer_dma); - - atomic_dec(&priv->active_tx_urbs); - - if (err == -ENODEV) - netif_device_detach(netdev); - else - netdev_warn(netdev, "failed tx_urb %d\n", err); - nomembuf: usb_free_urb(urb); @@ -744,6 +723,7 @@ static int usb_8dev_start(struct usb_8dev_priv *priv) for (i = 0; i < MAX_RX_URBS; i++) { struct urb *urb = NULL; u8 *buf; + dma_addr_t buf_dma; /* create a URB, and a buffer for it */ urb = usb_alloc_urb(0, GFP_KERNEL); @@ -753,7 +733,7 @@ static int usb_8dev_start(struct usb_8dev_priv *priv) } buf = usb_alloc_coherent(priv->udev, RX_BUFFER_SIZE, GFP_KERNEL, - &urb->transfer_dma); + &buf_dma); if (!buf) { netdev_err(netdev, "No memory left for USB buffer\n"); usb_free_urb(urb); @@ -761,6 +741,8 @@ static int usb_8dev_start(struct usb_8dev_priv *priv) break; } + urb->transfer_dma = buf_dma; + usb_fill_bulk_urb(urb, priv->udev, usb_rcvbulkpipe(priv->udev, USB_8DEV_ENDP_DATA_RX), @@ -778,6 +760,9 @@ static int usb_8dev_start(struct usb_8dev_priv *priv) break; } + priv->rxbuf[i] = buf; + priv->rxbuf_dma[i] = buf_dma; + /* Drop reference, USB core will take care of freeing it */ usb_free_urb(urb); } @@ -820,8 +805,6 @@ static int usb_8dev_open(struct net_device *netdev) if (err) return err; - can_led_event(netdev, CAN_LED_EVENT_OPEN); - /* finally start device */ err = usb_8dev_start(priv); if (err) { @@ -847,6 +830,10 @@ static void unlink_all_urbs(struct usb_8dev_priv *priv) usb_kill_anchored_urbs(&priv->rx_submitted); + for (i = 0; i < MAX_RX_URBS; ++i) + usb_free_coherent(priv->udev, RX_BUFFER_SIZE, + priv->rxbuf[i], priv->rxbuf_dma[i]); + usb_kill_anchored_urbs(&priv->tx_submitted); atomic_set(&priv->active_tx_urbs, 0); @@ -874,8 +861,6 @@ static int usb_8dev_close(struct net_device *netdev) close_candev(netdev); - can_led_event(netdev, CAN_LED_EVENT_STOP); - return err; } @@ -883,11 +868,14 @@ static const struct net_device_ops usb_8dev_netdev_ops = { .ndo_open = usb_8dev_open, .ndo_stop = usb_8dev_close, .ndo_start_xmit = usb_8dev_start_xmit, - .ndo_change_mtu = can_change_mtu, +}; + +static const struct ethtool_ops usb_8dev_ethtool_ops = { + .get_ts_info = ethtool_op_get_ts_info, }; static const struct can_bittiming_const usb_8dev_bittiming_const = { - .name = "usb_8dev", + .name = KBUILD_MODNAME, .tseg1_min = 1, .tseg1_max = 16, .tseg2_min = 1, @@ -939,9 +927,11 @@ static int usb_8dev_probe(struct usb_interface *intf, priv->can.do_get_berr_counter = usb_8dev_get_berr_counter; priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | - CAN_CTRLMODE_ONE_SHOT; + CAN_CTRLMODE_ONE_SHOT | + 
CAN_CTRLMODE_CC_LEN8_DLC; netdev->netdev_ops = &usb_8dev_netdev_ops; + netdev->ethtool_ops = &usb_8dev_ethtool_ops; netdev->flags |= IFF_ECHO; /* we support local echo */ @@ -982,8 +972,6 @@ static int usb_8dev_probe(struct usb_interface *intf, (version>>8) & 0xff, version & 0xff); } - devm_can_led_init(netdev); - return 0; cleanup_unregister_candev: @@ -1007,15 +995,14 @@ static void usb_8dev_disconnect(struct usb_interface *intf) netdev_info(priv->netdev, "device disconnected\n"); unregister_netdev(priv->netdev); - free_candev(priv->netdev); - unlink_all_urbs(priv); + free_candev(priv->netdev); } } static struct usb_driver usb_8dev_driver = { - .name = "usb_8dev", + .name = KBUILD_MODNAME, .probe = usb_8dev_probe, .disconnect = usb_8dev_disconnect, .id_table = usb_8dev_table, diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c index d200a5b0651c..fdc662aea279 100644 --- a/drivers/net/can/vcan.c +++ b/drivers/net/can/vcan.c @@ -1,5 +1,4 @@ -/* - * vcan.c - Virtual CAN interface +/* vcan.c - Virtual CAN interface * * Copyright (c) 2002-2017 Volkswagen Group Electronic Research * All rights reserved. @@ -39,12 +38,16 @@ * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/ethtool.h> #include <linux/module.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/if_ether.h> #include <linux/can.h> +#include <linux/can/can-ml.h> #include <linux/can/dev.h> #include <linux/can/skb.h> #include <linux/slab.h> @@ -57,9 +60,7 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>"); MODULE_ALIAS_RTNL_LINK(DRV_NAME); - -/* - * CAN test feature: +/* CAN test feature: * Enable the echo on driver level for testing the CAN core echo modes. * See Documentation/networking/can.rst for details. */ @@ -68,47 +69,46 @@ static bool echo; /* echo testing. Default: 0 (Off) */ module_param(echo, bool, 0444); MODULE_PARM_DESC(echo, "Echo sent frames (for testing). 
Default: 0 (Off)"); - static void vcan_rx(struct sk_buff *skb, struct net_device *dev) { - struct canfd_frame *cfd = (struct canfd_frame *)skb->data; struct net_device_stats *stats = &dev->stats; stats->rx_packets++; - stats->rx_bytes += cfd->len; + stats->rx_bytes += can_skb_get_data_len(skb); skb->pkt_type = PACKET_BROADCAST; skb->dev = dev; skb->ip_summed = CHECKSUM_UNNECESSARY; - netif_rx_ni(skb); + netif_rx(skb); } static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev) { - struct canfd_frame *cfd = (struct canfd_frame *)skb->data; struct net_device_stats *stats = &dev->stats; + unsigned int len; int loop; if (can_dropped_invalid_skb(dev, skb)) return NETDEV_TX_OK; + len = can_skb_get_data_len(skb); stats->tx_packets++; - stats->tx_bytes += cfd->len; + stats->tx_bytes += len; /* set flag whether this packet has to be looped back */ loop = skb->pkt_type == PACKET_LOOPBACK; + skb_tx_timestamp(skb); + if (!echo) { /* no echo handling available inside this driver */ - if (loop) { - /* - * only count the packets here, because the + /* only count the packets here, because the * CAN core already did the echo for us */ stats->rx_packets++; - stats->rx_bytes += cfd->len; + stats->rx_bytes += len; } consume_skb(skb); return NETDEV_TX_OK; @@ -117,7 +117,6 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev) /* perform standard echo handling for CAN network interfaces */ if (loop) { - skb = can_create_echo_skb(skb); if (!skb) return NETDEV_TX_OK; @@ -137,10 +136,11 @@ static int vcan_change_mtu(struct net_device *dev, int new_mtu) if (dev->flags & IFF_UP) return -EBUSY; - if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU) + if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU && + !can_is_canxl_dev_mtu(new_mtu)) return -EINVAL; - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); return 0; } @@ -149,34 +149,41 @@ static const struct net_device_ops vcan_netdev_ops = { .ndo_change_mtu = vcan_change_mtu, }; +static const struct ethtool_ops vcan_ethtool_ops = { + .get_ts_info = ethtool_op_get_ts_info, +}; + static void vcan_setup(struct net_device *dev) { dev->type = ARPHRD_CAN; - dev->mtu = CANFD_MTU; + dev->mtu = CANXL_MTU; dev->hard_header_len = 0; dev->addr_len = 0; dev->tx_queue_len = 0; dev->flags = IFF_NOARP; + can_set_ml_priv(dev, netdev_priv(dev)); /* set flags according to driver capabilities */ if (echo) dev->flags |= IFF_ECHO; dev->netdev_ops = &vcan_netdev_ops; + dev->ethtool_ops = &vcan_ethtool_ops; dev->needs_free_netdev = true; } static struct rtnl_link_ops vcan_link_ops __read_mostly = { - .kind = DRV_NAME, - .setup = vcan_setup, + .kind = DRV_NAME, + .priv_size = sizeof(struct can_ml_priv), + .setup = vcan_setup, }; static __init int vcan_init_module(void) { - pr_info("vcan: Virtual CAN interface driver\n"); + pr_info("Virtual CAN interface driver\n"); if (echo) - printk(KERN_INFO "vcan: enabled echo on driver level.\n"); + pr_info("enabled echo on driver level.\n"); return rtnl_link_register(&vcan_link_ops); } diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c index 80af658e530d..b2c19f8c5f8e 100644 --- a/drivers/net/can/vxcan.c +++ b/drivers/net/can/vxcan.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * vxcan.c - Virtual CAN Tunnel for cross namespace communication * @@ -6,20 +7,9 @@ * for network interface pairs in a common and established way. 
* * Copyright (c) 2017 Oliver Hartkopp <socketcan@hartkopp.net> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the version 2 of the GNU General Public License - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. */ +#include <linux/ethtool.h> #include <linux/module.h> #include <linux/init.h> #include <linux/netdevice.h> @@ -29,6 +19,7 @@ #include <linux/can/dev.h> #include <linux/can/skb.h> #include <linux/can/vxcan.h> +#include <linux/can/can-ml.h> #include <linux/slab.h> #include <net/rtnetlink.h> @@ -43,27 +34,34 @@ struct vxcan_priv { struct net_device __rcu *peer; }; -static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t vxcan_xmit(struct sk_buff *oskb, struct net_device *dev) { struct vxcan_priv *priv = netdev_priv(dev); struct net_device *peer; - struct canfd_frame *cfd = (struct canfd_frame *)skb->data; struct net_device_stats *peerstats, *srcstats = &dev->stats; + struct sk_buff *skb; + unsigned int len; - if (can_dropped_invalid_skb(dev, skb)) + if (can_dropped_invalid_skb(dev, oskb)) return NETDEV_TX_OK; rcu_read_lock(); peer = rcu_dereference(priv->peer); if (unlikely(!peer)) { - kfree_skb(skb); + kfree_skb(oskb); dev->stats.tx_dropped++; goto out_unlock; } - skb = can_create_echo_skb(skb); - if (!skb) + skb_tx_timestamp(oskb); + + skb = skb_clone(oskb, GFP_ATOMIC); + if (skb) { + consume_skb(oskb); + } else { + kfree_skb(oskb); goto out_unlock; + } /* reset CAN GW hop counter */ skb->csum_start = 0; @@ -71,12 +69,13 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev) skb->dev = peer; skb->ip_summed = CHECKSUM_UNNECESSARY; - if (netif_rx_ni(skb) == NET_RX_SUCCESS) { + len = can_skb_get_data_len(skb); + if (netif_rx(skb) == NET_RX_SUCCESS) { srcstats->tx_packets++; - srcstats->tx_bytes += cfd->len; + srcstats->tx_bytes += len; peerstats = &peer->stats; peerstats->rx_packets++; - peerstats->rx_bytes += cfd->len; + peerstats->rx_bytes += len; } out_unlock: @@ -120,7 +119,7 @@ static int vxcan_get_iflink(const struct net_device *dev) rcu_read_lock(); peer = rcu_dereference(priv->peer); - iflink = peer ? peer->ifindex : 0; + iflink = peer ? 
READ_ONCE(peer->ifindex) : 0; rcu_read_unlock(); return iflink; @@ -132,10 +131,11 @@ static int vxcan_change_mtu(struct net_device *dev, int new_mtu) if (dev->flags & IFF_UP) return -EBUSY; - if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU) + if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU && + !can_is_canxl_dev_mtu(new_mtu)) return -EINVAL; - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); return 0; } @@ -147,28 +147,40 @@ static const struct net_device_ops vxcan_netdev_ops = { .ndo_change_mtu = vxcan_change_mtu, }; +static const struct ethtool_ops vxcan_ethtool_ops = { + .get_ts_info = ethtool_op_get_ts_info, +}; + static void vxcan_setup(struct net_device *dev) { + struct can_ml_priv *can_ml; + dev->type = ARPHRD_CAN; - dev->mtu = CANFD_MTU; + dev->mtu = CANXL_MTU; dev->hard_header_len = 0; dev->addr_len = 0; dev->tx_queue_len = 0; - dev->flags = (IFF_NOARP|IFF_ECHO); + dev->flags = IFF_NOARP; dev->netdev_ops = &vxcan_netdev_ops; + dev->ethtool_ops = &vxcan_ethtool_ops; dev->needs_free_netdev = true; + + can_ml = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN); + can_set_ml_priv(dev, can_ml); } /* forward declaration for rtnl_create_link() */ static struct rtnl_link_ops vxcan_link_ops; -static int vxcan_newlink(struct net *net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static int vxcan_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { + struct net *peer_net = rtnl_newlink_peer_net(params); + struct nlattr **data = params->data; + struct nlattr **tb = params->tb; struct vxcan_priv *priv; struct net_device *peer; - struct net *peer_net; struct nlattr *peer_tb[IFLA_MAX + 1], **tbp = tb; char ifname[IFNAMSIZ]; @@ -178,47 +190,30 @@ static int vxcan_newlink(struct net *net, struct net_device *dev, /* register peer device */ if (data && data[VXCAN_INFO_PEER]) { - struct nlattr *nla_peer; + struct nlattr *nla_peer = data[VXCAN_INFO_PEER]; - nla_peer = data[VXCAN_INFO_PEER]; ifmp = nla_data(nla_peer); - err = rtnl_nla_parse_ifla(peer_tb, - nla_data(nla_peer) + - sizeof(struct ifinfomsg), - nla_len(nla_peer) - - sizeof(struct ifinfomsg), - NULL); - if (err < 0) - return err; - + rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack); tbp = peer_tb; } if (ifmp && tbp[IFLA_IFNAME]) { - nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ); + nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ); name_assign_type = NET_NAME_USER; } else { snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d"); name_assign_type = NET_NAME_ENUM; } - peer_net = rtnl_link_get_net(net, tbp); - if (IS_ERR(peer_net)) - return PTR_ERR(peer_net); - peer = rtnl_create_link(peer_net, ifname, name_assign_type, &vxcan_link_ops, tbp, extack); - if (IS_ERR(peer)) { - put_net(peer_net); + if (IS_ERR(peer)) return PTR_ERR(peer); - } if (ifmp && dev->ifindex) peer->ifindex = ifmp->ifi_index; err = register_netdevice(peer); - put_net(peer_net); - peer_net = NULL; if (err < 0) { free_netdev(peer); return err; @@ -226,13 +221,13 @@ static int vxcan_newlink(struct net *net, struct net_device *dev, netif_carrier_off(peer); - err = rtnl_configure_link(peer, ifmp); + err = rtnl_configure_link(peer, ifmp, 0, NULL); if (err < 0) goto unregister_network_device; /* register first device */ if (tb[IFLA_IFNAME]) - nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ); + nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ); else snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d"); @@ -292,11 +287,12 @@ static struct net *vxcan_get_link_net(const struct net_device *dev) static struct 
rtnl_link_ops vxcan_link_ops = { .kind = DRV_NAME, - .priv_size = sizeof(struct vxcan_priv), + .priv_size = ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN) + sizeof(struct can_ml_priv), .setup = vxcan_setup, .newlink = vxcan_newlink, .dellink = vxcan_dellink, .policy = vxcan_policy, + .peer_type = VXCAN_INFO_PEER, .maxtype = VXCAN_INFO_MAX, .get_link_net = vxcan_get_link_net, }; diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 97d0933d9bd9..43d7f22820b8 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -1,24 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* Xilinx CAN device driver * - * Copyright (C) 2012 - 2014 Xilinx, Inc. + * Copyright (C) 2012 - 2022 Xilinx, Inc. * Copyright (C) 2009 PetaLogix. All rights reserved. * Copyright (C) 2017 - 2018 Sandvik Mining and Construction Oy * * Description: - * This driver is developed for Axi CAN IP and for Zynq CANPS Controller. - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. + * This driver is developed for AXI CAN IP, AXI CANFD IP, CANPS and CANFD PS Controller. */ +#include <linux/bitfield.h> #include <linux/clk.h> #include <linux/errno.h> +#include <linux/ethtool.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> @@ -26,16 +20,18 @@ #include <linux/module.h> #include <linux/netdevice.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/platform_device.h> +#include <linux/property.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/types.h> #include <linux/can/dev.h> #include <linux/can/error.h> -#include <linux/can/led.h> +#include <linux/phy/phy.h> #include <linux/pm_runtime.h> +#include <linux/reset.h> +#include <linux/u64_stats_sync.h> #define DRIVER_NAME "xilinx_can" @@ -58,18 +54,32 @@ enum xcan_reg { XCAN_AFR_OFFSET = 0x60, /* Acceptance Filter */ /* only on CAN FD cores */ + XCAN_F_BRPR_OFFSET = 0x088, /* Data Phase Baud Rate + * Prescaler + */ + XCAN_F_BTR_OFFSET = 0x08C, /* Data Phase Bit Timing */ XCAN_TRR_OFFSET = 0x0090, /* TX Buffer Ready Request */ + + /* only on AXI CAN cores */ + XCAN_ECC_CFG_OFFSET = 0xC8, /* ECC Configuration */ + XCAN_TXTLFIFO_ECC_OFFSET = 0xCC, /* TXTL FIFO ECC error counter */ + XCAN_TXOLFIFO_ECC_OFFSET = 0xD0, /* TXOL FIFO ECC error counter */ + XCAN_RXFIFO_ECC_OFFSET = 0xD4, /* RX FIFO ECC error counter */ + XCAN_AFR_EXT_OFFSET = 0x00E0, /* Acceptance Filter */ XCAN_FSR_OFFSET = 0x00E8, /* RX FIFO Status */ XCAN_TXMSG_BASE_OFFSET = 0x0100, /* TX Message Space */ XCAN_RXMSG_BASE_OFFSET = 0x1100, /* RX Message Space */ XCAN_RXMSG_2_BASE_OFFSET = 0x2100, /* RX Message Space */ + XCAN_AFR_2_MASK_OFFSET = 0x0A00, /* Acceptance Filter MASK */ + XCAN_AFR_2_ID_OFFSET = 0x0A04, /* Acceptance Filter ID */ }; #define XCAN_FRAME_ID_OFFSET(frame_base) ((frame_base) + 0x00) #define XCAN_FRAME_DLC_OFFSET(frame_base) ((frame_base) + 0x04) #define XCAN_FRAME_DW1_OFFSET(frame_base) ((frame_base) + 0x08) #define XCAN_FRAME_DW2_OFFSET(frame_base) ((frame_base) + 0x0C) +#define XCANFD_FRAME_DW_OFFSET(frame_base) 
((frame_base) + 0x08) #define XCAN_CANFD_FRAME_SIZE 0x48 #define XCAN_TXMSG_FRAME_OFFSET(n) (XCAN_TXMSG_BASE_OFFSET + \ @@ -88,6 +98,8 @@ enum xcan_reg { #define XCAN_MSR_LBACK_MASK 0x00000002 /* Loop back mode select */ #define XCAN_MSR_SLEEP_MASK 0x00000001 /* Sleep mode select */ #define XCAN_BRPR_BRP_MASK 0x000000FF /* Baud rate prescaler */ +#define XCAN_BRPR_TDCO_MASK GENMASK(12, 8) /* TDCO */ +#define XCAN_2_BRPR_TDCO_MASK GENMASK(13, 8) /* TDCO for CANFD 2.0 */ #define XCAN_BTR_SJW_MASK 0x00000180 /* Synchronous jump width */ #define XCAN_BTR_TS2_MASK 0x00000070 /* Time segment 2 */ #define XCAN_BTR_TS1_MASK 0x0000000F /* Time segment 1 */ @@ -101,6 +113,7 @@ enum xcan_reg { #define XCAN_ESR_STER_MASK 0x00000004 /* Stuff error */ #define XCAN_ESR_FMER_MASK 0x00000002 /* Form error */ #define XCAN_ESR_CRCER_MASK 0x00000001 /* CRC error */ +#define XCAN_SR_TDCV_MASK GENMASK(22, 16) /* TDCV Value */ #define XCAN_SR_TXFLL_MASK 0x00000400 /* TX FIFO is full */ #define XCAN_SR_ESTAT_MASK 0x00000180 /* Error status */ #define XCAN_SR_ERRWRN_MASK 0x00000040 /* Error warning */ @@ -119,6 +132,18 @@ enum xcan_reg { #define XCAN_IXR_TXFLL_MASK 0x00000004 /* Tx FIFO Full intr */ #define XCAN_IXR_TXOK_MASK 0x00000002 /* TX successful intr */ #define XCAN_IXR_ARBLST_MASK 0x00000001 /* Arbitration lost intr */ +#define XCAN_IXR_E2BERX_MASK BIT(23) /* RX FIFO two bit ECC error */ +#define XCAN_IXR_E1BERX_MASK BIT(22) /* RX FIFO one bit ECC error */ +#define XCAN_IXR_E2BETXOL_MASK BIT(21) /* TXOL FIFO two bit ECC error */ +#define XCAN_IXR_E1BETXOL_MASK BIT(20) /* TXOL FIFO One bit ECC error */ +#define XCAN_IXR_E2BETXTL_MASK BIT(19) /* TXTL FIFO Two bit ECC error */ +#define XCAN_IXR_E1BETXTL_MASK BIT(18) /* TXTL FIFO One bit ECC error */ +#define XCAN_IXR_ECC_MASK (XCAN_IXR_E2BERX_MASK | \ + XCAN_IXR_E1BERX_MASK | \ + XCAN_IXR_E2BETXOL_MASK | \ + XCAN_IXR_E1BETXOL_MASK | \ + XCAN_IXR_E2BETXTL_MASK | \ + XCAN_IXR_E1BETXTL_MASK) #define XCAN_IDR_ID1_MASK 0xFFE00000 /* Standard msg identifier */ #define XCAN_IDR_SRR_MASK 0x00100000 /* Substitute remote TXreq */ #define XCAN_IDR_IDE_MASK 0x00080000 /* Identifier extension */ @@ -126,10 +151,20 @@ enum xcan_reg { #define XCAN_IDR_RTR_MASK 0x00000001 /* Remote TX request */ #define XCAN_DLCR_DLC_MASK 0xF0000000 /* Data length code */ #define XCAN_FSR_FL_MASK 0x00003F00 /* RX Fill Level */ +#define XCAN_2_FSR_FL_MASK 0x00007F00 /* RX Fill Level */ #define XCAN_FSR_IRI_MASK 0x00000080 /* RX Increment Read Index */ #define XCAN_FSR_RI_MASK 0x0000001F /* RX Read Index */ +#define XCAN_2_FSR_RI_MASK 0x0000003F /* RX Read Index */ +#define XCAN_DLCR_EDL_MASK 0x08000000 /* EDL Mask in DLC */ +#define XCAN_DLCR_BRS_MASK 0x04000000 /* BRS Mask in DLC */ +#define XCAN_ECC_CFG_REECRX_MASK BIT(2) /* Reset RX FIFO ECC error counters */ +#define XCAN_ECC_CFG_REECTXOL_MASK BIT(1) /* Reset TXOL FIFO ECC error counters */ +#define XCAN_ECC_CFG_REECTXTL_MASK BIT(0) /* Reset TXTL FIFO ECC error counters */ +#define XCAN_ECC_1BIT_CNT_MASK GENMASK(15, 0) /* FIFO ECC 1bit count mask */ +#define XCAN_ECC_2BIT_CNT_MASK GENMASK(31, 16) /* FIFO ECC 2bit count mask */ /* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */ +#define XCAN_BRPR_TDC_ENABLE BIT(16) /* Transmitter Delay Compensation (TDC) Enable */ #define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */ #define XCAN_BTR_TS2_SHIFT 4 /* Time segment 2 */ #define XCAN_BTR_SJW_SHIFT_CANFD 16 /* Synchronous jump width */ @@ -141,6 +176,7 @@ enum xcan_reg { /* CAN frame length constants */ #define XCAN_FRAME_MAX_DATA_LEN 8 
+#define XCANFD_DW_BYTES 4 #define XCAN_TIMEOUT (1 * HZ) /* TX-FIFO-empty interrupt available */ @@ -157,7 +193,15 @@ enum xcan_reg { #define XCAN_FLAG_RX_FIFO_MULTI 0x0010 #define XCAN_FLAG_CANFD_2 0x0020 +enum xcan_ip_type { + XAXI_CAN = 0, + XZYNQ_CANPS, + XAXI_CANFD, + XAXI_CANFD_2_0, +}; + struct xcan_devtype_data { + enum xcan_ip_type cantype; unsigned int flags; const struct can_bittiming_const *bittiming_const; const char *bus_clk_name; @@ -181,23 +225,43 @@ struct xcan_devtype_data { * @bus_clk: Pointer to struct clk * @can_clk: Pointer to struct clk * @devtype: Device type specific constants + * @transceiver: Optional pointer to associated CAN transceiver + * @rstc: Pointer to reset control + * @ecc_enable: ECC enable flag + * @syncp: synchronization for ECC error stats + * @ecc_rx_2_bit_errors: RXFIFO 2bit ECC count + * @ecc_rx_1_bit_errors: RXFIFO 1bit ECC count + * @ecc_txol_2_bit_errors: TXOLFIFO 2bit ECC count + * @ecc_txol_1_bit_errors: TXOLFIFO 1bit ECC count + * @ecc_txtl_2_bit_errors: TXTLFIFO 2bit ECC count + * @ecc_txtl_1_bit_errors: TXTLFIFO 1bit ECC count */ struct xcan_priv { struct can_priv can; - spinlock_t tx_lock; + spinlock_t tx_lock; /* Lock for synchronizing TX interrupt handling */ unsigned int tx_head; unsigned int tx_tail; unsigned int tx_max; struct napi_struct napi; u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg); void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg, - u32 val); + u32 val); struct device *dev; void __iomem *reg_base; unsigned long irq_flags; struct clk *bus_clk; struct clk *can_clk; struct xcan_devtype_data devtype; + struct phy *transceiver; + struct reset_control *rstc; + bool ecc_enable; + struct u64_stats_sync syncp; + u64_stats_t ecc_rx_2_bit_errors; + u64_stats_t ecc_rx_1_bit_errors; + u64_stats_t ecc_txol_2_bit_errors; + u64_stats_t ecc_txol_1_bit_errors; + u64_stats_t ecc_txtl_2_bit_errors; + u64_stats_t ecc_txtl_1_bit_errors; }; /* CAN Bittiming constants as per Xilinx CAN specs */ @@ -213,6 +277,7 @@ static const struct can_bittiming_const xcan_bittiming_const = { .brp_inc = 1, }; +/* AXI CANFD Arbitration Bittiming constants as per AXI CANFD 1.0 spec */ static const struct can_bittiming_const xcan_bittiming_const_canfd = { .name = DRIVER_NAME, .tseg1_min = 1, @@ -225,6 +290,20 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd = { .brp_inc = 1, }; +/* AXI CANFD Data Bittiming constants as per AXI CANFD 1.0 specs */ +static const struct can_bittiming_const xcan_data_bittiming_const_canfd = { + .name = DRIVER_NAME, + .tseg1_min = 1, + .tseg1_max = 16, + .tseg2_min = 1, + .tseg2_max = 8, + .sjw_max = 8, + .brp_min = 1, + .brp_max = 256, + .brp_inc = 1, +}; + +/* AXI CANFD 2.0 Arbitration Bittiming constants as per AXI CANFD 2.0 spec */ static const struct can_bittiming_const xcan_bittiming_const_canfd2 = { .name = DRIVER_NAME, .tseg1_min = 1, @@ -237,6 +316,57 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd2 = { .brp_inc = 1, }; +/* AXI CANFD 2.0 Data Bittiming constants as per AXI CANFD 2.0 spec */ +static const struct can_bittiming_const xcan_data_bittiming_const_canfd2 = { + .name = DRIVER_NAME, + .tseg1_min = 1, + .tseg1_max = 32, + .tseg2_min = 1, + .tseg2_max = 16, + .sjw_max = 16, + .brp_min = 1, + .brp_max = 256, + .brp_inc = 1, +}; + +/* Transmission Delay Compensation constants for CANFD 1.0 */ +static const struct can_tdc_const xcan_tdc_const_canfd = { + .tdcv_min = 0, + .tdcv_max = 0, /* Manual mode not supported. 
*/ + .tdco_min = 0, + .tdco_max = 32, + .tdcf_min = 0, /* Filter window not supported */ + .tdcf_max = 0, +}; + +/* Transmission Delay Compensation constants for CANFD 2.0 */ +static const struct can_tdc_const xcan_tdc_const_canfd2 = { + .tdcv_min = 0, + .tdcv_max = 0, /* Manual mode not supported. */ + .tdco_min = 0, + .tdco_max = 64, + .tdcf_min = 0, /* Filter window not supported */ + .tdcf_max = 0, +}; + +enum xcan_stats_type { + XCAN_ECC_RX_2_BIT_ERRORS, + XCAN_ECC_RX_1_BIT_ERRORS, + XCAN_ECC_TXOL_2_BIT_ERRORS, + XCAN_ECC_TXOL_1_BIT_ERRORS, + XCAN_ECC_TXTL_2_BIT_ERRORS, + XCAN_ECC_TXTL_1_BIT_ERRORS, +}; + +static const char xcan_priv_flags_strings[][ETH_GSTRING_LEN] = { + [XCAN_ECC_RX_2_BIT_ERRORS] = "ecc_rx_2_bit_errors", + [XCAN_ECC_RX_1_BIT_ERRORS] = "ecc_rx_1_bit_errors", + [XCAN_ECC_TXOL_2_BIT_ERRORS] = "ecc_txol_2_bit_errors", + [XCAN_ECC_TXOL_1_BIT_ERRORS] = "ecc_txol_1_bit_errors", + [XCAN_ECC_TXTL_2_BIT_ERRORS] = "ecc_txtl_2_bit_errors", + [XCAN_ECC_TXTL_1_BIT_ERRORS] = "ecc_txtl_1_bit_errors", +}; + /** * xcan_write_reg_le - Write a value to the device register little endian * @priv: Driver private data structure @@ -246,7 +376,7 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd2 = { * Write data to the paricular CAN register */ static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg, - u32 val) + u32 val) { iowrite32(val, priv->reg_base + reg); } @@ -273,7 +403,7 @@ static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg) * Write data to the paricular CAN register */ static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg, - u32 val) + u32 val) { iowrite32be(val, priv->reg_base + reg); } @@ -351,6 +481,7 @@ static int xcan_set_bittiming(struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); struct can_bittiming *bt = &priv->can.bittiming; + struct can_bittiming *dbt = &priv->can.fd.data_bittiming; u32 btr0, btr1; u32 is_config_mode; @@ -361,11 +492,11 @@ static int xcan_set_bittiming(struct net_device *ndev) XCAN_SR_CONFIG_MASK; if (!is_config_mode) { netdev_alert(ndev, - "BUG! Cannot set bittiming - CAN is not in config mode\n"); + "BUG! 
Cannot set bittiming - CAN is not in config mode\n"); return -EPERM; } - /* Setting Baud Rate prescalar value in BRPR Register */ + /* Setting Baud Rate prescaler value in BRPR Register */ btr0 = (bt->brp - 1); /* Setting Time Segment 1 in BTR Register */ @@ -380,9 +511,35 @@ static int xcan_set_bittiming(struct net_device *ndev) priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0); priv->write_reg(priv, XCAN_BTR_OFFSET, btr1); + if (priv->devtype.cantype == XAXI_CANFD || + priv->devtype.cantype == XAXI_CANFD_2_0) { + /* Setting Baud Rate prescaler value in F_BRPR Register */ + btr0 = dbt->brp - 1; + if (can_fd_tdc_is_enabled(&priv->can)) { + if (priv->devtype.cantype == XAXI_CANFD) + btr0 |= FIELD_PREP(XCAN_BRPR_TDCO_MASK, priv->can.fd.tdc.tdco) | + XCAN_BRPR_TDC_ENABLE; + else + btr0 |= FIELD_PREP(XCAN_2_BRPR_TDCO_MASK, priv->can.fd.tdc.tdco) | + XCAN_BRPR_TDC_ENABLE; + } + + /* Setting Time Segment 1 in BTR Register */ + btr1 = dbt->prop_seg + dbt->phase_seg1 - 1; + + /* Setting Time Segment 2 in BTR Register */ + btr1 |= (dbt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift; + + /* Setting Synchronous jump width in BTR Register */ + btr1 |= (dbt->sjw - 1) << priv->devtype.btr_sjw_shift; + + priv->write_reg(priv, XCAN_F_BRPR_OFFSET, btr0); + priv->write_reg(priv, XCAN_F_BTR_OFFSET, btr1); + } + netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n", - priv->read_reg(priv, XCAN_BRPR_OFFSET), - priv->read_reg(priv, XCAN_BTR_OFFSET)); + priv->read_reg(priv, XCAN_BRPR_OFFSET), + priv->read_reg(priv, XCAN_BTR_OFFSET)); return 0; } @@ -400,9 +557,8 @@ static int xcan_set_bittiming(struct net_device *ndev) static int xcan_chip_start(struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); - u32 reg_msr, reg_sr_mask; + u32 reg_msr; int err; - unsigned long timeout; u32 ier; /* Check if it is in reset mode */ @@ -414,25 +570,31 @@ static int xcan_chip_start(struct net_device *ndev) if (err < 0) return err; - /* Enable interrupts */ + /* Enable interrupts + * + * We enable the ERROR interrupt even with + * CAN_CTRLMODE_BERR_REPORTING disabled as there is no + * dedicated interrupt for a state change to + * ERROR_WARNING/ERROR_PASSIVE. 
+ */ ier = XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK | XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK | XCAN_IXR_ARBLST_MASK | xcan_rx_int_mask(priv); + if (priv->ecc_enable) + ier |= XCAN_IXR_ECC_MASK; + if (priv->devtype.flags & XCAN_FLAG_RXMNF) ier |= XCAN_IXR_RXMNF_MASK; priv->write_reg(priv, XCAN_IER_OFFSET, ier); /* Check whether it is loopback mode or normal mode */ - if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { + if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) reg_msr = XCAN_MSR_LBACK_MASK; - reg_sr_mask = XCAN_SR_LBACK_MASK; - } else { + else reg_msr = 0x0; - reg_sr_mask = XCAN_SR_NORMAL_MASK; - } /* enable the first extended filter, if any, as cores with extended * filtering default to non-receipt if all filters are disabled @@ -443,16 +605,8 @@ static int xcan_chip_start(struct net_device *ndev) priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr); priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK); - timeout = jiffies + XCAN_TIMEOUT; - while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & reg_sr_mask)) { - if (time_after(jiffies, timeout)) { - netdev_warn(ndev, - "timed out for correct mode\n"); - return -ETIMEDOUT; - } - } netdev_dbg(ndev, "status:#x%08x\n", - priv->read_reg(priv, XCAN_SR_OFFSET)); + priv->read_reg(priv, XCAN_SR_OFFSET)); priv->can.state = CAN_STATE_ERROR_ACTIVE; return 0; @@ -463,8 +617,7 @@ static int xcan_chip_start(struct net_device *ndev) * @ndev: Pointer to net_device structure * @mode: Tells the mode of the driver * - * This check the drivers state and calls the - * the corresponding modes to set. + * This check the drivers state and calls the corresponding modes to set. * * Return: 0 on success and failure value on error */ @@ -491,14 +644,17 @@ static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode) /** * xcan_write_frame - Write a frame to HW + * @ndev: Pointer to net_device structure * @skb: sk_buff pointer that contains data to be Txed * @frame_offset: Register offset to write the frame to */ -static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb, +static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb, int frame_offset) { u32 id, dlc, data[2] = {0, 0}; - struct can_frame *cf = (struct can_frame *)skb->data; + struct canfd_frame *cf = (struct canfd_frame *)skb->data; + u32 ramoff, dwindex = 0, i; + struct xcan_priv *priv = netdev_priv(ndev); /* Watch carefully on the bit sequence */ if (cf->can_id & CAN_EFF_FLAG) { @@ -506,7 +662,7 @@ static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb, id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) & XCAN_IDR_ID2_MASK; id |= (((cf->can_id & CAN_EFF_MASK) >> - (CAN_EFF_ID_BITS-CAN_SFF_ID_BITS)) << + (CAN_EFF_ID_BITS - CAN_SFF_ID_BITS)) << XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK; /* The substibute remote TX request bit should be "1" @@ -527,31 +683,59 @@ static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb, id |= XCAN_IDR_SRR_MASK; } - dlc = cf->can_dlc << XCAN_DLCR_DLC_SHIFT; - - if (cf->can_dlc > 0) - data[0] = be32_to_cpup((__be32 *)(cf->data + 0)); - if (cf->can_dlc > 4) - data[1] = be32_to_cpup((__be32 *)(cf->data + 4)); + dlc = can_fd_len2dlc(cf->len) << XCAN_DLCR_DLC_SHIFT; + if (can_is_canfd_skb(skb)) { + if (cf->flags & CANFD_BRS) + dlc |= XCAN_DLCR_BRS_MASK; + dlc |= XCAN_DLCR_EDL_MASK; + } priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id); /* If the CAN frame is RTR frame this write triggers transmission * (not on CAN FD) */ priv->write_reg(priv, 
XCAN_FRAME_DLC_OFFSET(frame_offset), dlc); - if (!(cf->can_id & CAN_RTR_FLAG)) { - priv->write_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_offset), - data[0]); - /* If the CAN frame is Standard/Extended frame this - * write triggers transmission (not on CAN FD) - */ - priv->write_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_offset), - data[1]); + if (priv->devtype.cantype == XAXI_CANFD || + priv->devtype.cantype == XAXI_CANFD_2_0) { + for (i = 0; i < cf->len; i += 4) { + ramoff = XCANFD_FRAME_DW_OFFSET(frame_offset) + + (dwindex * XCANFD_DW_BYTES); + priv->write_reg(priv, ramoff, + be32_to_cpup((__be32 *)(cf->data + i))); + dwindex++; + } + } else { + if (cf->len > 0) + data[0] = be32_to_cpup((__be32 *)(cf->data + 0)); + if (cf->len > 4) + data[1] = be32_to_cpup((__be32 *)(cf->data + 4)); + + if (!(cf->can_id & CAN_RTR_FLAG)) { + priv->write_reg(priv, + XCAN_FRAME_DW1_OFFSET(frame_offset), + data[0]); + /* If the CAN frame is Standard/Extended frame this + * write triggers transmission (not on CAN FD) + */ + priv->write_reg(priv, + XCAN_FRAME_DW2_OFFSET(frame_offset), + data[1]); + } } + + if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) && + (priv->devtype.flags & XCAN_FLAG_TXFEMP)) + can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0); + else + can_put_echo_skb(skb, ndev, 0, 0); + + priv->tx_head++; } /** * xcan_start_xmit_fifo - Starts the transmission (FIFO mode) + * @skb: sk_buff pointer that contains data to be Txed + * @ndev: Pointer to net_device structure * * Return: 0 on success, -ENOSPC if FIFO is full. */ @@ -565,13 +749,9 @@ static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev) XCAN_SR_TXFLL_MASK)) return -ENOSPC; - can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max); - spin_lock_irqsave(&priv->tx_lock, flags); - priv->tx_head++; - - xcan_write_frame(priv, skb, XCAN_TXFIFO_OFFSET); + xcan_write_frame(ndev, skb, XCAN_TXFIFO_OFFSET); /* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */ if (priv->tx_max > 1) @@ -588,6 +768,8 @@ static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev) /** * xcan_start_xmit_mailbox - Starts the transmission (mailbox mode) + * @skb: sk_buff pointer that contains data to be Txed + * @ndev: Pointer to net_device structure * * Return: 0 on success, -ENOSPC if there is no space */ @@ -600,13 +782,9 @@ static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev) BIT(XCAN_TX_MAILBOX_IDX))) return -ENOSPC; - can_put_echo_skb(skb, ndev, 0); - spin_lock_irqsave(&priv->tx_lock, flags); - priv->tx_head++; - - xcan_write_frame(priv, skb, + xcan_write_frame(ndev, skb, XCAN_TXMSG_FRAME_OFFSET(XCAN_TX_MAILBOX_IDX)); /* Mark buffer as ready for transmit */ @@ -633,7 +811,7 @@ static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) struct xcan_priv *priv = netdev_priv(ndev); int ret; - if (can_dropped_invalid_skb(ndev, skb)) + if (can_dev_dropped_skb(ndev, skb)) return NETDEV_TX_OK; if (priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) @@ -681,7 +859,7 @@ static int xcan_rx(struct net_device *ndev, int frame_base) XCAN_DLCR_DLC_SHIFT; /* Change Xilinx CAN data length format to socketCAN data format */ - cf->can_dlc = get_can_dlc(dlc); + cf->len = can_cc_dlc2len(dlc); /* Change Xilinx CAN ID format to socketCAN ID format */ if (id_xcan & XCAN_IDR_IDE_MASK) { @@ -706,14 +884,100 @@ static int xcan_rx(struct net_device *ndev, int frame_base) if (!(cf->can_id & CAN_RTR_FLAG)) { /* Change Xilinx CAN data format to socketCAN data format */ - if (cf->can_dlc > 0) + if (cf->len 
> 0) *(__be32 *)(cf->data) = cpu_to_be32(data[0]); - if (cf->can_dlc > 4) + if (cf->len > 4) *(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]); + + stats->rx_bytes += cf->len; + } + stats->rx_packets++; + + netif_receive_skb(skb); + + return 1; +} + +/** + * xcanfd_rx - Is called from CAN isr to complete the received + * frame processing + * @ndev: Pointer to net_device structure + * @frame_base: Register offset to the frame to be read + * + * This function is invoked from the CAN isr(poll) to process the Rx frames. It + * does minimal processing and invokes "netif_receive_skb" to complete further + * processing. + * Return: 1 on success and 0 on failure. + */ +static int xcanfd_rx(struct net_device *ndev, int frame_base) +{ + struct xcan_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + struct canfd_frame *cf; + struct sk_buff *skb; + u32 id_xcan, dlc, data[2] = {0, 0}, dwindex = 0, i, dw_offset; + + id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base)); + dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base)); + if (dlc & XCAN_DLCR_EDL_MASK) + skb = alloc_canfd_skb(ndev, &cf); + else + skb = alloc_can_skb(ndev, (struct can_frame **)&cf); + + if (unlikely(!skb)) { + stats->rx_dropped++; + return 0; } - stats->rx_bytes += cf->can_dlc; + /* Change Xilinx CANFD data length format to socketCAN data + * format + */ + if (dlc & XCAN_DLCR_EDL_MASK) + cf->len = can_fd_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >> + XCAN_DLCR_DLC_SHIFT); + else + cf->len = can_cc_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >> + XCAN_DLCR_DLC_SHIFT); + + /* Change Xilinx CAN ID format to socketCAN ID format */ + if (id_xcan & XCAN_IDR_IDE_MASK) { + /* The received frame is an Extended format frame */ + cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3; + cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >> + XCAN_IDR_ID2_SHIFT; + cf->can_id |= CAN_EFF_FLAG; + if (id_xcan & XCAN_IDR_RTR_MASK) + cf->can_id |= CAN_RTR_FLAG; + } else { + /* The received frame is a standard format frame */ + cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> + XCAN_IDR_ID1_SHIFT; + if (!(dlc & XCAN_DLCR_EDL_MASK) && (id_xcan & + XCAN_IDR_SRR_MASK)) + cf->can_id |= CAN_RTR_FLAG; + } + + /* Check the frame received is FD or not*/ + if (dlc & XCAN_DLCR_EDL_MASK) { + for (i = 0; i < cf->len; i += 4) { + dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base) + + (dwindex * XCANFD_DW_BYTES); + data[0] = priv->read_reg(priv, dw_offset); + *(__be32 *)(cf->data + i) = cpu_to_be32(data[0]); + dwindex++; + } + } else { + for (i = 0; i < cf->len; i += 4) { + dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base); + data[0] = priv->read_reg(priv, dw_offset + i); + *(__be32 *)(cf->data + i) = cpu_to_be32(data[0]); + } + } + + if (!(cf->can_id & CAN_RTR_FLAG)) + stats->rx_bytes += cf->len; stats->rx_packets++; + netif_receive_skb(skb); return 1; @@ -770,6 +1034,7 @@ static void xcan_set_error_state(struct net_device *ndev, can_change_state(ndev, cf, tx_state, rx_state); if (cf) { + cf->can_id |= CAN_ERR_CNT; cf->data[6] = txerr; cf->data[7] = rxerr; } @@ -806,13 +1071,8 @@ static void xcan_update_error_state_after_rxtx(struct net_device *ndev) xcan_set_error_state(ndev, new_state, skb ? 
cf : NULL); - if (skb) { - struct net_device_stats *stats = &ndev->stats; - - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + if (skb) netif_rx(skb); - } } } @@ -822,19 +1082,16 @@ static void xcan_update_error_state_after_rxtx(struct net_device *ndev) * @isr: interrupt status register value * * This is the CAN error interrupt and it will - * check the the type of error and forward the error + * check the type of error and forward the error * frame to upper layers. */ static void xcan_err_interrupt(struct net_device *ndev, u32 isr) { struct xcan_priv *priv = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; - struct can_frame *cf; - struct sk_buff *skb; + struct can_frame cf = { }; u32 err_status; - skb = alloc_can_err_skb(ndev, &cf); - err_status = priv->read_reg(priv, XCAN_ESR_OFFSET); priv->write_reg(priv, XCAN_ESR_OFFSET, err_status); @@ -844,32 +1101,27 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr) /* Leave device in Config Mode in bus-off state */ priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK); can_bus_off(ndev); - if (skb) - cf->can_id |= CAN_ERR_BUSOFF; + cf.can_id |= CAN_ERR_BUSOFF; } else { enum can_state new_state = xcan_current_error_state(ndev); if (new_state != priv->can.state) - xcan_set_error_state(ndev, new_state, skb ? cf : NULL); + xcan_set_error_state(ndev, new_state, &cf); } /* Check for Arbitration lost interrupt */ if (isr & XCAN_IXR_ARBLST_MASK) { priv->can.can_stats.arbitration_lost++; - if (skb) { - cf->can_id |= CAN_ERR_LOSTARB; - cf->data[0] = CAN_ERR_LOSTARB_UNSPEC; - } + cf.can_id |= CAN_ERR_LOSTARB; + cf.data[0] = CAN_ERR_LOSTARB_UNSPEC; } /* Check for RX FIFO Overflow interrupt */ if (isr & XCAN_IXR_RXOFLW_MASK) { stats->rx_over_errors++; stats->rx_errors++; - if (skb) { - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; - } + cf.can_id |= CAN_ERR_CRTL; + cf.data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; } /* Check for RX Match Not Finished interrupt */ @@ -877,72 +1129,127 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr) stats->rx_dropped++; stats->rx_errors++; netdev_err(ndev, "RX match not finished, frame discarded\n"); - if (skb) { - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] |= CAN_ERR_CRTL_UNSPEC; - } + cf.can_id |= CAN_ERR_CRTL; + cf.data[1] |= CAN_ERR_CRTL_UNSPEC; } /* Check for error interrupt */ if (isr & XCAN_IXR_ERROR_MASK) { - if (skb) - cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + bool berr_reporting = false; + + if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) { + berr_reporting = true; + cf.can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + } /* Check for Ack error interrupt */ if (err_status & XCAN_ESR_ACKER_MASK) { stats->tx_errors++; - if (skb) { - cf->can_id |= CAN_ERR_ACK; - cf->data[3] = CAN_ERR_PROT_LOC_ACK; + if (berr_reporting) { + cf.can_id |= CAN_ERR_ACK; + cf.data[3] = CAN_ERR_PROT_LOC_ACK; } } /* Check for Bit error interrupt */ if (err_status & XCAN_ESR_BERR_MASK) { stats->tx_errors++; - if (skb) { - cf->can_id |= CAN_ERR_PROT; - cf->data[2] = CAN_ERR_PROT_BIT; + if (berr_reporting) { + cf.can_id |= CAN_ERR_PROT; + cf.data[2] = CAN_ERR_PROT_BIT; } } /* Check for Stuff error interrupt */ if (err_status & XCAN_ESR_STER_MASK) { stats->rx_errors++; - if (skb) { - cf->can_id |= CAN_ERR_PROT; - cf->data[2] = CAN_ERR_PROT_STUFF; + if (berr_reporting) { + cf.can_id |= CAN_ERR_PROT; + cf.data[2] = CAN_ERR_PROT_STUFF; } } /* Check for Form error interrupt */ if (err_status & XCAN_ESR_FMER_MASK) { stats->rx_errors++; - if (skb) { - cf->can_id |= 
CAN_ERR_PROT; - cf->data[2] = CAN_ERR_PROT_FORM; + if (berr_reporting) { + cf.can_id |= CAN_ERR_PROT; + cf.data[2] = CAN_ERR_PROT_FORM; } } /* Check for CRC error interrupt */ if (err_status & XCAN_ESR_CRCER_MASK) { stats->rx_errors++; - if (skb) { - cf->can_id |= CAN_ERR_PROT; - cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; + if (berr_reporting) { + cf.can_id |= CAN_ERR_PROT; + cf.data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; } } - priv->can.can_stats.bus_error++; + priv->can.can_stats.bus_error++; + } + + if (priv->ecc_enable && isr & XCAN_IXR_ECC_MASK) { + u32 reg_rx_ecc, reg_txol_ecc, reg_txtl_ecc; + + reg_rx_ecc = priv->read_reg(priv, XCAN_RXFIFO_ECC_OFFSET); + reg_txol_ecc = priv->read_reg(priv, XCAN_TXOLFIFO_ECC_OFFSET); + reg_txtl_ecc = priv->read_reg(priv, XCAN_TXTLFIFO_ECC_OFFSET); + + /* The counter reaches its maximum at 0xffff and does not overflow. + * Accept the small race window between reading and resetting ECC counters. + */ + priv->write_reg(priv, XCAN_ECC_CFG_OFFSET, XCAN_ECC_CFG_REECRX_MASK | + XCAN_ECC_CFG_REECTXOL_MASK | XCAN_ECC_CFG_REECTXTL_MASK); + + u64_stats_update_begin(&priv->syncp); + + if (isr & XCAN_IXR_E2BERX_MASK) { + u64_stats_add(&priv->ecc_rx_2_bit_errors, + FIELD_GET(XCAN_ECC_2BIT_CNT_MASK, reg_rx_ecc)); + } + + if (isr & XCAN_IXR_E1BERX_MASK) { + u64_stats_add(&priv->ecc_rx_1_bit_errors, + FIELD_GET(XCAN_ECC_1BIT_CNT_MASK, reg_rx_ecc)); + } + + if (isr & XCAN_IXR_E2BETXOL_MASK) { + u64_stats_add(&priv->ecc_txol_2_bit_errors, + FIELD_GET(XCAN_ECC_2BIT_CNT_MASK, reg_txol_ecc)); + } + + if (isr & XCAN_IXR_E1BETXOL_MASK) { + u64_stats_add(&priv->ecc_txol_1_bit_errors, + FIELD_GET(XCAN_ECC_1BIT_CNT_MASK, reg_txol_ecc)); + } + + if (isr & XCAN_IXR_E2BETXTL_MASK) { + u64_stats_add(&priv->ecc_txtl_2_bit_errors, + FIELD_GET(XCAN_ECC_2BIT_CNT_MASK, reg_txtl_ecc)); + } + + if (isr & XCAN_IXR_E1BETXTL_MASK) { + u64_stats_add(&priv->ecc_txtl_1_bit_errors, + FIELD_GET(XCAN_ECC_1BIT_CNT_MASK, reg_txtl_ecc)); + } + + u64_stats_update_end(&priv->syncp); } - if (skb) { - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; - netif_rx(skb); + if (cf.can_id) { + struct can_frame *skb_cf; + struct sk_buff *skb = alloc_can_err_skb(ndev, &skb_cf); + + if (skb) { + skb_cf->can_id |= cf.can_id; + memcpy(skb_cf->data, cf.data, CAN_ERR_DLC); + netif_rx(skb); + } } netdev_dbg(ndev, "%s: error status register:0x%x\n", - __func__, priv->read_reg(priv, XCAN_ESR_OFFSET)); + __func__, priv->read_reg(priv, XCAN_ESR_OFFSET)); } /** @@ -968,6 +1275,7 @@ static void xcan_state_interrupt(struct net_device *ndev, u32 isr) /** * xcan_rx_fifo_get_next_frame - Get register offset of next RX frame + * @priv: Driver private data structure * * Return: Register offset of the next frame in RX FIFO. 
*/ @@ -976,7 +1284,7 @@ static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv) int offset; if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) { - u32 fsr; + u32 fsr, mask; /* clear RXOK before the is-empty check so that any newly * received frame will reassert it without a race @@ -986,13 +1294,20 @@ static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv) fsr = priv->read_reg(priv, XCAN_FSR_OFFSET); /* check if RX FIFO is empty */ - if (!(fsr & XCAN_FSR_FL_MASK)) + if (priv->devtype.flags & XCAN_FLAG_CANFD_2) + mask = XCAN_2_FSR_FL_MASK; + else + mask = XCAN_FSR_FL_MASK; + + if (!(fsr & mask)) return -ENOENT; if (priv->devtype.flags & XCAN_FLAG_CANFD_2) - offset = XCAN_RXMSG_2_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK); + offset = + XCAN_RXMSG_2_FRAME_OFFSET(fsr & XCAN_2_FSR_RI_MASK); else - offset = XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK); + offset = + XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK); } else { /* check if RX FIFO is empty */ @@ -1027,7 +1342,10 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota) while ((frame_offset = xcan_rx_fifo_get_next_frame(priv)) >= 0 && (work_done < quota)) { - work_done += xcan_rx(ndev, frame_offset); + if (xcan_rx_int_mask(priv) & XCAN_IXR_RXOK_MASK) + work_done += xcanfd_rx(ndev, frame_offset); + else + work_done += xcan_rx(ndev, frame_offset); if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) /* increment read index */ @@ -1041,16 +1359,15 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota) XCAN_IXR_RXNEMP_MASK); } - if (work_done) { - can_led_event(ndev, CAN_LED_EVENT_RX); + if (work_done) xcan_update_error_state_after_rxtx(ndev); - } if (work_done < quota) { - napi_complete_done(napi, work_done); - ier = priv->read_reg(priv, XCAN_IER_OFFSET); - ier |= xcan_rx_int_mask(priv); - priv->write_reg(priv, XCAN_IER_OFFSET, ier); + if (napi_complete_done(napi, work_done)) { + ier = priv->read_reg(priv, XCAN_IER_OFFSET); + ier |= xcan_rx_int_mask(priv); + priv->write_reg(priv, XCAN_IER_OFFSET, ier); + } } return work_done; } @@ -1102,8 +1419,10 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr) * via TXFEMP handling as we read TXFEMP *after* TXOK * clear to satisfy (1). */ - while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) { - priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK); + while ((isr & XCAN_IXR_TXOK_MASK) && + !WARN_ON(++retries == 100)) { + priv->write_reg(priv, XCAN_ICR_OFFSET, + XCAN_IXR_TXOK_MASK); isr = priv->read_reg(priv, XCAN_ISR_OFFSET); } @@ -1118,7 +1437,7 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr) while (frames_sent--) { stats->tx_bytes += can_get_echo_skb(ndev, priv->tx_tail % - priv->tx_max); + priv->tx_max, NULL); priv->tx_tail++; stats->tx_packets++; } @@ -1127,14 +1446,13 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr) spin_unlock_irqrestore(&priv->tx_lock, flags); - can_led_event(ndev, CAN_LED_EVENT_TX); xcan_update_error_state_after_rxtx(ndev); } /** * xcan_interrupt - CAN Isr * @irq: irq number - * @dev_id: device id poniter + * @dev_id: device id pointer * * This is the xilinx CAN Isr. It checks for the type of interrupt * and invokes the corresponding ISR. 
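/*
 * Illustrative sketch, not part of the patch: the ECC error counters added to
 * struct xcan_priv follow the u64_stats_sync pattern. The interrupt handler
 * brackets its u64_stats_add() calls with u64_stats_update_begin()/end(), and
 * the ethtool reader retries with u64_stats_fetch_begin()/retry() until it
 * sees a consistent snapshot. Everything prefixed "example_" is hypothetical.
 */
#include <linux/u64_stats_sync.h>

struct example_ecc_stats {
	struct u64_stats_sync syncp;
	u64_stats_t rx_1_bit_errors;
};

/* Writer side, e.g. called from the ISR with the counter delta just read. */
static void example_ecc_count(struct example_ecc_stats *s, u32 delta)
{
	u64_stats_update_begin(&s->syncp);
	u64_stats_add(&s->rx_1_bit_errors, delta);
	u64_stats_update_end(&s->syncp);
}

/* Reader side, e.g. called from .get_ethtool_stats. */
static u64 example_ecc_read(struct example_ecc_stats *s)
{
	unsigned int start;
	u64 val;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		val = u64_stats_read(&s->rx_1_bit_errors);
	} while (u64_stats_fetch_retry(&s->syncp, start));

	return val;
}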
@@ -1146,8 +1464,8 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id) { struct net_device *ndev = (struct net_device *)dev_id; struct xcan_priv *priv = netdev_priv(ndev); + u32 isr_errors, mask; u32 isr, ier; - u32 isr_errors; u32 rx_int_mask = xcan_rx_int_mask(priv); /* Get the interrupt status from Xilinx CAN */ @@ -1166,10 +1484,15 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id) if (isr & XCAN_IXR_TXOK_MASK) xcan_tx_interrupt(ndev, isr); + mask = XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK | + XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK | + XCAN_IXR_RXMNF_MASK; + + if (priv->ecc_enable) + mask |= XCAN_IXR_ECC_MASK; + /* Check for the type of error interrupt and Processing it */ - isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK | - XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK | - XCAN_IXR_RXMNF_MASK); + isr_errors = isr & mask; if (isr_errors) { priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors); xcan_err_interrupt(ndev, isr); @@ -1195,9 +1518,13 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id) static void xcan_chip_stop(struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); + int ret; /* Disable interrupts and leave the can in configuration mode */ - set_reset_mode(ndev); + ret = set_reset_mode(ndev); + if (ret < 0) + netdev_dbg(ndev, "set_reset_mode() Failed\n"); + priv->can.state = CAN_STATE_STOPPED; } @@ -1213,15 +1540,19 @@ static int xcan_open(struct net_device *ndev) struct xcan_priv *priv = netdev_priv(ndev); int ret; + ret = phy_power_on(priv->transceiver); + if (ret) + return ret; + ret = pm_runtime_get_sync(priv->dev); if (ret < 0) { netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", - __func__, ret); - return ret; + __func__, ret); + goto err; } ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags, - ndev->name, ndev); + ndev->name, ndev); if (ret < 0) { netdev_err(ndev, "irq allocation for CAN failed\n"); goto err; @@ -1245,7 +1576,6 @@ static int xcan_open(struct net_device *ndev) goto err_candev; } - can_led_event(ndev, CAN_LED_EVENT_OPEN); napi_enable(&priv->napi); netif_start_queue(ndev); @@ -1257,6 +1587,7 @@ err_irq: free_irq(ndev->irq, ndev); err: pm_runtime_put(priv->dev); + phy_power_off(priv->transceiver); return ret; } @@ -1277,8 +1608,8 @@ static int xcan_close(struct net_device *ndev) free_irq(ndev->irq, ndev); close_candev(ndev); - can_led_event(ndev, CAN_LED_EVENT_STOP); pm_runtime_put(priv->dev); + phy_power_off(priv->transceiver); return 0; } @@ -1292,7 +1623,7 @@ static int xcan_close(struct net_device *ndev) * Return: 0 on success and failure value on error */ static int xcan_get_berr_counter(const struct net_device *ndev, - struct can_berr_counter *bec) + struct can_berr_counter *bec) { struct xcan_priv *priv = netdev_priv(ndev); int ret; @@ -1300,7 +1631,8 @@ static int xcan_get_berr_counter(const struct net_device *ndev, ret = pm_runtime_get_sync(priv->dev); if (ret < 0) { netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", - __func__, ret); + __func__, ret); + pm_runtime_put(priv->dev); return ret; } @@ -1313,12 +1645,70 @@ static int xcan_get_berr_counter(const struct net_device *ndev, return 0; } +/** + * xcan_get_auto_tdcv - Get Transmitter Delay Compensation Value + * @ndev: Pointer to net_device structure + * @tdcv: Pointer to TDCV value + * + * Return: 0 on success + */ +static int xcan_get_auto_tdcv(const struct net_device *ndev, u32 *tdcv) +{ + struct xcan_priv *priv = netdev_priv(ndev); + + *tdcv = FIELD_GET(XCAN_SR_TDCV_MASK, priv->read_reg(priv, XCAN_SR_OFFSET)); + + return 
0; +} + +static void xcan_get_strings(struct net_device *ndev, u32 stringset, u8 *buf) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(buf, &xcan_priv_flags_strings, + sizeof(xcan_priv_flags_strings)); + } +} + +static int xcan_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(xcan_priv_flags_strings); + default: + return -EOPNOTSUPP; + } +} + +static void xcan_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *stats, u64 *data) +{ + struct xcan_priv *priv = netdev_priv(ndev); + unsigned int start; + + do { + start = u64_stats_fetch_begin(&priv->syncp); + + data[XCAN_ECC_RX_2_BIT_ERRORS] = u64_stats_read(&priv->ecc_rx_2_bit_errors); + data[XCAN_ECC_RX_1_BIT_ERRORS] = u64_stats_read(&priv->ecc_rx_1_bit_errors); + data[XCAN_ECC_TXOL_2_BIT_ERRORS] = u64_stats_read(&priv->ecc_txol_2_bit_errors); + data[XCAN_ECC_TXOL_1_BIT_ERRORS] = u64_stats_read(&priv->ecc_txol_1_bit_errors); + data[XCAN_ECC_TXTL_2_BIT_ERRORS] = u64_stats_read(&priv->ecc_txtl_2_bit_errors); + data[XCAN_ECC_TXTL_1_BIT_ERRORS] = u64_stats_read(&priv->ecc_txtl_1_bit_errors); + } while (u64_stats_fetch_retry(&priv->syncp, start)); +} static const struct net_device_ops xcan_netdev_ops = { .ndo_open = xcan_open, .ndo_stop = xcan_close, .ndo_start_xmit = xcan_start_xmit, - .ndo_change_mtu = can_change_mtu, +}; + +static const struct ethtool_ops xcan_ethtool_ops = { + .get_ts_info = ethtool_op_get_ts_info, + .get_strings = xcan_get_strings, + .get_sset_count = xcan_get_sset_count, + .get_ethtool_stats = xcan_get_ethtool_stats, }; /** @@ -1425,6 +1815,8 @@ static const struct dev_pm_ops xcan_dev_pm_ops = { }; static const struct xcan_devtype_data xcan_zynq_data = { + .cantype = XZYNQ_CANPS, + .flags = XCAN_FLAG_TXFEMP, .bittiming_const = &xcan_bittiming_const, .btr_ts2_shift = XCAN_BTR_TS2_SHIFT, .btr_sjw_shift = XCAN_BTR_SJW_SHIFT, @@ -1432,6 +1824,7 @@ static const struct xcan_devtype_data xcan_zynq_data = { }; static const struct xcan_devtype_data xcan_axi_data = { + .cantype = XAXI_CAN, .bittiming_const = &xcan_bittiming_const, .btr_ts2_shift = XCAN_BTR_TS2_SHIFT, .btr_sjw_shift = XCAN_BTR_SJW_SHIFT, @@ -1439,17 +1832,19 @@ static const struct xcan_devtype_data xcan_axi_data = { }; static const struct xcan_devtype_data xcan_canfd_data = { + .cantype = XAXI_CANFD, .flags = XCAN_FLAG_EXT_FILTERS | XCAN_FLAG_RXMNF | XCAN_FLAG_TX_MAILBOXES | XCAN_FLAG_RX_FIFO_MULTI, - .bittiming_const = &xcan_bittiming_const, + .bittiming_const = &xcan_bittiming_const_canfd, .btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD, .btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD, .bus_clk_name = "s_axi_aclk", }; static const struct xcan_devtype_data xcan_canfd2_data = { + .cantype = XAXI_CANFD_2_0, .flags = XCAN_FLAG_EXT_FILTERS | XCAN_FLAG_RXMNF | XCAN_FLAG_TX_MAILBOXES | @@ -1482,28 +1877,24 @@ MODULE_DEVICE_TABLE(of, xcan_of_match); */ static int xcan_probe(struct platform_device *pdev) { - struct resource *res; /* IO mem resources */ struct net_device *ndev; struct xcan_priv *priv; - const struct of_device_id *of_id; - const struct xcan_devtype_data *devtype = &xcan_axi_data; + struct phy *transceiver; + const struct xcan_devtype_data *devtype; void __iomem *addr; int ret; int rx_max, tx_max; - int hw_tx_max, hw_rx_max; + u32 hw_tx_max = 0, hw_rx_max = 0; const char *hw_tx_max_property; /* Get the virtual base address for the device */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - addr = devm_ioremap_resource(&pdev->dev, res); + addr = 
devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(addr)) { ret = PTR_ERR(addr); goto err; } - of_id = of_match_device(xcan_of_match, &pdev->dev); - if (of_id && of_id->data) - devtype = of_id->data; + devtype = device_get_match_data(&pdev->dev); hw_tx_max_property = devtype->flags & XCAN_FLAG_TX_MAILBOXES ? "tx-mailbox-count" : "tx-fifo-depth"; @@ -1544,7 +1935,7 @@ static int xcan_probe(struct platform_device *pdev) */ if (!(devtype->flags & XCAN_FLAG_TX_MAILBOXES) && (devtype->flags & XCAN_FLAG_TXFEMP)) - tx_max = min(hw_tx_max, 2); + tx_max = min(hw_tx_max, 2U); else tx_max = 1; @@ -1556,39 +1947,84 @@ static int xcan_probe(struct platform_device *pdev) return -ENOMEM; priv = netdev_priv(ndev); + priv->ecc_enable = of_property_read_bool(pdev->dev.of_node, "xlnx,has-ecc"); priv->dev = &pdev->dev; priv->can.bittiming_const = devtype->bittiming_const; priv->can.do_set_mode = xcan_do_set_mode; priv->can.do_get_berr_counter = xcan_get_berr_counter; priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_BERR_REPORTING; + priv->rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL); + if (IS_ERR(priv->rstc)) { + dev_err(&pdev->dev, "Cannot get CAN reset.\n"); + ret = PTR_ERR(priv->rstc); + goto err_free; + } + + ret = reset_control_reset(priv->rstc); + if (ret) + goto err_free; + + if (devtype->cantype == XAXI_CANFD) { + priv->can.fd.data_bittiming_const = + &xcan_data_bittiming_const_canfd; + priv->can.fd.tdc_const = &xcan_tdc_const_canfd; + } + + if (devtype->cantype == XAXI_CANFD_2_0) { + priv->can.fd.data_bittiming_const = + &xcan_data_bittiming_const_canfd2; + priv->can.fd.tdc_const = &xcan_tdc_const_canfd2; + } + + if (devtype->cantype == XAXI_CANFD || + devtype->cantype == XAXI_CANFD_2_0) { + priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD | + CAN_CTRLMODE_TDC_AUTO; + priv->can.fd.do_get_auto_tdcv = xcan_get_auto_tdcv; + } + priv->reg_base = addr; priv->tx_max = tx_max; priv->devtype = *devtype; spin_lock_init(&priv->tx_lock); /* Get IRQ for the device */ - ndev->irq = platform_get_irq(pdev, 0); + ret = platform_get_irq(pdev, 0); + if (ret < 0) + goto err_reset; + + ndev->irq = ret; + ndev->flags |= IFF_ECHO; /* We support local echo */ platform_set_drvdata(pdev, ndev); SET_NETDEV_DEV(ndev, &pdev->dev); ndev->netdev_ops = &xcan_netdev_ops; + ndev->ethtool_ops = &xcan_ethtool_ops; /* Getting the CAN can_clk info */ priv->can_clk = devm_clk_get(&pdev->dev, "can_clk"); if (IS_ERR(priv->can_clk)) { - dev_err(&pdev->dev, "Device clock not found.\n"); - ret = PTR_ERR(priv->can_clk); - goto err_free; + ret = dev_err_probe(&pdev->dev, PTR_ERR(priv->can_clk), + "device clock not found\n"); + goto err_reset; } priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name); if (IS_ERR(priv->bus_clk)) { - dev_err(&pdev->dev, "bus clock not found\n"); - ret = PTR_ERR(priv->bus_clk); - goto err_free; + ret = dev_err_probe(&pdev->dev, PTR_ERR(priv->bus_clk), + "bus clock not found\n"); + goto err_reset; + } + + transceiver = devm_phy_optional_get(&pdev->dev, NULL); + if (IS_ERR(transceiver)) { + ret = PTR_ERR(transceiver); + dev_err_probe(&pdev->dev, ret, "failed to get phy\n"); + goto err_reset; } + priv->transceiver = transceiver; priv->write_reg = xcan_write_reg_le; priv->read_reg = xcan_read_reg_le; @@ -1597,8 +2033,8 @@ static int xcan_probe(struct platform_device *pdev) ret = pm_runtime_get_sync(&pdev->dev); if (ret < 0) { netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", - __func__, ret); - goto err_pmdisable; + __func__, ret); + goto err_disableclks; } if 
(priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) { @@ -1608,7 +2044,7 @@ static int xcan_probe(struct platform_device *pdev) priv->can.clock.freq = clk_get_rate(priv->can_clk); - netif_napi_add(ndev, &priv->napi, xcan_rx_poll, rx_max); + netif_napi_add_weight(ndev, &priv->napi, xcan_rx_poll, rx_max); ret = register_candev(ndev); if (ret) { @@ -1616,20 +2052,30 @@ static int xcan_probe(struct platform_device *pdev) goto err_disableclks; } - devm_can_led_init(ndev); - + of_can_transceiver(ndev); pm_runtime_put(&pdev->dev); + if (priv->devtype.flags & XCAN_FLAG_CANFD_2) { + priv->write_reg(priv, XCAN_AFR_2_ID_OFFSET, 0x00000000); + priv->write_reg(priv, XCAN_AFR_2_MASK_OFFSET, 0x00000000); + } + netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx buffers: actual %d, using %d\n", priv->reg_base, ndev->irq, priv->can.clock.freq, hw_tx_max, priv->tx_max); + if (priv->ecc_enable) { + /* Reset FIFO ECC counters */ + priv->write_reg(priv, XCAN_ECC_CFG_OFFSET, XCAN_ECC_CFG_REECRX_MASK | + XCAN_ECC_CFG_REECTXOL_MASK | XCAN_ECC_CFG_REECTXTL_MASK); + } return 0; err_disableclks: pm_runtime_put(priv->dev); -err_pmdisable: pm_runtime_disable(&pdev->dev); +err_reset: + reset_control_assert(priv->rstc); err_free: free_candev(ndev); err: @@ -1643,22 +2089,20 @@ err: * This function frees all the resources allocated to the device. * Return: 0 always */ -static int xcan_remove(struct platform_device *pdev) +static void xcan_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct xcan_priv *priv = netdev_priv(ndev); unregister_candev(ndev); pm_runtime_disable(&pdev->dev); - netif_napi_del(&priv->napi); + reset_control_assert(priv->rstc); free_candev(ndev); - - return 0; } static struct platform_driver xcan_driver = { .probe = xcan_probe, - .remove = xcan_remove, + .remove = xcan_remove, .driver = { .name = DRIVER_NAME, .pm = &xcan_dev_pm_ops, |
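/*
 * Illustrative sketch, not part of the patch: the probe path above relies on
 * "optional" devm helpers. devm_reset_control_get_optional_exclusive() and
 * devm_phy_optional_get() return a NULL-like stub when the device tree does
 * not describe the resource, so the later reset_control_reset() and
 * phy_power_on() calls degrade to no-ops on hardware without a reset line or
 * transceiver. A hypothetical, condensed version of that lookup:
 */
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

static int example_get_optional_resources(struct platform_device *pdev,
					  struct reset_control **rstc,
					  struct phy **xcvr)
{
	*rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
	if (IS_ERR(*rstc))
		return dev_err_probe(&pdev->dev, PTR_ERR(*rstc),
				     "Cannot get CAN reset\n");

	*xcvr = devm_phy_optional_get(&pdev->dev, NULL);
	if (IS_ERR(*xcvr))
		return dev_err_probe(&pdev->dev, PTR_ERR(*xcvr),
				     "failed to get phy\n");

	return 0;
}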

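/*
 * Illustrative sketch, not part of the patch: the RX and TX paths above
 * convert between the controller's DLC register field and socketCAN byte
 * lengths with the helpers from <linux/can/dev.h>. A minimal, hypothetical
 * decode of the hardware DLC word could look like this (it assumes the
 * XCAN_DLCR_* #defines from this driver):
 */
#include <linux/can/dev.h>

static u8 example_len_from_hw_dlc(u32 dlc_reg)
{
	u8 dlc = (dlc_reg & XCAN_DLCR_DLC_MASK) >> XCAN_DLCR_DLC_SHIFT;

	/* EDL set means a CAN FD frame: the DLC may encode up to 64 bytes. */
	if (dlc_reg & XCAN_DLCR_EDL_MASK)
		return can_fd_dlc2len(dlc);

	/* Classical CAN frames carry at most 8 data bytes. */
	return can_cc_dlc2len(dlc);
}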