Diffstat (limited to 'drivers/net/can/m_can/m_can.c')
-rw-r--r--  drivers/net/can/m_can/m_can.c | 1234
1 file changed, 895 insertions(+), 339 deletions(-)
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index c5af92bcc9c9..eb856547ae7d 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
// CAN bus driver for Bosch M_CAN controller
// Copyright (C) 2014 Freescale Semiconductor, Inc.
-// Dong Aisheng <b29396@freescale.com>
+// Dong Aisheng <aisheng.dong@nxp.com>
// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/
/* Bosch M_CAN user manual can be obtained from:
@@ -11,6 +11,7 @@
#include <linux/bitfield.h>
#include <linux/can/dev.h>
#include <linux/ethtool.h>
+#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -18,11 +19,11 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include "m_can.h"
@@ -255,6 +256,7 @@ enum m_can_reg {
#define TXESC_TBDS_64B 0x7
/* Tx Event FIFO Configuration (TXEFC) */
+#define TXEFC_EFWM_MASK GENMASK(29, 24)
#define TXEFC_EFS_MASK GENMASK(21, 16)
/* Tx Event FIFO Status (TXEFS) */
@@ -308,6 +310,9 @@ enum m_can_reg {
#define TX_EVENT_MM_MASK GENMASK(31, 24)
#define TX_EVENT_TXTS_MASK GENMASK(15, 0)
+/* Hrtimer polling interval */
+#define HRTIMER_POLL_INTERVAL_MS 1
+
/* The ID and DLC registers are adjacent in M_CAN FIFO memory,
* and we can save a (potentially slow) bus round trip by combining
* reads and writes to them.
@@ -317,6 +322,12 @@ struct id_and_dlc {
u32 dlc;
};
+struct m_can_fifo_element {
+ u32 id;
+ u32 dlc;
+ u8 data[CANFD_MAX_DLEN];
+};
+
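The new struct mirrors the layout of a TX buffer element in Message RAM, so the ID, DLC and payload can be pushed with a single burst write instead of two separate FIFO writes. A minimal sketch of that combined write, as used by the reworked TX path further down (the helper name is illustrative; m_can_fifo_write() and the two-words-plus-padded-payload arithmetic match the driver code):

/* Illustrative: push one whole TX element in a single burst write */
static int example_push_tx_element(struct m_can_classdev *cdev, u32 putidx,
				   struct m_can_fifo_element *e, u8 len)
{
	u8 len_padded = DIV_ROUND_UP(len, 4);	/* payload words, rounded up */

	/* 2 words for id + dlc, followed by the padded payload */
	return m_can_fifo_write(cdev, putidx, M_CAN_FIFO_ID, e, 2 + len_padded);
}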
static inline u32 m_can_read(struct m_can_classdev *cdev, enum m_can_reg reg)
{
return cdev->ops->read_reg(cdev, reg);
@@ -369,59 +380,115 @@ m_can_txe_fifo_read(struct m_can_classdev *cdev, u32 fgi, u32 offset, u32 *val)
return cdev->ops->read_fifo(cdev, addr_offset, val, 1);
}
-static inline bool _m_can_tx_fifo_full(u32 txfqs)
+static int m_can_cccr_update_bits(struct m_can_classdev *cdev, u32 mask, u32 val)
{
- return !!(txfqs & TXFQS_TFQF);
+ u32 val_before = m_can_read(cdev, M_CAN_CCCR);
+ u32 val_after = (val_before & ~mask) | val;
+ size_t tries = 10;
+
+ if (!(mask & CCCR_INIT) && !(val_before & CCCR_INIT)) {
+ netdev_err(cdev->net,
+ "refusing to configure device when in normal mode\n");
+ return -EBUSY;
+ }
+
+ /* The chip should be in standby mode when changing the CCCR register,
+ * and some chips set the CSR and CSA bits when in standby. Furthermore,
+ * the CSR and CSA bits should be written as zeros, even when they read
+ * ones.
+ */
+ val_after &= ~(CCCR_CSR | CCCR_CSA);
+
+ while (tries--) {
+ u32 val_read;
+
+ /* Write the desired value in each try, as setting some bits in
+ * the CCCR register requires other bits to be set first. E.g.
+ * setting the NISO bit requires setting the CCE bit first.
+ */
+ m_can_write(cdev, M_CAN_CCCR, val_after);
+
+ val_read = m_can_read(cdev, M_CAN_CCCR) & ~(CCCR_CSR | CCCR_CSA);
+
+ if (val_read == val_after)
+ return 0;
+
+ usleep_range(1, 5);
+ }
+
+ return -ETIMEDOUT;
}
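m_can_cccr_update_bits() is a verified read-modify-write: it forces the CSR/CSA bits to zero, rewrites the target value up to ten times, and only succeeds once the register reads back as written. The rest of the patch funnels all CCCR mode transitions through it; a condensed sketch of the typical call pattern (illustrative, mirroring m_can_start() and m_can_stop() below):

/* Illustrative: a configuration cycle built on the new helper */
static int example_reconfigure(struct m_can_classdev *cdev)
{
	int err;

	/* enter init (standby) mode before touching the configuration */
	err = m_can_cccr_update_bits(cdev, CCCR_INIT, CCCR_INIT);
	if (err)
		return err;

	/* ... reconfigure the controller here ... */

	/* leave init mode again to take part in bus traffic */
	return m_can_cccr_update_bits(cdev, CCCR_INIT, 0);
}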
-static inline bool m_can_tx_fifo_full(struct m_can_classdev *cdev)
+static int m_can_config_enable(struct m_can_classdev *cdev)
{
- return _m_can_tx_fifo_full(m_can_read(cdev, M_CAN_TXFQS));
+ int err;
+
+ /* CCCR_INIT must be set in order to set CCCR_CCE, but access to
+ * configuration registers should only be enabled when in standby mode,
+ * where CCCR_INIT is always set.
+ */
+ err = m_can_cccr_update_bits(cdev, CCCR_CCE, CCCR_CCE);
+ if (err)
+ netdev_err(cdev->net, "failed to enable configuration mode\n");
+
+ return err;
}
-static void m_can_config_endisable(struct m_can_classdev *cdev, bool enable)
+static int m_can_config_disable(struct m_can_classdev *cdev)
{
- u32 cccr = m_can_read(cdev, M_CAN_CCCR);
- u32 timeout = 10;
- u32 val = 0;
+ int err;
- /* Clear the Clock stop request if it was set */
- if (cccr & CCCR_CSR)
- cccr &= ~CCCR_CSR;
+ /* Only clear CCCR_CCE, since CCCR_INIT cannot be cleared while in
+ * standby mode
+ */
+ err = m_can_cccr_update_bits(cdev, CCCR_CCE, 0);
+ if (err)
+ netdev_err(cdev->net, "failed to disable configuration registers\n");
- if (enable) {
- /* enable m_can configuration */
- m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT);
- udelay(5);
- /* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */
- m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE);
- } else {
- m_can_write(cdev, M_CAN_CCCR, cccr & ~(CCCR_INIT | CCCR_CCE));
- }
+ return err;
+}
- /* there's a delay for module initialization */
- if (enable)
- val = CCCR_INIT | CCCR_CCE;
+static void m_can_interrupt_enable(struct m_can_classdev *cdev, u32 interrupts)
+{
+ if (cdev->active_interrupts == interrupts)
+ return;
+ m_can_write(cdev, M_CAN_IE, interrupts);
+ cdev->active_interrupts = interrupts;
+}
- while ((m_can_read(cdev, M_CAN_CCCR) & (CCCR_INIT | CCCR_CCE)) != val) {
- if (timeout == 0) {
- netdev_warn(cdev->net, "Failed to init module\n");
- return;
- }
- timeout--;
- udelay(1);
- }
+static void m_can_coalescing_disable(struct m_can_classdev *cdev)
+{
+ u32 new_interrupts = cdev->active_interrupts | IR_RF0N | IR_TEFN;
+
+ if (!cdev->net->irq)
+ return;
+
+ hrtimer_cancel(&cdev->hrtimer);
+ m_can_interrupt_enable(cdev, new_interrupts);
}
static inline void m_can_enable_all_interrupts(struct m_can_classdev *cdev)
{
+ if (!cdev->net->irq) {
+ netdev_dbg(cdev->net, "Start hrtimer\n");
+ hrtimer_start(&cdev->hrtimer,
+ ms_to_ktime(HRTIMER_POLL_INTERVAL_MS),
+ HRTIMER_MODE_REL_PINNED);
+ }
+
/* Only interrupt line 0 is used in this driver */
m_can_write(cdev, M_CAN_ILE, ILE_EINT0);
}
static inline void m_can_disable_all_interrupts(struct m_can_classdev *cdev)
{
+ m_can_coalescing_disable(cdev);
m_can_write(cdev, M_CAN_ILE, 0x0);
+
+ if (!cdev->net->irq) {
+ netdev_dbg(cdev->net, "Stop hrtimer\n");
+ hrtimer_try_to_cancel(&cdev->hrtimer);
+ }
}
/* Retrieve internal timestamp counter from TSCV.TSC, and shift it to 32-bit
@@ -441,18 +508,26 @@ static u32 m_can_get_timestamp(struct m_can_classdev *cdev)
static void m_can_clean(struct net_device *net)
{
struct m_can_classdev *cdev = netdev_priv(net);
+ unsigned long irqflags;
- if (cdev->tx_skb) {
- int putidx = 0;
+ if (cdev->tx_ops) {
+ for (int i = 0; i != cdev->tx_fifo_size; ++i) {
+ if (!cdev->tx_ops[i].skb)
+ continue;
- net->stats.tx_errors++;
- if (cdev->version > 30)
- putidx = FIELD_GET(TXFQS_TFQPI_MASK,
- m_can_read(cdev, M_CAN_TXFQS));
-
- can_free_echo_skb(cdev->net, putidx, NULL);
- cdev->tx_skb = NULL;
+ net->stats.tx_errors++;
+ cdev->tx_ops[i].skb = NULL;
+ }
}
+
+ for (int i = 0; i != cdev->can.echo_skb_max; ++i)
+ can_free_echo_skb(cdev->net, i, NULL);
+
+ netdev_reset_queue(cdev->net);
+
+ spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags);
+ cdev->tx_fifo_in_flight = 0;
+ spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);
}
/* For peripherals, pass skb to rx-offload, which will push skb from
@@ -591,7 +666,7 @@ static int m_can_handle_lost_msg(struct net_device *dev)
struct can_frame *frame;
u32 timestamp = 0;
- netdev_err(dev, "msg lost in rxf0\n");
+ netdev_dbg(dev, "msg lost in rxf0\n");
stats->rx_errors++;
stats->rx_over_errors++;
@@ -621,47 +696,60 @@ static int m_can_handle_lec_err(struct net_device *dev,
u32 timestamp = 0;
cdev->can.can_stats.bus_error++;
- stats->rx_errors++;
/* propagate the error condition to the CAN stack */
skb = alloc_can_err_skb(dev, &cf);
- if (unlikely(!skb))
- return 0;
/* check for 'last error code' which tells us the
* type of the last error to occur on the CAN bus
*/
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ if (likely(skb))
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
switch (lec_type) {
case LEC_STUFF_ERROR:
netdev_dbg(dev, "stuff error\n");
- cf->data[2] |= CAN_ERR_PROT_STUFF;
+ stats->rx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
break;
case LEC_FORM_ERROR:
netdev_dbg(dev, "form error\n");
- cf->data[2] |= CAN_ERR_PROT_FORM;
+ stats->rx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_FORM;
break;
case LEC_ACK_ERROR:
netdev_dbg(dev, "ack error\n");
- cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ stats->tx_errors++;
+ if (likely(skb))
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
break;
case LEC_BIT1_ERROR:
netdev_dbg(dev, "bit1 error\n");
- cf->data[2] |= CAN_ERR_PROT_BIT1;
+ stats->tx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
break;
case LEC_BIT0_ERROR:
netdev_dbg(dev, "bit0 error\n");
- cf->data[2] |= CAN_ERR_PROT_BIT0;
+ stats->tx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
break;
case LEC_CRC_ERROR:
netdev_dbg(dev, "CRC error\n");
- cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ stats->rx_errors++;
+ if (likely(skb))
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
break;
default:
break;
}
+ if (unlikely(!skb))
+ return 0;
+
if (cdev->is_peripheral)
timestamp = m_can_get_timestamp(cdev);
@@ -703,6 +791,10 @@ static int m_can_get_berr_counter(const struct net_device *dev,
struct m_can_classdev *cdev = netdev_priv(dev);
int err;
+ /* Avoid waking up the controller if the interface is down */
+ if (!(dev->flags & IFF_UP))
+ return 0;
+
err = m_can_clk_start(cdev);
if (err)
return err;
@@ -725,6 +817,9 @@ static int m_can_handle_state_change(struct net_device *dev,
u32 timestamp = 0;
switch (new_state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ cdev->can.state = CAN_STATE_ERROR_ACTIVE;
+ break;
case CAN_STATE_ERROR_WARNING:
/* error warning state */
cdev->can.can_stats.error_warning++;
@@ -754,6 +849,12 @@ static int m_can_handle_state_change(struct net_device *dev,
__m_can_get_berr_counter(dev, &bec);
switch (new_state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
+ cf->data[1] = CAN_ERR_CRTL_ACTIVE;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ break;
case CAN_STATE_ERROR_WARNING:
/* error warning state */
cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
@@ -790,30 +891,33 @@ static int m_can_handle_state_change(struct net_device *dev,
return 1;
}
-static int m_can_handle_state_errors(struct net_device *dev, u32 psr)
+static enum can_state
+m_can_state_get_by_psr(struct m_can_classdev *cdev)
{
- struct m_can_classdev *cdev = netdev_priv(dev);
- int work_done = 0;
+ u32 reg_psr;
- if (psr & PSR_EW && cdev->can.state != CAN_STATE_ERROR_WARNING) {
- netdev_dbg(dev, "entered error warning state\n");
- work_done += m_can_handle_state_change(dev,
- CAN_STATE_ERROR_WARNING);
- }
+ reg_psr = m_can_read(cdev, M_CAN_PSR);
- if (psr & PSR_EP && cdev->can.state != CAN_STATE_ERROR_PASSIVE) {
- netdev_dbg(dev, "entered error passive state\n");
- work_done += m_can_handle_state_change(dev,
- CAN_STATE_ERROR_PASSIVE);
- }
+ if (reg_psr & PSR_BO)
+ return CAN_STATE_BUS_OFF;
+ if (reg_psr & PSR_EP)
+ return CAN_STATE_ERROR_PASSIVE;
+ if (reg_psr & PSR_EW)
+ return CAN_STATE_ERROR_WARNING;
- if (psr & PSR_BO && cdev->can.state != CAN_STATE_BUS_OFF) {
- netdev_dbg(dev, "entered error bus off state\n");
- work_done += m_can_handle_state_change(dev,
- CAN_STATE_BUS_OFF);
- }
+ return CAN_STATE_ERROR_ACTIVE;
+}
- return work_done;
+static int m_can_handle_state_errors(struct net_device *dev)
+{
+ struct m_can_classdev *cdev = netdev_priv(dev);
+ enum can_state new_state;
+
+ new_state = m_can_state_get_by_psr(cdev);
+ if (new_state == cdev->can.state)
+ return 0;
+
+ return m_can_handle_state_change(dev, new_state);
}
static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
@@ -944,8 +1048,7 @@ static int m_can_rx_handler(struct net_device *dev, int quota, u32 irqstatus)
}
if (irqstatus & IR_ERR_STATE)
- work_done += m_can_handle_state_errors(dev,
- m_can_read(cdev, M_CAN_PSR));
+ work_done += m_can_handle_state_errors(dev);
if (irqstatus & IR_ERR_BUS_30X)
work_done += m_can_handle_bus_errors(dev, irqstatus,
@@ -962,22 +1065,6 @@ end:
return work_done;
}
-static int m_can_rx_peripheral(struct net_device *dev, u32 irqstatus)
-{
- struct m_can_classdev *cdev = netdev_priv(dev);
- int work_done;
-
- work_done = m_can_rx_handler(dev, NAPI_POLL_WEIGHT, irqstatus);
-
- /* Don't re-enable interrupts if the driver had a fatal error
- * (e.g., FIFO read failure).
- */
- if (work_done < 0)
- m_can_disable_all_interrupts(cdev);
-
- return work_done;
-}
-
static int m_can_poll(struct napi_struct *napi, int quota)
{
struct net_device *dev = napi->dev;
@@ -1004,23 +1091,60 @@ static int m_can_poll(struct napi_struct *napi, int quota)
* echo. timestamp is used for peripherals to ensure correct ordering
* by rx-offload, and is ignored for non-peripherals.
*/
-static void m_can_tx_update_stats(struct m_can_classdev *cdev,
- unsigned int msg_mark,
- u32 timestamp)
+static unsigned int m_can_tx_update_stats(struct m_can_classdev *cdev,
+ unsigned int msg_mark, u32 timestamp)
{
struct net_device *dev = cdev->net;
struct net_device_stats *stats = &dev->stats;
+ unsigned int frame_len;
if (cdev->is_peripheral)
stats->tx_bytes +=
- can_rx_offload_get_echo_skb(&cdev->offload,
- msg_mark,
- timestamp,
- NULL);
+ can_rx_offload_get_echo_skb_queue_timestamp(&cdev->offload,
+ msg_mark,
+ timestamp,
+ &frame_len);
else
- stats->tx_bytes += can_get_echo_skb(dev, msg_mark, NULL);
+ stats->tx_bytes += can_get_echo_skb(dev, msg_mark, &frame_len);
stats->tx_packets++;
+
+ return frame_len;
+}
+
+static void m_can_finish_tx(struct m_can_classdev *cdev, int transmitted,
+ unsigned int transmitted_frame_len)
+{
+ unsigned long irqflags;
+
+ netdev_completed_queue(cdev->net, transmitted, transmitted_frame_len);
+
+ spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags);
+ if (cdev->tx_fifo_in_flight >= cdev->tx_fifo_size && transmitted > 0)
+ netif_wake_queue(cdev->net);
+ cdev->tx_fifo_in_flight -= transmitted;
+ spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);
+}
+
+static netdev_tx_t m_can_start_tx(struct m_can_classdev *cdev)
+{
+ unsigned long irqflags;
+ int tx_fifo_in_flight;
+
+ spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags);
+ tx_fifo_in_flight = cdev->tx_fifo_in_flight + 1;
+ if (tx_fifo_in_flight >= cdev->tx_fifo_size) {
+ netif_stop_queue(cdev->net);
+ if (tx_fifo_in_flight > cdev->tx_fifo_size) {
+ netdev_err_once(cdev->net, "hard_xmit called while TX FIFO full\n");
+ spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);
+ return NETDEV_TX_BUSY;
+ }
+ }
+ cdev->tx_fifo_in_flight = tx_fifo_in_flight;
+ spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);
+
+ return NETDEV_TX_OK;
}
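m_can_start_tx() and m_can_finish_tx() pair up as the new flow control: the former reserves one FIFO slot per frame and stops the queue when the last slot is taken, the latter releases slots on completion and wakes the queue again. Both touch tx_fifo_in_flight only under tx_handling_spinlock; a sketch of the invariant they maintain (the helper name is illustrative):

/* Illustrative: true while at least one TX FIFO slot is unreserved */
static bool example_tx_fifo_has_room(struct m_can_classdev *cdev)
{
	unsigned long irqflags;
	bool room;

	spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags);
	room = cdev->tx_fifo_in_flight < cdev->tx_fifo_size;
	spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);

	return room;
}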
static int m_can_echo_tx_event(struct net_device *dev)
@@ -1032,6 +1156,8 @@ static int m_can_echo_tx_event(struct net_device *dev)
int i = 0;
int err = 0;
unsigned int msg_mark;
+ int processed = 0;
+ unsigned int processed_frame_len = 0;
struct m_can_classdev *cdev = netdev_priv(dev);
@@ -1060,31 +1186,82 @@ static int m_can_echo_tx_event(struct net_device *dev)
fgi = (++fgi >= cdev->mcfg[MRAM_TXE].num ? 0 : fgi);
/* update stats */
- m_can_tx_update_stats(cdev, msg_mark, timestamp);
+ processed_frame_len += m_can_tx_update_stats(cdev, msg_mark,
+ timestamp);
+
+ ++processed;
}
if (ack_fgi != -1)
m_can_write(cdev, M_CAN_TXEFA, FIELD_PREP(TXEFA_EFAI_MASK,
ack_fgi));
+ m_can_finish_tx(cdev, processed, processed_frame_len);
+
return err;
}
-static irqreturn_t m_can_isr(int irq, void *dev_id)
+static void m_can_coalescing_update(struct m_can_classdev *cdev, u32 ir)
{
- struct net_device *dev = (struct net_device *)dev_id;
- struct m_can_classdev *cdev = netdev_priv(dev);
- u32 ir;
+ u32 new_interrupts = cdev->active_interrupts;
+ bool enable_rx_timer = false;
+ bool enable_tx_timer = false;
+
+ if (!cdev->net->irq)
+ return;
+
+ if (cdev->rx_coalesce_usecs_irq > 0 && (ir & (IR_RF0N | IR_RF0W))) {
+ enable_rx_timer = true;
+ new_interrupts &= ~IR_RF0N;
+ }
+ if (cdev->tx_coalesce_usecs_irq > 0 && (ir & (IR_TEFN | IR_TEFW))) {
+ enable_tx_timer = true;
+ new_interrupts &= ~IR_TEFN;
+ }
+ if (!enable_rx_timer && !hrtimer_active(&cdev->hrtimer))
+ new_interrupts |= IR_RF0N;
+ if (!enable_tx_timer && !hrtimer_active(&cdev->hrtimer))
+ new_interrupts |= IR_TEFN;
+
+ m_can_interrupt_enable(cdev, new_interrupts);
+ if (enable_rx_timer | enable_tx_timer)
+ hrtimer_start(&cdev->hrtimer, cdev->irq_timer_wait,
+ HRTIMER_MODE_REL);
+}
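The timeout used here, cdev->irq_timer_wait, is derived from the ethtool coalescing parameters in m_can_set_coalesce() further down. Since rx-usecs-irq and tx-usecs-irq must be equal whenever both are enabled, one ktime value covers both directions; a condensed form of the derivation (illustrative helper name):

/* Illustrative: the rx value wins if set, else fall back to the tx value */
static ktime_t example_irq_timer_wait(const struct m_can_classdev *cdev)
{
	u32 usecs = cdev->rx_coalesce_usecs_irq ?: cdev->tx_coalesce_usecs_irq;

	return us_to_ktime(usecs);
}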
+
+/* This interrupt handler is called either from the interrupt thread or a
+ * hrtimer. One implication is that the hrtimer cannot be cancelled with
+ * blocking semantics from within the handler, hence the use of
+ * hrtimer_try_to_cancel().
+ */
+static int m_can_interrupt_handler(struct m_can_classdev *cdev)
+{
+ struct net_device *dev = cdev->net;
+ u32 ir = 0, ir_read;
+ int ret;
if (pm_runtime_suspended(cdev->dev))
return IRQ_NONE;
- ir = m_can_read(cdev, M_CAN_IR);
+
+ /* The m_can controller signals its interrupt status as a level, but
+ * depending on the integration the CPU may interpret the signal as
+ * edge-triggered (for example with m_can_pci). For these
+ * edge-triggered integrations, we must observe that IR is 0 at least
+ * once to be sure that the next interrupt will generate an edge.
+ */
+ while ((ir_read = m_can_read(cdev, M_CAN_IR)) != 0) {
+ ir |= ir_read;
+
+ /* ACK all irqs */
+ m_can_write(cdev, M_CAN_IR, ir);
+
+ if (!cdev->irq_edge_triggered)
+ break;
+ }
+
+ m_can_coalescing_update(cdev, ir);
if (!ir)
return IRQ_NONE;
- /* ACK all irqs */
- m_can_write(cdev, M_CAN_IR, ir);
-
if (cdev->ops->clear_interrupts)
cdev->ops->clear_interrupts(cdev);
@@ -1093,13 +1270,15 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
* - state change IRQ
* - bus error IRQ and bus error reporting
*/
- if ((ir & IR_RF0N) || (ir & IR_ERR_ALL_30X)) {
+ if (ir & (IR_RF0N | IR_RF0W | IR_ERR_ALL_30X)) {
cdev->irqstatus = ir;
if (!cdev->is_peripheral) {
m_can_disable_all_interrupts(cdev);
napi_schedule(&cdev->napi);
- } else if (m_can_rx_peripheral(dev, ir) < 0) {
- goto out_fail;
+ } else {
+ ret = m_can_rx_handler(dev, NAPI_POLL_WEIGHT, ir);
+ if (ret < 0)
+ return ret;
}
}
@@ -1107,21 +1286,19 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
if (ir & IR_TC) {
/* Transmission Complete Interrupt*/
u32 timestamp = 0;
+ unsigned int frame_len;
if (cdev->is_peripheral)
timestamp = m_can_get_timestamp(cdev);
- m_can_tx_update_stats(cdev, 0, timestamp);
- netif_wake_queue(dev);
+ frame_len = m_can_tx_update_stats(cdev, 0, timestamp);
+ m_can_finish_tx(cdev, 1, frame_len);
}
} else {
- if (ir & IR_TEFN) {
+ if (ir & (IR_TEFN | IR_TEFW)) {
/* New TX FIFO Element arrived */
- if (m_can_echo_tx_event(dev) != 0)
- goto out_fail;
-
- if (netif_queue_stopped(dev) &&
- !m_can_tx_fifo_full(cdev))
- netif_wake_queue(dev);
+ ret = m_can_echo_tx_event(dev);
+ if (ret != 0)
+ return ret;
}
}
@@ -1129,10 +1306,34 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
can_rx_offload_threaded_irq_finish(&cdev->offload);
return IRQ_HANDLED;
+}
-out_fail:
- m_can_disable_all_interrupts(cdev);
- return IRQ_HANDLED;
+static irqreturn_t m_can_isr(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct m_can_classdev *cdev = netdev_priv(dev);
+ int ret;
+
+ ret = m_can_interrupt_handler(cdev);
+ if (ret < 0) {
+ m_can_disable_all_interrupts(cdev);
+ return IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static enum hrtimer_restart m_can_coalescing_timer(struct hrtimer *timer)
+{
+ struct m_can_classdev *cdev = container_of(timer, struct m_can_classdev, hrtimer);
+
+ if (cdev->can.state == CAN_STATE_BUS_OFF ||
+ cdev->can.state == CAN_STATE_STOPPED)
+ return HRTIMER_NORESTART;
+
+ irq_wake_thread(cdev->net->irq, cdev->net);
+
+ return HRTIMER_NORESTART;
}
static const struct can_bittiming_const m_can_bittiming_const_30X = {
@@ -1183,11 +1384,32 @@ static const struct can_bittiming_const m_can_data_bittiming_const_31X = {
.brp_inc = 1,
};
+static int m_can_init_ram(struct m_can_classdev *cdev)
+{
+ int end, i, start;
+ int err = 0;
+
+ /* initialize the entire Message RAM in use to avoid possible
+ * ECC/parity checksum errors when reading an uninitialized buffer
+ */
+ start = cdev->mcfg[MRAM_SIDF].off;
+ end = cdev->mcfg[MRAM_TXB].off +
+ cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;
+
+ for (i = start; i < end; i += 4) {
+ err = m_can_fifo_write_no_off(cdev, i, 0x0);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
static int m_can_set_bittiming(struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
const struct can_bittiming *bt = &cdev->can.bittiming;
- const struct can_bittiming *dbt = &cdev->can.data_bittiming;
+ const struct can_bittiming *dbt = &cdev->can.fd.data_bittiming;
u16 brp, sjw, tseg1, tseg2;
u32 reg_btp;
@@ -1268,16 +1490,18 @@ static int m_can_chip_config(struct net_device *dev)
err = m_can_init_ram(cdev);
if (err) {
- dev_err(cdev->dev, "Message RAM configuration failed\n");
+ netdev_err(dev, "Message RAM configuration failed\n");
return err;
}
/* Disable unused interrupts */
- interrupts &= ~(IR_ARA | IR_ELO | IR_DRX | IR_TEFF | IR_TEFW | IR_TFE |
- IR_TCF | IR_HPM | IR_RF1F | IR_RF1W | IR_RF1N |
- IR_RF0F | IR_RF0W);
+ interrupts &= ~(IR_ARA | IR_ELO | IR_DRX | IR_TEFF | IR_TFE | IR_TCF |
+ IR_HPM | IR_RF1F | IR_RF1W | IR_RF1N | IR_RF0F |
+ IR_TSW);
- m_can_config_endisable(cdev, true);
+ err = m_can_config_enable(cdev);
+ if (err)
+ return err;
/* RX Buffer/FIFO Element Size 64 bytes data field */
m_can_write(cdev, M_CAN_RXESC,
@@ -1312,6 +1536,8 @@ static int m_can_chip_config(struct net_device *dev)
} else {
/* Full TX Event FIFO is used */
m_can_write(cdev, M_CAN_TXEFC,
+ FIELD_PREP(TXEFC_EFWM_MASK,
+ cdev->tx_max_coalesced_frames_irq) |
FIELD_PREP(TXEFC_EFS_MASK,
cdev->mcfg[MRAM_TXE].num) |
cdev->mcfg[MRAM_TXE].off);
@@ -1319,6 +1545,7 @@ static int m_can_chip_config(struct net_device *dev)
/* rx fifo configuration, blocking mode, fifo size 1 */
m_can_write(cdev, M_CAN_RXF0C,
+ FIELD_PREP(RXFC_FWM_MASK, cdev->rx_max_coalesced_frames_irq) |
FIELD_PREP(RXFC_FS_MASK, cdev->mcfg[MRAM_RXF0].num) |
cdev->mcfg[MRAM_RXF0].off);
@@ -1377,7 +1604,8 @@ static int m_can_chip_config(struct net_device *dev)
else
interrupts &= ~(IR_ERR_LEC_31X);
}
- m_can_write(cdev, M_CAN_IE, interrupts);
+ cdev->active_interrupts = 0;
+ m_can_interrupt_enable(cdev, interrupts);
/* route all interrupts to INT0 */
m_can_write(cdev, M_CAN_ILS, ILS_ALL_INT0);
@@ -1392,7 +1620,9 @@ static int m_can_chip_config(struct net_device *dev)
FIELD_PREP(TSCC_TCP_MASK, 0xf) |
FIELD_PREP(TSCC_TSS_MASK, TSCC_TSS_INTERNAL));
- m_can_config_endisable(cdev, false);
+ err = m_can_config_disable(cdev);
+ if (err)
+ return err;
if (cdev->ops->init)
cdev->ops->init(cdev);
@@ -1410,11 +1640,22 @@ static int m_can_start(struct net_device *dev)
if (ret)
return ret;
- cdev->can.state = CAN_STATE_ERROR_ACTIVE;
+ netdev_queue_set_dql_min_limit(netdev_get_tx_queue(cdev->net, 0),
+ cdev->tx_max_coalesced_frames);
+
+ cdev->can.state = m_can_state_get_by_psr(cdev);
m_can_enable_all_interrupts(cdev);
- return 0;
+ if (cdev->version > 30)
+ cdev->tx_fifo_putidx = FIELD_GET(TXFQS_TFQPI_MASK,
+ m_can_read(cdev, M_CAN_TXFQS));
+
+ ret = m_can_cccr_update_bits(cdev, CCCR_INIT, 0);
+ if (ret)
+ netdev_err(dev, "failed to enter normal mode\n");
+
+ return ret;
}
static int m_can_set_mode(struct net_device *dev, enum can_mode mode)
@@ -1463,52 +1704,54 @@ static int m_can_check_core_release(struct m_can_classdev *cdev)
}
/* Selectable Non ISO support only in version 3.2.x
- * This function checks if the bit is writable.
+ * Return 1 if the bit is writable, 0 if it is not, or negative on error.
*/
-static bool m_can_niso_supported(struct m_can_classdev *cdev)
+static int m_can_niso_supported(struct m_can_classdev *cdev)
{
- u32 cccr_reg, cccr_poll = 0;
- int niso_timeout = -ETIMEDOUT;
- int i;
+ int ret, niso;
- m_can_config_endisable(cdev, true);
- cccr_reg = m_can_read(cdev, M_CAN_CCCR);
- cccr_reg |= CCCR_NISO;
- m_can_write(cdev, M_CAN_CCCR, cccr_reg);
+ ret = m_can_config_enable(cdev);
+ if (ret)
+ return ret;
- for (i = 0; i <= 10; i++) {
- cccr_poll = m_can_read(cdev, M_CAN_CCCR);
- if (cccr_poll == cccr_reg) {
- niso_timeout = 0;
- break;
- }
+ /* First try to set the NISO bit. */
+ niso = m_can_cccr_update_bits(cdev, CCCR_NISO, CCCR_NISO);
- usleep_range(1, 5);
+ /* Then clear it again. */
+ ret = m_can_cccr_update_bits(cdev, CCCR_NISO, 0);
+ if (ret) {
+ netdev_err(cdev->net, "failed to revert the NON-ISO bit in CCCR\n");
+ return ret;
}
- /* Clear NISO */
- cccr_reg &= ~(CCCR_NISO);
- m_can_write(cdev, M_CAN_CCCR, cccr_reg);
-
- m_can_config_endisable(cdev, false);
+ ret = m_can_config_disable(cdev);
+ if (ret)
+ return ret;
- /* return false if time out (-ETIMEDOUT), else return true */
- return !niso_timeout;
+ return niso == 0;
}
static int m_can_dev_setup(struct m_can_classdev *cdev)
{
struct net_device *dev = cdev->net;
- int m_can_version, err;
+ int m_can_version, err, niso;
m_can_version = m_can_check_core_release(cdev);
/* return if unsupported version */
if (!m_can_version) {
- dev_err(cdev->dev, "Unsupported version number: %2d",
- m_can_version);
+ netdev_err(cdev->net, "Unsupported version number: %2d",
+ m_can_version);
return -EINVAL;
}
+ /* Write the INIT bit, in case no hardware reset has happened before
+ * the probe (for example, it was observed that the Intel Elkhart Lake
+ * SoCs do not properly reset the CAN controllers on reboot)
+ */
+ err = m_can_cccr_update_bits(cdev, CCCR_INIT, CCCR_INIT);
+ if (err)
+ return err;
+
if (!cdev->is_peripheral)
netif_napi_add(dev, &cdev->napi, m_can_poll);
@@ -1532,7 +1775,7 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
if (err)
return err;
cdev->can.bittiming_const = &m_can_bittiming_const_30X;
- cdev->can.data_bittiming_const = &m_can_data_bittiming_const_30X;
+ cdev->can.fd.data_bittiming_const = &m_can_data_bittiming_const_30X;
break;
case 31:
/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */
@@ -1540,42 +1783,52 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
if (err)
return err;
cdev->can.bittiming_const = &m_can_bittiming_const_31X;
- cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
+ cdev->can.fd.data_bittiming_const = &m_can_data_bittiming_const_31X;
break;
case 32:
case 33:
/* Support both MCAN version v3.2.x and v3.3.0 */
cdev->can.bittiming_const = &m_can_bittiming_const_31X;
- cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
+ cdev->can.fd.data_bittiming_const = &m_can_data_bittiming_const_31X;
- cdev->can.ctrlmode_supported |=
- (m_can_niso_supported(cdev) ?
- CAN_CTRLMODE_FD_NON_ISO : 0);
+ niso = m_can_niso_supported(cdev);
+ if (niso < 0)
+ return niso;
+ if (niso)
+ cdev->can.ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO;
break;
default:
- dev_err(cdev->dev, "Unsupported version number: %2d",
- cdev->version);
+ netdev_err(cdev->net, "Unsupported version number: %2d",
+ cdev->version);
return -EINVAL;
}
- if (cdev->ops->init)
- cdev->ops->init(cdev);
-
return 0;
}
static void m_can_stop(struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
+ int ret;
/* disable all interrupts */
m_can_disable_all_interrupts(cdev);
/* Set init mode to disengage from the network */
- m_can_config_endisable(cdev, true);
+ ret = m_can_cccr_update_bits(cdev, CCCR_INIT, CCCR_INIT);
+ if (ret)
+ netdev_err(dev, "failed to enter standby mode: %pe\n",
+ ERR_PTR(ret));
/* set the state as STOPPED */
cdev->can.state = CAN_STATE_STOPPED;
+
+ if (cdev->ops->deinit) {
+ ret = cdev->ops->deinit(cdev);
+ if (ret)
+ netdev_err(dev, "failed to deinitialize: %pe\n",
+ ERR_PTR(ret));
+ }
}
static int m_can_close(struct net_device *dev)
@@ -1584,78 +1837,65 @@ static int m_can_close(struct net_device *dev)
netif_stop_queue(dev);
- if (!cdev->is_peripheral)
- napi_disable(&cdev->napi);
-
m_can_stop(dev);
- m_can_clk_stop(cdev);
- free_irq(dev->irq, dev);
+ if (dev->irq)
+ free_irq(dev->irq, dev);
+
+ m_can_clean(dev);
if (cdev->is_peripheral) {
- cdev->tx_skb = NULL;
destroy_workqueue(cdev->tx_wq);
cdev->tx_wq = NULL;
can_rx_offload_disable(&cdev->offload);
+ } else {
+ napi_disable(&cdev->napi);
}
close_candev(dev);
+ reset_control_assert(cdev->rst);
+ m_can_clk_stop(cdev);
phy_power_off(cdev->transceiver);
return 0;
}
-static int m_can_next_echo_skb_occupied(struct net_device *dev, int putidx)
+static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev,
+ struct sk_buff *skb)
{
- struct m_can_classdev *cdev = netdev_priv(dev);
- /*get wrap around for loopback skb index */
- unsigned int wrap = cdev->can.echo_skb_max;
- int next_idx;
-
- /* calculate next index */
- next_idx = (++putidx >= wrap ? 0 : putidx);
-
- /* check if occupied */
- return !!cdev->can.echo_skb[next_idx];
-}
-
-static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
-{
- struct canfd_frame *cf = (struct canfd_frame *)cdev->tx_skb->data;
+ struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+ u8 len_padded = DIV_ROUND_UP(cf->len, 4);
+ struct m_can_fifo_element fifo_element;
struct net_device *dev = cdev->net;
- struct sk_buff *skb = cdev->tx_skb;
- struct id_and_dlc fifo_header;
u32 cccr, fdflags;
- u32 txfqs;
int err;
- int putidx;
-
- cdev->tx_skb = NULL;
+ u32 putidx;
+ unsigned int frame_len = can_skb_get_frame_len(skb);
/* Generate ID field for TX buffer Element */
/* Common to all supported M_CAN versions */
if (cf->can_id & CAN_EFF_FLAG) {
- fifo_header.id = cf->can_id & CAN_EFF_MASK;
- fifo_header.id |= TX_BUF_XTD;
+ fifo_element.id = cf->can_id & CAN_EFF_MASK;
+ fifo_element.id |= TX_BUF_XTD;
} else {
- fifo_header.id = ((cf->can_id & CAN_SFF_MASK) << 18);
+ fifo_element.id = ((cf->can_id & CAN_SFF_MASK) << 18);
}
if (cf->can_id & CAN_RTR_FLAG)
- fifo_header.id |= TX_BUF_RTR;
+ fifo_element.id |= TX_BUF_RTR;
if (cdev->version == 30) {
netif_stop_queue(dev);
- fifo_header.dlc = can_fd_len2dlc(cf->len) << 16;
+ fifo_element.dlc = can_fd_len2dlc(cf->len) << 16;
/* Write the frame ID, DLC, and payload to the FIFO element. */
- err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_ID, &fifo_header, 2);
+ err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_ID, &fifo_element, 2);
if (err)
goto out_fail;
err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_DATA,
- cf->data, DIV_ROUND_UP(cf->len, 4));
+ cf->data, len_padded);
if (err)
goto out_fail;
@@ -1676,33 +1916,15 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
}
m_can_write(cdev, M_CAN_TXBTIE, 0x1);
- can_put_echo_skb(skb, dev, 0, 0);
+ can_put_echo_skb(skb, dev, 0, frame_len);
m_can_write(cdev, M_CAN_TXBAR, 0x1);
/* End of xmit function for version 3.0.x */
} else {
/* Transmit routine for version >= v3.1.x */
- txfqs = m_can_read(cdev, M_CAN_TXFQS);
-
- /* Check if FIFO full */
- if (_m_can_tx_fifo_full(txfqs)) {
- /* This shouldn't happen */
- netif_stop_queue(dev);
- netdev_warn(dev,
- "TX queue active although FIFO is full.");
-
- if (cdev->is_peripheral) {
- kfree_skb(skb);
- dev->stats.tx_dropped++;
- return NETDEV_TX_OK;
- } else {
- return NETDEV_TX_BUSY;
- }
- }
-
/* get put index for frame */
- putidx = FIELD_GET(TXFQS_TFQPI_MASK, txfqs);
+ putidx = cdev->tx_fifo_putidx;
/* Construct DLC Field, with CAN-FD configuration.
* Use the put index of the fifo as the message marker,
@@ -1717,30 +1939,32 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
fdflags |= TX_BUF_BRS;
}
- fifo_header.dlc = FIELD_PREP(TX_BUF_MM_MASK, putidx) |
+ fifo_element.dlc = FIELD_PREP(TX_BUF_MM_MASK, putidx) |
FIELD_PREP(TX_BUF_DLC_MASK, can_fd_len2dlc(cf->len)) |
fdflags | TX_BUF_EFC;
- err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_ID, &fifo_header, 2);
- if (err)
- goto out_fail;
- err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_DATA,
- cf->data, DIV_ROUND_UP(cf->len, 4));
+ memcpy_and_pad(fifo_element.data, CANFD_MAX_DLEN, &cf->data,
+ cf->len, 0);
+
+ err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_ID,
+ &fifo_element, 2 + len_padded);
if (err)
goto out_fail;
/* Push loopback echo.
* Will be looped back on TX interrupt based on message marker
*/
- can_put_echo_skb(skb, dev, putidx, 0);
+ can_put_echo_skb(skb, dev, putidx, frame_len);
- /* Enable TX FIFO element to start transfer */
- m_can_write(cdev, M_CAN_TXBAR, (1 << putidx));
-
- /* stop network queue if fifo full */
- if (m_can_tx_fifo_full(cdev) ||
- m_can_next_echo_skb_occupied(dev, putidx))
- netif_stop_queue(dev);
+ if (cdev->is_peripheral) {
+ /* Delay enabling TX FIFO element */
+ cdev->tx_peripheral_submit |= BIT(putidx);
+ } else {
+ /* Enable TX FIFO element to start transfer */
+ m_can_write(cdev, M_CAN_TXBAR, BIT(putidx));
+ }
+ cdev->tx_fifo_putidx = (++cdev->tx_fifo_putidx >= cdev->can.echo_skb_max ?
+ 0 : cdev->tx_fifo_putidx);
}
return NETDEV_TX_OK;
@@ -1751,46 +1975,107 @@ out_fail:
return NETDEV_TX_BUSY;
}
+static void m_can_tx_submit(struct m_can_classdev *cdev)
+{
+ m_can_write(cdev, M_CAN_TXBAR, cdev->tx_peripheral_submit);
+ cdev->tx_peripheral_submit = 0;
+}
+
static void m_can_tx_work_queue(struct work_struct *ws)
{
- struct m_can_classdev *cdev = container_of(ws, struct m_can_classdev,
- tx_work);
+ struct m_can_tx_op *op = container_of(ws, struct m_can_tx_op, work);
+ struct m_can_classdev *cdev = op->cdev;
+ struct sk_buff *skb = op->skb;
+
+ op->skb = NULL;
+ m_can_tx_handler(cdev, skb);
+ if (op->submit)
+ m_can_tx_submit(cdev);
+}
+
+static void m_can_tx_queue_skb(struct m_can_classdev *cdev, struct sk_buff *skb,
+ bool submit)
+{
+ cdev->tx_ops[cdev->next_tx_op].skb = skb;
+ cdev->tx_ops[cdev->next_tx_op].submit = submit;
+ queue_work(cdev->tx_wq, &cdev->tx_ops[cdev->next_tx_op].work);
+
+ ++cdev->next_tx_op;
+ if (cdev->next_tx_op >= cdev->tx_fifo_size)
+ cdev->next_tx_op = 0;
+}
+
+static netdev_tx_t m_can_start_peripheral_xmit(struct m_can_classdev *cdev,
+ struct sk_buff *skb)
+{
+ bool submit;
- m_can_tx_handler(cdev);
+ ++cdev->nr_txs_without_submit;
+ if (cdev->nr_txs_without_submit >= cdev->tx_max_coalesced_frames ||
+ !netdev_xmit_more()) {
+ cdev->nr_txs_without_submit = 0;
+ submit = true;
+ } else {
+ submit = false;
+ }
+ m_can_tx_queue_skb(cdev, skb, submit);
+
+ return NETDEV_TX_OK;
}
static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
+ unsigned int frame_len;
+ netdev_tx_t ret;
if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
- if (cdev->is_peripheral) {
- if (cdev->tx_skb) {
- netdev_err(dev, "hard_xmit called while tx busy\n");
- return NETDEV_TX_BUSY;
- }
+ frame_len = can_skb_get_frame_len(skb);
- if (cdev->can.state == CAN_STATE_BUS_OFF) {
- m_can_clean(dev);
- } else {
- /* Need to stop the queue to avoid numerous requests
- * from being sent. Suggested improvement is to create
- * a queueing mechanism that will queue the skbs and
- * process them in order.
- */
- cdev->tx_skb = skb;
- netif_stop_queue(cdev->net);
- queue_work(cdev->tx_wq, &cdev->tx_work);
- }
- } else {
- cdev->tx_skb = skb;
- return m_can_tx_handler(cdev);
+ if (cdev->can.state == CAN_STATE_BUS_OFF) {
+ m_can_clean(cdev->net);
+ return NETDEV_TX_OK;
}
- return NETDEV_TX_OK;
+ ret = m_can_start_tx(cdev);
+ if (ret != NETDEV_TX_OK)
+ return ret;
+
+ netdev_sent_queue(dev, frame_len);
+
+ if (cdev->is_peripheral)
+ ret = m_can_start_peripheral_xmit(cdev, skb);
+ else
+ ret = m_can_tx_handler(cdev, skb);
+
+ if (ret != NETDEV_TX_OK)
+ netdev_completed_queue(dev, 1, frame_len);
+
+ return ret;
+}
+
+static enum hrtimer_restart m_can_polling_timer(struct hrtimer *timer)
+{
+ struct m_can_classdev *cdev = container_of(timer, struct m_can_classdev, hrtimer);
+ int ret;
+
+ if (cdev->can.state == CAN_STATE_BUS_OFF ||
+ cdev->can.state == CAN_STATE_STOPPED)
+ return HRTIMER_NORESTART;
+
+ ret = m_can_interrupt_handler(cdev);
+
+ /* On error or if napi is scheduled to read, stop the timer */
+ if (ret < 0 || napi_is_scheduled(&cdev->napi))
+ return HRTIMER_NORESTART;
+
+ hrtimer_forward_now(timer, ms_to_ktime(HRTIMER_POLL_INTERVAL_MS));
+
+ return HRTIMER_RESTART;
}
static int m_can_open(struct net_device *dev)
@@ -1806,32 +2091,40 @@ static int m_can_open(struct net_device *dev)
if (err)
goto out_phy_power_off;
+ err = reset_control_deassert(cdev->rst);
+ if (err)
+ goto exit_disable_clks;
+
/* open the can device */
err = open_candev(dev);
if (err) {
netdev_err(dev, "failed to open can device\n");
- goto exit_disable_clks;
+ goto out_reset_control_assert;
}
if (cdev->is_peripheral)
can_rx_offload_enable(&cdev->offload);
+ else
+ napi_enable(&cdev->napi);
/* register interrupt handler */
if (cdev->is_peripheral) {
- cdev->tx_skb = NULL;
- cdev->tx_wq = alloc_workqueue("mcan_wq",
- WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
+ cdev->tx_wq = alloc_ordered_workqueue("mcan_wq",
+ WQ_FREEZABLE | WQ_MEM_RECLAIM);
if (!cdev->tx_wq) {
err = -ENOMEM;
goto out_wq_fail;
}
- INIT_WORK(&cdev->tx_work, m_can_tx_work_queue);
+ for (int i = 0; i != cdev->tx_fifo_size; ++i) {
+ cdev->tx_ops[i].cdev = cdev;
+ INIT_WORK(&cdev->tx_ops[i].work, m_can_tx_work_queue);
+ }
err = request_threaded_irq(dev->irq, NULL, m_can_isr,
IRQF_ONESHOT,
dev->name, dev);
- } else {
+ } else if (dev->irq) {
err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name,
dev);
}
@@ -1844,22 +2137,26 @@ static int m_can_open(struct net_device *dev)
/* start the m_can controller */
err = m_can_start(dev);
if (err)
- goto exit_irq_fail;
-
- if (!cdev->is_peripheral)
- napi_enable(&cdev->napi);
+ goto exit_start_fail;
netif_start_queue(dev);
return 0;
+exit_start_fail:
+ if (cdev->is_peripheral || dev->irq)
+ free_irq(dev->irq, dev);
exit_irq_fail:
if (cdev->is_peripheral)
destroy_workqueue(cdev->tx_wq);
out_wq_fail:
if (cdev->is_peripheral)
can_rx_offload_disable(&cdev->offload);
+ else
+ napi_disable(&cdev->napi);
close_candev(dev);
+out_reset_control_assert:
+ reset_control_assert(cdev->rst);
exit_disable_clks:
m_can_clk_stop(cdev);
out_phy_power_off:
@@ -1871,22 +2168,194 @@ static const struct net_device_ops m_can_netdev_ops = {
.ndo_open = m_can_open,
.ndo_stop = m_can_close,
.ndo_start_xmit = m_can_start_xmit,
- .ndo_change_mtu = can_change_mtu,
+};
+
+static int m_can_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kec,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct m_can_classdev *cdev = netdev_priv(dev);
+
+ ec->rx_max_coalesced_frames_irq = cdev->rx_max_coalesced_frames_irq;
+ ec->rx_coalesce_usecs_irq = cdev->rx_coalesce_usecs_irq;
+ ec->tx_max_coalesced_frames = cdev->tx_max_coalesced_frames;
+ ec->tx_max_coalesced_frames_irq = cdev->tx_max_coalesced_frames_irq;
+ ec->tx_coalesce_usecs_irq = cdev->tx_coalesce_usecs_irq;
+
+ return 0;
+}
+
+static int m_can_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kec,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct m_can_classdev *cdev = netdev_priv(dev);
+
+ if (cdev->can.state != CAN_STATE_STOPPED) {
+ netdev_err(dev, "Device is in use, please shut it down first\n");
+ return -EBUSY;
+ }
+
+ if (ec->rx_max_coalesced_frames_irq > cdev->mcfg[MRAM_RXF0].num) {
+ netdev_err(dev, "rx-frames-irq %u greater than the RX FIFO %u\n",
+ ec->rx_max_coalesced_frames_irq,
+ cdev->mcfg[MRAM_RXF0].num);
+ return -EINVAL;
+ }
+ if ((ec->rx_max_coalesced_frames_irq == 0) != (ec->rx_coalesce_usecs_irq == 0)) {
+ netdev_err(dev, "rx-frames-irq and rx-usecs-irq can only be set together\n");
+ return -EINVAL;
+ }
+ if (ec->tx_max_coalesced_frames_irq > cdev->mcfg[MRAM_TXE].num) {
+ netdev_err(dev, "tx-frames-irq %u greater than the TX event FIFO %u\n",
+ ec->tx_max_coalesced_frames_irq,
+ cdev->mcfg[MRAM_TXE].num);
+ return -EINVAL;
+ }
+ if (ec->tx_max_coalesced_frames_irq > cdev->mcfg[MRAM_TXB].num) {
+ netdev_err(dev, "tx-frames-irq %u greater than the TX FIFO %u\n",
+ ec->tx_max_coalesced_frames_irq,
+ cdev->mcfg[MRAM_TXB].num);
+ return -EINVAL;
+ }
+ if ((ec->tx_max_coalesced_frames_irq == 0) != (ec->tx_coalesce_usecs_irq == 0)) {
+ netdev_err(dev, "tx-frames-irq and tx-usecs-irq can only be set together\n");
+ return -EINVAL;
+ }
+ if (ec->tx_max_coalesced_frames > cdev->mcfg[MRAM_TXE].num) {
+ netdev_err(dev, "tx-frames %u greater than the TX event FIFO %u\n",
+ ec->tx_max_coalesced_frames,
+ cdev->mcfg[MRAM_TXE].num);
+ return -EINVAL;
+ }
+ if (ec->tx_max_coalesced_frames > cdev->mcfg[MRAM_TXB].num) {
+ netdev_err(dev, "tx-frames %u greater than the TX FIFO %u\n",
+ ec->tx_max_coalesced_frames,
+ cdev->mcfg[MRAM_TXB].num);
+ return -EINVAL;
+ }
+ if (ec->rx_coalesce_usecs_irq != 0 && ec->tx_coalesce_usecs_irq != 0 &&
+ ec->rx_coalesce_usecs_irq != ec->tx_coalesce_usecs_irq) {
+ netdev_err(dev, "rx-usecs-irq %u needs to be equal to tx-usecs-irq %u if both are enabled\n",
+ ec->rx_coalesce_usecs_irq,
+ ec->tx_coalesce_usecs_irq);
+ return -EINVAL;
+ }
+
+ cdev->rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
+ cdev->rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
+ cdev->tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
+ cdev->tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
+ cdev->tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
+
+ if (cdev->rx_coalesce_usecs_irq)
+ cdev->irq_timer_wait = us_to_ktime(cdev->rx_coalesce_usecs_irq);
+ else
+ cdev->irq_timer_wait = us_to_ktime(cdev->tx_coalesce_usecs_irq);
+
+ return 0;
+}
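The checks above tie the coalescing knobs to the sizes of the Message RAM FIFOs configured for this instance. From user space the fields map onto the standard ethtool -C options; a hypothetical invocation (interface name and values are examples only):

/*
 * Example configuration from user space (hypothetical values):
 *
 *   ethtool -C can0 rx-frames-irq 16 rx-usecs-irq 500 \
 *                   tx-frames-irq 16 tx-usecs-irq 500 tx-frames 8
 *
 * rx-usecs-irq and tx-usecs-irq must be equal when both are non-zero,
 * and each frame count is bounded by the corresponding FIFO size.
 */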
+
+static void m_can_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct m_can_classdev *cdev = netdev_priv(dev);
+
+ wol->supported = device_can_wakeup(cdev->dev) ? WAKE_PHY : 0;
+ wol->wolopts = device_may_wakeup(cdev->dev) ? WAKE_PHY : 0;
+}
+
+static int m_can_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct m_can_classdev *cdev = netdev_priv(dev);
+ bool wol_enable = !!(wol->wolopts & WAKE_PHY);
+ int ret;
+
+ if (wol->wolopts & ~WAKE_PHY)
+ return -EINVAL;
+
+ if (wol_enable == device_may_wakeup(cdev->dev))
+ return 0;
+
+ ret = device_set_wakeup_enable(cdev->dev, wol_enable);
+ if (ret) {
+ netdev_err(cdev->net, "Failed to set wakeup enable %pE\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ if (!IS_ERR_OR_NULL(cdev->pinctrl_state_wakeup)) {
+ if (wol_enable)
+ ret = pinctrl_select_state(cdev->pinctrl, cdev->pinctrl_state_wakeup);
+ else
+ ret = pinctrl_pm_select_default_state(cdev->dev);
+
+ if (ret) {
+ netdev_err(cdev->net, "Failed to select pinctrl state %pE\n",
+ ERR_PTR(ret));
+ goto err_wakeup_enable;
+ }
+ }
+
+ return 0;
+
+err_wakeup_enable:
+ /* Revert wakeup enable */
+ device_set_wakeup_enable(cdev->dev, !wol_enable);
+
+ return ret;
+}
+
+static const struct ethtool_ops m_can_ethtool_ops_coalescing = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS_IRQ |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_TX_USECS_IRQ |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ,
+ .get_ts_info = ethtool_op_get_ts_info,
+ .get_coalesce = m_can_get_coalesce,
+ .set_coalesce = m_can_set_coalesce,
+ .get_wol = m_can_get_wol,
+ .set_wol = m_can_set_wol,
};
static const struct ethtool_ops m_can_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
+ .get_wol = m_can_get_wol,
+ .set_wol = m_can_set_wol,
};
-static int register_m_can_dev(struct net_device *dev)
+static int register_m_can_dev(struct m_can_classdev *cdev)
{
+ struct net_device *dev = cdev->net;
+
dev->flags |= IFF_ECHO; /* we support local echo */
dev->netdev_ops = &m_can_netdev_ops;
- dev->ethtool_ops = &m_can_ethtool_ops;
+ if (dev->irq && cdev->is_peripheral)
+ dev->ethtool_ops = &m_can_ethtool_ops_coalescing;
+ else
+ dev->ethtool_ops = &m_can_ethtool_ops;
return register_candev(dev);
}
+int m_can_check_mram_cfg(struct m_can_classdev *cdev, u32 mram_max_size)
+{
+ u32 total_size;
+
+ total_size = cdev->mcfg[MRAM_TXB].off - cdev->mcfg[MRAM_SIDF].off +
+ cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;
+ if (total_size > mram_max_size) {
+ netdev_err(cdev->net, "Total size of mram config(%u) exceeds mram(%u)\n",
+ total_size, mram_max_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(m_can_check_mram_cfg);
+
static void m_can_of_parse_mram(struct m_can_classdev *cdev,
const u32 *mram_config_vals)
{
@@ -1914,39 +2383,17 @@ static void m_can_of_parse_mram(struct m_can_classdev *cdev,
cdev->mcfg[MRAM_TXB].num = mram_config_vals[7] &
FIELD_MAX(TXBC_NDTB_MASK);
- dev_dbg(cdev->dev,
- "sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n",
- cdev->mcfg[MRAM_SIDF].off, cdev->mcfg[MRAM_SIDF].num,
- cdev->mcfg[MRAM_XIDF].off, cdev->mcfg[MRAM_XIDF].num,
- cdev->mcfg[MRAM_RXF0].off, cdev->mcfg[MRAM_RXF0].num,
- cdev->mcfg[MRAM_RXF1].off, cdev->mcfg[MRAM_RXF1].num,
- cdev->mcfg[MRAM_RXB].off, cdev->mcfg[MRAM_RXB].num,
- cdev->mcfg[MRAM_TXE].off, cdev->mcfg[MRAM_TXE].num,
- cdev->mcfg[MRAM_TXB].off, cdev->mcfg[MRAM_TXB].num);
+ netdev_dbg(cdev->net,
+ "sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n",
+ cdev->mcfg[MRAM_SIDF].off, cdev->mcfg[MRAM_SIDF].num,
+ cdev->mcfg[MRAM_XIDF].off, cdev->mcfg[MRAM_XIDF].num,
+ cdev->mcfg[MRAM_RXF0].off, cdev->mcfg[MRAM_RXF0].num,
+ cdev->mcfg[MRAM_RXF1].off, cdev->mcfg[MRAM_RXF1].num,
+ cdev->mcfg[MRAM_RXB].off, cdev->mcfg[MRAM_RXB].num,
+ cdev->mcfg[MRAM_TXE].off, cdev->mcfg[MRAM_TXE].num,
+ cdev->mcfg[MRAM_TXB].off, cdev->mcfg[MRAM_TXB].num);
}
-int m_can_init_ram(struct m_can_classdev *cdev)
-{
- int end, i, start;
- int err = 0;
-
- /* initialize the entire Message RAM in use to avoid possible
- * ECC/parity checksum errors when reading an uninitialized buffer
- */
- start = cdev->mcfg[MRAM_SIDF].off;
- end = cdev->mcfg[MRAM_TXB].off +
- cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;
-
- for (i = start; i < end; i += 4) {
- err = m_can_fifo_write_no_off(cdev, i, 0x0);
- if (err)
- break;
- }
-
- return err;
-}
-EXPORT_SYMBOL_GPL(m_can_init_ram);
-
int m_can_class_get_clocks(struct m_can_classdev *cdev)
{
int ret = 0;
@@ -1955,7 +2402,7 @@ int m_can_class_get_clocks(struct m_can_classdev *cdev)
cdev->cclk = devm_clk_get(cdev->dev, "cclk");
if (IS_ERR(cdev->hclk) || IS_ERR(cdev->cclk)) {
- dev_err(cdev->dev, "no clock found\n");
+ netdev_err(cdev->net, "no clock found\n");
ret = -ENODEV;
}
@@ -1963,6 +2410,42 @@ int m_can_class_get_clocks(struct m_can_classdev *cdev)
}
EXPORT_SYMBOL_GPL(m_can_class_get_clocks);
+static bool m_can_class_wakeup_pinctrl_enabled(struct m_can_classdev *class_dev)
+{
+ return device_may_wakeup(class_dev->dev) && class_dev->pinctrl_state_wakeup;
+}
+
+static int m_can_class_parse_pinctrl(struct m_can_classdev *class_dev)
+{
+ struct device *dev = class_dev->dev;
+ int ret;
+
+ class_dev->pinctrl = devm_pinctrl_get(dev);
+ if (IS_ERR(class_dev->pinctrl)) {
+ ret = PTR_ERR(class_dev->pinctrl);
+ class_dev->pinctrl = NULL;
+
+ if (ret == -ENODEV)
+ return 0;
+
+ return dev_err_probe(dev, ret, "Failed to get pinctrl\n");
+ }
+
+ class_dev->pinctrl_state_wakeup =
+ pinctrl_lookup_state(class_dev->pinctrl, "wakeup");
+ if (IS_ERR(class_dev->pinctrl_state_wakeup)) {
+ ret = PTR_ERR(class_dev->pinctrl_state_wakeup);
+ class_dev->pinctrl_state_wakeup = NULL;
+
+ if (ret == -ENODEV)
+ return 0;
+
+ return dev_err_probe(dev, ret, "Failed to lookup pinctrl wakeup state\n");
+ }
+
+ return 0;
+}
+
struct m_can_classdev *m_can_class_allocate_dev(struct device *dev,
int sizeof_priv)
{
@@ -1978,9 +2461,12 @@ struct m_can_classdev *m_can_class_allocate_dev(struct device *dev,
sizeof(mram_config_vals) / 4);
if (ret) {
dev_err(dev, "Could not get Message RAM configuration.");
- goto out;
+ return ERR_PTR(ret);
}
+ if (dev->of_node && of_property_read_bool(dev->of_node, "wakeup-source"))
+ device_set_wakeup_capable(dev, true);
+
/* Get TX FIFO size
* Defines the total amount of echo buffers for loopback
*/
@@ -1990,7 +2476,7 @@ struct m_can_classdev *m_can_class_allocate_dev(struct device *dev,
net_dev = alloc_candev(sizeof_priv, tx_fifo_size);
if (!net_dev) {
dev_err(dev, "Failed to allocate CAN device");
- goto out;
+ return ERR_PTR(-ENOMEM);
}
class_dev = netdev_priv(net_dev);
@@ -1999,8 +2485,17 @@ struct m_can_classdev *m_can_class_allocate_dev(struct device *dev,
SET_NETDEV_DEV(net_dev, dev);
m_can_of_parse_mram(class_dev, mram_config_vals);
-out:
+ spin_lock_init(&class_dev->tx_handling_spinlock);
+
+ ret = m_can_class_parse_pinctrl(class_dev);
+ if (ret)
+ goto err_free_candev;
+
return class_dev;
+
+err_free_candev:
+ free_candev(net_dev);
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(m_can_class_allocate_dev);
@@ -2014,38 +2509,67 @@ int m_can_class_register(struct m_can_classdev *cdev)
{
int ret;
- if (cdev->pm_clock_support) {
- ret = m_can_clk_start(cdev);
- if (ret)
- return ret;
+ cdev->tx_fifo_size = max(1, min(cdev->mcfg[MRAM_TXB].num,
+ cdev->mcfg[MRAM_TXE].num));
+ if (cdev->is_peripheral) {
+ cdev->tx_ops =
+ devm_kzalloc(cdev->dev,
+ cdev->tx_fifo_size * sizeof(*cdev->tx_ops),
+ GFP_KERNEL);
+ if (!cdev->tx_ops)
+ return -ENOMEM;
}
+ cdev->rst = devm_reset_control_get_optional_shared(cdev->dev, NULL);
+ if (IS_ERR(cdev->rst))
+ return dev_err_probe(cdev->dev, PTR_ERR(cdev->rst),
+ "Failed to get reset line\n");
+
+ ret = m_can_clk_start(cdev);
+ if (ret)
+ return ret;
+
+ ret = reset_control_deassert(cdev->rst);
+ if (ret)
+ goto clk_disable;
+
if (cdev->is_peripheral) {
ret = can_rx_offload_add_manual(cdev->net, &cdev->offload,
NAPI_POLL_WEIGHT);
if (ret)
- goto clk_disable;
+ goto out_reset_control_assert;
+ }
+
+ if (!cdev->net->irq) {
+ netdev_dbg(cdev->net, "Polling enabled, initialize hrtimer");
+ hrtimer_setup(&cdev->hrtimer, m_can_polling_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+ } else {
+ hrtimer_setup(&cdev->hrtimer, m_can_coalescing_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
ret = m_can_dev_setup(cdev);
if (ret)
goto rx_offload_del;
- ret = register_m_can_dev(cdev->net);
+ ret = register_m_can_dev(cdev);
if (ret) {
- dev_err(cdev->dev, "registering %s failed (err=%d)\n",
- cdev->net->name, ret);
+ netdev_err(cdev->net, "registering %s failed (err=%d)\n",
+ cdev->net->name, ret);
goto rx_offload_del;
}
of_can_transceiver(cdev->net);
- dev_info(cdev->dev, "%s device registered (irq=%d, version=%d)\n",
- KBUILD_MODNAME, cdev->net->irq, cdev->version);
+ netdev_info(cdev->net, "device registered (irq=%d, version=%d)\n",
+ cdev->net->irq, cdev->version);
/* Probe finished
- * Stop clocks. They will be reactivated once the M_CAN device is opened
+ * Assert reset and stop clocks.
+ * They will be reactivated once the M_CAN device is opened
*/
+ reset_control_assert(cdev->rst);
m_can_clk_stop(cdev);
return 0;
@@ -2053,6 +2577,8 @@ int m_can_class_register(struct m_can_classdev *cdev)
rx_offload_del:
if (cdev->is_peripheral)
can_rx_offload_del(&cdev->offload);
+out_reset_control_assert:
+ reset_control_assert(cdev->rst);
clk_disable:
m_can_clk_stop(cdev);
@@ -2062,9 +2588,9 @@ EXPORT_SYMBOL_GPL(m_can_class_register);
void m_can_class_unregister(struct m_can_classdev *cdev)
{
+ unregister_candev(cdev->net);
if (cdev->is_peripheral)
can_rx_offload_del(&cdev->offload);
- unregister_candev(cdev->net);
}
EXPORT_SYMBOL_GPL(m_can_class_unregister);
@@ -2072,19 +2598,34 @@ int m_can_class_suspend(struct device *dev)
{
struct m_can_classdev *cdev = dev_get_drvdata(dev);
struct net_device *ndev = cdev->net;
+ int ret = 0;
if (netif_running(ndev)) {
netif_stop_queue(ndev);
netif_device_detach(ndev);
- m_can_stop(ndev);
+
+ /* Leave the chip running with the RX interrupt enabled if it is
+ * used as a wake-up source. Coalescing then needs to be reset:
+ * the timer is cancelled here, the interrupts are restored in
+ * resume.
+ */
+ if (cdev->pm_wake_source) {
+ hrtimer_cancel(&cdev->hrtimer);
+ m_can_write(cdev, M_CAN_IE, IR_RF0N);
+
+ if (cdev->ops->deinit)
+ ret = cdev->ops->deinit(cdev);
+ } else {
+ m_can_stop(ndev);
+ }
+
m_can_clk_stop(cdev);
+ cdev->can.state = CAN_STATE_SLEEPING;
}
- pinctrl_pm_select_sleep_state(dev);
+ if (!m_can_class_wakeup_pinctrl_enabled(cdev))
+ pinctrl_pm_select_sleep_state(dev);
- cdev->can.state = CAN_STATE_SLEEPING;
-
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(m_can_class_suspend);
@@ -2092,33 +2633,48 @@ int m_can_class_resume(struct device *dev)
{
struct m_can_classdev *cdev = dev_get_drvdata(dev);
struct net_device *ndev = cdev->net;
+ int ret = 0;
- pinctrl_pm_select_default_state(dev);
-
- cdev->can.state = CAN_STATE_ERROR_ACTIVE;
+ if (!m_can_class_wakeup_pinctrl_enabled(cdev))
+ pinctrl_pm_select_default_state(dev);
if (netif_running(ndev)) {
- int ret;
-
ret = m_can_clk_start(cdev);
if (ret)
return ret;
- ret = m_can_start(ndev);
- if (ret) {
- m_can_clk_stop(cdev);
- return ret;
+ if (cdev->pm_wake_source) {
+ /* Restore active interrupts but disable coalescing as
+ * we may have missed important waterlevel interrupts
+ * between suspend and resume. Timers are already
+ * stopped in suspend. Here we enable all interrupts
+ * again.
+ */
+ cdev->active_interrupts |= IR_RF0N | IR_TEFN;
+
+ if (cdev->ops->init)
+ ret = cdev->ops->init(cdev);
+
+ cdev->can.state = m_can_state_get_by_psr(cdev);
+
+ m_can_write(cdev, M_CAN_IE, cdev->active_interrupts);
+ } else {
+ ret = m_can_start(ndev);
+ if (ret) {
+ m_can_clk_stop(cdev);
+ return ret;
+ }
}
netif_device_attach(ndev);
netif_start_queue(ndev);
}
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(m_can_class_resume);
-MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>");
+MODULE_AUTHOR("Dong Aisheng <aisheng.dong@nxp.com>");
MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CAN bus driver for Bosch M_CAN controller");