Diffstat (limited to 'drivers/net/can/dev')
-rw-r--r--   drivers/net/can/dev/Makefile         |   12
-rw-r--r--   drivers/net/can/dev/bittiming.c      |  216
-rw-r--r--   drivers/net/can/dev/calc_bittiming.c |  262
-rw-r--r--   drivers/net/can/dev/dev.c            |  661
-rw-r--r--   drivers/net/can/dev/length.c         |   84
-rw-r--r--   drivers/net/can/dev/netlink.c        | 1111
-rw-r--r--   drivers/net/can/dev/rx-offload.c     |  427
-rw-r--r--   drivers/net/can/dev/skb.c            |  374
8 files changed, 3147 insertions, 0 deletions
diff --git a/drivers/net/can/dev/Makefile b/drivers/net/can/dev/Makefile
new file mode 100644
index 000000000000..633687d6b6c0
--- /dev/null
+++ b/drivers/net/can/dev/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_CAN_DEV) += can-dev.o
+
+can-dev-y += skb.o
+
+can-dev-$(CONFIG_CAN_CALC_BITTIMING) += calc_bittiming.o
+can-dev-$(CONFIG_CAN_NETLINK) += bittiming.o
+can-dev-$(CONFIG_CAN_NETLINK) += dev.o
+can-dev-$(CONFIG_CAN_NETLINK) += length.o
+can-dev-$(CONFIG_CAN_NETLINK) += netlink.o
+can-dev-$(CONFIG_CAN_RX_OFFLOAD) += rx-offload.o
diff --git a/drivers/net/can/dev/bittiming.c b/drivers/net/can/dev/bittiming.c
new file mode 100644
index 000000000000..8f82418230ce
--- /dev/null
+++ b/drivers/net/can/dev/bittiming.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
+ * Copyright (C) 2006 Andrey Volkov, Varma Electronics
+ * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+ * Copyright (c) 2025 Vincent Mailhol <mailhol@kernel.org>
+ */
+
+#include <linux/can/dev.h>
+
+void can_sjw_set_default(struct can_bittiming *bt)
+{
+ if (bt->sjw)
+ return;
+
+ /* If user space provides no sjw, use a sane default: phase_seg2 / 2,
+ * limited to phase_seg1 and at least 1
+ */
+ bt->sjw = max(1U, min(bt->phase_seg1, bt->phase_seg2 / 2));
+}
+
+int can_sjw_check(const struct net_device *dev, const struct can_bittiming *bt,
+ const struct can_bittiming_const *btc, struct netlink_ext_ack *extack)
+{
+ if (bt->sjw > btc->sjw_max) {
+ NL_SET_ERR_MSG_FMT(extack, "sjw: %u greater than max sjw: %u",
+ bt->sjw, btc->sjw_max);
+ return -EINVAL;
+ }
+
+ if (bt->sjw > bt->phase_seg1) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "sjw: %u greater than phase-seg1: %u",
+ bt->sjw, bt->phase_seg1);
+ return -EINVAL;
+ }
+
+ if (bt->sjw > bt->phase_seg2) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "sjw: %u greater than phase-seg2: %u",
+ bt->sjw, bt->phase_seg2);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Checks the validity of the specified bit-timing parameters prop_seg,
+ * phase_seg1, phase_seg2 and sjw and tries to determine the bitrate
+ * prescaler value brp. You can find more information in the header
+ * file linux/can/netlink.h.
+ */
+static int can_fixup_bittiming(const struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc,
+ struct netlink_ext_ack *extack)
+{
+ const unsigned int tseg1 = bt->prop_seg + bt->phase_seg1;
+ const struct can_priv *priv = netdev_priv(dev);
+ u64 brp64;
+ int err;
+
+ if (tseg1 < btc->tseg1_min) {
+ NL_SET_ERR_MSG_FMT(extack, "prop-seg + phase-seg1: %u less than tseg1-min: %u",
+ tseg1, btc->tseg1_min);
+ return -EINVAL;
+ }
+ if (tseg1 > btc->tseg1_max) {
+ NL_SET_ERR_MSG_FMT(extack, "prop-seg + phase-seg1: %u greater than tseg1-max: %u",
+ tseg1, btc->tseg1_max);
+ return -EINVAL;
+ }
+ if (bt->phase_seg2 < btc->tseg2_min) {
+ NL_SET_ERR_MSG_FMT(extack, "phase-seg2: %u less than tseg2-min: %u",
+ bt->phase_seg2, btc->tseg2_min);
+ return -EINVAL;
+ }
+ if (bt->phase_seg2 > btc->tseg2_max) {
+ NL_SET_ERR_MSG_FMT(extack, "phase-seg2: %u greater than tseg2-max: %u",
+ bt->phase_seg2, btc->tseg2_max);
+ return -EINVAL;
+ }
+
+ can_sjw_set_default(bt);
+
+ err = can_sjw_check(dev, bt, btc, extack);
+ if (err)
+ return err;
+
+ brp64 = (u64)priv->clock.freq * (u64)bt->tq;
+ if (btc->brp_inc > 1)
+ do_div(brp64, btc->brp_inc);
+ brp64 += 500000000UL - 1;
+ do_div(brp64, 1000000000UL); /* the practicable BRP */
+ if (btc->brp_inc > 1)
+ brp64 *= btc->brp_inc;
+ bt->brp = (u32)brp64;
+
+ if (bt->brp < btc->brp_min) {
+ NL_SET_ERR_MSG_FMT(extack, "resulting brp: %u less than brp-min: %u",
+ bt->brp, btc->brp_min);
+ return -EINVAL;
+ }
+ if (bt->brp > btc->brp_max) {
+ NL_SET_ERR_MSG_FMT(extack, "resulting brp: %u greater than brp-max: %u",
+ bt->brp, btc->brp_max);
+ return -EINVAL;
+ }
+
+ bt->bitrate = priv->clock.freq / (bt->brp * can_bit_time(bt));
+ bt->sample_point = ((CAN_SYNC_SEG + tseg1) * 1000) / can_bit_time(bt);
+ bt->tq = DIV_U64_ROUND_CLOSEST(mul_u32_u32(bt->brp, NSEC_PER_SEC),
+ priv->clock.freq);
+
+ return 0;
+}
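
A quick numeric illustration of the prescaler rounding above (not part of the patch; the 40 MHz clock, brp_inc = 1 and tq = 250 ns are assumed values): the biased division yields BRP = 10, and a 16 tq bit time then corresponds to 250 kbit/s.

	u64 brp64 = 40000000ULL * 250;	/* clock.freq * tq = 10^10       */
	brp64 += 500000000UL - 1;	/* bias so the division rounds   */
	do_div(brp64, 1000000000UL);	/* brp64 == 10                   */
	/* bitrate = clock.freq / (brp * can_bit_time())
	 *         = 40 MHz / (10 * 16) = 250 kbit/s
	 */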
+
+/* Checks the validity of predefined bitrate settings */
+static int
+can_validate_bitrate(const struct net_device *dev, const struct can_bittiming *bt,
+ const u32 *bitrate_const,
+ const unsigned int bitrate_const_cnt,
+ struct netlink_ext_ack *extack)
+{
+ unsigned int i;
+
+ for (i = 0; i < bitrate_const_cnt; i++) {
+ if (bt->bitrate == bitrate_const[i])
+ return 0;
+ }
+
+ NL_SET_ERR_MSG_FMT(extack, "bitrate %u bps is not supported",
+ bt->bitrate);
+
+ return -EINVAL;
+}
+
+int can_get_bittiming(const struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc,
+ const u32 *bitrate_const,
+ const unsigned int bitrate_const_cnt,
+ struct netlink_ext_ack *extack)
+{
+ /* Depending on which fields of the given can_bittiming structure are
+ * set, the CAN timing parameters are either calculated from the
+ * provided bitrate, or the raw timing parameters (tq, prop_seg, etc.)
+ * are taken as provided and are then checked and fixed up.
+ */
+ if (!bt->tq && bt->bitrate && btc)
+ return can_calc_bittiming(dev, bt, btc, extack);
+ if (bt->tq && !bt->bitrate && btc)
+ return can_fixup_bittiming(dev, bt, btc, extack);
+ if (!bt->tq && bt->bitrate && bitrate_const)
+ return can_validate_bitrate(dev, bt, bitrate_const,
+ bitrate_const_cnt, extack);
+
+ return -EINVAL;
+}
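
As an illustration (not part of the patch), the first two paths above accept user-side input along these lines, using the field names of struct can_bittiming from <linux/can/netlink.h>; the numbers are arbitrary examples:

	/* (a) bitrate only: can_calc_bittiming() derives tq, brp and the
	 * segment lengths from the device's bittiming_const
	 */
	struct can_bittiming by_bitrate = {
		.bitrate = 500000,
		.sample_point = 875,	/* optional, in tenths of a percent */
	};

	/* (b) raw timing parameters: can_fixup_bittiming() checks them and
	 * derives brp, the bitrate and the real sample point
	 */
	struct can_bittiming by_timing = {
		.tq = 125,		/* ns */
		.prop_seg = 6,
		.phase_seg1 = 7,
		.phase_seg2 = 2,
		.sjw = 1,		/* 0 would select the default */
	};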
+
+int can_validate_pwm_bittiming(const struct net_device *dev,
+ const struct can_pwm *pwm,
+ struct netlink_ext_ack *extack)
+{
+ const struct can_priv *priv = netdev_priv(dev);
+ u32 xl_bit_time_tqmin = can_bit_time_tqmin(&priv->xl.data_bittiming);
+ u32 nom_bit_time_tqmin = can_bit_time_tqmin(&priv->bittiming);
+ u32 pwms_ns = can_tqmin_to_ns(pwm->pwms, priv->clock.freq);
+ u32 pwml_ns = can_tqmin_to_ns(pwm->pwml, priv->clock.freq);
+
+ if (pwms_ns + pwml_ns > CAN_PWM_NS_MAX) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "The PWM symbol duration: %u ns may not exceed %u ns",
+ pwms_ns + pwml_ns, CAN_PWM_NS_MAX);
+ return -EINVAL;
+ }
+
+ if (pwms_ns < CAN_PWM_DECODE_NS) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PWMS: %u ns shall be at least %u ns",
+ pwms_ns, CAN_PWM_DECODE_NS);
+ return -EINVAL;
+ }
+
+ if (pwm->pwms >= pwm->pwml) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PWMS: %u tqmin shall be smaller than PWML: %u tqmin",
+ pwm->pwms, pwm->pwml);
+ return -EINVAL;
+ }
+
+ if (pwml_ns - pwms_ns < 2 * CAN_PWM_DECODE_NS) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "At least %u ns shall separate PWMS: %u ns from PWML: %u ns",
+ 2 * CAN_PWM_DECODE_NS, pwms_ns, pwml_ns);
+ return -EINVAL;
+ }
+
+ if (xl_bit_time_tqmin % (pwm->pwms + pwm->pwml) != 0) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PWM duration: %u tqmin does not divide XL's bit time: %u tqmin",
+ pwm->pwms + pwm->pwml, xl_bit_time_tqmin);
+ return -EINVAL;
+ }
+
+ if (pwm->pwmo >= pwm->pwms + pwm->pwml) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PWMO: %u tqmin shall be smaller than PWMS + PWML: %u tqmin",
+ pwm->pwmo, pwm->pwms + pwm->pwml);
+ return -EINVAL;
+ }
+
+ if (nom_bit_time_tqmin % (pwm->pwms + pwm->pwml) != pwm->pwmo) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "Can not assemble nominal bit time: %u tqmin out of PWMS + PWML and PWMO",
+ nom_bit_time_tqmin);
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/can/dev/calc_bittiming.c b/drivers/net/can/dev/calc_bittiming.c
new file mode 100644
index 000000000000..cc4022241553
--- /dev/null
+++ b/drivers/net/can/dev/calc_bittiming.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
+ * Copyright (C) 2006 Andrey Volkov, Varma Electronics
+ * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+ * Copyright (C) 2021-2025 Vincent Mailhol <mailhol@kernel.org>
+ */
+
+#include <linux/units.h>
+#include <linux/can/dev.h>
+
+#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
+
+/* CiA recommended sample points for Non Return to Zero encoding. */
+static int can_calc_sample_point_nrz(const struct can_bittiming *bt)
+{
+ if (bt->bitrate > 800 * KILO /* BPS */)
+ return 750;
+
+ if (bt->bitrate > 500 * KILO /* BPS */)
+ return 800;
+
+ return 875;
+}
+
+/* Sample points for Pulse-Width Modulation encoding. */
+static int can_calc_sample_point_pwm(const struct can_bittiming *bt)
+{
+ if (bt->bitrate > 15 * MEGA /* BPS */)
+ return 625;
+
+ if (bt->bitrate > 9 * MEGA /* BPS */)
+ return 600;
+
+ if (bt->bitrate > 4 * MEGA /* BPS */)
+ return 560;
+
+ return 520;
+}
+
+/* Bit-timing calculation derived from:
+ *
+ * Code based on LinCAN sources and H8S2638 project
+ * Copyright 2004-2006 Pavel Pisa - DCE FELK CVUT cz
+ * Copyright 2005 Stanislav Marek
+ * email: pisa@cmp.felk.cvut.cz
+ *
+ * Calculates proper bit-timing parameters for a specified bit-rate
+ * and sample-point, which can then be used to set the bit-timing
+ * registers of the CAN controller. You can find more information
+ * in the header file linux/can/netlink.h.
+ */
+static int
+can_update_sample_point(const struct can_bittiming_const *btc,
+ const unsigned int sample_point_reference, const unsigned int tseg,
+ unsigned int *tseg1_ptr, unsigned int *tseg2_ptr,
+ unsigned int *sample_point_error_ptr)
+{
+ unsigned int sample_point_error, best_sample_point_error = UINT_MAX;
+ unsigned int sample_point, best_sample_point = 0;
+ unsigned int tseg1, tseg2;
+ int i;
+
+ for (i = 0; i <= 1; i++) {
+ tseg2 = tseg + CAN_SYNC_SEG -
+ (sample_point_reference * (tseg + CAN_SYNC_SEG)) /
+ 1000 - i;
+ tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max);
+ tseg1 = tseg - tseg2;
+ if (tseg1 > btc->tseg1_max) {
+ tseg1 = btc->tseg1_max;
+ tseg2 = tseg - tseg1;
+ }
+
+ sample_point = 1000 * (tseg + CAN_SYNC_SEG - tseg2) /
+ (tseg + CAN_SYNC_SEG);
+ sample_point_error = abs(sample_point_reference - sample_point);
+
+ if (sample_point <= sample_point_reference &&
+ sample_point_error < best_sample_point_error) {
+ best_sample_point = sample_point;
+ best_sample_point_error = sample_point_error;
+ *tseg1_ptr = tseg1;
+ *tseg2_ptr = tseg2;
+ }
+ }
+
+ if (sample_point_error_ptr)
+ *sample_point_error_ptr = best_sample_point_error;
+
+ return best_sample_point;
+}
+
+int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc, struct netlink_ext_ack *extack)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ unsigned int bitrate; /* current bitrate */
+ unsigned int bitrate_error; /* diff between calculated and reference value */
+ unsigned int best_bitrate_error = UINT_MAX;
+ unsigned int sample_point_error; /* diff between calculated and reference value */
+ unsigned int best_sample_point_error = UINT_MAX;
+ unsigned int sample_point_reference; /* reference sample point */
+ unsigned int best_tseg = 0; /* current best value for tseg */
+ unsigned int best_brp = 0; /* current best value for brp */
+ unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0;
+ u64 v64;
+ int err;
+
+ if (bt->sample_point)
+ sample_point_reference = bt->sample_point;
+ else if (btc == priv->xl.data_bittiming_const &&
+ (priv->ctrlmode & CAN_CTRLMODE_XL_TMS))
+ sample_point_reference = can_calc_sample_point_pwm(bt);
+ else
+ sample_point_reference = can_calc_sample_point_nrz(bt);
+
+ /* tseg even = round down, odd = round up */
+ for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
+ tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) {
+ tsegall = CAN_SYNC_SEG + tseg / 2;
+
+ /* Compute all possible tseg choices (tseg=tseg1+tseg2) */
+ brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2;
+
+ /* choose brp step which is possible in system */
+ brp = (brp / btc->brp_inc) * btc->brp_inc;
+ if (brp < btc->brp_min || brp > btc->brp_max)
+ continue;
+
+ bitrate = priv->clock.freq / (brp * tsegall);
+ bitrate_error = abs(bt->bitrate - bitrate);
+
+ /* skip this tseg/brp pair if the bitrate error got worse */
+ if (bitrate_error > best_bitrate_error)
+ continue;
+
+ /* reset sample point error if we have a better bitrate */
+ if (bitrate_error < best_bitrate_error)
+ best_sample_point_error = UINT_MAX;
+
+ can_update_sample_point(btc, sample_point_reference, tseg / 2,
+ &tseg1, &tseg2, &sample_point_error);
+ if (sample_point_error >= best_sample_point_error)
+ continue;
+
+ best_sample_point_error = sample_point_error;
+ best_bitrate_error = bitrate_error;
+ best_tseg = tseg / 2;
+ best_brp = brp;
+
+ if (bitrate_error == 0 && sample_point_error == 0)
+ break;
+ }
+
+ if (best_bitrate_error) {
+ /* Error in one-hundredth of a percent */
+ v64 = (u64)best_bitrate_error * 10000;
+ do_div(v64, bt->bitrate);
+ bitrate_error = (u32)v64;
+ /* print at least 0.01% if the error is smaller */
+ bitrate_error = max(bitrate_error, 1U);
+ if (bitrate_error > CAN_CALC_MAX_ERROR) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "bitrate error: %u.%02u%% too high",
+ bitrate_error / 100,
+ bitrate_error % 100);
+ return -EINVAL;
+ }
+ NL_SET_ERR_MSG_FMT(extack,
+ "bitrate error: %u.%02u%%",
+ bitrate_error / 100, bitrate_error % 100);
+ }
+
+ /* real sample point */
+ bt->sample_point = can_update_sample_point(btc, sample_point_reference,
+ best_tseg, &tseg1, &tseg2,
+ NULL);
+
+ v64 = (u64)best_brp * 1000 * 1000 * 1000;
+ do_div(v64, priv->clock.freq);
+ bt->tq = (u32)v64;
+ bt->prop_seg = tseg1 / 2;
+ bt->phase_seg1 = tseg1 - bt->prop_seg;
+ bt->phase_seg2 = tseg2;
+
+ can_sjw_set_default(bt);
+
+ err = can_sjw_check(dev, bt, btc, extack);
+ if (err)
+ return err;
+
+ bt->brp = best_brp;
+
+ /* real bitrate */
+ bt->bitrate = priv->clock.freq /
+ (bt->brp * can_bit_time(bt));
+
+ return 0;
+}
+
+void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
+ const struct can_bittiming *dbt,
+ u32 tdc_mask, u32 *ctrlmode, u32 ctrlmode_supported)
+{
+ u32 tdc_auto = tdc_mask & CAN_CTRLMODE_TDC_AUTO_MASK;
+
+ if (!tdc_const || !(ctrlmode_supported & tdc_auto))
+ return;
+
+ *ctrlmode &= ~tdc_mask;
+
+ /* As specified in ISO 11898-1 section 11.3.3 "Transmitter
+ * delay compensation" (TDC) is only applicable if data BRP is
+ * one or two.
+ */
+ if (dbt->brp == 1 || dbt->brp == 2) {
+ /* Sample point in clock periods */
+ u32 sample_point_in_tc = (CAN_SYNC_SEG + dbt->prop_seg +
+ dbt->phase_seg1) * dbt->brp;
+
+ if (sample_point_in_tc < tdc_const->tdco_min)
+ return;
+ tdc->tdco = min(sample_point_in_tc, tdc_const->tdco_max);
+ *ctrlmode |= tdc_auto;
+ }
+}
+
+int can_calc_pwm(struct net_device *dev, struct netlink_ext_ack *extack)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ const struct can_pwm_const *pwm_const = priv->xl.pwm_const;
+ struct can_pwm *pwm = &priv->xl.pwm;
+ u32 xl_tqmin = can_bit_time_tqmin(&priv->xl.data_bittiming);
+ u32 xl_ns = can_tqmin_to_ns(xl_tqmin, priv->clock.freq);
+ u32 nom_tqmin = can_bit_time_tqmin(&priv->bittiming);
+ int pwm_per_bit_max = xl_tqmin / (pwm_const->pwms_min + pwm_const->pwml_min);
+ int pwm_per_bit;
+ u32 pwm_tqmin;
+
+ /* For a data bitrate of 5 Mbit/s or greater, xl_ns < CAN_PWM_NS_MAX,
+ * giving a pwm_per_bit of 1, so the loop breaks immediately.
+ */
+ for (pwm_per_bit = DIV_ROUND_UP(xl_ns, CAN_PWM_NS_MAX);
+ pwm_per_bit <= pwm_per_bit_max; pwm_per_bit++)
+ if (xl_tqmin % pwm_per_bit == 0)
+ break;
+
+ if (pwm_per_bit > pwm_per_bit_max) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "Can not divide the XL data phase's bit time: %u tqmin into multiple PWM symbols",
+ xl_tqmin);
+ return -EINVAL;
+ }
+
+ pwm_tqmin = xl_tqmin / pwm_per_bit;
+ pwm->pwms = DIV_ROUND_UP_POW2(pwm_tqmin, 4);
+ pwm->pwml = pwm_tqmin - pwm->pwms;
+ pwm->pwmo = nom_tqmin % pwm_tqmin;
+
+ return 0;
+}
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
new file mode 100644
index 000000000000..091f30e94c61
--- /dev/null
+++ b/drivers/net/can/dev/dev.c
@@ -0,0 +1,661 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
+ * Copyright (C) 2006 Andrey Volkov, Varma Electronics
+ * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+ */
+
+#include <linux/can.h>
+#include <linux/can/can-ml.h>
+#include <linux/can/dev.h>
+#include <linux/can/skb.h>
+#include <linux/gpio/consumer.h>
+#include <linux/if_arp.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+static void can_update_state_error_stats(struct net_device *dev,
+ enum can_state new_state)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (new_state <= priv->state)
+ return;
+
+ switch (new_state) {
+ case CAN_STATE_ERROR_WARNING:
+ priv->can_stats.error_warning++;
+ break;
+ case CAN_STATE_ERROR_PASSIVE:
+ priv->can_stats.error_passive++;
+ break;
+ case CAN_STATE_BUS_OFF:
+ priv->can_stats.bus_off++;
+ break;
+ default:
+ break;
+ }
+}
+
+static int can_tx_state_to_frame(struct net_device *dev, enum can_state state)
+{
+ switch (state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ return CAN_ERR_CRTL_ACTIVE;
+ case CAN_STATE_ERROR_WARNING:
+ return CAN_ERR_CRTL_TX_WARNING;
+ case CAN_STATE_ERROR_PASSIVE:
+ return CAN_ERR_CRTL_TX_PASSIVE;
+ default:
+ return 0;
+ }
+}
+
+static int can_rx_state_to_frame(struct net_device *dev, enum can_state state)
+{
+ switch (state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ return CAN_ERR_CRTL_ACTIVE;
+ case CAN_STATE_ERROR_WARNING:
+ return CAN_ERR_CRTL_RX_WARNING;
+ case CAN_STATE_ERROR_PASSIVE:
+ return CAN_ERR_CRTL_RX_PASSIVE;
+ default:
+ return 0;
+ }
+}
+
+const char *can_get_state_str(const enum can_state state)
+{
+ switch (state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ return "Error Active";
+ case CAN_STATE_ERROR_WARNING:
+ return "Error Warning";
+ case CAN_STATE_ERROR_PASSIVE:
+ return "Error Passive";
+ case CAN_STATE_BUS_OFF:
+ return "Bus Off";
+ case CAN_STATE_STOPPED:
+ return "Stopped";
+ case CAN_STATE_SLEEPING:
+ return "Sleeping";
+ default:
+ return "<unknown>";
+ }
+}
+EXPORT_SYMBOL_GPL(can_get_state_str);
+
+const char *can_get_ctrlmode_str(u32 ctrlmode)
+{
+ switch (ctrlmode & ~(ctrlmode - 1)) {
+ case 0:
+ return "(none)";
+ case CAN_CTRLMODE_LOOPBACK:
+ return "LOOPBACK";
+ case CAN_CTRLMODE_LISTENONLY:
+ return "LISTEN-ONLY";
+ case CAN_CTRLMODE_3_SAMPLES:
+ return "TRIPLE-SAMPLING";
+ case CAN_CTRLMODE_ONE_SHOT:
+ return "ONE-SHOT";
+ case CAN_CTRLMODE_BERR_REPORTING:
+ return "BERR-REPORTING";
+ case CAN_CTRLMODE_FD:
+ return "FD";
+ case CAN_CTRLMODE_PRESUME_ACK:
+ return "PRESUME-ACK";
+ case CAN_CTRLMODE_FD_NON_ISO:
+ return "FD-NON-ISO";
+ case CAN_CTRLMODE_CC_LEN8_DLC:
+ return "CC-LEN8-DLC";
+ case CAN_CTRLMODE_TDC_AUTO:
+ return "TDC-AUTO";
+ case CAN_CTRLMODE_TDC_MANUAL:
+ return "TDC-MANUAL";
+ case CAN_CTRLMODE_RESTRICTED:
+ return "RESTRICTED";
+ case CAN_CTRLMODE_XL:
+ return "XL";
+ case CAN_CTRLMODE_XL_TDC_AUTO:
+ return "XL-TDC-AUTO";
+ case CAN_CTRLMODE_XL_TDC_MANUAL:
+ return "XL-TDC-MANUAL";
+ case CAN_CTRLMODE_XL_TMS:
+ return "TMS";
+ default:
+ return "<unknown>";
+ }
+}
+EXPORT_SYMBOL_GPL(can_get_ctrlmode_str);
+
+static enum can_state can_state_err_to_state(u16 err)
+{
+ if (err < CAN_ERROR_WARNING_THRESHOLD)
+ return CAN_STATE_ERROR_ACTIVE;
+ if (err < CAN_ERROR_PASSIVE_THRESHOLD)
+ return CAN_STATE_ERROR_WARNING;
+ if (err < CAN_BUS_OFF_THRESHOLD)
+ return CAN_STATE_ERROR_PASSIVE;
+
+ return CAN_STATE_BUS_OFF;
+}
+
+void can_state_get_by_berr_counter(const struct net_device *dev,
+ const struct can_berr_counter *bec,
+ enum can_state *tx_state,
+ enum can_state *rx_state)
+{
+ *tx_state = can_state_err_to_state(bec->txerr);
+ *rx_state = can_state_err_to_state(bec->rxerr);
+}
+EXPORT_SYMBOL_GPL(can_state_get_by_berr_counter);
+
+void can_change_state(struct net_device *dev, struct can_frame *cf,
+ enum can_state tx_state, enum can_state rx_state)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ enum can_state new_state = max(tx_state, rx_state);
+
+ if (unlikely(new_state == priv->state)) {
+ netdev_warn(dev, "%s: oops, state did not change", __func__);
+ return;
+ }
+
+ netdev_dbg(dev, "Controller changed from %s State (%d) into %s State (%d).\n",
+ can_get_state_str(priv->state), priv->state,
+ can_get_state_str(new_state), new_state);
+
+ can_update_state_error_stats(dev, new_state);
+ priv->state = new_state;
+
+ if (!cf)
+ return;
+
+ if (unlikely(new_state == CAN_STATE_BUS_OFF)) {
+ cf->can_id |= CAN_ERR_BUSOFF;
+ return;
+ }
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= tx_state >= rx_state ?
+ can_tx_state_to_frame(dev, tx_state) : 0;
+ cf->data[1] |= tx_state <= rx_state ?
+ can_rx_state_to_frame(dev, rx_state) : 0;
+}
+EXPORT_SYMBOL_GPL(can_change_state);
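
A minimal sketch of how a driver's error interrupt handler typically combines these helpers (not part of the patch; my_read_error_counters() is a hypothetical stand-in for reading the chip's TEC/REC registers):

	struct can_berr_counter bec;
	enum can_state tx_state, rx_state;
	struct can_frame *cf;
	struct sk_buff *skb;

	my_read_error_counters(dev, &bec);		/* hypothetical */
	can_state_get_by_berr_counter(dev, &bec, &tx_state, &rx_state);

	skb = alloc_can_err_skb(dev, &cf);		/* cf stays NULL on failure */
	can_change_state(dev, cf, tx_state, rx_state);	/* tolerates cf == NULL */
	if (skb)
		netif_rx(skb);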
+
+/* CAN device restart for bus-off recovery */
+static int can_restart(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ int err;
+
+ if (!priv->do_set_mode)
+ return -EOPNOTSUPP;
+
+ if (netif_carrier_ok(dev))
+ netdev_err(dev, "Attempt to restart for bus-off recovery, but carrier is OK?\n");
+
+ /* No synchronization needed because the device is bus-off and
+ * no messages can come in or go out.
+ */
+ can_flush_echo_skb(dev);
+
+ /* send restart message upstream */
+ skb = alloc_can_err_skb(dev, &cf);
+ if (skb) {
+ cf->can_id |= CAN_ERR_RESTARTED;
+ netif_rx(skb);
+ }
+
+ /* Now restart the device */
+ netif_carrier_on(dev);
+ err = priv->do_set_mode(dev, CAN_MODE_START);
+ if (err) {
+ netdev_err(dev, "Restart failed, error %pe\n", ERR_PTR(err));
+ netif_carrier_off(dev);
+
+ return err;
+ } else {
+ netdev_dbg(dev, "Restarted\n");
+ priv->can_stats.restarts++;
+ }
+
+ return 0;
+}
+
+static void can_restart_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct can_priv *priv = container_of(dwork, struct can_priv,
+ restart_work);
+
+ can_restart(priv->dev);
+}
+
+int can_restart_now(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ /* A manual restart is only permitted if automatic restart is
+ * disabled and the device is in the bus-off state
+ */
+ if (priv->restart_ms)
+ return -EINVAL;
+ if (priv->state != CAN_STATE_BUS_OFF)
+ return -EBUSY;
+
+ cancel_delayed_work_sync(&priv->restart_work);
+
+ return can_restart(dev);
+}
+
+/* CAN bus-off
+ *
+ * This function should be called when the device goes bus-off to
+ * tell the netif layer that no more packets can be sent or received.
+ * If enabled, a timer is started to trigger bus-off recovery.
+ */
+void can_bus_off(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (priv->restart_ms)
+ netdev_info(dev, "bus-off, scheduling restart in %d ms\n",
+ priv->restart_ms);
+ else
+ netdev_info(dev, "bus-off\n");
+
+ netif_carrier_off(dev);
+
+ if (priv->restart_ms)
+ schedule_delayed_work(&priv->restart_work,
+ msecs_to_jiffies(priv->restart_ms));
+}
+EXPORT_SYMBOL_GPL(can_bus_off);
+
+void can_setup(struct net_device *dev)
+{
+ dev->type = ARPHRD_CAN;
+ dev->mtu = CAN_MTU;
+ dev->min_mtu = CAN_MTU;
+ dev->max_mtu = CAN_MTU;
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->tx_queue_len = 10;
+
+ /* New-style flags. */
+ dev->flags = IFF_NOARP;
+ dev->features = NETIF_F_HW_CSUM;
+}
+
+/* Allocate and setup space for the CAN network device */
+struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
+ unsigned int txqs, unsigned int rxqs)
+{
+ struct can_ml_priv *can_ml;
+ struct net_device *dev;
+ struct can_priv *priv;
+ int size;
+
+ /* We put the driver's priv, the CAN mid layer priv and the
+ * echo skb into the netdevice's priv. The memory layout for
+ * the netdev_priv is like this:
+ *
+ * +-------------------------+
+ * | driver's priv |
+ * +-------------------------+
+ * | struct can_ml_priv |
+ * +-------------------------+
+ * | array of struct sk_buff |
+ * +-------------------------+
+ */
+
+ size = ALIGN(sizeof_priv, NETDEV_ALIGN) + sizeof(struct can_ml_priv);
+
+ if (echo_skb_max)
+ size = ALIGN(size, sizeof(struct sk_buff *)) +
+ echo_skb_max * sizeof(struct sk_buff *);
+
+ dev = alloc_netdev_mqs(size, "can%d", NET_NAME_UNKNOWN, can_setup,
+ txqs, rxqs);
+ if (!dev)
+ return NULL;
+
+ priv = netdev_priv(dev);
+ priv->dev = dev;
+
+ can_ml = (void *)priv + ALIGN(sizeof_priv, NETDEV_ALIGN);
+ can_set_ml_priv(dev, can_ml);
+
+ if (echo_skb_max) {
+ priv->echo_skb_max = echo_skb_max;
+ priv->echo_skb = (void *)priv +
+ (size - echo_skb_max * sizeof(struct sk_buff *));
+ }
+
+ priv->state = CAN_STATE_STOPPED;
+
+ INIT_DELAYED_WORK(&priv->restart_work, can_restart_work);
+
+ return dev;
+}
+EXPORT_SYMBOL_GPL(alloc_candev_mqs);
+
+/* Free space of the CAN network device */
+void free_candev(struct net_device *dev)
+{
+ free_netdev(dev);
+}
+EXPORT_SYMBOL_GPL(free_candev);
+
+void can_set_default_mtu(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (priv->ctrlmode & CAN_CTRLMODE_XL) {
+ if (can_is_canxl_dev_mtu(dev->mtu))
+ return;
+ dev->mtu = CANXL_MTU;
+ dev->min_mtu = CANXL_MIN_MTU;
+ dev->max_mtu = CANXL_MAX_MTU;
+ } else if (priv->ctrlmode & CAN_CTRLMODE_FD) {
+ dev->mtu = CANFD_MTU;
+ dev->min_mtu = CANFD_MTU;
+ dev->max_mtu = CANFD_MTU;
+ } else {
+ dev->mtu = CAN_MTU;
+ dev->min_mtu = CAN_MTU;
+ dev->max_mtu = CAN_MTU;
+ }
+}
+
+/* helper to define static CAN controller features at device creation time */
+int can_set_static_ctrlmode(struct net_device *dev, u32 static_mode)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ /* alloc_candev() succeeded => netdev_priv() is valid at this point */
+ if (priv->ctrlmode_supported & static_mode) {
+ netdev_warn(dev,
+ "Controller features can not be supported and static at the same time\n");
+ return -EINVAL;
+ }
+ priv->ctrlmode = static_mode;
+
+ /* override the MTU which was set by default in can_setup() */
+ can_set_default_mtu(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(can_set_static_ctrlmode);
+
+/* generic implementation of netdev_ops::ndo_hwtstamp_get for CAN devices
+ * supporting hardware timestamps
+ */
+int can_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *cfg)
+{
+ cfg->tx_type = HWTSTAMP_TX_ON;
+ cfg->rx_filter = HWTSTAMP_FILTER_ALL;
+
+ return 0;
+}
+EXPORT_SYMBOL(can_hwtstamp_get);
+
+/* generic implementation of netdev_ops::ndo_hwtstamp_set for CAN devices
+ * supporting hardware timestamps
+ */
+int can_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ if (cfg->tx_type == HWTSTAMP_TX_ON &&
+ cfg->rx_filter == HWTSTAMP_FILTER_ALL)
+ return 0;
+ NL_SET_ERR_MSG_MOD(extack, "Only TX on and RX all packets filter supported");
+ return -ERANGE;
+}
+EXPORT_SYMBOL(can_hwtstamp_set);
+
+/* generic implementation of ethtool_ops::get_ts_info for CAN devices
+ * supporting hardware timestamps
+ */
+int can_ethtool_op_get_ts_info_hwts(struct net_device *dev,
+ struct kernel_ethtool_ts_info *info)
+{
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = BIT(HWTSTAMP_TX_ON);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+EXPORT_SYMBOL(can_ethtool_op_get_ts_info_hwts);
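
For context (not part of the patch), a driver supporting hardware timestamps would typically wire these generic implementations into its ops like this; the my_candev_* names are hypothetical:

	static const struct ethtool_ops my_candev_ethtool_ops = {
		.get_ts_info = can_ethtool_op_get_ts_info_hwts,
	};

	static const struct net_device_ops my_candev_netdev_ops = {
		.ndo_open	  = my_candev_open,
		.ndo_stop	  = my_candev_close,
		.ndo_start_xmit	  = my_candev_start_xmit,
		.ndo_hwtstamp_get = can_hwtstamp_get,
		.ndo_hwtstamp_set = can_hwtstamp_set,
	};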
+
+/* Common open function when the device gets opened.
+ *
+ * This function should be called in the open function of the device
+ * driver.
+ */
+int open_candev(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (!priv->bittiming.bitrate) {
+ netdev_err(dev, "bit-timing not yet defined\n");
+ return -EINVAL;
+ }
+
+ /* For CAN FD the data bitrate has to be >= the arbitration bitrate */
+ if ((priv->ctrlmode & CAN_CTRLMODE_FD) &&
+ (!priv->fd.data_bittiming.bitrate ||
+ priv->fd.data_bittiming.bitrate < priv->bittiming.bitrate)) {
+ netdev_err(dev, "incorrect/missing data bit-timing\n");
+ return -EINVAL;
+ }
+
+ /* Switch carrier on if device was stopped while in bus-off state */
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(open_candev);
+
+#ifdef CONFIG_OF
+/* Common function that can be used to understand the limitations of
+ * a transceiver when it provides no means to determine them at
+ * runtime.
+ */
+void of_can_transceiver(struct net_device *dev)
+{
+ struct device_node *dn;
+ struct can_priv *priv = netdev_priv(dev);
+ struct device_node *np = dev->dev.parent->of_node;
+ int ret;
+
+ dn = of_get_child_by_name(np, "can-transceiver");
+ if (!dn)
+ return;
+
+ ret = of_property_read_u32(dn, "max-bitrate", &priv->bitrate_max);
+ of_node_put(dn);
+ if ((ret && ret != -EINVAL) || (!ret && !priv->bitrate_max))
+ netdev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit.\n");
+}
+EXPORT_SYMBOL_GPL(of_can_transceiver);
+#endif
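
For reference (not part of the patch), the child node parsed above corresponds to a devicetree fragment along these lines, here assuming a transceiver limited to 5 Mbit/s:

	/*
	 *	can-transceiver {
	 *		max-bitrate = <5000000>;
	 *	};
	 */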
+
+/* Common close function for cleanup before the device gets closed.
+ *
+ * This function should be called in the close function of the device
+ * driver.
+ */
+void close_candev(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ cancel_delayed_work_sync(&priv->restart_work);
+ can_flush_echo_skb(dev);
+}
+EXPORT_SYMBOL_GPL(close_candev);
+
+static int can_set_termination(struct net_device *ndev, u16 term)
+{
+ struct can_priv *priv = netdev_priv(ndev);
+ int set;
+
+ if (term == priv->termination_gpio_ohms[CAN_TERMINATION_GPIO_ENABLED])
+ set = 1;
+ else
+ set = 0;
+
+ gpiod_set_value_cansleep(priv->termination_gpio, set);
+
+ return 0;
+}
+
+static int can_get_termination(struct net_device *ndev)
+{
+ struct can_priv *priv = netdev_priv(ndev);
+ struct device *dev = ndev->dev.parent;
+ struct gpio_desc *gpio;
+ u32 term;
+ int ret;
+
+ /* Disabling termination by default is the safe choice: Else if many
+ * bus participants enable it, no communication is possible at all.
+ */
+ gpio = devm_gpiod_get_optional(dev, "termination", GPIOD_OUT_LOW);
+ if (IS_ERR(gpio))
+ return dev_err_probe(dev, PTR_ERR(gpio),
+ "Cannot get termination-gpios\n");
+
+ if (!gpio)
+ return 0;
+
+ ret = device_property_read_u32(dev, "termination-ohms", &term);
+ if (ret) {
+ netdev_err(ndev, "Cannot get termination-ohms: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ if (term > U16_MAX) {
+ netdev_err(ndev, "Invalid termination-ohms value (%u > %u)\n",
+ term, U16_MAX);
+ return -EINVAL;
+ }
+
+ priv->termination_const_cnt = ARRAY_SIZE(priv->termination_gpio_ohms);
+ priv->termination_const = priv->termination_gpio_ohms;
+ priv->termination_gpio = gpio;
+ priv->termination_gpio_ohms[CAN_TERMINATION_GPIO_DISABLED] =
+ CAN_TERMINATION_DISABLED;
+ priv->termination_gpio_ohms[CAN_TERMINATION_GPIO_ENABLED] = term;
+ priv->do_set_termination = can_set_termination;
+
+ return 0;
+}
+
+static bool
+can_bittiming_const_valid(const struct can_bittiming_const *btc)
+{
+ if (!btc)
+ return true;
+
+ if (!btc->sjw_max)
+ return false;
+
+ return true;
+}
+
+/* Register the CAN network device */
+int register_candev(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ int err;
+
+ /* Ensure termination_const, termination_const_cnt and
+ * do_set_termination consistency. All must be either set or
+ * unset.
+ */
+ if ((!priv->termination_const != !priv->termination_const_cnt) ||
+ (!priv->termination_const != !priv->do_set_termination))
+ return -EINVAL;
+
+ if (!priv->bitrate_const != !priv->bitrate_const_cnt)
+ return -EINVAL;
+
+ if (!priv->fd.data_bitrate_const != !priv->fd.data_bitrate_const_cnt)
+ return -EINVAL;
+
+ /* We only support either fixed bit rates or bit timing const. */
+ if ((priv->bitrate_const || priv->fd.data_bitrate_const) &&
+ (priv->bittiming_const || priv->fd.data_bittiming_const))
+ return -EINVAL;
+
+ if (!can_bittiming_const_valid(priv->bittiming_const) ||
+ !can_bittiming_const_valid(priv->fd.data_bittiming_const))
+ return -EINVAL;
+
+ if (!priv->termination_const) {
+ err = can_get_termination(dev);
+ if (err)
+ return err;
+ }
+
+ dev->rtnl_link_ops = &can_link_ops;
+ netif_carrier_off(dev);
+
+ return register_netdev(dev);
+}
+EXPORT_SYMBOL_GPL(register_candev);
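
Putting the pieces together, a hedged sketch of a driver probe path (not part of the patch; all my_candev_* symbols, the 40 MHz clock and the echo skb count are assumptions for the example). struct can_priv must be the first member of the driver private structure, matching the memory layout described in alloc_candev_mqs():

	struct my_candev_priv {
		struct can_priv can;	/* must be the first member */
		/* driver specific state follows */
	};

	static int my_candev_probe(struct platform_device *pdev)
	{
		struct net_device *ndev;
		struct my_candev_priv *priv;
		int err;

		ndev = alloc_candev(sizeof(*priv), 4 /* echo skbs */);
		if (!ndev)
			return -ENOMEM;

		priv = netdev_priv(ndev);
		priv->can.clock.freq = 40000000;	/* assumed 40 MHz */
		priv->can.bittiming_const = &my_candev_bittiming_const;
		priv->can.do_set_mode = my_candev_set_mode;
		priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
					       CAN_CTRLMODE_LISTENONLY;

		ndev->netdev_ops = &my_candev_netdev_ops;
		SET_NETDEV_DEV(ndev, &pdev->dev);
		of_can_transceiver(ndev);

		err = register_candev(ndev);
		if (err)
			free_candev(ndev);

		return err;
	}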
+
+/* Unregister the CAN network device */
+void unregister_candev(struct net_device *dev)
+{
+ unregister_netdev(dev);
+}
+EXPORT_SYMBOL_GPL(unregister_candev);
+
+/* Test if a network device is a candev based device
+ * and return the can_priv* if so.
+ */
+struct can_priv *safe_candev_priv(struct net_device *dev)
+{
+ if (dev->type != ARPHRD_CAN || dev->rtnl_link_ops != &can_link_ops)
+ return NULL;
+
+ return netdev_priv(dev);
+}
+EXPORT_SYMBOL_GPL(safe_candev_priv);
+
+static __init int can_dev_init(void)
+{
+ int err;
+
+ err = can_netlink_register();
+ if (!err)
+ pr_info("CAN device driver interface\n");
+
+ return err;
+}
+module_init(can_dev_init);
+
+static __exit void can_dev_exit(void)
+{
+ can_netlink_unregister();
+}
+module_exit(can_dev_exit);
+
+MODULE_ALIAS_RTNL_LINK("can");
diff --git a/drivers/net/can/dev/length.c b/drivers/net/can/dev/length.c
new file mode 100644
index 000000000000..b7f4d76dd444
--- /dev/null
+++ b/drivers/net/can/dev/length.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2012, 2020 Oliver Hartkopp <socketcan@hartkopp.net>
+ */
+
+#include <linux/can/dev.h>
+
+/* CAN DLC to real data length conversion helpers */
+
+static const u8 dlc2len[] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 12, 16, 20, 24, 32, 48, 64
+};
+
+/* get data length from raw data length code (DLC) */
+u8 can_fd_dlc2len(u8 dlc)
+{
+ return dlc2len[dlc & 0x0F];
+}
+EXPORT_SYMBOL_GPL(can_fd_dlc2len);
+
+static const u8 len2dlc[] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, /* 0 - 8 */
+ 9, 9, 9, 9, /* 9 - 12 */
+ 10, 10, 10, 10, /* 13 - 16 */
+ 11, 11, 11, 11, /* 17 - 20 */
+ 12, 12, 12, 12, /* 21 - 24 */
+ 13, 13, 13, 13, 13, 13, 13, 13, /* 25 - 32 */
+ 14, 14, 14, 14, 14, 14, 14, 14, /* 33 - 40 */
+ 14, 14, 14, 14, 14, 14, 14, 14, /* 41 - 48 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 49 - 56 */
+ 15, 15, 15, 15, 15, 15, 15, 15 /* 57 - 64 */
+};
+
+/* map the sanitized data length to an appropriate data length code */
+u8 can_fd_len2dlc(u8 len)
+{
+ /* check for length mapping table size at build time */
+ BUILD_BUG_ON(ARRAY_SIZE(len2dlc) != CANFD_MAX_DLEN + 1);
+
+ if (unlikely(len > CANFD_MAX_DLEN))
+ return CANFD_MAX_DLC;
+
+ return len2dlc[len];
+}
+EXPORT_SYMBOL_GPL(can_fd_len2dlc);
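
Round-trip illustration (not part of the patch): lengths are rounded up to the next valid CAN FD frame size, so the mapping is not symmetric for every value:

	can_fd_len2dlc(12);	/* 12 bytes fit exactly          -> DLC 9   */
	can_fd_dlc2len(9);	/* DLC 9                         -> 12 bytes */
	can_fd_len2dlc(13);	/* 13 bytes need a 16 byte frame -> DLC 10  */
	can_fd_dlc2len(10);	/* DLC 10                        -> 16 bytes */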
+
+/**
+ * can_skb_get_frame_len() - Calculate the CAN Frame length in bytes
+ * of a given skb.
+ * @skb: socket buffer of a CAN message.
+ *
+ * Do a rough calculation: bit stuffing is ignored and length in bits
+ * is rounded up to a length in bytes.
+ *
+ * Rationale: this function is to be used for the BQL functions
+ * (netdev_sent_queue() and netdev_completed_queue()) which expect a
+ * value in bytes. Just using skb->len is insufficient because it will
+ * return the constant value of CAN(FD)_MTU. Doing the bit stuffing
+ * calculation would be too expensive in terms of computing resources
+ * for no noticeable gain.
+ *
+ * Remarks: The payload of CAN FD frames with the BRS flag is sent at a
+ * different bitrate. Currently, the can-utils canbusload tool does
+ * not support CAN FD yet, so we could not run any benchmark to
+ * measure the impact. There might be room for improvement here.
+ *
+ * Return: length in bytes.
+ */
+unsigned int can_skb_get_frame_len(const struct sk_buff *skb)
+{
+ const struct canfd_frame *cf = (const struct canfd_frame *)skb->data;
+ u8 len;
+
+ if (can_is_canfd_skb(skb))
+ len = canfd_sanitize_len(cf->len);
+ else if (cf->can_id & CAN_RTR_FLAG)
+ len = 0;
+ else
+ len = cf->len;
+
+ return can_frame_bytes(can_is_canfd_skb(skb), cf->can_id & CAN_EFF_FLAG,
+ false, len);
+}
+EXPORT_SYMBOL_GPL(can_skb_get_frame_len);
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
new file mode 100644
index 000000000000..d6b0e686fb11
--- /dev/null
+++ b/drivers/net/can/dev/netlink.c
@@ -0,0 +1,1111 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
+ * Copyright (C) 2006 Andrey Volkov, Varma Electronics
+ * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+ * Copyright (C) 2021-2025 Vincent Mailhol <mailhol@kernel.org>
+ */
+
+#include <linux/can/dev.h>
+#include <net/rtnetlink.h>
+
+static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
+ [IFLA_CAN_STATE] = { .type = NLA_U32 },
+ [IFLA_CAN_CTRLMODE] = { .len = sizeof(struct can_ctrlmode) },
+ [IFLA_CAN_RESTART_MS] = { .type = NLA_U32 },
+ [IFLA_CAN_RESTART] = { .type = NLA_U32 },
+ [IFLA_CAN_BITTIMING] = { .len = sizeof(struct can_bittiming) },
+ [IFLA_CAN_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) },
+ [IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) },
+ [IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) },
+ [IFLA_CAN_DATA_BITTIMING] = { .len = sizeof(struct can_bittiming) },
+ [IFLA_CAN_DATA_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) },
+ [IFLA_CAN_TERMINATION] = { .type = NLA_U16 },
+ [IFLA_CAN_TDC] = { .type = NLA_NESTED },
+ [IFLA_CAN_CTRLMODE_EXT] = { .type = NLA_NESTED },
+ [IFLA_CAN_XL_DATA_BITTIMING] = { .len = sizeof(struct can_bittiming) },
+ [IFLA_CAN_XL_DATA_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) },
+ [IFLA_CAN_XL_TDC] = { .type = NLA_NESTED },
+ [IFLA_CAN_XL_PWM] = { .type = NLA_NESTED },
+};
+
+static const struct nla_policy can_tdc_policy[IFLA_CAN_TDC_MAX + 1] = {
+ [IFLA_CAN_TDC_TDCV_MIN] = { .type = NLA_U32 },
+ [IFLA_CAN_TDC_TDCV_MAX] = { .type = NLA_U32 },
+ [IFLA_CAN_TDC_TDCO_MIN] = { .type = NLA_U32 },
+ [IFLA_CAN_TDC_TDCO_MAX] = { .type = NLA_U32 },
+ [IFLA_CAN_TDC_TDCF_MIN] = { .type = NLA_U32 },
+ [IFLA_CAN_TDC_TDCF_MAX] = { .type = NLA_U32 },
+ [IFLA_CAN_TDC_TDCV] = { .type = NLA_U32 },
+ [IFLA_CAN_TDC_TDCO] = { .type = NLA_U32 },
+ [IFLA_CAN_TDC_TDCF] = { .type = NLA_U32 },
+};
+
+static const struct nla_policy can_pwm_policy[IFLA_CAN_PWM_MAX + 1] = {
+ [IFLA_CAN_PWM_PWMS_MIN] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWMS_MAX] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWML_MIN] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWML_MAX] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWMO_MIN] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWMO_MAX] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWMS] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWML] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWMO] = { .type = NLA_U32 },
+};
+
+static int can_validate_bittiming(struct nlattr *data[],
+ struct netlink_ext_ack *extack,
+ int ifla_can_bittiming)
+{
+ struct can_bittiming *bt;
+
+ if (!data[ifla_can_bittiming])
+ return 0;
+
+ static_assert(__alignof__(*bt) <= NLA_ALIGNTO);
+ bt = nla_data(data[ifla_can_bittiming]);
+
+ /* sample point is in one-tenth of a percent */
+ if (bt->sample_point >= 1000) {
+ NL_SET_ERR_MSG(extack, "sample point must be between 0 and 100%");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int can_validate_tdc(struct nlattr *data_tdc,
+ struct netlink_ext_ack *extack, u32 tdc_flags)
+{
+ bool tdc_manual = tdc_flags & CAN_CTRLMODE_TDC_MANUAL_MASK;
+ bool tdc_auto = tdc_flags & CAN_CTRLMODE_TDC_AUTO_MASK;
+ int err;
+
+ if (tdc_auto && tdc_manual) {
+ NL_SET_ERR_MSG(extack,
+ "TDC manual and auto modes are mutually exclusive");
+ return -EOPNOTSUPP;
+ }
+
+ /* If one of the CAN_CTRLMODE_{,XL}_TDC_* flags is set, then the TDC
+ * parameters must be provided as well, and vice versa
+ */
+ if ((tdc_auto || tdc_manual) && !data_tdc) {
+ NL_SET_ERR_MSG(extack, "TDC parameters are missing");
+ return -EOPNOTSUPP;
+ }
+ if (!(tdc_auto || tdc_manual) && data_tdc) {
+ NL_SET_ERR_MSG(extack, "TDC mode (auto or manual) is missing");
+ return -EOPNOTSUPP;
+ }
+
+ /* If providing TDC parameters, at least TDCO is needed. TDCV is
+ * needed if and only if CAN_CTRLMODE_{,XL}_TDC_MANUAL is set
+ */
+ if (data_tdc) {
+ struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1];
+
+ err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX,
+ data_tdc, can_tdc_policy, extack);
+ if (err)
+ return err;
+
+ if (tb_tdc[IFLA_CAN_TDC_TDCV]) {
+ if (tdc_auto) {
+ NL_SET_ERR_MSG(extack,
+ "TDCV is incompatible with TDC auto mode");
+ return -EOPNOTSUPP;
+ }
+ } else {
+ if (tdc_manual) {
+ NL_SET_ERR_MSG(extack,
+ "TDC manual mode requires TDCV");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (!tb_tdc[IFLA_CAN_TDC_TDCO]) {
+ NL_SET_ERR_MSG(extack, "TDCO is missing");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int can_validate_pwm(struct nlattr *data[],
+ struct netlink_ext_ack *extack, u32 flags)
+{
+ struct nlattr *tb_pwm[IFLA_CAN_PWM_MAX + 1];
+ int err;
+
+ if (!data[IFLA_CAN_XL_PWM])
+ return 0;
+
+ if (!(flags & CAN_CTRLMODE_XL_TMS)) {
+ NL_SET_ERR_MSG(extack, "PWM requires TMS");
+ return -EOPNOTSUPP;
+ }
+
+ err = nla_parse_nested(tb_pwm, IFLA_CAN_PWM_MAX, data[IFLA_CAN_XL_PWM],
+ can_pwm_policy, extack);
+ if (err)
+ return err;
+
+ if (!tb_pwm[IFLA_CAN_PWM_PWMS] != !tb_pwm[IFLA_CAN_PWM_PWML]) {
+ NL_SET_ERR_MSG(extack,
+ "Provide either both PWMS and PWML, or none for automatic calculation");
+ return -EOPNOTSUPP;
+ }
+
+ if (tb_pwm[IFLA_CAN_PWM_PWMO] &&
+ (!tb_pwm[IFLA_CAN_PWM_PWMS] || !tb_pwm[IFLA_CAN_PWM_PWML])) {
+ NL_SET_ERR_MSG(extack, "PWMO requires both PWMS and PWML");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int can_validate_databittiming(struct nlattr *data[],
+ struct netlink_ext_ack *extack,
+ int ifla_can_data_bittiming, u32 flags)
+{
+ struct nlattr *data_tdc;
+ const char *type;
+ u32 tdc_flags;
+ bool is_on;
+ int err;
+
+ /* Make sure that valid CAN FD/XL configurations always consist of
+ * - nominal/arbitration bittiming
+ * - data bittiming
+ * - control mode with CAN_CTRLMODE_{FD,XL} set
+ * - TDC parameters are coherent (details in can_validate_tdc())
+ */
+
+ if (ifla_can_data_bittiming == IFLA_CAN_DATA_BITTIMING) {
+ data_tdc = data[IFLA_CAN_TDC];
+ tdc_flags = flags & CAN_CTRLMODE_FD_TDC_MASK;
+ is_on = flags & CAN_CTRLMODE_FD;
+ type = "FD";
+ } else {
+ data_tdc = data[IFLA_CAN_XL_TDC];
+ tdc_flags = flags & CAN_CTRLMODE_XL_TDC_MASK;
+ is_on = flags & CAN_CTRLMODE_XL;
+ type = "XL";
+ }
+
+ if (is_on) {
+ if (!data[IFLA_CAN_BITTIMING] || !data[ifla_can_data_bittiming]) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "Provide both nominal and %s data bittiming",
+ type);
+ return -EOPNOTSUPP;
+ }
+ } else {
+ if (data[ifla_can_data_bittiming]) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "%s data bittiming requires CAN %s",
+ type, type);
+ return -EOPNOTSUPP;
+ }
+ if (data_tdc) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "%s TDC requires CAN %s",
+ type, type);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ err = can_validate_bittiming(data, extack, ifla_can_data_bittiming);
+ if (err)
+ return err;
+
+ err = can_validate_tdc(data_tdc, extack, tdc_flags);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int can_validate_xl_flags(struct netlink_ext_ack *extack,
+ u32 masked_flags, u32 mask)
+{
+ if (masked_flags & CAN_CTRLMODE_XL) {
+ if (masked_flags & CAN_CTRLMODE_XL_TMS) {
+ const u32 tms_conflicts_mask = CAN_CTRLMODE_FD |
+ CAN_CTRLMODE_XL_TDC_MASK;
+ u32 tms_conflicts = masked_flags & tms_conflicts_mask;
+
+ if (tms_conflicts) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "TMS and %s are mutually exclusive",
+ can_get_ctrlmode_str(tms_conflicts));
+ return -EOPNOTSUPP;
+ }
+ }
+ } else {
+ if (mask & CAN_CTRLMODE_XL_TMS) {
+ NL_SET_ERR_MSG(extack, "TMS requires CAN XL");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int can_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ u32 flags = 0;
+ int err;
+
+ if (!data)
+ return 0;
+
+ if (data[IFLA_CAN_CTRLMODE]) {
+ struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+
+ flags = cm->flags & cm->mask;
+
+ if ((flags & CAN_CTRLMODE_LISTENONLY) &&
+ (flags & CAN_CTRLMODE_RESTRICTED)) {
+ NL_SET_ERR_MSG(extack,
+ "LISTEN-ONLY and RESTRICTED modes are mutually exclusive");
+ return -EOPNOTSUPP;
+ }
+
+ err = can_validate_xl_flags(extack, flags, cm->mask);
+ if (err)
+ return err;
+ }
+
+ err = can_validate_bittiming(data, extack, IFLA_CAN_BITTIMING);
+ if (err)
+ return err;
+
+ err = can_validate_databittiming(data, extack,
+ IFLA_CAN_DATA_BITTIMING, flags);
+ if (err)
+ return err;
+
+ err = can_validate_databittiming(data, extack,
+ IFLA_CAN_XL_DATA_BITTIMING, flags);
+ if (err)
+ return err;
+
+ err = can_validate_pwm(data, extack, flags);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int can_ctrlmode_changelink(struct net_device *dev,
+ struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ struct can_ctrlmode *cm;
+ u32 ctrlstatic, maskedflags, deactivated, notsupp, ctrlstatic_missing;
+
+ if (!data[IFLA_CAN_CTRLMODE])
+ return 0;
+
+ /* Do not allow changing controller mode while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+ ctrlstatic = can_get_static_ctrlmode(priv);
+ maskedflags = cm->flags & cm->mask;
+ deactivated = ~cm->flags & cm->mask;
+ notsupp = maskedflags & ~(priv->ctrlmode_supported | ctrlstatic);
+
+ if (notsupp) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "requested control mode %s not supported",
+ can_get_ctrlmode_str(notsupp));
+ return -EOPNOTSUPP;
+ }
+
+ /* do not check for static fd-non-iso if 'fd' is disabled */
+ if (!(maskedflags & CAN_CTRLMODE_FD))
+ ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO;
+
+ /* make sure all remaining static flags were requested */
+ ctrlstatic_missing = (maskedflags & ctrlstatic) ^ ctrlstatic;
+ if (ctrlstatic_missing) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "missing required %s static control mode",
+ can_get_ctrlmode_str(ctrlstatic_missing));
+ return -EOPNOTSUPP;
+ }
+
+ /* If FD was active and is not turned off, check for XL conflicts */
+ if (priv->ctrlmode & CAN_CTRLMODE_FD & ~deactivated) {
+ if (maskedflags & CAN_CTRLMODE_XL_TMS) {
+ NL_SET_ERR_MSG(extack,
+ "TMS can not be activated while CAN FD is on");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ /* If a top dependency flag is provided, reset all its dependencies */
+ if (cm->mask & CAN_CTRLMODE_FD)
+ priv->ctrlmode &= ~CAN_CTRLMODE_FD_TDC_MASK;
+ if (cm->mask & CAN_CTRLMODE_XL)
+ priv->ctrlmode &= ~(CAN_CTRLMODE_XL_TDC_MASK |
+ CAN_CTRLMODE_XL_TMS);
+
+ /* clear bits to be modified and copy the flag values */
+ priv->ctrlmode &= ~cm->mask;
+ priv->ctrlmode |= maskedflags;
+
+ /* Wipe potential leftovers from previous CAN FD/XL config */
+ if (!(priv->ctrlmode & CAN_CTRLMODE_FD)) {
+ memset(&priv->fd.data_bittiming, 0,
+ sizeof(priv->fd.data_bittiming));
+ priv->ctrlmode &= ~CAN_CTRLMODE_FD_TDC_MASK;
+ memset(&priv->fd.tdc, 0, sizeof(priv->fd.tdc));
+ }
+ if (!(priv->ctrlmode & CAN_CTRLMODE_XL)) {
+ memset(&priv->xl.data_bittiming, 0,
+ sizeof(priv->xl.data_bittiming));
+ priv->ctrlmode &= ~CAN_CTRLMODE_XL_TDC_MASK;
+ memset(&priv->xl.tdc, 0, sizeof(priv->xl.tdc));
+ memset(&priv->xl.pwm, 0, sizeof(priv->xl.pwm));
+ }
+
+ can_set_default_mtu(dev);
+
+ return 0;
+}
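
For orientation (not part of the patch), the flags/mask pair handled above is the uapi struct can_ctrlmode; a request such as "ip link set can0 type can fd on listen-only off" would typically arrive as:

	struct can_ctrlmode cm = {
		.mask  = CAN_CTRLMODE_FD | CAN_CTRLMODE_LISTENONLY,
		.flags = CAN_CTRLMODE_FD,
	};
	/* maskedflags = CAN_CTRLMODE_FD (turn on),
	 * deactivated = CAN_CTRLMODE_LISTENONLY (turn off),
	 * bits not in .mask keep their current value in priv->ctrlmode
	 */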
+
+static int can_tdc_changelink(struct data_bittiming_params *dbt_params,
+ const struct nlattr *nla,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1];
+ struct can_tdc tdc = { 0 };
+ const struct can_tdc_const *tdc_const = dbt_params->tdc_const;
+ int err;
+
+ if (!tdc_const) {
+ NL_SET_ERR_MSG(extack, "The device does not support TDC");
+ return -EOPNOTSUPP;
+ }
+
+ err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX, nla,
+ can_tdc_policy, extack);
+ if (err)
+ return err;
+
+ if (tb_tdc[IFLA_CAN_TDC_TDCV]) {
+ u32 tdcv = nla_get_u32(tb_tdc[IFLA_CAN_TDC_TDCV]);
+
+ if (tdcv < tdc_const->tdcv_min || tdcv > tdc_const->tdcv_max)
+ return -EINVAL;
+
+ tdc.tdcv = tdcv;
+ }
+
+ if (tb_tdc[IFLA_CAN_TDC_TDCO]) {
+ u32 tdco = nla_get_u32(tb_tdc[IFLA_CAN_TDC_TDCO]);
+
+ if (tdco < tdc_const->tdco_min || tdco > tdc_const->tdco_max)
+ return -EINVAL;
+
+ tdc.tdco = tdco;
+ }
+
+ if (tb_tdc[IFLA_CAN_TDC_TDCF]) {
+ u32 tdcf = nla_get_u32(tb_tdc[IFLA_CAN_TDC_TDCF]);
+
+ if (tdcf < tdc_const->tdcf_min || tdcf > tdc_const->tdcf_max)
+ return -EINVAL;
+
+ tdc.tdcf = tdcf;
+ }
+
+ dbt_params->tdc = tdc;
+
+ return 0;
+}
+
+static int can_dbt_changelink(struct net_device *dev, struct nlattr *data[],
+ bool fd, struct netlink_ext_ack *extack)
+{
+ struct nlattr *data_bittiming, *data_tdc;
+ struct can_priv *priv = netdev_priv(dev);
+ struct data_bittiming_params *dbt_params;
+ struct can_bittiming dbt;
+ bool need_tdc_calc = false;
+ u32 tdc_mask;
+ int err;
+
+ if (fd) {
+ data_bittiming = data[IFLA_CAN_DATA_BITTIMING];
+ data_tdc = data[IFLA_CAN_TDC];
+ dbt_params = &priv->fd;
+ tdc_mask = CAN_CTRLMODE_FD_TDC_MASK;
+ } else {
+ data_bittiming = data[IFLA_CAN_XL_DATA_BITTIMING];
+ data_tdc = data[IFLA_CAN_XL_TDC];
+ dbt_params = &priv->xl;
+ tdc_mask = CAN_CTRLMODE_XL_TDC_MASK;
+ }
+
+ if (!data_bittiming)
+ return 0;
+
+ /* Do not allow changing bittiming while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ /* Calculate bittiming parameters based on data_bittiming_const
+ * if set, otherwise pass bitrate directly via do_set_bitrate().
+ * Bail out if neither is given.
+ */
+ if (!dbt_params->data_bittiming_const && !dbt_params->do_set_data_bittiming &&
+ !dbt_params->data_bitrate_const)
+ return -EOPNOTSUPP;
+
+ memcpy(&dbt, nla_data(data_bittiming), sizeof(dbt));
+ err = can_get_bittiming(dev, &dbt, dbt_params->data_bittiming_const,
+ dbt_params->data_bitrate_const,
+ dbt_params->data_bitrate_const_cnt, extack);
+ if (err)
+ return err;
+
+ if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "CAN data bitrate %u bps surpasses transceiver capabilities of %u bps",
+ dbt.bitrate, priv->bitrate_max);
+ return -EINVAL;
+ }
+
+ memset(&dbt_params->tdc, 0, sizeof(dbt_params->tdc));
+ if (data[IFLA_CAN_CTRLMODE]) {
+ struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+
+ if (fd || !(priv->ctrlmode & CAN_CTRLMODE_XL_TMS))
+ need_tdc_calc = !(cm->mask & tdc_mask);
+ }
+ if (data_tdc) {
+ /* TDC parameters are provided: use them */
+ err = can_tdc_changelink(dbt_params, data_tdc, extack);
+ if (err) {
+ priv->ctrlmode &= ~tdc_mask;
+ return err;
+ }
+ } else if (need_tdc_calc) {
+ /* Neither TDC parameters nor TDC flags were provided:
+ * do the calculation
+ */
+ can_calc_tdco(&dbt_params->tdc, dbt_params->tdc_const, &dbt,
+ tdc_mask, &priv->ctrlmode, priv->ctrlmode_supported);
+ } /* else: both CAN_CTRLMODE_{,XL}_TDC_{AUTO,MANUAL} are explicitly
+ * turned off. TDC is disabled: do nothing
+ */
+
+ memcpy(&dbt_params->data_bittiming, &dbt, sizeof(dbt));
+
+ if (dbt_params->do_set_data_bittiming) {
+ /* Finally, set the bit-timing registers */
+ err = dbt_params->do_set_data_bittiming(dev);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int can_pwm_changelink(struct net_device *dev,
+ const struct nlattr *pwm_nla,
+ struct netlink_ext_ack *extack)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ const struct can_pwm_const *pwm_const = priv->xl.pwm_const;
+ struct nlattr *tb_pwm[IFLA_CAN_PWM_MAX + 1];
+ struct can_pwm pwm = { 0 };
+ int err;
+
+ if (!(priv->ctrlmode & CAN_CTRLMODE_XL_TMS))
+ return 0;
+
+ if (!pwm_const) {
+ NL_SET_ERR_MSG(extack, "The device does not support PWM");
+ return -EOPNOTSUPP;
+ }
+
+ if (!pwm_nla)
+ return can_calc_pwm(dev, extack);
+
+ err = nla_parse_nested(tb_pwm, IFLA_CAN_PWM_MAX, pwm_nla,
+ can_pwm_policy, extack);
+ if (err)
+ return err;
+
+ if (tb_pwm[IFLA_CAN_PWM_PWMS]) {
+ pwm.pwms = nla_get_u32(tb_pwm[IFLA_CAN_PWM_PWMS]);
+ if (pwm.pwms < pwm_const->pwms_min ||
+ pwm.pwms > pwm_const->pwms_max) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PWMS: %u tqmin is out of range: %u...%u",
+ pwm.pwms, pwm_const->pwms_min,
+ pwm_const->pwms_max);
+ return -EINVAL;
+ }
+ }
+
+ if (tb_pwm[IFLA_CAN_PWM_PWML]) {
+ pwm.pwml = nla_get_u32(tb_pwm[IFLA_CAN_PWM_PWML]);
+ if (pwm.pwml < pwm_const->pwml_min ||
+ pwm.pwml > pwm_const->pwml_max) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PWML: %u tqmin is out of range: %u...%u",
+ pwm.pwml, pwm_const->pwml_min,
+ pwm_const->pwml_max);
+ return -EINVAL;
+ }
+ }
+
+ if (tb_pwm[IFLA_CAN_PWM_PWMO]) {
+ pwm.pwmo = nla_get_u32(tb_pwm[IFLA_CAN_PWM_PWMO]);
+ if (pwm.pwmo < pwm_const->pwmo_min ||
+ pwm.pwmo > pwm_const->pwmo_max) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PWMO: %u tqmin is out of range: %u...%u",
+ pwm.pwmo, pwm_const->pwmo_min,
+ pwm_const->pwmo_max);
+ return -EINVAL;
+ }
+ }
+
+ err = can_validate_pwm_bittiming(dev, &pwm, extack);
+ if (err)
+ return err;
+
+ priv->xl.pwm = pwm;
+ return 0;
+}
+
+static int can_changelink(struct net_device *dev, struct nlattr *tb[],
+ struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ int err;
+
+ /* We need synchronization with dev->stop() */
+ ASSERT_RTNL();
+
+ err = can_ctrlmode_changelink(dev, data, extack);
+ if (err)
+ return err;
+
+ if (data[IFLA_CAN_BITTIMING]) {
+ struct can_bittiming bt;
+
+ /* Do not allow changing bittiming while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ /* Calculate bittiming parameters based on
+ * bittiming_const if set, otherwise pass bitrate
+ * directly via do_set_bitrate(). Bail out if neither
+ * is given.
+ */
+ if (!priv->bittiming_const && !priv->do_set_bittiming &&
+ !priv->bitrate_const)
+ return -EOPNOTSUPP;
+
+ memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
+ err = can_get_bittiming(dev, &bt,
+ priv->bittiming_const,
+ priv->bitrate_const,
+ priv->bitrate_const_cnt,
+ extack);
+ if (err)
+ return err;
+
+ if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "arbitration bitrate %u bps surpasses transceiver capabilities of %u bps",
+ bt.bitrate, priv->bitrate_max);
+ return -EINVAL;
+ }
+
+ memcpy(&priv->bittiming, &bt, sizeof(bt));
+
+ if (priv->do_set_bittiming) {
+ /* Finally, set the bit-timing registers */
+ err = priv->do_set_bittiming(dev);
+ if (err)
+ return err;
+ }
+ }
+
+ if (data[IFLA_CAN_RESTART_MS]) {
+ unsigned int restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]);
+
+ if (restart_ms != 0 && !priv->do_set_mode) {
+ NL_SET_ERR_MSG(extack,
+ "Device doesn't support restart from Bus Off");
+ return -EOPNOTSUPP;
+ }
+
+ /* Do not allow changing restart delay while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+ priv->restart_ms = restart_ms;
+ }
+
+ if (data[IFLA_CAN_RESTART]) {
+ if (!priv->do_set_mode) {
+ NL_SET_ERR_MSG(extack,
+ "Device doesn't support restart from Bus Off");
+ return -EOPNOTSUPP;
+ }
+
+ /* Do not allow a restart while not running */
+ if (!(dev->flags & IFF_UP))
+ return -EINVAL;
+ err = can_restart_now(dev);
+ if (err)
+ return err;
+ }
+
+ /* CAN FD */
+ err = can_dbt_changelink(dev, data, true, extack);
+ if (err)
+ return err;
+
+ /* CAN XL */
+ err = can_dbt_changelink(dev, data, false, extack);
+ if (err)
+ return err;
+ err = can_pwm_changelink(dev, data[IFLA_CAN_XL_PWM], extack);
+ if (err)
+ return err;
+
+ if (data[IFLA_CAN_TERMINATION]) {
+ const u16 termval = nla_get_u16(data[IFLA_CAN_TERMINATION]);
+ const unsigned int num_term = priv->termination_const_cnt;
+ unsigned int i;
+
+ if (!priv->do_set_termination) {
+ NL_SET_ERR_MSG(extack,
+ "Termination is not configurable on this device");
+ return -EOPNOTSUPP;
+ }
+
+ /* check whether given value is supported by the interface */
+ for (i = 0; i < num_term; i++) {
+ if (termval == priv->termination_const[i])
+ break;
+ }
+ if (i >= num_term)
+ return -EINVAL;
+
+ /* Finally, set the termination value */
+ err = priv->do_set_termination(dev, termval);
+ if (err)
+ return err;
+
+ priv->termination = termval;
+ }
+
+ return 0;
+}
+
+static size_t can_tdc_get_size(struct data_bittiming_params *dbt_params,
+ u32 tdc_flags)
+{
+ bool tdc_manual = tdc_flags & CAN_CTRLMODE_TDC_MANUAL_MASK;
+ size_t size;
+
+ if (!dbt_params->tdc_const)
+ return 0;
+
+ size = nla_total_size(0); /* nest IFLA_CAN_TDC */
+ if (tdc_manual) {
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV_MIN */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV_MAX */
+ }
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO_MIN */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO_MAX */
+ if (dbt_params->tdc_const->tdcf_max) {
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF_MIN */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF_MAX */
+ }
+
+ if (tdc_flags) {
+ if (tdc_manual || dbt_params->do_get_auto_tdcv)
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO */
+ if (dbt_params->tdc_const->tdcf_max)
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF */
+ }
+
+ return size;
+}
+
+static size_t can_data_bittiming_get_size(struct data_bittiming_params *dbt_params,
+ u32 tdc_flags)
+{
+ size_t size = 0;
+
+ if (dbt_params->data_bittiming.bitrate) /* IFLA_CAN_{,XL}_DATA_BITTIMING */
+ size += nla_total_size(sizeof(dbt_params->data_bittiming));
+ if (dbt_params->data_bittiming_const) /* IFLA_CAN_{,XL}_DATA_BITTIMING_CONST */
+ size += nla_total_size(sizeof(*dbt_params->data_bittiming_const));
+ if (dbt_params->data_bitrate_const) /* IFLA_CAN_{,XL}_DATA_BITRATE_CONST */
+ size += nla_total_size(sizeof(*dbt_params->data_bitrate_const) *
+ dbt_params->data_bitrate_const_cnt);
+ size += can_tdc_get_size(dbt_params, tdc_flags); /* IFLA_CAN_{,XL}_TDC */
+
+ return size;
+}
+
+static size_t can_ctrlmode_ext_get_size(void)
+{
+ return nla_total_size(0) + /* nest IFLA_CAN_CTRLMODE_EXT */
+ nla_total_size(sizeof(u32)); /* IFLA_CAN_CTRLMODE_SUPPORTED */
+}
+
+static size_t can_pwm_get_size(const struct can_pwm_const *pwm_const,
+ bool pwm_on)
+{
+ size_t size;
+
+ if (!pwm_const || !pwm_on)
+ return 0;
+
+ size = nla_total_size(0); /* nest IFLA_CAN_PWM */
+
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWMS_MIN */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWMS_MAX */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWML_MIN */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWML_MAX */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWMO_MIN */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWMO_MAX */
+
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWMS */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWML */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWMO */
+
+ return size;
+}
+
+static size_t can_get_size(const struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ size_t size = 0;
+
+ if (priv->bittiming.bitrate) /* IFLA_CAN_BITTIMING */
+ size += nla_total_size(sizeof(struct can_bittiming));
+ if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */
+ size += nla_total_size(sizeof(struct can_bittiming_const));
+ size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */
+ size += nla_total_size(sizeof(struct can_ctrlmode)); /* IFLA_CAN_CTRLMODE */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
+ if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
+ size += nla_total_size(sizeof(struct can_berr_counter));
+ if (priv->termination_const) {
+ size += nla_total_size(sizeof(priv->termination)); /* IFLA_CAN_TERMINATION */
+ size += nla_total_size(sizeof(*priv->termination_const) * /* IFLA_CAN_TERMINATION_CONST */
+ priv->termination_const_cnt);
+ }
+ if (priv->bitrate_const) /* IFLA_CAN_BITRATE_CONST */
+ size += nla_total_size(sizeof(*priv->bitrate_const) *
+ priv->bitrate_const_cnt);
+ size += nla_total_size(sizeof(priv->bitrate_max)); /* IFLA_CAN_BITRATE_MAX */
+ size += can_ctrlmode_ext_get_size(); /* IFLA_CAN_CTRLMODE_EXT */
+
+ size += can_data_bittiming_get_size(&priv->fd,
+ priv->ctrlmode & CAN_CTRLMODE_FD_TDC_MASK);
+
+ size += can_data_bittiming_get_size(&priv->xl,
+ priv->ctrlmode & CAN_CTRLMODE_XL_TDC_MASK);
+ size += can_pwm_get_size(priv->xl.pwm_const, /* IFLA_CAN_XL_PWM */
+ priv->ctrlmode & CAN_CTRLMODE_XL_TMS);
+
+ return size;
+}
+
+static int can_bittiming_fill_info(struct sk_buff *skb, int ifla_can_bittiming,
+ struct can_bittiming *bittiming)
+{
+ return bittiming->bitrate != CAN_BITRATE_UNSET &&
+ bittiming->bitrate != CAN_BITRATE_UNKNOWN &&
+ nla_put(skb, ifla_can_bittiming, sizeof(*bittiming), bittiming);
+}
+
+static int can_bittiming_const_fill_info(struct sk_buff *skb,
+ int ifla_can_bittiming_const,
+ const struct can_bittiming_const *bittiming_const)
+{
+ return bittiming_const &&
+ nla_put(skb, ifla_can_bittiming_const,
+ sizeof(*bittiming_const), bittiming_const);
+}
+
+static int can_bitrate_const_fill_info(struct sk_buff *skb,
+ int ifla_can_bitrate_const,
+ const u32 *bitrate_const, unsigned int cnt)
+{
+ return bitrate_const &&
+ nla_put(skb, ifla_can_bitrate_const,
+ sizeof(*bitrate_const) * cnt, bitrate_const);
+}
+
+static int can_tdc_fill_info(struct sk_buff *skb, const struct net_device *dev,
+ int ifla_can_tdc)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ struct data_bittiming_params *dbt_params;
+ const struct can_tdc_const *tdc_const;
+ struct can_tdc *tdc;
+ struct nlattr *nest;
+ bool tdc_is_enabled, tdc_manual;
+
+ if (ifla_can_tdc == IFLA_CAN_TDC) {
+ dbt_params = &priv->fd;
+ tdc_is_enabled = can_fd_tdc_is_enabled(priv);
+ tdc_manual = priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL;
+ } else {
+ dbt_params = &priv->xl;
+ tdc_is_enabled = can_xl_tdc_is_enabled(priv);
+ tdc_manual = priv->ctrlmode & CAN_CTRLMODE_XL_TDC_MANUAL;
+ }
+ tdc_const = dbt_params->tdc_const;
+ tdc = &dbt_params->tdc;
+
+ if (!tdc_const)
+ return 0;
+
+ nest = nla_nest_start(skb, ifla_can_tdc);
+ if (!nest)
+ return -EMSGSIZE;
+
+ if (tdc_manual &&
+ (nla_put_u32(skb, IFLA_CAN_TDC_TDCV_MIN, tdc_const->tdcv_min) ||
+ nla_put_u32(skb, IFLA_CAN_TDC_TDCV_MAX, tdc_const->tdcv_max)))
+ goto err_cancel;
+ if (nla_put_u32(skb, IFLA_CAN_TDC_TDCO_MIN, tdc_const->tdco_min) ||
+ nla_put_u32(skb, IFLA_CAN_TDC_TDCO_MAX, tdc_const->tdco_max))
+ goto err_cancel;
+ if (tdc_const->tdcf_max &&
+ (nla_put_u32(skb, IFLA_CAN_TDC_TDCF_MIN, tdc_const->tdcf_min) ||
+ nla_put_u32(skb, IFLA_CAN_TDC_TDCF_MAX, tdc_const->tdcf_max)))
+ goto err_cancel;
+
+ if (tdc_is_enabled) {
+ u32 tdcv;
+ int err = -EINVAL;
+
+ if (tdc_manual) {
+ tdcv = tdc->tdcv;
+ err = 0;
+ } else if (dbt_params->do_get_auto_tdcv) {
+ err = dbt_params->do_get_auto_tdcv(dev, &tdcv);
+ }
+ if (!err && nla_put_u32(skb, IFLA_CAN_TDC_TDCV, tdcv))
+ goto err_cancel;
+ if (nla_put_u32(skb, IFLA_CAN_TDC_TDCO, tdc->tdco))
+ goto err_cancel;
+ if (tdc_const->tdcf_max &&
+ nla_put_u32(skb, IFLA_CAN_TDC_TDCF, tdc->tdcf))
+ goto err_cancel;
+ }
+
+ nla_nest_end(skb, nest);
+ return 0;
+
+err_cancel:
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
+}
+
+static int can_pwm_fill_info(struct sk_buff *skb, const struct can_priv *priv)
+{
+ const struct can_pwm_const *pwm_const = priv->xl.pwm_const;
+ const struct can_pwm *pwm = &priv->xl.pwm;
+ struct nlattr *nest;
+
+ if (!pwm_const)
+ return 0;
+
+ nest = nla_nest_start(skb, IFLA_CAN_XL_PWM);
+ if (!nest)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(skb, IFLA_CAN_PWM_PWMS_MIN, pwm_const->pwms_min) ||
+ nla_put_u32(skb, IFLA_CAN_PWM_PWMS_MAX, pwm_const->pwms_max) ||
+ nla_put_u32(skb, IFLA_CAN_PWM_PWML_MIN, pwm_const->pwml_min) ||
+ nla_put_u32(skb, IFLA_CAN_PWM_PWML_MAX, pwm_const->pwml_max) ||
+ nla_put_u32(skb, IFLA_CAN_PWM_PWMO_MIN, pwm_const->pwmo_min) ||
+ nla_put_u32(skb, IFLA_CAN_PWM_PWMO_MAX, pwm_const->pwmo_max))
+ goto err_cancel;
+
+ if (priv->ctrlmode & CAN_CTRLMODE_XL_TMS) {
+ if (nla_put_u32(skb, IFLA_CAN_PWM_PWMS, pwm->pwms) ||
+ nla_put_u32(skb, IFLA_CAN_PWM_PWML, pwm->pwml) ||
+ nla_put_u32(skb, IFLA_CAN_PWM_PWMO, pwm->pwmo))
+ goto err_cancel;
+ }
+
+ nla_nest_end(skb, nest);
+ return 0;
+
+err_cancel:
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
+}
+
+static int can_ctrlmode_ext_fill_info(struct sk_buff *skb,
+ const struct can_priv *priv)
+{
+ struct nlattr *nest;
+
+ nest = nla_nest_start(skb, IFLA_CAN_CTRLMODE_EXT);
+ if (!nest)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(skb, IFLA_CAN_CTRLMODE_SUPPORTED,
+ priv->ctrlmode_supported)) {
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
+ }
+
+ nla_nest_end(skb, nest);
+ return 0;
+}
+
+static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ struct can_ctrlmode cm = {.flags = priv->ctrlmode};
+ struct can_berr_counter bec = { };
+ enum can_state state = priv->state;
+
+ if (priv->do_get_state)
+ priv->do_get_state(dev, &state);
+
+ if (can_bittiming_fill_info(skb, IFLA_CAN_BITTIMING,
+ &priv->bittiming) ||
+
+ can_bittiming_const_fill_info(skb, IFLA_CAN_BITTIMING_CONST,
+ priv->bittiming_const) ||
+
+ nla_put(skb, IFLA_CAN_CLOCK, sizeof(priv->clock), &priv->clock) ||
+ nla_put_u32(skb, IFLA_CAN_STATE, state) ||
+ nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
+ nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
+
+ (priv->do_get_berr_counter &&
+ !priv->do_get_berr_counter(dev, &bec) &&
+ nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
+
+ can_bittiming_fill_info(skb, IFLA_CAN_DATA_BITTIMING,
+ &priv->fd.data_bittiming) ||
+
+ can_bittiming_const_fill_info(skb, IFLA_CAN_DATA_BITTIMING_CONST,
+ priv->fd.data_bittiming_const) ||
+
+ (priv->termination_const &&
+ (nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) ||
+ nla_put(skb, IFLA_CAN_TERMINATION_CONST,
+ sizeof(*priv->termination_const) *
+ priv->termination_const_cnt,
+ priv->termination_const))) ||
+
+ can_bitrate_const_fill_info(skb, IFLA_CAN_BITRATE_CONST,
+ priv->bitrate_const,
+ priv->bitrate_const_cnt) ||
+
+ can_bitrate_const_fill_info(skb, IFLA_CAN_DATA_BITRATE_CONST,
+ priv->fd.data_bitrate_const,
+ priv->fd.data_bitrate_const_cnt) ||
+
+ (nla_put(skb, IFLA_CAN_BITRATE_MAX,
+ sizeof(priv->bitrate_max),
+ &priv->bitrate_max)) ||
+
+ can_tdc_fill_info(skb, dev, IFLA_CAN_TDC) ||
+
+ can_ctrlmode_ext_fill_info(skb, priv) ||
+
+ can_bittiming_fill_info(skb, IFLA_CAN_XL_DATA_BITTIMING,
+ &priv->xl.data_bittiming) ||
+
+ can_bittiming_const_fill_info(skb, IFLA_CAN_XL_DATA_BITTIMING_CONST,
+ priv->xl.data_bittiming_const) ||
+
+ can_bitrate_const_fill_info(skb, IFLA_CAN_XL_DATA_BITRATE_CONST,
+ priv->xl.data_bitrate_const,
+ priv->xl.data_bitrate_const_cnt) ||
+
+ can_tdc_fill_info(skb, dev, IFLA_CAN_XL_TDC) ||
+
+ can_pwm_fill_info(skb, priv)
+ )
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static size_t can_get_xstats_size(const struct net_device *dev)
+{
+ return sizeof(struct can_device_stats);
+}
+
+static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (nla_put(skb, IFLA_INFO_XSTATS,
+ sizeof(priv->can_stats), &priv->can_stats))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int can_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
+ struct netlink_ext_ack *extack)
+{
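+	/* CAN interfaces are registered by their hardware drivers;
+	 * creating them from user space via rtnl is not supported.
+	 */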
+ return -EOPNOTSUPP;
+}
+
+static void can_dellink(struct net_device *dev, struct list_head *head)
+{
+}
+
+struct rtnl_link_ops can_link_ops __read_mostly = {
+ .kind = "can",
+ .netns_refund = true,
+ .maxtype = IFLA_CAN_MAX,
+ .policy = can_policy,
+ .setup = can_setup,
+ .validate = can_validate,
+ .newlink = can_newlink,
+ .changelink = can_changelink,
+ .dellink = can_dellink,
+ .get_size = can_get_size,
+ .fill_info = can_fill_info,
+ .get_xstats_size = can_get_xstats_size,
+ .fill_xstats = can_fill_xstats,
+};
+
+int can_netlink_register(void)
+{
+ return rtnl_link_register(&can_link_ops);
+}
+
+void can_netlink_unregister(void)
+{
+ rtnl_link_unregister(&can_link_ops);
+}
diff --git a/drivers/net/can/dev/rx-offload.c b/drivers/net/can/dev/rx-offload.c
new file mode 100644
index 000000000000..46e7b6db4a1e
--- /dev/null
+++ b/drivers/net/can/dev/rx-offload.c
@@ -0,0 +1,427 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2014 Protonic Holland,
+ * David Jander
+ * Copyright (C) 2014-2021, 2023 Pengutronix,
+ * Marc Kleine-Budde <kernel@pengutronix.de>
+ */
+
+#include <linux/can/dev.h>
+#include <linux/can/rx-offload.h>
+
+struct can_rx_offload_cb {
+ u32 timestamp;
+};
+
+static inline struct can_rx_offload_cb *
+can_rx_offload_get_cb(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));
+
+ return (struct can_rx_offload_cb *)skb->cb;
+}
+
+static inline bool
+can_rx_offload_le(struct can_rx_offload *offload,
+ unsigned int a, unsigned int b)
+{
+ if (offload->inc)
+ return a <= b;
+ else
+ return a >= b;
+}
+
+static inline unsigned int
+can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
+{
+ if (offload->inc)
+ return (*val)++;
+ else
+ return (*val)--;
+}
+
+static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
+{
+ struct can_rx_offload *offload = container_of(napi,
+ struct can_rx_offload,
+ napi);
+ struct net_device *dev = offload->dev;
+ struct net_device_stats *stats = &dev->stats;
+ struct sk_buff *skb;
+ int work_done = 0;
+
+ while ((work_done < quota) &&
+ (skb = skb_dequeue(&offload->skb_queue))) {
+ struct can_frame *cf = (struct can_frame *)skb->data;
+
+ work_done++;
+ if (!(cf->can_id & CAN_ERR_FLAG)) {
+ stats->rx_packets++;
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ stats->rx_bytes += cf->len;
+ }
+ netif_receive_skb(skb);
+ }
+
+ if (work_done < quota) {
+ napi_complete_done(napi, work_done);
+
+ /* Check if there was another interrupt */
+ if (!skb_queue_empty(&offload->skb_queue))
+ napi_schedule(&offload->napi);
+ }
+
+ return work_done;
+}
+
+static inline void
+__skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
+ int (*compare)(struct sk_buff *a, struct sk_buff *b))
+{
+ struct sk_buff *pos, *insert = NULL;
+
+ skb_queue_reverse_walk(head, pos) {
+ const struct can_rx_offload_cb *cb_pos, *cb_new;
+
+ cb_pos = can_rx_offload_get_cb(pos);
+ cb_new = can_rx_offload_get_cb(new);
+
+ netdev_dbg(new->dev,
+ "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n",
+ __func__,
+ cb_pos->timestamp, cb_new->timestamp,
+ cb_new->timestamp - cb_pos->timestamp,
+ skb_queue_len(head));
+
+ if (compare(pos, new) < 0)
+ continue;
+ insert = pos;
+ break;
+ }
+ if (!insert)
+ __skb_queue_head(head, new);
+ else
+ __skb_queue_after(head, insert, new);
+}
+
+static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
+{
+ const struct can_rx_offload_cb *cb_a, *cb_b;
+
+ cb_a = can_rx_offload_get_cb(a);
+ cb_b = can_rx_offload_get_cb(b);
+
+ /* Subtract two u32 and return result as int, to keep
+ * difference steady around the u32 overflow.
+ */
+ return cb_b->timestamp - cb_a->timestamp;
+}
+
+/**
+ * can_rx_offload_offload_one() - Read one CAN frame from HW
+ * @offload: pointer to rx_offload context
+ * @n: number of mailbox to read
+ *
+ * The task of this function is to read a CAN frame from mailbox @n
+ * of the device and return the mailbox's content as a struct
+ * sk_buff.
+ *
+ * If the struct can_rx_offload::skb_queue exceeds the maximal queue
+ * length (struct can_rx_offload::skb_queue_len_max) or no skb can be
+ * allocated, the mailbox contents are discarded by reading them into an
+ * overflow buffer. This way the mailbox is marked as free by the
+ * driver.
+ *
+ * Return: A pointer to skb containing the CAN frame on success.
+ *
+ * NULL if the mailbox @n is empty.
+ *
+ * ERR_PTR() in case of an error.
+ */
+static struct sk_buff *
+can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
+{
+ struct sk_buff *skb;
+ struct can_rx_offload_cb *cb;
+ bool drop = false;
+ u32 timestamp;
+
+ /* If queue is full drop frame */
+ if (unlikely(skb_queue_len(&offload->skb_queue) >
+ offload->skb_queue_len_max))
+ drop = true;
+
+ skb = offload->mailbox_read(offload, n, &timestamp, drop);
+ /* Mailbox was empty. */
+ if (unlikely(!skb))
+ return NULL;
+
+ /* There was a problem reading the mailbox, propagate
+ * error value.
+ */
+ if (IS_ERR(skb)) {
+ offload->dev->stats.rx_dropped++;
+ offload->dev->stats.rx_fifo_errors++;
+
+ return skb;
+ }
+
+ /* Mailbox was read. */
+ cb = can_rx_offload_get_cb(skb);
+ cb->timestamp = timestamp;
+
+ return skb;
+}
+
+int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
+ u64 pending)
+{
+ unsigned int i;
+ int received = 0;
+
+ for (i = offload->mb_first;
+ can_rx_offload_le(offload, i, offload->mb_last);
+ can_rx_offload_inc(offload, &i)) {
+ struct sk_buff *skb;
+
+ if (!(pending & BIT_ULL(i)))
+ continue;
+
+ skb = can_rx_offload_offload_one(offload, i);
+ if (IS_ERR_OR_NULL(skb))
+ continue;
+
+ __skb_queue_add_sort(&offload->skb_irq_queue, skb,
+ can_rx_offload_compare);
+ received++;
+ }
+
+ return received;
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);
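For orientation, a minimal, hypothetical hard-IRQ handler showing how a driver with timestamped RX mailboxes might combine can_rx_offload_irq_offload_timestamp() with can_rx_offload_irq_finish() (defined further down in this file). The mk_* identifiers, the pending-mailbox register read and the private-data layout are illustrative assumptions, not part of this patch:

	#include <linux/can/dev.h>
	#include <linux/can/rx-offload.h>
	#include <linux/interrupt.h>

	/* Assumed driver private data: can_priv first, rx-offload context embedded */
	struct mk_can_priv {
		struct can_priv can;
		struct can_rx_offload offload;
		void __iomem *regs;
	};

	static irqreturn_t mk_can_irq(int irq, void *dev_id)
	{
		struct net_device *ndev = dev_id;
		struct mk_can_priv *priv = netdev_priv(ndev);
		u64 pending = mk_can_read_pending(priv);	/* assumed register read */

		if (!pending)
			return IRQ_NONE;

		/* Read all pending mailboxes, sorted by hardware timestamp */
		can_rx_offload_irq_offload_timestamp(&priv->offload, pending);

		/* Splice the collected skbs into the NAPI queue and schedule polling */
		can_rx_offload_irq_finish(&priv->offload);

		return IRQ_HANDLED;
	}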
+
+int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
+{
+ struct sk_buff *skb;
+ int received = 0;
+
+ while (1) {
+ skb = can_rx_offload_offload_one(offload, 0);
+ if (IS_ERR(skb))
+ continue;
+ if (!skb)
+ break;
+
+ __skb_queue_tail(&offload->skb_irq_queue, skb);
+ received++;
+ }
+
+ return received;
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
+
+int can_rx_offload_queue_timestamp(struct can_rx_offload *offload,
+ struct sk_buff *skb, u32 timestamp)
+{
+ struct can_rx_offload_cb *cb;
+
+ if (skb_queue_len(&offload->skb_queue) >
+ offload->skb_queue_len_max) {
+ dev_kfree_skb_any(skb);
+ return -ENOBUFS;
+ }
+
+ cb = can_rx_offload_get_cb(skb);
+ cb->timestamp = timestamp;
+
+ __skb_queue_add_sort(&offload->skb_irq_queue, skb,
+ can_rx_offload_compare);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_queue_timestamp);
+
+unsigned int
+can_rx_offload_get_echo_skb_queue_timestamp(struct can_rx_offload *offload,
+ unsigned int idx, u32 timestamp,
+ unsigned int *frame_len_ptr)
+{
+ struct net_device *dev = offload->dev;
+ struct net_device_stats *stats = &dev->stats;
+ struct sk_buff *skb;
+ unsigned int len;
+ int err;
+
+ skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
+ if (!skb)
+ return 0;
+
+ err = can_rx_offload_queue_timestamp(offload, skb, timestamp);
+ if (err) {
+ stats->rx_errors++;
+ stats->tx_fifo_errors++;
+ }
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb_queue_timestamp);
+
+int can_rx_offload_queue_tail(struct can_rx_offload *offload,
+ struct sk_buff *skb)
+{
+ if (skb_queue_len(&offload->skb_queue) >
+ offload->skb_queue_len_max) {
+ dev_kfree_skb_any(skb);
+ return -ENOBUFS;
+ }
+
+ __skb_queue_tail(&offload->skb_irq_queue, skb);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);
+
+unsigned int
+can_rx_offload_get_echo_skb_queue_tail(struct can_rx_offload *offload,
+ unsigned int idx,
+ unsigned int *frame_len_ptr)
+{
+ struct net_device *dev = offload->dev;
+ struct net_device_stats *stats = &dev->stats;
+ struct sk_buff *skb;
+ unsigned int len;
+ int err;
+
+ skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
+ if (!skb)
+ return 0;
+
+ err = can_rx_offload_queue_tail(offload, skb);
+ if (err) {
+ stats->rx_errors++;
+ stats->tx_fifo_errors++;
+ }
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb_queue_tail);
+
+void can_rx_offload_irq_finish(struct can_rx_offload *offload)
+{
+ unsigned long flags;
+ int queue_len;
+
+ if (skb_queue_empty_lockless(&offload->skb_irq_queue))
+ return;
+
+ spin_lock_irqsave(&offload->skb_queue.lock, flags);
+ skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue);
+ spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
+
+ queue_len = skb_queue_len(&offload->skb_queue);
+ if (queue_len > offload->skb_queue_len_max / 8)
+ netdev_dbg(offload->dev, "%s: queue_len=%d\n",
+ __func__, queue_len);
+
+ napi_schedule(&offload->napi);
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_irq_finish);
+
+void can_rx_offload_threaded_irq_finish(struct can_rx_offload *offload)
+{
+ unsigned long flags;
+ int queue_len;
+
+ if (skb_queue_empty_lockless(&offload->skb_irq_queue))
+ return;
+
+ spin_lock_irqsave(&offload->skb_queue.lock, flags);
+ skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue);
+ spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
+
+ queue_len = skb_queue_len(&offload->skb_queue);
+ if (queue_len > offload->skb_queue_len_max / 8)
+ netdev_dbg(offload->dev, "%s: queue_len=%d\n",
+ __func__, queue_len);
+
+ local_bh_disable();
+ napi_schedule(&offload->napi);
+ local_bh_enable();
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_threaded_irq_finish);
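A sketch of the threaded counterpart, assuming a FIFO-style controller drained from a threaded IRQ and reusing the hypothetical struct mk_can_priv from the sketch above; only the two can_rx_offload_*() calls come from this file:

	static irqreturn_t mk_can_threaded_irq(int irq, void *dev_id)
	{
		struct mk_can_priv *priv = netdev_priv((struct net_device *)dev_id);

		/* Drain the RX FIFO; mailbox_read() is invoked with n == 0 until empty */
		can_rx_offload_irq_offload_fifo(&priv->offload);

		/* Runs in process context, so use the threaded variant, which wraps
		 * napi_schedule() in local_bh_disable()/local_bh_enable().
		 */
		can_rx_offload_threaded_irq_finish(&priv->offload);

		return IRQ_HANDLED;
	}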
+
+static int can_rx_offload_init_queue(struct net_device *dev,
+ struct can_rx_offload *offload,
+ unsigned int weight)
+{
+ offload->dev = dev;
+
+ /* Limit queue len to 4x the weight (rounded to next power of two) */
+ offload->skb_queue_len_max = 2 << fls(weight);
+ offload->skb_queue_len_max *= 4;
+ skb_queue_head_init(&offload->skb_queue);
+ __skb_queue_head_init(&offload->skb_irq_queue);
+
+ netif_napi_add_weight(dev, &offload->napi, can_rx_offload_napi_poll,
+ weight);
+
+ dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
+ __func__, offload->skb_queue_len_max);
+
+ return 0;
+}
+
+int can_rx_offload_add_timestamp(struct net_device *dev,
+ struct can_rx_offload *offload)
+{
+ unsigned int weight;
+
+ if (offload->mb_first > BITS_PER_LONG_LONG ||
+ offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read)
+ return -EINVAL;
+
+ if (offload->mb_first < offload->mb_last) {
+ offload->inc = true;
+ weight = offload->mb_last - offload->mb_first;
+ } else {
+ offload->inc = false;
+ weight = offload->mb_first - offload->mb_last;
+ }
+
+ return can_rx_offload_init_queue(dev, offload, weight);
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);
+
+int can_rx_offload_add_fifo(struct net_device *dev,
+ struct can_rx_offload *offload, unsigned int weight)
+{
+ if (!offload->mailbox_read)
+ return -EINVAL;
+
+ return can_rx_offload_init_queue(dev, offload, weight);
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);
+
+int can_rx_offload_add_manual(struct net_device *dev,
+ struct can_rx_offload *offload,
+ unsigned int weight)
+{
+ if (offload->mailbox_read)
+ return -EINVAL;
+
+ return can_rx_offload_init_queue(dev, offload, weight);
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_add_manual);
+
+void can_rx_offload_enable(struct can_rx_offload *offload)
+{
+ napi_enable(&offload->napi);
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_enable);
+
+void can_rx_offload_del(struct can_rx_offload *offload)
+{
+ netif_napi_del(&offload->napi);
+ skb_queue_purge(&offload->skb_queue);
+ __skb_queue_purge(&offload->skb_irq_queue);
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_del);
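To round this off, a hedged sketch of the surrounding lifecycle: the rx-offload context is set up and enabled when the interface is brought up and torn down again later. The mk_* names, the mailbox_read callback and the NAPI weight of 64 are assumptions:

	static int mk_can_open(struct net_device *ndev)
	{
		struct mk_can_priv *priv = netdev_priv(ndev);
		int err;

		priv->offload.mailbox_read = mk_can_mailbox_read;	/* assumed callback */

		err = can_rx_offload_add_fifo(ndev, &priv->offload, 64);
		if (err)
			return err;

		can_rx_offload_enable(&priv->offload);

		/* ... request IRQs, start the controller ... */
		return 0;
	}

	static void mk_can_teardown(struct net_device *ndev)
	{
		struct mk_can_priv *priv = netdev_priv(ndev);

		/* Purges both skb queues and removes the NAPI instance */
		can_rx_offload_del(&priv->offload);
	}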
diff --git a/drivers/net/can/dev/skb.c b/drivers/net/can/dev/skb.c
new file mode 100644
index 000000000000..3ebd4f779b9b
--- /dev/null
+++ b/drivers/net/can/dev/skb.c
@@ -0,0 +1,374 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
+ * Copyright (C) 2006 Andrey Volkov, Varma Electronics
+ * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+ */
+
+#include <linux/can/dev.h>
+#include <linux/module.h>
+
+#define MOD_DESC "CAN device driver interface"
+
+MODULE_DESCRIPTION(MOD_DESC);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+
+/* Local echo of CAN messages
+ *
+ * CAN network devices *should* support a local echo functionality
+ * (see Documentation/networking/can.rst). To test the handling of CAN
+ * interfaces that do not support the local echo, both driver types are
+ * implemented. If a driver does not support the echo, IFF_ECHO remains
+ * clear in dev->flags and the PF_CAN core performs the echo as a
+ * fallback solution.
+ */
+void can_flush_echo_skb(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ int i;
+
+ for (i = 0; i < priv->echo_skb_max; i++) {
+ if (priv->echo_skb[i]) {
+ kfree_skb(priv->echo_skb[i]);
+ priv->echo_skb[i] = NULL;
+ stats->tx_dropped++;
+ stats->tx_aborted_errors++;
+ }
+ }
+}
+
+/* Put the skb on the stack to be looped back locally later on
+ *
+ * The function is typically called in the start_xmit function
+ * of the device driver. The driver must protect access to
+ * priv->echo_skb, if necessary.
+ */
+int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
+ unsigned int idx, unsigned int frame_len)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (idx >= priv->echo_skb_max) {
+ netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
+ __func__, idx, priv->echo_skb_max);
+ return -EINVAL;
+ }
+
+ /* check flag whether this packet has to be looped back */
+ if (!(dev->flags & IFF_ECHO) ||
+ (skb->protocol != htons(ETH_P_CAN) &&
+ skb->protocol != htons(ETH_P_CANFD) &&
+ skb->protocol != htons(ETH_P_CANXL))) {
+ kfree_skb(skb);
+ return 0;
+ }
+
+ if (!priv->echo_skb[idx]) {
+ skb = can_create_echo_skb(skb);
+ if (!skb)
+ return -ENOMEM;
+
+ /* make settings for echo to reduce code in irq context */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->dev = dev;
+
+ /* save frame_len to reuse it when transmission is completed */
+ can_skb_prv(skb)->frame_len = frame_len;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ skb_tx_timestamp(skb);
+
+ /* save this skb for tx interrupt echo handling */
+ priv->echo_skb[idx] = skb;
+ } else {
+ /* locking problem with netif_stop_queue() ?? */
+ netdev_err(dev, "%s: BUG! echo_skb %d is occupied!\n", __func__, idx);
+ kfree_skb(skb);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(can_put_echo_skb);
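As a usage illustration, a minimal ndo_start_xmit excerpt assuming a controller with a single TX mailbox; mk_can_write_mailbox() and mk_can_start_tx() are invented stand-ins for the hardware access, while can_dropped_invalid_skb() and can_put_echo_skb() are the helpers from this file:

	static netdev_tx_t mk_can_start_xmit(struct sk_buff *skb, struct net_device *ndev)
	{
		struct can_frame *cf = (struct can_frame *)skb->data;

		if (can_dropped_invalid_skb(ndev, skb))
			return NETDEV_TX_OK;

		/* Single TX mailbox: stop the queue until the TX-done IRQ fires */
		netif_stop_queue(ndev);

		/* Load the frame into the hardware first ... */
		mk_can_write_mailbox(ndev, cf);

		/* ... then park the skb for the local echo (frame_len 0: no
		 * byte-queue-limits accounting in this sketch) and kick off
		 * the transmission.
		 */
		can_put_echo_skb(skb, ndev, 0, 0);
		mk_can_start_tx(ndev);

		return NETDEV_TX_OK;
	}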
+
+struct sk_buff *
+__can_get_echo_skb(struct net_device *dev, unsigned int idx,
+ unsigned int *len_ptr, unsigned int *frame_len_ptr)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (idx >= priv->echo_skb_max) {
+ netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
+ __func__, idx, priv->echo_skb_max);
+ return NULL;
+ }
+
+ if (priv->echo_skb[idx]) {
+ /* Using "struct canfd_frame::len" for the frame
+ * length is supported on both CAN and CANFD frames.
+ */
+ struct sk_buff *skb = priv->echo_skb[idx];
+ struct can_skb_priv *can_skb_priv = can_skb_prv(skb);
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)
+ skb_tstamp_tx(skb, skb_hwtstamps(skb));
+
+ /* get the real payload length for netdev statistics */
+ *len_ptr = can_skb_get_data_len(skb);
+
+ if (frame_len_ptr)
+ *frame_len_ptr = can_skb_priv->frame_len;
+
+ priv->echo_skb[idx] = NULL;
+
+ if (skb->pkt_type == PACKET_LOOPBACK) {
+ skb->pkt_type = PACKET_BROADCAST;
+ } else {
+ dev_consume_skb_any(skb);
+ return NULL;
+ }
+
+ return skb;
+ }
+
+ return NULL;
+}
+
+/* Get the skb from the stack and loop it back locally
+ *
+ * The function is typically called when the TX done interrupt
+ * is handled in the device driver. The driver must protect
+ * access to priv->echo_skb, if necessary.
+ */
+unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx,
+ unsigned int *frame_len_ptr)
+{
+ struct sk_buff *skb;
+ unsigned int len;
+
+ skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
+ if (!skb)
+ return 0;
+
+ skb_get(skb);
+ if (netif_rx(skb) == NET_RX_SUCCESS)
+ dev_consume_skb_any(skb);
+ else
+ dev_kfree_skb_any(skb);
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(can_get_echo_skb);
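The matching TX-done path could then look like this hypothetical handler, with echo index 0 mirroring the sketch above:

	static void mk_can_tx_done(struct net_device *ndev)
	{
		struct net_device_stats *stats = &ndev->stats;

		/* Loop the echoed frame back to the stack; the return value is
		 * the payload length and feeds the byte counter.
		 */
		stats->tx_bytes += can_get_echo_skb(ndev, 0, NULL);
		stats->tx_packets++;

		netif_wake_queue(ndev);
	}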
+
+/* Remove the skb from the stack and free it.
+ *
+ * The function is typically called when TX failed.
+ */
+void can_free_echo_skb(struct net_device *dev, unsigned int idx,
+ unsigned int *frame_len_ptr)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (idx >= priv->echo_skb_max) {
+ netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
+ __func__, idx, priv->echo_skb_max);
+ return;
+ }
+
+ if (priv->echo_skb[idx]) {
+ struct sk_buff *skb = priv->echo_skb[idx];
+ struct can_skb_priv *can_skb_priv = can_skb_prv(skb);
+
+ if (frame_len_ptr)
+ *frame_len_ptr = can_skb_priv->frame_len;
+
+ dev_kfree_skb_any(skb);
+ priv->echo_skb[idx] = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(can_free_echo_skb);
+
+/* fill common values for CAN sk_buffs */
+static void init_can_skb_reserve(struct sk_buff *skb)
+{
+ skb->pkt_type = PACKET_BROADCAST;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+
+ can_skb_reserve(skb);
+ can_skb_prv(skb)->skbcnt = 0;
+}
+
+struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
+{
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
+ sizeof(struct can_frame));
+ if (unlikely(!skb)) {
+ *cf = NULL;
+
+ return NULL;
+ }
+
+ skb->protocol = htons(ETH_P_CAN);
+ init_can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+
+ *cf = skb_put_zero(skb, sizeof(struct can_frame));
+
+ return skb;
+}
+EXPORT_SYMBOL_GPL(alloc_can_skb);
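For the RX direction, a hypothetical handler for a Classical CAN frame; mk_can_read_mailbox() is an assumed hardware accessor that fills can_id, len and data[]:

	static void mk_can_rx(struct net_device *ndev)
	{
		struct net_device_stats *stats = &ndev->stats;
		struct can_frame *cf;
		struct sk_buff *skb;

		skb = alloc_can_skb(ndev, &cf);
		if (!skb) {
			stats->rx_dropped++;
			return;
		}

		mk_can_read_mailbox(ndev, cf);	/* assumed hardware access */

		stats->rx_packets++;
		if (!(cf->can_id & CAN_RTR_FLAG))
			stats->rx_bytes += cf->len;

		netif_rx(skb);
	}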
+
+struct sk_buff *alloc_canfd_skb(struct net_device *dev,
+ struct canfd_frame **cfd)
+{
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
+ sizeof(struct canfd_frame));
+ if (unlikely(!skb)) {
+ *cfd = NULL;
+
+ return NULL;
+ }
+
+ skb->protocol = htons(ETH_P_CANFD);
+ init_can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+
+ *cfd = skb_put_zero(skb, sizeof(struct canfd_frame));
+
+ /* set CAN FD flag by default */
+ (*cfd)->flags = CANFD_FDF;
+
+ return skb;
+}
+EXPORT_SYMBOL_GPL(alloc_canfd_skb);
+
+struct sk_buff *alloc_canxl_skb(struct net_device *dev,
+ struct canxl_frame **cxl,
+ unsigned int data_len)
+{
+ struct sk_buff *skb;
+
+ if (data_len < CANXL_MIN_DLEN || data_len > CANXL_MAX_DLEN)
+ goto out_error;
+
+ skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
+ CANXL_HDR_SIZE + data_len);
+ if (unlikely(!skb))
+ goto out_error;
+
+ skb->protocol = htons(ETH_P_CANXL);
+ init_can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+
+ *cxl = skb_put_zero(skb, CANXL_HDR_SIZE + data_len);
+
+ /* set CAN XL flag and length information by default */
+ (*cxl)->flags = CANXL_XLF;
+ (*cxl)->len = data_len;
+
+ return skb;
+
+out_error:
+ *cxl = NULL;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(alloc_canxl_skb);
+
+struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_can_skb(dev, cf);
+ if (unlikely(!skb))
+ return NULL;
+
+ (*cf)->can_id = CAN_ERR_FLAG;
+ (*cf)->len = CAN_ERR_DLC;
+
+ return skb;
+}
+EXPORT_SYMBOL_GPL(alloc_can_err_skb);
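And a short, hedged example of the error-frame helper: reporting bus-off to user space with a frame allocated by alloc_can_err_skb(). CAN_ERR_BUSOFF comes from linux/can/error.h; the rest is assumed driver context:

	static void mk_can_handle_bus_off(struct net_device *ndev)
	{
		struct can_frame *cf;
		struct sk_buff *skb;

		skb = alloc_can_err_skb(ndev, &cf);
		if (!skb)
			return;

		/* can_id already carries CAN_ERR_FLAG, len is CAN_ERR_DLC */
		cf->can_id |= CAN_ERR_BUSOFF;

		netif_rx(skb);
	}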
+
+/* Check for outgoing skbs that have not been created by the CAN subsystem */
+static bool can_skb_headroom_valid(struct net_device *dev, struct sk_buff *skb)
+{
+ /* af_packet creates a headroom of HH_DATA_MOD bytes, which is fine */
+ if (WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct can_skb_priv)))
+ return false;
+
+ /* af_packet does not apply CAN skb specific settings */
+ if (skb->ip_summed == CHECKSUM_NONE) {
+ /* init headroom */
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* perform proper loopback on capable devices */
+ if (dev->flags & IFF_ECHO)
+ skb->pkt_type = PACKET_LOOPBACK;
+ else
+ skb->pkt_type = PACKET_HOST;
+
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+
+ /* set CANFD_FDF flag for CAN FD frames */
+ if (can_is_canfd_skb(skb)) {
+ struct canfd_frame *cfd;
+
+ cfd = (struct canfd_frame *)skb->data;
+ cfd->flags |= CANFD_FDF;
+ }
+ }
+
+ return true;
+}
+
+/* Drop a given socket buffer if it does not contain a valid CAN frame. */
+bool can_dropped_invalid_skb(struct net_device *dev, struct sk_buff *skb)
+{
+ switch (ntohs(skb->protocol)) {
+ case ETH_P_CAN:
+ if (!can_is_can_skb(skb))
+ goto inval_skb;
+ break;
+
+ case ETH_P_CANFD:
+ if (!can_is_canfd_skb(skb))
+ goto inval_skb;
+ break;
+
+ case ETH_P_CANXL:
+ if (!can_is_canxl_skb(skb))
+ goto inval_skb;
+ break;
+
+ default:
+ goto inval_skb;
+ }
+
+ if (!can_skb_headroom_valid(dev, skb))
+ goto inval_skb;
+
+ return false;
+
+inval_skb:
+ kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return true;
+}
+EXPORT_SYMBOL_GPL(can_dropped_invalid_skb);