Diffstat (limited to 'net/can')
-rw-r--r--  net/can/Kconfig                 12
-rw-r--r--  net/can/af_can.c                27
-rw-r--r--  net/can/af_can.h                12
-rw-r--r--  net/can/bcm.c                  211
-rw-r--r--  net/can/gw.c                   185
-rw-r--r--  net/can/isotp.c                256
-rw-r--r--  net/can/j1939/address-claim.c   40
-rw-r--r--  net/can/j1939/bus.c              9
-rw-r--r--  net/can/j1939/j1939-priv.h       4
-rw-r--r--  net/can/j1939/main.c            37
-rw-r--r--  net/can/j1939/socket.c         132
-rw-r--r--  net/can/j1939/transport.c       61
-rw-r--r--  net/can/proc.c                  48
-rw-r--r--  net/can/raw.c                  328
14 files changed, 923 insertions, 439 deletions
diff --git a/net/can/Kconfig b/net/can/Kconfig
index cb56be8e3862..e4ccf731a24c 100644
--- a/net/can/Kconfig
+++ b/net/can/Kconfig
@@ -5,6 +5,7 @@
menuconfig CAN
tristate "CAN bus subsystem support"
+ select CAN_DEV
help
Controller Area Network (CAN) is a slow (up to 1Mbit/s) serial
communications protocol. Development of the CAN bus started in
@@ -56,18 +57,17 @@ config CAN_GW
source "net/can/j1939/Kconfig"
config CAN_ISOTP
- tristate "ISO 15765-2:2016 CAN transport protocol"
+ tristate "ISO 15765-2 CAN transport protocol"
help
CAN Transport Protocols offer support for segmented Point-to-Point
communication between CAN nodes via two defined CAN Identifiers.
+ This protocol driver implements segmented data transfers for CAN CC
+ (aka Classical CAN, CAN 2.0B) and CAN FD frame types which were
+ introduced with ISO 15765-2:2016.
As CAN frames can only transport a small amount of data bytes
- (max. 8 bytes for 'classic' CAN and max. 64 bytes for CAN FD) this
+ (max. 8 bytes for CAN CC and max. 64 bytes for CAN FD) this
segmentation is needed to transport longer Protocol Data Units (PDU)
as needed e.g. for vehicle diagnosis (UDS, ISO 14229) or IP-over-CAN
traffic.
- This protocol driver implements data transfers according to
- ISO 15765-2:2016 for 'classic' CAN and CAN FD frame types.
- If you want to perform automotive vehicle diagnostic services (UDS),
- say 'y'.
endif
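
The reworded help text above describes the user-visible side of CAN_ISOTP. For orientation, a minimal user-space sketch of an ISO-TP socket as this option enables it; the interface name and the two CAN identifiers are illustrative assumptions, not values taken from this patch:

    /* sketch: user-space ISO-TP socket ("can0", 0x7e0/0x7e8 are example values) */
    #include <linux/can.h>
    #include <linux/can/isotp.h>
    #include <net/if.h>
    #include <sys/socket.h>

    int open_isotp(void)
    {
            struct sockaddr_can addr = { .can_family = AF_CAN };
            int s = socket(PF_CAN, SOCK_DGRAM, CAN_ISOTP);

            if (s < 0)
                    return -1;
            addr.can_ifindex = if_nametoindex("can0"); /* example interface */
            addr.can_addr.tp.tx_id = 0x7e0;            /* example tester ID */
            addr.can_addr.tp.rx_id = 0x7e8;            /* example ECU ID */
            if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
                    return -1;
            return s; /* read()/write() now transfer complete ISO-TP PDUs */
    }

With the socket bound, a single write() of e.g. a 4 KiB buffer is segmented into consecutive frames by the kernel, which is exactly the segmentation the help text refers to.
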
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 7343fd487dbe..770173d8db42 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -171,6 +171,9 @@ static int can_create(struct net *net, struct socket *sock, int protocol,
/* release sk on errors */
sock_orphan(sk);
sock_put(sk);
+ sock->sk = NULL;
+ } else {
+ sock_prot_inuse_add(net, sk->sk_prot, 1);
}
errout:
@@ -218,7 +221,7 @@ int can_send(struct sk_buff *skb, int loop)
}
/* Make sure the CAN frame can pass the selected CAN netdevice. */
- if (unlikely(skb->len > skb->dev->mtu)) {
+ if (unlikely(skb->len > READ_ONCE(skb->dev->mtu))) {
err = -EMSGSIZE;
goto inval_skb;
}
@@ -286,8 +289,8 @@ int can_send(struct sk_buff *skb, int loop)
netif_rx(newskb);
/* update statistics */
- pkg_stats->tx_frames++;
- pkg_stats->tx_frames_delta++;
+ atomic_long_inc(&pkg_stats->tx_frames);
+ atomic_long_inc(&pkg_stats->tx_frames_delta);
return 0;
@@ -646,8 +649,8 @@ static void can_receive(struct sk_buff *skb, struct net_device *dev)
int matches;
/* update statistics */
- pkg_stats->rx_frames++;
- pkg_stats->rx_frames_delta++;
+ atomic_long_inc(&pkg_stats->rx_frames);
+ atomic_long_inc(&pkg_stats->rx_frames_delta);
/* create non-zero unique skb identifier together with *skb */
while (!(can_skb_prv(skb)->skbcnt))
@@ -668,8 +671,8 @@ static void can_receive(struct sk_buff *skb, struct net_device *dev)
consume_skb(skb);
if (matches > 0) {
- pkg_stats->matches++;
- pkg_stats->matches_delta++;
+ atomic_long_inc(&pkg_stats->matches);
+ atomic_long_inc(&pkg_stats->matches_delta);
}
}
@@ -680,7 +683,7 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
pr_warn_once("PF_CAN: dropped non conform CAN skbuff: dev type %d, len %d\n",
dev->type, skb->len);
- kfree_skb(skb);
+ kfree_skb_reason(skb, SKB_DROP_REASON_CAN_RX_INVALID_FRAME);
return NET_RX_DROP;
}
@@ -695,7 +698,7 @@ static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
pr_warn_once("PF_CAN: dropped non conform CAN FD skbuff: dev type %d, len %d\n",
dev->type, skb->len);
- kfree_skb(skb);
+ kfree_skb_reason(skb, SKB_DROP_REASON_CANFD_RX_INVALID_FRAME);
return NET_RX_DROP;
}
@@ -710,7 +713,7 @@ static int canxl_rcv(struct sk_buff *skb, struct net_device *dev,
pr_warn_once("PF_CAN: dropped non conform CAN XL skbuff: dev type %d, len %d\n",
dev->type, skb->len);
- kfree_skb(skb);
+ kfree_skb_reason(skb, SKB_DROP_REASON_CANXL_RX_INVALID_FRAME);
return NET_RX_DROP;
}
@@ -822,7 +825,7 @@ static void can_pernet_exit(struct net *net)
if (IS_ENABLED(CONFIG_PROC_FS)) {
can_remove_proc(net);
if (stats_timer)
- del_timer_sync(&net->can.stattimer);
+ timer_delete_sync(&net->can.stattimer);
}
kfree(net->can.rx_alldev_list);
@@ -865,6 +868,8 @@ static __init int can_init(void)
/* check for correct padding to be able to use the structs similarly */
BUILD_BUG_ON(offsetof(struct can_frame, len) !=
offsetof(struct canfd_frame, len) ||
+ offsetof(struct can_frame, len) !=
+ offsetof(struct canxl_frame, flags) ||
offsetof(struct can_frame, data) !=
offsetof(struct canfd_frame, data));
diff --git a/net/can/af_can.h b/net/can/af_can.h
index 7c2d9161e224..22f3352c77fe 100644
--- a/net/can/af_can.h
+++ b/net/can/af_can.h
@@ -66,9 +66,9 @@ struct receiver {
struct can_pkg_stats {
unsigned long jiffies_init;
- unsigned long rx_frames;
- unsigned long tx_frames;
- unsigned long matches;
+ atomic_long_t rx_frames;
+ atomic_long_t tx_frames;
+ atomic_long_t matches;
unsigned long total_rx_rate;
unsigned long total_tx_rate;
@@ -82,9 +82,9 @@ struct can_pkg_stats {
unsigned long max_tx_rate;
unsigned long max_rx_match_ratio;
- unsigned long rx_frames_delta;
- unsigned long tx_frames_delta;
- unsigned long matches_delta;
+ atomic_long_t rx_frames_delta;
+ atomic_long_t tx_frames_delta;
+ atomic_long_t matches_delta;
};
/* persistent statistics */
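
For context on the conversion above: the rx/tx hot paths now only atomic_long_inc() these counters (see the af_can.c hunks), so any reader must sample them with atomic_long_read(). A minimal sketch of such a reader, under that assumption; the helper name is illustrative, the real consumers are the proc/statistics code updated elsewhere in this series (net/can/proc.c):

    /* sketch: sample and reset a delta counter once per statistics period */
    static unsigned long can_sample_rx_delta(struct can_pkg_stats *pkg_stats)
    {
            unsigned long rx = atomic_long_read(&pkg_stats->rx_frames_delta);

            atomic_long_set(&pkg_stats->rx_frames_delta, 0);
            return rx; /* frames received since the previous sample */
    }
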
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 27706f6ace34..7eba8ae01a5b 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -58,6 +58,7 @@
#include <linux/can/skb.h>
#include <linux/can/bcm.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <net/sock.h>
#include <net/net_namespace.h>
@@ -72,9 +73,11 @@
#define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60)
/* use of last_frames[index].flags */
+#define RX_LOCAL 0x10 /* frame was created on the local host */
+#define RX_OWN 0x20 /* frame was sent via the socket it was received on */
#define RX_RECV 0x40 /* received data for this element */
#define RX_THR 0x80 /* element not been sent due to throttle feature */
-#define BCM_CAN_FLAGS_MASK 0x3F /* to clean private flags after usage */
+#define BCM_CAN_FLAGS_MASK 0x0F /* to clean private flags after usage */
/* get best masking value for can_rx_register() for a given single can_id */
#define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
@@ -120,6 +123,7 @@ struct bcm_op {
struct canfd_frame last_sframe;
struct sock *sk;
struct net_device *rx_reg_dev;
+ spinlock_t bcm_tx_lock; /* protect currframe/count in runtime updates */
};
struct bcm_sock {
@@ -138,6 +142,16 @@ static LIST_HEAD(bcm_notifier_list);
static DEFINE_SPINLOCK(bcm_notifier_lock);
static struct bcm_sock *bcm_busy_notifier;
+/* Return pointer to store the extra msg flags for bcm_recvmsg().
+ * We use the space of one unsigned int beyond the 'struct sockaddr_can'
+ * in skb->cb.
+ */
+static inline unsigned int *bcm_flags(struct sk_buff *skb)
+{
+ /* return pointer after struct sockaddr_can */
+ return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
+}
+
static inline struct bcm_sock *bcm_sk(const struct sock *sk)
{
return (struct bcm_sock *)sk;
@@ -205,7 +219,9 @@ static int bcm_proc_show(struct seq_file *m, void *v)
seq_printf(m, " / bound %s", bcm_proc_getifname(net, ifname, bo->ifindex));
seq_printf(m, " <<<\n");
- list_for_each_entry(op, &bo->rx_ops, list) {
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(op, &bo->rx_ops, list) {
unsigned long reduction;
@@ -261,6 +277,9 @@ static int bcm_proc_show(struct seq_file *m, void *v)
seq_printf(m, "# sent %ld\n", op->frames_abs);
}
seq_putc(m, '\n');
+
+ rcu_read_unlock();
+
return 0;
}
#endif /* CONFIG_PROC_FS */
@@ -273,13 +292,18 @@ static void bcm_can_tx(struct bcm_op *op)
{
struct sk_buff *skb;
struct net_device *dev;
- struct canfd_frame *cf = op->frames + op->cfsiz * op->currframe;
+ struct canfd_frame *cf;
int err;
/* no target device? => exit */
if (!op->ifindex)
return;
+ /* read currframe under lock protection */
+ spin_lock_bh(&op->bcm_tx_lock);
+ cf = op->frames + op->cfsiz * op->currframe;
+ spin_unlock_bh(&op->bcm_tx_lock);
+
dev = dev_get_by_index(sock_net(op->sk), op->ifindex);
if (!dev) {
/* RFC: should this bcm_op remove itself here? */
@@ -300,6 +324,10 @@ static void bcm_can_tx(struct bcm_op *op)
skb->dev = dev;
can_skb_set_owner(skb, op->sk);
err = can_send(skb, 1);
+
+ /* update currframe and count under lock protection */
+ spin_lock_bh(&op->bcm_tx_lock);
+
if (!err)
op->frames_abs++;
@@ -308,6 +336,11 @@ static void bcm_can_tx(struct bcm_op *op)
/* reached last frame? */
if (op->currframe >= op->nframes)
op->currframe = 0;
+
+ if (op->count > 0)
+ op->count--;
+
+ spin_unlock_bh(&op->bcm_tx_lock);
out:
dev_put(dev);
}
@@ -325,6 +358,8 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
struct sock *sk = op->sk;
unsigned int datalen = head->nframes * op->cfsiz;
int err;
+ unsigned int *pflags;
+ enum skb_drop_reason reason;
skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
if (!skb)
@@ -332,6 +367,14 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
skb_put_data(skb, head, sizeof(*head));
+ /* ensure space for sockaddr_can and msg flags */
+ sock_skb_cb_check_size(sizeof(struct sockaddr_can) +
+ sizeof(unsigned int));
+
+ /* initialize msg flags */
+ pflags = bcm_flags(skb);
+ *pflags = 0;
+
if (head->nframes) {
/* CAN frames starting here */
firstframe = (struct canfd_frame *)skb_tail_pointer(skb);
@@ -344,8 +387,14 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
* relevant for updates that are generated by the
* BCM, where nframes is 1
*/
- if (head->nframes == 1)
+ if (head->nframes == 1) {
+ if (firstframe->flags & RX_LOCAL)
+ *pflags |= MSG_DONTROUTE;
+ if (firstframe->flags & RX_OWN)
+ *pflags |= MSG_CONFIRM;
+
firstframe->flags &= BCM_CAN_FLAGS_MASK;
+ }
}
if (has_timestamp) {
@@ -360,17 +409,16 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
* containing the interface index.
*/
- sock_skb_cb_check_size(sizeof(struct sockaddr_can));
addr = (struct sockaddr_can *)skb->cb;
memset(addr, 0, sizeof(*addr));
addr->can_family = AF_CAN;
addr->can_ifindex = op->rx_ifindex;
- err = sock_queue_rcv_skb(sk, skb);
+ err = sock_queue_rcv_skb_reason(sk, skb, &reason);
if (err < 0) {
struct bcm_sock *bo = bcm_sk(sk);
- kfree_skb(skb);
+ sk_skb_reason_drop(sk, skb, reason);
/* don't care about overflows in this statistic */
bo->dropped_usr_msgs++;
}
@@ -404,7 +452,7 @@ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
struct bcm_msg_head msg_head;
if (op->kt_ival1 && (op->count > 0)) {
- op->count--;
+ bcm_can_tx(op);
if (!op->count && (op->flags & TX_COUNTEVT)) {
/* create notification to user */
@@ -419,7 +467,6 @@ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
bcm_send_to_user(op, &msg_head, NULL, 0);
}
- bcm_can_tx(op);
} else if (op->kt_ival2) {
bcm_can_tx(op);
@@ -444,7 +491,7 @@ static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
op->frames_filtered = op->frames_abs = 0;
/* this element is not throttled anymore */
- data->flags &= (BCM_CAN_FLAGS_MASK|RX_RECV);
+ data->flags &= ~RX_THR;
memset(&head, 0, sizeof(head));
head.opcode = RX_CHANGED;
@@ -465,13 +512,17 @@ static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
*/
static void bcm_rx_update_and_send(struct bcm_op *op,
struct canfd_frame *lastdata,
- const struct canfd_frame *rxdata)
+ const struct canfd_frame *rxdata,
+ unsigned char traffic_flags)
{
memcpy(lastdata, rxdata, op->cfsiz);
/* mark as used and throttled by default */
lastdata->flags |= (RX_RECV|RX_THR);
+ /* add own/local/remote traffic flags */
+ lastdata->flags |= traffic_flags;
+
/* throttling mode inactive ? */
if (!op->kt_ival2) {
/* send RX_CHANGED to the user immediately */
@@ -508,7 +559,8 @@ rx_changed_settime:
* received data stored in op->last_frames[]
*/
static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
- const struct canfd_frame *rxdata)
+ const struct canfd_frame *rxdata,
+ unsigned char traffic_flags)
{
struct canfd_frame *cf = op->frames + op->cfsiz * index;
struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;
@@ -521,7 +573,7 @@ static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
if (!(lcf->flags & RX_RECV)) {
/* received data for the first time => send update to user */
- bcm_rx_update_and_send(op, lcf, rxdata);
+ bcm_rx_update_and_send(op, lcf, rxdata, traffic_flags);
return;
}
@@ -529,7 +581,7 @@ static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
for (i = 0; i < rxdata->len; i += 8) {
if ((get_u64(cf, i) & get_u64(rxdata, i)) !=
(get_u64(cf, i) & get_u64(lcf, i))) {
- bcm_rx_update_and_send(op, lcf, rxdata);
+ bcm_rx_update_and_send(op, lcf, rxdata, traffic_flags);
return;
}
}
@@ -537,7 +589,7 @@ static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
if (op->flags & RX_CHECK_DLC) {
/* do a real check in CAN frame length */
if (rxdata->len != lcf->len) {
- bcm_rx_update_and_send(op, lcf, rxdata);
+ bcm_rx_update_and_send(op, lcf, rxdata, traffic_flags);
return;
}
}
@@ -644,6 +696,7 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
struct bcm_op *op = (struct bcm_op *)data;
const struct canfd_frame *rxframe = (struct canfd_frame *)skb->data;
unsigned int i;
+ unsigned char traffic_flags;
if (op->can_id != rxframe->can_id)
return;
@@ -673,15 +726,24 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
return;
}
+ /* compute flags to distinguish between own/local/remote CAN traffic */
+ traffic_flags = 0;
+ if (skb->sk) {
+ traffic_flags |= RX_LOCAL;
+ if (skb->sk == op->sk)
+ traffic_flags |= RX_OWN;
+ }
+
if (op->flags & RX_FILTER_ID) {
/* the easiest case */
- bcm_rx_update_and_send(op, op->last_frames, rxframe);
+ bcm_rx_update_and_send(op, op->last_frames, rxframe,
+ traffic_flags);
goto rx_starttimer;
}
if (op->nframes == 1) {
/* simple compare with index 0 */
- bcm_rx_cmp_to_index(op, 0, rxframe);
+ bcm_rx_cmp_to_index(op, 0, rxframe, traffic_flags);
goto rx_starttimer;
}
@@ -698,7 +760,8 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
if ((get_u64(op->frames, 0) & get_u64(rxframe, 0)) ==
(get_u64(op->frames, 0) &
get_u64(op->frames + op->cfsiz * i, 0))) {
- bcm_rx_cmp_to_index(op, i, rxframe);
+ bcm_rx_cmp_to_index(op, i, rxframe,
+ traffic_flags);
break;
}
}
@@ -801,7 +864,7 @@ static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh,
REGMASK(op->can_id),
bcm_rx_handler, op);
- list_del(&op->list);
+ list_del_rcu(&op->list);
bcm_remove_op(op);
return 1; /* done */
}
@@ -821,7 +884,7 @@ static int bcm_delete_tx_op(struct list_head *ops, struct bcm_msg_head *mh,
list_for_each_entry_safe(op, n, ops, list) {
if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
(op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {
- list_del(&op->list);
+ list_del_rcu(&op->list);
bcm_remove_op(op);
return 1; /* done */
}
@@ -914,6 +977,27 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
}
op->flags = msg_head->flags;
+ /* only lock for unlikely count/nframes/currframe changes */
+ if (op->nframes != msg_head->nframes ||
+ op->flags & TX_RESET_MULTI_IDX ||
+ op->flags & SETTIMER) {
+
+ spin_lock_bh(&op->bcm_tx_lock);
+
+ if (op->nframes != msg_head->nframes ||
+ op->flags & TX_RESET_MULTI_IDX) {
+ /* potentially update changed nframes */
+ op->nframes = msg_head->nframes;
+ /* restart multiple frame transmission */
+ op->currframe = 0;
+ }
+
+ if (op->flags & SETTIMER)
+ op->count = msg_head->count;
+
+ spin_unlock_bh(&op->bcm_tx_lock);
+ }
+
} else {
/* insert new BCM operation for the given can_id */
@@ -921,9 +1005,14 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
if (!op)
return -ENOMEM;
+ spin_lock_init(&op->bcm_tx_lock);
op->can_id = msg_head->can_id;
op->cfsiz = CFSIZ(msg_head->flags);
op->flags = msg_head->flags;
+ op->nframes = msg_head->nframes;
+
+ if (op->flags & SETTIMER)
+ op->count = msg_head->count;
/* create array for CAN frames and copy the data */
if (msg_head->nframes > 1) {
@@ -941,6 +1030,8 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
cf = op->frames + op->cfsiz * i;
err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);
+ if (err < 0)
+ goto free_op;
if (op->flags & CAN_FD_FRAME) {
if (cf->len > 64)
@@ -950,12 +1041,8 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
err = -EINVAL;
}
- if (err < 0) {
- if (op->frames != &op->sframe)
- kfree(op->frames);
- kfree(op);
- return err;
- }
+ if (err < 0)
+ goto free_op;
if (msg_head->flags & TX_CP_CAN_ID) {
/* copy can_id into frame */
@@ -971,35 +1058,20 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
op->ifindex = ifindex;
/* initialize uninitialized (kzalloc) structure */
- hrtimer_init(&op->timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_SOFT);
- op->timer.function = bcm_tx_timeout_handler;
+ hrtimer_setup(&op->timer, bcm_tx_timeout_handler, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_SOFT);
/* currently unused in tx_ops */
- hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_SOFT);
+ hrtimer_setup(&op->thrtimer, hrtimer_dummy_timeout, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_SOFT);
/* add this bcm_op to the list of the tx_ops */
list_add(&op->list, &bo->tx_ops);
} /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */
- if (op->nframes != msg_head->nframes) {
- op->nframes = msg_head->nframes;
- /* start multiple frame transmission with index 0 */
- op->currframe = 0;
- }
-
- /* check flags */
-
- if (op->flags & TX_RESET_MULTI_IDX) {
- /* start multiple frame transmission with index 0 */
- op->currframe = 0;
- }
-
if (op->flags & SETTIMER) {
/* set timer values */
- op->count = msg_head->count;
op->ival1 = msg_head->ival1;
op->ival2 = msg_head->ival2;
op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
@@ -1016,16 +1088,19 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
op->flags |= TX_ANNOUNCE;
}
- if (op->flags & TX_ANNOUNCE) {
+ if (op->flags & TX_ANNOUNCE)
bcm_can_tx(op);
- if (op->count)
- op->count--;
- }
if (op->flags & STARTTIMER)
bcm_tx_start_timer(op);
return msg_head->nframes * op->cfsiz + MHSIZ;
+
+free_op:
+ if (op->frames != &op->sframe)
+ kfree(op->frames);
+ kfree(op);
+ return err;
}
/*
@@ -1146,13 +1221,10 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
op->rx_ifindex = ifindex;
/* initialize uninitialized (kzalloc) structure */
- hrtimer_init(&op->timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_SOFT);
- op->timer.function = bcm_rx_timeout_handler;
-
- hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_SOFT);
- op->thrtimer.function = bcm_rx_thr_handler;
+ hrtimer_setup(&op->timer, bcm_rx_timeout_handler, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_SOFT);
+ hrtimer_setup(&op->thrtimer, bcm_rx_thr_handler, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_SOFT);
/* add this bcm_op to the list of the rx_ops */
list_add(&op->list, &bo->rx_ops);
@@ -1230,7 +1302,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
bcm_rx_handler, op, "bcm", sk);
if (err) {
/* this bcm rx op is broken -> remove it */
- list_del(&op->list);
+ list_del_rcu(&op->list);
bcm_remove_op(op);
return err;
}
@@ -1424,6 +1496,12 @@ static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
/* remove device reference, if this is our bound device */
if (bo->bound && bo->ifindex == dev->ifindex) {
+#if IS_ENABLED(CONFIG_PROC_FS)
+ if (sock_net(sk)->can.bcmproc_dir && bo->bcm_proc_read) {
+ remove_proc_entry(bo->procname, sock_net(sk)->can.bcmproc_dir);
+ bo->bcm_proc_read = NULL;
+ }
+#endif
bo->bound = 0;
bo->ifindex = 0;
notify_enodev = 1;
@@ -1522,6 +1600,12 @@ static int bcm_release(struct socket *sock)
lock_sock(sk);
+#if IS_ENABLED(CONFIG_PROC_FS)
+ /* remove procfs entry */
+ if (net->can.bcmproc_dir && bo->bcm_proc_read)
+ remove_proc_entry(bo->procname, net->can.bcmproc_dir);
+#endif /* CONFIG_PROC_FS */
+
list_for_each_entry_safe(op, next, &bo->tx_ops, list)
bcm_remove_op(op);
@@ -1557,12 +1641,6 @@ static int bcm_release(struct socket *sock)
list_for_each_entry_safe(op, next, &bo->rx_ops, list)
bcm_remove_op(op);
-#if IS_ENABLED(CONFIG_PROC_FS)
- /* remove procfs entry */
- if (net->can.bcmproc_dir && bo->bcm_proc_read)
- remove_proc_entry(bo->procname, net->can.bcmproc_dir);
-#endif /* CONFIG_PROC_FS */
-
/* remove device reference */
if (bo->bound) {
bo->bound = 0;
@@ -1573,12 +1651,13 @@ static int bcm_release(struct socket *sock)
sock->sk = NULL;
release_sock(sk);
+ sock_prot_inuse_add(net, sk->sk_prot, -1);
sock_put(sk);
return 0;
}
-static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
+static int bcm_connect(struct socket *sock, struct sockaddr_unsized *uaddr, int len,
int flags)
{
struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
@@ -1671,6 +1750,9 @@ static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
}
+ /* assign the flags that have been recorded in bcm_send_to_user() */
+ msg->msg_flags |= *(bcm_flags(skb));
+
skb_free_datagram(sk, skb);
return size;
@@ -1699,7 +1781,6 @@ static const struct proto_ops bcm_ops = {
.sendmsg = bcm_sendmsg,
.recvmsg = bcm_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
};
static struct proto bcm_proto __read_mostly = {
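
Taken together, the RX_LOCAL/RX_OWN bits recorded in bcm_rx_handler() surface in user space as recvmsg() flags via bcm_flags(). A hedged user-space sketch of reading them back; the socket 's' and the single-frame receive layout are assumptions for illustration:

    /* sketch: classify a received BCM message by the new msg_flags */
    #include <linux/can/bcm.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    static void bcm_read_one(int s) /* 's': a connected CAN_BCM socket */
    {
            struct {
                    struct bcm_msg_head head;
                    struct canfd_frame frame;
            } rx;
            struct iovec iov = { .iov_base = &rx, .iov_len = sizeof(rx) };
            struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };

            if (recvmsg(s, &msg, 0) <= 0)
                    return;
            if (msg.msg_flags & MSG_CONFIRM)        /* RX_OWN recorded */
                    return; /* frame was sent via this very socket */
            if (msg.msg_flags & MSG_DONTROUTE)      /* RX_LOCAL recorded */
                    return; /* frame was created on the local host */
            /* otherwise the frame came in from the CAN bus */
    }
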
diff --git a/net/can/gw.c b/net/can/gw.c
index 23a3d89cad81..55eccb1c7620 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -130,7 +130,7 @@ struct cgw_job {
u32 handled_frames;
u32 dropped_frames;
u32 deleted_frames;
- struct cf_mod mod;
+ struct cf_mod __rcu *cf_mod;
union {
/* CAN frame data source */
struct net_device *dev;
@@ -459,6 +459,7 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
struct cgw_job *gwj = (struct cgw_job *)data;
struct canfd_frame *cf;
struct sk_buff *nskb;
+ struct cf_mod *mod;
int modidx = 0;
/* process strictly Classic CAN or CAN FD frames */
@@ -506,7 +507,8 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
* When there is at least one modification function activated,
* we need to copy the skb as we want to modify skb->data.
*/
- if (gwj->mod.modfunc[0])
+ mod = rcu_dereference(gwj->cf_mod);
+ if (mod->modfunc[0])
nskb = skb_copy(skb, GFP_ATOMIC);
else
nskb = skb_clone(skb, GFP_ATOMIC);
@@ -529,8 +531,8 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
cf = (struct canfd_frame *)nskb->data;
/* perform preprocessed modification functions if there are any */
- while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx])
- (*gwj->mod.modfunc[modidx++])(cf, &gwj->mod);
+ while (modidx < MAX_MODFUNCTIONS && mod->modfunc[modidx])
+ (*mod->modfunc[modidx++])(cf, mod);
/* Has the CAN frame been modified? */
if (modidx) {
@@ -546,11 +548,11 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
}
/* check for checksum updates */
- if (gwj->mod.csumfunc.crc8)
- (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);
+ if (mod->csumfunc.crc8)
+ (*mod->csumfunc.crc8)(cf, &mod->csum.crc8);
- if (gwj->mod.csumfunc.xor)
- (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor);
+ if (mod->csumfunc.xor)
+ (*mod->csumfunc.xor)(cf, &mod->csum.xor);
}
/* clear the skb timestamp if not configured the other way */
@@ -581,9 +583,20 @@ static void cgw_job_free_rcu(struct rcu_head *rcu_head)
{
struct cgw_job *gwj = container_of(rcu_head, struct cgw_job, rcu);
+ /* cgw_job::cf_mod is always accessed from the same cgw_job object within
+ * the same RCU read section. Once cgw_job is scheduled for removal,
+ * cf_mod can also be removed without mandating an additional grace period.
+ */
+ kfree(rcu_access_pointer(gwj->cf_mod));
kmem_cache_free(cgw_cache, gwj);
}
+/* Return cgw_job::cf_mod with RTNL protected section */
+static struct cf_mod *cgw_job_cf_mod(struct cgw_job *gwj)
+{
+ return rcu_dereference_protected(gwj->cf_mod, rtnl_is_locked());
+}
+
static int cgw_notifier(struct notifier_block *nb,
unsigned long msg, void *ptr)
{
@@ -616,6 +629,7 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type,
{
struct rtcanmsg *rtcan;
struct nlmsghdr *nlh;
+ struct cf_mod *mod;
nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtcan), flags);
if (!nlh)
@@ -650,82 +664,83 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type,
goto cancel;
}
+ mod = cgw_job_cf_mod(gwj);
if (gwj->flags & CGW_FLAGS_CAN_FD) {
struct cgw_fdframe_mod mb;
- if (gwj->mod.modtype.and) {
- memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf));
- mb.modtype = gwj->mod.modtype.and;
+ if (mod->modtype.and) {
+ memcpy(&mb.cf, &mod->modframe.and, sizeof(mb.cf));
+ mb.modtype = mod->modtype.and;
if (nla_put(skb, CGW_FDMOD_AND, sizeof(mb), &mb) < 0)
goto cancel;
}
- if (gwj->mod.modtype.or) {
- memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf));
- mb.modtype = gwj->mod.modtype.or;
+ if (mod->modtype.or) {
+ memcpy(&mb.cf, &mod->modframe.or, sizeof(mb.cf));
+ mb.modtype = mod->modtype.or;
if (nla_put(skb, CGW_FDMOD_OR, sizeof(mb), &mb) < 0)
goto cancel;
}
- if (gwj->mod.modtype.xor) {
- memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf));
- mb.modtype = gwj->mod.modtype.xor;
+ if (mod->modtype.xor) {
+ memcpy(&mb.cf, &mod->modframe.xor, sizeof(mb.cf));
+ mb.modtype = mod->modtype.xor;
if (nla_put(skb, CGW_FDMOD_XOR, sizeof(mb), &mb) < 0)
goto cancel;
}
- if (gwj->mod.modtype.set) {
- memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf));
- mb.modtype = gwj->mod.modtype.set;
+ if (mod->modtype.set) {
+ memcpy(&mb.cf, &mod->modframe.set, sizeof(mb.cf));
+ mb.modtype = mod->modtype.set;
if (nla_put(skb, CGW_FDMOD_SET, sizeof(mb), &mb) < 0)
goto cancel;
}
} else {
struct cgw_frame_mod mb;
- if (gwj->mod.modtype.and) {
- memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf));
- mb.modtype = gwj->mod.modtype.and;
+ if (mod->modtype.and) {
+ memcpy(&mb.cf, &mod->modframe.and, sizeof(mb.cf));
+ mb.modtype = mod->modtype.and;
if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0)
goto cancel;
}
- if (gwj->mod.modtype.or) {
- memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf));
- mb.modtype = gwj->mod.modtype.or;
+ if (mod->modtype.or) {
+ memcpy(&mb.cf, &mod->modframe.or, sizeof(mb.cf));
+ mb.modtype = mod->modtype.or;
if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0)
goto cancel;
}
- if (gwj->mod.modtype.xor) {
- memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf));
- mb.modtype = gwj->mod.modtype.xor;
+ if (mod->modtype.xor) {
+ memcpy(&mb.cf, &mod->modframe.xor, sizeof(mb.cf));
+ mb.modtype = mod->modtype.xor;
if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0)
goto cancel;
}
- if (gwj->mod.modtype.set) {
- memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf));
- mb.modtype = gwj->mod.modtype.set;
+ if (mod->modtype.set) {
+ memcpy(&mb.cf, &mod->modframe.set, sizeof(mb.cf));
+ mb.modtype = mod->modtype.set;
if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0)
goto cancel;
}
}
- if (gwj->mod.uid) {
- if (nla_put_u32(skb, CGW_MOD_UID, gwj->mod.uid) < 0)
+ if (mod->uid) {
+ if (nla_put_u32(skb, CGW_MOD_UID, mod->uid) < 0)
goto cancel;
}
- if (gwj->mod.csumfunc.crc8) {
+ if (mod->csumfunc.crc8) {
if (nla_put(skb, CGW_CS_CRC8, CGW_CS_CRC8_LEN,
- &gwj->mod.csum.crc8) < 0)
+ &mod->csum.crc8) < 0)
goto cancel;
}
- if (gwj->mod.csumfunc.xor) {
+ if (mod->csumfunc.xor) {
if (nla_put(skb, CGW_CS_XOR, CGW_CS_XOR_LEN,
- &gwj->mod.csum.xor) < 0)
+ &mod->csum.xor) < 0)
goto cancel;
}
@@ -1059,7 +1074,7 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh,
struct net *net = sock_net(skb->sk);
struct rtcanmsg *r;
struct cgw_job *gwj;
- struct cf_mod mod;
+ struct cf_mod *mod;
struct can_can_gw ccgw;
u8 limhops = 0;
int err = 0;
@@ -1078,37 +1093,48 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh,
if (r->gwtype != CGW_TYPE_CAN_CAN)
return -EINVAL;
- err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops);
+ mod = kmalloc(sizeof(*mod), GFP_KERNEL);
+ if (!mod)
+ return -ENOMEM;
+
+ err = cgw_parse_attr(nlh, mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops);
if (err < 0)
- return err;
+ goto out_free_cf;
- if (mod.uid) {
+ if (mod->uid) {
ASSERT_RTNL();
/* check for updating an existing job with identical uid */
hlist_for_each_entry(gwj, &net->can.cgw_list, list) {
- if (gwj->mod.uid != mod.uid)
+ struct cf_mod *old_cf;
+
+ old_cf = cgw_job_cf_mod(gwj);
+ if (old_cf->uid != mod->uid)
continue;
/* interfaces & filters must be identical */
- if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw)))
- return -EINVAL;
+ if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw))) {
+ err = -EINVAL;
+ goto out_free_cf;
+ }
- /* update modifications with disabled softirq & quit */
- local_bh_disable();
- memcpy(&gwj->mod, &mod, sizeof(mod));
- local_bh_enable();
+ rcu_assign_pointer(gwj->cf_mod, mod);
+ kfree_rcu_mightsleep(old_cf);
return 0;
}
}
/* ifindex == 0 is not allowed for job creation */
- if (!ccgw.src_idx || !ccgw.dst_idx)
- return -ENODEV;
+ if (!ccgw.src_idx || !ccgw.dst_idx) {
+ err = -ENODEV;
+ goto out_free_cf;
+ }
gwj = kmem_cache_alloc(cgw_cache, GFP_KERNEL);
- if (!gwj)
- return -ENOMEM;
+ if (!gwj) {
+ err = -ENOMEM;
+ goto out_free_cf;
+ }
gwj->handled_frames = 0;
gwj->dropped_frames = 0;
@@ -1118,7 +1144,7 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh,
gwj->limit_hops = limhops;
/* insert already parsed information */
- memcpy(&gwj->mod, &mod, sizeof(mod));
+ RCU_INIT_POINTER(gwj->cf_mod, mod);
memcpy(&gwj->ccgw, &ccgw, sizeof(ccgw));
err = -ENODEV;
@@ -1139,15 +1165,24 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh,
if (gwj->dst.dev->type != ARPHRD_CAN)
goto out;
+ /* is sending the skb back to the incoming interface intended? */
+ if (gwj->src.dev == gwj->dst.dev &&
+ !(gwj->flags & CGW_FLAGS_CAN_IIF_TX_OK)) {
+ err = -EINVAL;
+ goto out;
+ }
+
ASSERT_RTNL();
err = cgw_register_filter(net, gwj);
if (!err)
hlist_add_head_rcu(&gwj->list, &net->can.cgw_list);
out:
- if (err)
+ if (err) {
kmem_cache_free(cgw_cache, gwj);
-
+out_free_cf:
+ kfree(mod);
+ }
return err;
}
@@ -1207,19 +1242,22 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh,
/* remove only the first matching entry */
hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) {
+ struct cf_mod *cf_mod;
+
if (gwj->flags != r->flags)
continue;
if (gwj->limit_hops != limhops)
continue;
+ cf_mod = cgw_job_cf_mod(gwj);
/* we have a match when uid is enabled and identical */
- if (gwj->mod.uid || mod.uid) {
- if (gwj->mod.uid != mod.uid)
+ if (cf_mod->uid || mod.uid) {
+ if (cf_mod->uid != mod.uid)
continue;
} else {
/* no uid => check for identical modifications */
- if (memcmp(&gwj->mod, &mod, sizeof(mod)))
+ if (memcmp(cf_mod, &mod, sizeof(mod)))
continue;
}
@@ -1258,6 +1296,15 @@ static struct pernet_operations cangw_pernet_ops = {
.exit_batch = cangw_pernet_exit_batch,
};
+static const struct rtnl_msg_handler cgw_rtnl_msg_handlers[] __initconst_or_module = {
+ {.owner = THIS_MODULE, .protocol = PF_CAN, .msgtype = RTM_NEWROUTE,
+ .doit = cgw_create_job},
+ {.owner = THIS_MODULE, .protocol = PF_CAN, .msgtype = RTM_DELROUTE,
+ .doit = cgw_remove_job},
+ {.owner = THIS_MODULE, .protocol = PF_CAN, .msgtype = RTM_GETROUTE,
+ .dumpit = cgw_dump_jobs},
+};
+
static __init int cgw_module_init(void)
{
int ret;
@@ -1283,27 +1330,13 @@ static __init int cgw_module_init(void)
if (ret)
goto out_register_notifier;
- ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_GETROUTE,
- NULL, cgw_dump_jobs, 0);
- if (ret)
- goto out_rtnl_register1;
-
- ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_NEWROUTE,
- cgw_create_job, NULL, 0);
- if (ret)
- goto out_rtnl_register2;
- ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_DELROUTE,
- cgw_remove_job, NULL, 0);
+ ret = rtnl_register_many(cgw_rtnl_msg_handlers);
if (ret)
- goto out_rtnl_register3;
+ goto out_rtnl_register;
return 0;
-out_rtnl_register3:
- rtnl_unregister(PF_CAN, RTM_NEWROUTE);
-out_rtnl_register2:
- rtnl_unregister(PF_CAN, RTM_GETROUTE);
-out_rtnl_register1:
+out_rtnl_register:
unregister_netdevice_notifier(&notifier);
out_register_notifier:
kmem_cache_destroy(cgw_cache);
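
In short, the gw.c changes replace the in-place memcpy() of gwj->mod with the standard RCU publish/retire pattern, so can_can_gw_rcv() can dereference the modification set locklessly from the rx softirq while updates go through RTNL. Condensed from the hunks above:

    /* updater, RTNL held (cgw_create_job() updating an existing uid) */
    old_cf = cgw_job_cf_mod(gwj);           /* rcu_dereference_protected() */
    rcu_assign_pointer(gwj->cf_mod, mod);   /* publish the new cf_mod */
    kfree_rcu_mightsleep(old_cf);           /* retire after a grace period */

    /* reader (can_can_gw_rcv(), inside an RCU read-side section) */
    mod = rcu_dereference(gwj->cf_mod);
    while (modidx < MAX_MODFUNCTIONS && mod->modfunc[modidx])
            (*mod->modfunc[modidx++])(cf, mod);
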
diff --git a/net/can/isotp.c b/net/can/isotp.c
index 608f8c24ae46..ce588b85665a 100644
--- a/net/can/isotp.c
+++ b/net/can/isotp.c
@@ -72,7 +72,7 @@
#include <net/sock.h>
#include <net/net_namespace.h>
-MODULE_DESCRIPTION("PF_CAN isotp 15765-2:2016 protocol");
+MODULE_DESCRIPTION("PF_CAN ISO 15765-2 transport protocol");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
MODULE_ALIAS("can-proto-6");
@@ -83,12 +83,24 @@ MODULE_ALIAS("can-proto-6");
(CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
(CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
-/* ISO 15765-2:2016 supports more than 4095 byte per ISO PDU as the FF_DL can
- * take full 32 bit values (4 Gbyte). We would need some good concept to handle
- * this between user space and kernel space. For now increase the static buffer
- * to something about 64 kbyte to be able to test this new functionality.
+/* Since ISO 15765-2:2016 the CAN isotp protocol supports more than 4095
+ * byte per ISO PDU as the FF_DL can take full 32 bit values (4 Gbyte).
+ * We would need some good concept to handle this between user space and
+ * kernel space. For now set the static buffer to something about 8 kbyte
+ * to be able to test this new functionality.
*/
-#define MAX_MSG_LENGTH 66000
+#define DEFAULT_MAX_PDU_SIZE 8300
+
+/* maximum PDU size before ISO 15765-2:2016 extension was 4095 */
+#define MAX_12BIT_PDU_SIZE 4095
+
+/* limit the isotp pdu size from the optional module parameter to 1MByte */
+#define MAX_PDU_SIZE (1025 * 1024U)
+
+static unsigned int max_pdu_size __read_mostly = DEFAULT_MAX_PDU_SIZE;
+module_param(max_pdu_size, uint, 0444);
+MODULE_PARM_DESC(max_pdu_size, "maximum isotp pdu size (default "
+ __stringify(DEFAULT_MAX_PDU_SIZE) ")");
/* N_PCI type values in bits 7-4 of N_PCI bytes */
#define N_PCI_SF 0x00 /* single frame */
@@ -119,17 +131,20 @@ enum {
ISOTP_WAIT_FIRST_FC,
ISOTP_WAIT_FC,
ISOTP_WAIT_DATA,
- ISOTP_SENDING
+ ISOTP_SENDING,
+ ISOTP_SHUTDOWN,
};
struct tpcon {
- unsigned int idx;
+ u8 *buf;
+ unsigned int buflen;
unsigned int len;
+ unsigned int idx;
u32 state;
u8 bs;
u8 sn;
u8 ll_dl;
- u8 buf[MAX_MSG_LENGTH + 1];
+ u8 sbuf[DEFAULT_MAX_PDU_SIZE];
};
struct isotp_sock {
@@ -140,7 +155,7 @@ struct isotp_sock {
canid_t rxid;
ktime_t tx_gap;
ktime_t lastrxcf_tstamp;
- struct hrtimer rxtimer, txtimer;
+ struct hrtimer rxtimer, txtimer, txfrtimer;
struct can_isotp_options opt;
struct can_isotp_fc_options rxfc, txfc;
struct can_isotp_ll_options ll;
@@ -174,12 +189,6 @@ static bool isotp_register_rxid(struct isotp_sock *so)
return (isotp_bc_flags(so) == 0);
}
-static bool isotp_register_txecho(struct isotp_sock *so)
-{
- /* all modes but SF_BROADCAST register for tx echo skbs */
- return (isotp_bc_flags(so) != CAN_ISOTP_SF_BROADCAST);
-}
-
static enum hrtimer_restart isotp_rx_timer_handler(struct hrtimer *hrtimer)
{
struct isotp_sock *so = container_of(hrtimer, struct isotp_sock,
@@ -269,6 +278,7 @@ static int isotp_send_fc(struct sock *sk, int ae, u8 flowstatus)
static void isotp_rcv_skb(struct sk_buff *skb, struct sock *sk)
{
struct sockaddr_can *addr = (struct sockaddr_can *)skb->cb;
+ enum skb_drop_reason reason;
BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can));
@@ -276,8 +286,8 @@ static void isotp_rcv_skb(struct sk_buff *skb, struct sock *sk)
addr->can_family = AF_CAN;
addr->can_ifindex = skb->dev->ifindex;
- if (sock_queue_rcv_skb(sk, skb) < 0)
- kfree_skb(skb);
+ if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0)
+ sk_skb_reason_drop(sk, skb, reason);
}
static u8 padlen(u8 datalen)
@@ -373,8 +383,9 @@ static int isotp_rcv_fc(struct isotp_sock *so, struct canfd_frame *cf, int ae)
return 1;
}
- /* get communication parameters only from the first FC frame */
- if (so->tx.state == ISOTP_WAIT_FIRST_FC) {
+ /* get static/dynamic communication params from first/every FC frame */
+ if (so->tx.state == ISOTP_WAIT_FIRST_FC ||
+ so->opt.flags & CAN_ISOTP_DYN_FC_PARMS) {
so->txfc.bs = cf->data[ae + 1];
so->txfc.stmin = cf->data[ae + 2];
@@ -503,7 +514,17 @@ static int isotp_rcv_ff(struct sock *sk, struct canfd_frame *cf, int ae)
if (so->rx.len + ae + off + ff_pci_sz < so->rx.ll_dl)
return 1;
- if (so->rx.len > MAX_MSG_LENGTH) {
+ /* PDU size > default => try max_pdu_size */
+ if (so->rx.len > so->rx.buflen && so->rx.buflen < max_pdu_size) {
+ u8 *newbuf = kmalloc(max_pdu_size, GFP_ATOMIC);
+
+ if (newbuf) {
+ so->rx.buf = newbuf;
+ so->rx.buflen = max_pdu_size;
+ }
+ }
+
+ if (so->rx.len > so->rx.buflen) {
/* send FC frame with overflow status */
isotp_send_fc(sk, ae, ISOTP_FC_OVFLW);
return 1;
@@ -807,7 +828,7 @@ static void isotp_create_fframe(struct canfd_frame *cf, struct isotp_sock *so,
cf->data[0] = so->opt.ext_address;
/* create N_PCI bytes with 12/32 bit FF_DL data length */
- if (so->tx.len > 4095) {
+ if (so->tx.len > MAX_12BIT_PDU_SIZE) {
/* use 32 bit FF_DL notation */
cf->data[ae] = N_PCI_FF;
cf->data[ae + 1] = 0;
@@ -871,7 +892,7 @@ static void isotp_rcv_echo(struct sk_buff *skb, void *data)
}
/* start timer to send next consecutive frame with correct delay */
- hrtimer_start(&so->txtimer, so->tx_gap, HRTIMER_MODE_REL_SOFT);
+ hrtimer_start(&so->txfrtimer, so->tx_gap, HRTIMER_MODE_REL_SOFT);
}
static enum hrtimer_restart isotp_tx_timer_handler(struct hrtimer *hrtimer)
@@ -879,56 +900,45 @@ static enum hrtimer_restart isotp_tx_timer_handler(struct hrtimer *hrtimer)
struct isotp_sock *so = container_of(hrtimer, struct isotp_sock,
txtimer);
struct sock *sk = &so->sk;
- enum hrtimer_restart restart = HRTIMER_NORESTART;
-
- switch (so->tx.state) {
- case ISOTP_SENDING:
- /* cfecho should be consumed by isotp_rcv_echo() here */
- if (!so->cfecho) {
- /* start timeout for unlikely lost echo skb */
- hrtimer_set_expires(&so->txtimer,
- ktime_add(ktime_get(),
- ktime_set(ISOTP_ECHO_TIMEOUT, 0)));
- restart = HRTIMER_RESTART;
+ /* don't handle timeouts in IDLE or SHUTDOWN state */
+ if (so->tx.state == ISOTP_IDLE || so->tx.state == ISOTP_SHUTDOWN)
+ return HRTIMER_NORESTART;
- /* push out the next consecutive frame */
- isotp_send_cframe(so);
- break;
- }
+ /* we did not get any flow control or echo frame in time */
- /* cfecho has not been cleared in isotp_rcv_echo() */
- pr_notice_once("can-isotp: cfecho %08X timeout\n", so->cfecho);
- fallthrough;
+ /* report 'communication error on send' */
+ sk->sk_err = ECOMM;
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk_error_report(sk);
- case ISOTP_WAIT_FC:
- case ISOTP_WAIT_FIRST_FC:
+ /* reset tx state */
+ so->tx.state = ISOTP_IDLE;
+ wake_up_interruptible(&so->wait);
- /* we did not get any flow control frame in time */
+ return HRTIMER_NORESTART;
+}
- /* report 'communication error on send' */
- sk->sk_err = ECOMM;
- if (!sock_flag(sk, SOCK_DEAD))
- sk_error_report(sk);
+static enum hrtimer_restart isotp_txfr_timer_handler(struct hrtimer *hrtimer)
+{
+ struct isotp_sock *so = container_of(hrtimer, struct isotp_sock,
+ txfrtimer);
- /* reset tx state */
- so->tx.state = ISOTP_IDLE;
- wake_up_interruptible(&so->wait);
- break;
+ /* start echo timeout handling and cover below protocol error */
+ hrtimer_start(&so->txtimer, ktime_set(ISOTP_ECHO_TIMEOUT, 0),
+ HRTIMER_MODE_REL_SOFT);
- default:
- WARN_ONCE(1, "can-isotp: tx timer state %08X cfecho %08X\n",
- so->tx.state, so->cfecho);
- }
+ /* cfecho should be consumed by isotp_rcv_echo() here */
+ if (so->tx.state == ISOTP_SENDING && !so->cfecho)
+ isotp_send_cframe(so);
- return restart;
+ return HRTIMER_NORESTART;
}
static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
struct sock *sk = sock->sk;
struct isotp_sock *so = isotp_sk(sk);
- u32 old_state = so->tx.state;
struct sk_buff *skb;
struct net_device *dev;
struct canfd_frame *cf;
@@ -938,26 +948,34 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
int off;
int err;
- if (!so->bound)
+ if (!so->bound || so->tx.state == ISOTP_SHUTDOWN)
return -EADDRNOTAVAIL;
- /* we do not support multiple buffers - for now */
- if (cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING) != ISOTP_IDLE ||
- wq_has_sleeper(&so->wait)) {
- if (msg->msg_flags & MSG_DONTWAIT) {
- err = -EAGAIN;
- goto err_out;
- }
+ while (cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING) != ISOTP_IDLE) {
+ /* we do not support multiple buffers - for now */
+ if (msg->msg_flags & MSG_DONTWAIT)
+ return -EAGAIN;
+
+ if (so->tx.state == ISOTP_SHUTDOWN)
+ return -EADDRNOTAVAIL;
/* wait for complete transmission of current pdu */
err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
if (err)
- goto err_out;
+ goto err_event_drop;
+ }
- so->tx.state = ISOTP_SENDING;
+ /* PDU size > default => try max_pdu_size */
+ if (size > so->tx.buflen && so->tx.buflen < max_pdu_size) {
+ u8 *newbuf = kmalloc(max_pdu_size, GFP_KERNEL);
+
+ if (newbuf) {
+ so->tx.buf = newbuf;
+ so->tx.buflen = max_pdu_size;
+ }
}
- if (!size || size > MAX_MSG_LENGTH) {
+ if (!size || size > so->tx.buflen) {
err = -EINVAL;
goto err_out_drop;
}
@@ -1084,21 +1102,26 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
if (wait_tx_done) {
/* wait for complete transmission of current pdu */
- wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
+ err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
+ if (err)
+ goto err_event_drop;
- if (sk->sk_err)
- return -sk->sk_err;
+ err = sock_error(sk);
+ if (err)
+ return err;
}
return size;
+err_event_drop:
+ /* got signal: force tx state machine to be idle */
+ so->tx.state = ISOTP_IDLE;
+ hrtimer_cancel(&so->txfrtimer);
+ hrtimer_cancel(&so->txtimer);
err_out_drop:
/* drop this PDU and unlock a potential wait queue */
- old_state = ISOTP_IDLE;
-err_out:
- so->tx.state = old_state;
- if (so->tx.state == ISOTP_IDLE)
- wake_up_interruptible(&so->wait);
+ so->tx.state = ISOTP_IDLE;
+ wake_up_interruptible(&so->wait);
return err;
}
@@ -1111,7 +1134,7 @@ static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
struct isotp_sock *so = isotp_sk(sk);
int ret = 0;
- if (flags & ~(MSG_DONTWAIT | MSG_TRUNC | MSG_PEEK))
+ if (flags & ~(MSG_DONTWAIT | MSG_TRUNC | MSG_PEEK | MSG_CMSG_COMPAT))
return -EINVAL;
if (!so->bound)
@@ -1130,7 +1153,7 @@ static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
if (ret < 0)
goto out_err;
- sock_recv_timestamp(msg, sk, skb);
+ sock_recv_cmsgs(msg, sk, skb);
if (msg->msg_name) {
__sockaddr_check_size(ISOTP_MIN_NAMELEN);
@@ -1160,7 +1183,13 @@ static int isotp_release(struct socket *sock)
net = sock_net(sk);
/* wait for complete transmission of current pdu */
- wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
+ while (wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE) == 0 &&
+ cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SHUTDOWN) != ISOTP_IDLE)
+ ;
+
+ /* force state machines to be idle also when a signal occurred */
+ so->tx.state = ISOTP_SHUTDOWN;
+ so->rx.state = ISOTP_IDLE;
spin_lock(&isotp_notifier_lock);
while (isotp_busy_notifier == so) {
@@ -1174,7 +1203,7 @@ static int isotp_release(struct socket *sock)
lock_sock(sk);
/* remove current filters & unregister */
- if (so->bound && isotp_register_txecho(so)) {
+ if (so->bound) {
if (so->ifindex) {
struct net_device *dev;
@@ -1194,22 +1223,30 @@ static int isotp_release(struct socket *sock)
}
}
+ hrtimer_cancel(&so->txfrtimer);
hrtimer_cancel(&so->txtimer);
hrtimer_cancel(&so->rxtimer);
so->ifindex = 0;
so->bound = 0;
+ if (so->rx.buf != so->rx.sbuf)
+ kfree(so->rx.buf);
+
+ if (so->tx.buf != so->tx.sbuf)
+ kfree(so->tx.buf);
+
sock_orphan(sk);
sock->sk = NULL;
release_sock(sk);
+ sock_prot_inuse_add(net, sk->sk_prot, -1);
sock_put(sk);
return 0;
}
-static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+static int isotp_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int len)
{
struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
struct sock *sk = sock->sk;
@@ -1225,6 +1262,9 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
if (len < ISOTP_MIN_NAMELEN)
return -EINVAL;
+ if (addr->can_family != AF_CAN)
+ return -EINVAL;
+
/* sanitize tx CAN identifier */
if (tx_id & CAN_EFF_FLAG)
tx_id &= (CAN_EFF_FLAG | CAN_EFF_MASK);
@@ -1273,7 +1313,7 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
err = -ENODEV;
goto out;
}
- if (dev->mtu < so->ll.mtu) {
+ if (READ_ONCE(dev->mtu) < so->ll.mtu) {
dev_put(dev);
err = -EINVAL;
goto out;
@@ -1287,14 +1327,12 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
can_rx_register(net, dev, rx_id, SINGLE_MASK(rx_id),
isotp_rcv, sk, "isotp", sk);
- if (isotp_register_txecho(so)) {
- /* no consecutive frame echo skb in flight */
- so->cfecho = 0;
+ /* no consecutive frame echo skb in flight */
+ so->cfecho = 0;
- /* register for echo skb's */
- can_rx_register(net, dev, tx_id, SINGLE_MASK(tx_id),
- isotp_rcv_echo, sk, "isotpe", sk);
- }
+ /* register for echo skb's */
+ can_rx_register(net, dev, tx_id, SINGLE_MASK(tx_id),
+ isotp_rcv_echo, sk, "isotpe", sk);
dev_put(dev);
@@ -1515,7 +1553,7 @@ static void isotp_notify(struct isotp_sock *so, unsigned long msg,
case NETDEV_UNREGISTER:
lock_sock(sk);
/* remove current filters & unregister */
- if (so->bound && isotp_register_txecho(so)) {
+ if (so->bound) {
if (isotp_register_rxid(so))
can_rx_unregister(dev_net(dev), dev, so->rxid,
SINGLE_MASK(so->rxid),
@@ -1593,10 +1631,15 @@ static int isotp_init(struct sock *sk)
so->rx.state = ISOTP_IDLE;
so->tx.state = ISOTP_IDLE;
- hrtimer_init(&so->rxtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
- so->rxtimer.function = isotp_rx_timer_handler;
- hrtimer_init(&so->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
- so->txtimer.function = isotp_tx_timer_handler;
+ so->rx.buf = so->rx.sbuf;
+ so->tx.buf = so->tx.sbuf;
+ so->rx.buflen = ARRAY_SIZE(so->rx.sbuf);
+ so->tx.buflen = ARRAY_SIZE(so->tx.sbuf);
+
+ hrtimer_setup(&so->rxtimer, isotp_rx_timer_handler, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
+ hrtimer_setup(&so->txtimer, isotp_tx_timer_handler, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
+ hrtimer_setup(&so->txfrtimer, isotp_txfr_timer_handler, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_SOFT);
init_waitqueue_head(&so->wait);
spin_lock_init(&so->rx_lock);
@@ -1608,6 +1651,21 @@ static int isotp_init(struct sock *sk)
return 0;
}
+static __poll_t isotp_poll(struct file *file, struct socket *sock, poll_table *wait)
+{
+ struct sock *sk = sock->sk;
+ struct isotp_sock *so = isotp_sk(sk);
+
+ __poll_t mask = datagram_poll(file, sock, wait);
+ poll_wait(file, &so->wait, wait);
+
+ /* Check for false positives due to TX state */
+ if ((mask & EPOLLWRNORM) && (so->tx.state != ISOTP_IDLE))
+ mask &= ~(EPOLLOUT | EPOLLWRNORM);
+
+ return mask;
+}
+
static int isotp_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
@@ -1623,7 +1681,7 @@ static const struct proto_ops isotp_ops = {
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = isotp_getname,
- .poll = datagram_poll,
+ .poll = isotp_poll,
.ioctl = isotp_sock_no_ioctlcmd,
.gettstamp = sock_gettstamp,
.listen = sock_no_listen,
@@ -1633,7 +1691,6 @@ static const struct proto_ops isotp_ops = {
.sendmsg = isotp_sendmsg,
.recvmsg = isotp_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
};
static struct proto isotp_proto __read_mostly = {
@@ -1658,7 +1715,10 @@ static __init int isotp_module_init(void)
{
int err;
- pr_info("can: isotp protocol\n");
+ max_pdu_size = max_t(unsigned int, max_pdu_size, MAX_12BIT_PDU_SIZE);
+ max_pdu_size = min_t(unsigned int, max_pdu_size, MAX_PDU_SIZE);
+
+ pr_info("can: isotp protocol (max_pdu_size %d)\n", max_pdu_size);
err = can_proto_register(&isotp_can_proto);
if (err < 0)
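
Two user-visible notes on the isotp changes above. First, max_pdu_size is a load-time parameter (module_param with 0444 permissions: readable under /sys/module/can_isotp/parameters/, not writable at runtime), set e.g. via 'modprobe can-isotp max_pdu_size=131072' and clamped to [4095, 1 MiB] in isotp_module_init(). Second, a busy tx state machine yields -EAGAIN for MSG_DONTWAIT senders, and a lost flow control or echo frame is reported as ECOMM by the tx timeout handler. A user-space sketch under those assumptions:

    /* sketch: send one ISO-TP PDU, tolerating a busy tx state machine
     * ('s' is a bound CAN_ISOTP socket; buf/len are caller-provided)
     */
    #include <errno.h>
    #include <sys/socket.h>
    #include <unistd.h>

    static ssize_t isotp_send_pdu(int s, const void *buf, size_t len)
    {
            for (;;) {
                    ssize_t n = send(s, buf, len, MSG_DONTWAIT);

                    if (n >= 0 || errno != EAGAIN)
                            return n;       /* sent, or a real error (e.g. ECOMM) */
                    usleep(1000);           /* previous PDU still in flight */
            }
    }
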
diff --git a/net/can/j1939/address-claim.c b/net/can/j1939/address-claim.c
index f33c47327927..ca4ad6cdd5cb 100644
--- a/net/can/j1939/address-claim.c
+++ b/net/can/j1939/address-claim.c
@@ -165,6 +165,46 @@ static void j1939_ac_process(struct j1939_priv *priv, struct sk_buff *skb)
* leaving this function.
*/
ecu = j1939_ecu_get_by_name_locked(priv, name);
+
+ if (ecu && ecu->addr == skcb->addr.sa) {
+ /* The ISO 11783-5 standard, in "4.5.2 - Address claim
+ * requirements", states:
+ * d) No CF shall begin, or resume, transmission on the
+ * network until 250 ms after it has successfully claimed
+ * an address except when responding to a request for
+ * address-claimed.
+ *
+ * But "Figure 6" and "Figure 7" in "4.5.4.2 - Address-claim
+ * prioritization" show that the CF begins the transmission
+ * after 250 ms from the first AC (address-claimed) message
+ * even if it sends another AC message during that time window
+ * to resolve the address contention with another CF.
+ *
+ * As stated in "4.4.2.3 - Address-claimed message":
+ * In order to successfully claim an address, the CF sending
+ * an address claimed message shall not receive a contending
+ * claim from another CF for at least 250 ms.
+ *
+ * As stated in "4.4.3.2 - NAME management (NM) message":
+ * 1) A commanding CF can
+ * d) request that a CF with a specified NAME transmit
+ * the address-claimed message with its current NAME.
+ * 2) A target CF shall
+ * d) send an address-claimed message in response to a
+ * request for a matching NAME
+ *
+ * Taking the above arguments into account, the 250 ms wait is
+ * requested only during network initialization.
+ *
+ * Do not restart the timer on AC message if both the NAME and
+ * the address match and so if the address has already been
+ * claimed (timer has expired) or the AC message has been sent
+ * to resolve the contention with another CF (timer is still
+ * running).
+ */
+ goto out_ecu_put;
+ }
+
if (!ecu && j1939_address_is_unicast(skcb->addr.sa))
ecu = j1939_ecu_create_locked(priv, name);
diff --git a/net/can/j1939/bus.c b/net/can/j1939/bus.c
index 486687901602..797719cb227e 100644
--- a/net/can/j1939/bus.c
+++ b/net/can/j1939/bus.c
@@ -158,8 +158,8 @@ struct j1939_ecu *j1939_ecu_create_locked(struct j1939_priv *priv, name_t name)
ecu->addr = J1939_IDLE_ADDR;
ecu->name = name;
- hrtimer_init(&ecu->ac_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
- ecu->ac_timer.function = j1939_ecu_timer_handler;
+ hrtimer_setup(&ecu->ac_timer, j1939_ecu_timer_handler, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_SOFT);
INIT_LIST_HEAD(&ecu->list);
j1939_priv_get(priv);
@@ -290,8 +290,11 @@ int j1939_local_ecu_get(struct j1939_priv *priv, name_t name, u8 sa)
if (!ecu)
ecu = j1939_ecu_create_locked(priv, name);
err = PTR_ERR_OR_ZERO(ecu);
- if (err)
+ if (err) {
+ if (j1939_address_is_unicast(sa))
+ priv->ents[sa].nusers--;
goto done;
+ }
ecu->nusers++;
/* TODO: do we care if ecu->addr != sa? */
diff --git a/net/can/j1939/j1939-priv.h b/net/can/j1939/j1939-priv.h
index 16af1a7f80f6..81f58924b4ac 100644
--- a/net/can/j1939/j1939-priv.h
+++ b/net/can/j1939/j1939-priv.h
@@ -86,7 +86,7 @@ struct j1939_priv {
unsigned int tp_max_packet_size;
/* lock for j1939_socks list */
- spinlock_t j1939_socks_lock;
+ rwlock_t j1939_socks_lock;
struct list_head j1939_socks;
struct kref rx_kref;
@@ -212,6 +212,7 @@ void j1939_priv_get(struct j1939_priv *priv);
/* notify/alert all j1939 sockets bound to ifindex */
void j1939_sk_netdev_event_netdown(struct j1939_priv *priv);
+void j1939_sk_netdev_event_unregister(struct j1939_priv *priv);
int j1939_cancel_active_session(struct j1939_priv *priv, struct sock *sk);
void j1939_tp_init(struct j1939_priv *priv);
@@ -301,6 +302,7 @@ struct j1939_sock {
int ifindex;
struct j1939_addr addr;
+ spinlock_t filters_lock;
struct j1939_filter *filters;
int nfilters;
pgn_t pgn_rx_filter;
diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c
index 821d4ff303b3..a93af55df5fd 100644
--- a/net/can/j1939/main.c
+++ b/net/can/j1939/main.c
@@ -30,10 +30,6 @@ MODULE_ALIAS("can-proto-" __stringify(CAN_J1939));
/* CAN_HDR: #bytes before can_frame data part */
#define J1939_CAN_HDR (offsetof(struct can_frame, data))
-/* CAN_FTR: #bytes beyond data part */
-#define J1939_CAN_FTR (sizeof(struct can_frame) - J1939_CAN_HDR - \
- sizeof(((struct can_frame *)0)->data))
-
/* lowest layer */
static void j1939_can_recv(struct sk_buff *iskb, void *data)
{
@@ -126,7 +122,7 @@ static void j1939_can_recv(struct sk_buff *iskb, void *data)
#define J1939_CAN_ID CAN_EFF_FLAG
#define J1939_CAN_MASK (CAN_EFF_FLAG | CAN_RTR_FLAG)
-static DEFINE_SPINLOCK(j1939_netdev_lock);
+static DEFINE_MUTEX(j1939_netdev_lock);
static struct j1939_priv *j1939_priv_create(struct net_device *ndev)
{
@@ -220,7 +216,7 @@ static void __j1939_rx_release(struct kref *kref)
j1939_can_rx_unregister(priv);
j1939_ecu_unmap_all(priv);
j1939_priv_set(priv->ndev, NULL);
- spin_unlock(&j1939_netdev_lock);
+ mutex_unlock(&j1939_netdev_lock);
}
/* get pointer to priv without increasing ref counter */
@@ -248,9 +244,9 @@ static struct j1939_priv *j1939_priv_get_by_ndev(struct net_device *ndev)
{
struct j1939_priv *priv;
- spin_lock(&j1939_netdev_lock);
+ mutex_lock(&j1939_netdev_lock);
priv = j1939_priv_get_by_ndev_locked(ndev);
- spin_unlock(&j1939_netdev_lock);
+ mutex_unlock(&j1939_netdev_lock);
return priv;
}
@@ -260,46 +256,48 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
struct j1939_priv *priv, *priv_new;
int ret;
- spin_lock(&j1939_netdev_lock);
+ mutex_lock(&j1939_netdev_lock);
priv = j1939_priv_get_by_ndev_locked(ndev);
if (priv) {
kref_get(&priv->rx_kref);
- spin_unlock(&j1939_netdev_lock);
+ mutex_unlock(&j1939_netdev_lock);
return priv;
}
- spin_unlock(&j1939_netdev_lock);
+ mutex_unlock(&j1939_netdev_lock);
priv = j1939_priv_create(ndev);
if (!priv)
return ERR_PTR(-ENOMEM);
j1939_tp_init(priv);
- spin_lock_init(&priv->j1939_socks_lock);
+ rwlock_init(&priv->j1939_socks_lock);
INIT_LIST_HEAD(&priv->j1939_socks);
- spin_lock(&j1939_netdev_lock);
+ mutex_lock(&j1939_netdev_lock);
priv_new = j1939_priv_get_by_ndev_locked(ndev);
if (priv_new) {
/* Someone was faster than us, use their priv and roll
* back our's.
*/
kref_get(&priv_new->rx_kref);
- spin_unlock(&j1939_netdev_lock);
+ mutex_unlock(&j1939_netdev_lock);
dev_put(ndev);
kfree(priv);
return priv_new;
}
j1939_priv_set(ndev, priv);
- spin_unlock(&j1939_netdev_lock);
ret = j1939_can_rx_register(priv);
if (ret < 0)
goto out_priv_put;
+ mutex_unlock(&j1939_netdev_lock);
return priv;
out_priv_put:
j1939_priv_set(ndev, NULL);
+ mutex_unlock(&j1939_netdev_lock);
+
dev_put(ndev);
kfree(priv);
@@ -308,7 +306,7 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
void j1939_netdev_stop(struct j1939_priv *priv)
{
- kref_put_lock(&priv->rx_kref, __j1939_rx_release, &j1939_netdev_lock);
+ kref_put_mutex(&priv->rx_kref, __j1939_rx_release, &j1939_netdev_lock);
j1939_priv_put(priv);
}
@@ -340,7 +338,7 @@ int j1939_send_one(struct j1939_priv *priv, struct sk_buff *skb)
memset(cf, 0, J1939_CAN_HDR);
/* make it a full can frame again */
- skb_put(skb, J1939_CAN_FTR + (8 - dlc));
+ skb_put_zero(skb, 8 - dlc);
canid = CAN_EFF_FLAG |
(skcb->priority << 26) |
@@ -379,6 +377,11 @@ static int j1939_netdev_notify(struct notifier_block *nb,
j1939_sk_netdev_event_netdown(priv);
j1939_ecu_unmap_all(priv);
break;
+ case NETDEV_UNREGISTER:
+ j1939_cancel_active_session(priv, NULL);
+ j1939_sk_netdev_event_netdown(priv);
+ j1939_sk_netdev_event_unregister(priv);
+ break;
}
j1939_priv_put(priv);
diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
index 7e90f9e61d9b..6272326dd614 100644
--- a/net/can/j1939/socket.c
+++ b/net/can/j1939/socket.c
@@ -80,16 +80,16 @@ static void j1939_jsk_add(struct j1939_priv *priv, struct j1939_sock *jsk)
jsk->state |= J1939_SOCK_BOUND;
j1939_priv_get(priv);
- spin_lock_bh(&priv->j1939_socks_lock);
+ write_lock_bh(&priv->j1939_socks_lock);
list_add_tail(&jsk->list, &priv->j1939_socks);
- spin_unlock_bh(&priv->j1939_socks_lock);
+ write_unlock_bh(&priv->j1939_socks_lock);
}
static void j1939_jsk_del(struct j1939_priv *priv, struct j1939_sock *jsk)
{
- spin_lock_bh(&priv->j1939_socks_lock);
+ write_lock_bh(&priv->j1939_socks_lock);
list_del_init(&jsk->list);
- spin_unlock_bh(&priv->j1939_socks_lock);
+ write_unlock_bh(&priv->j1939_socks_lock);
j1939_priv_put(priv);
jsk->state &= ~J1939_SOCK_BOUND;
@@ -262,12 +262,17 @@ static bool j1939_sk_match_dst(struct j1939_sock *jsk,
static bool j1939_sk_match_filter(struct j1939_sock *jsk,
const struct j1939_sk_buff_cb *skcb)
{
- const struct j1939_filter *f = jsk->filters;
- int nfilter = jsk->nfilters;
+ const struct j1939_filter *f;
+ int nfilter;
+
+ spin_lock_bh(&jsk->filters_lock);
+
+ f = jsk->filters;
+ nfilter = jsk->nfilters;
if (!nfilter)
/* receive all when no filters are assigned */
- return true;
+ goto filter_match_found;
for (; nfilter; ++f, --nfilter) {
if ((skcb->addr.pgn & f->pgn_mask) != f->pgn)
@@ -276,9 +281,15 @@ static bool j1939_sk_match_filter(struct j1939_sock *jsk,
continue;
if ((skcb->addr.src_name & f->name_mask) != f->name)
continue;
- return true;
+ goto filter_match_found;
}
+
+ spin_unlock_bh(&jsk->filters_lock);
return false;
+
+filter_match_found:
+ spin_unlock_bh(&jsk->filters_lock);
+ return true;
}
static bool j1939_sk_recv_match_one(struct j1939_sock *jsk,
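
The filter walk above pairs with the SO_J1939_FILTER socket option. A hedged userspace sketch installing a single PGN filter (j1939_set_pgn_filter is an illustrative helper; the sockopt and struct are the real UAPI):

#include <string.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/j1939.h>

/* Install one PGN filter; with no filter installed the socket receives
 * everything, matching the "receive all" branch above.
 */
static int j1939_set_pgn_filter(int sock, pgn_t pgn)
{
	struct j1939_filter filt;

	memset(&filt, 0, sizeof(filt));
	filt.pgn = pgn;
	filt.pgn_mask = J1939_PGN_MAX;	/* match this PGN exactly */

	return setsockopt(sock, SOL_CAN_J1939, SO_J1939_FILTER,
			  &filt, sizeof(filt));
}
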
@@ -300,6 +311,7 @@ static void j1939_sk_recv_one(struct j1939_sock *jsk, struct sk_buff *oskb)
{
const struct j1939_sk_buff_cb *oskcb = j1939_skb_to_cb(oskb);
struct j1939_sk_buff_cb *skcb;
+ enum skb_drop_reason reason;
struct sk_buff *skb;
if (oskb->sk == &jsk->sk)
@@ -320,8 +332,8 @@ static void j1939_sk_recv_one(struct j1939_sock *jsk, struct sk_buff *oskb)
if (skb->sk)
skcb->msg_flags |= MSG_DONTROUTE;
- if (sock_queue_rcv_skb(&jsk->sk, skb) < 0)
- kfree_skb(skb);
+ if (sock_queue_rcv_skb_reason(&jsk->sk, skb, &reason) < 0)
+ sk_skb_reason_drop(&jsk->sk, skb, reason);
}
bool j1939_sk_recv_match(struct j1939_priv *priv, struct j1939_sk_buff_cb *skcb)
@@ -329,13 +341,13 @@ bool j1939_sk_recv_match(struct j1939_priv *priv, struct j1939_sk_buff_cb *skcb)
struct j1939_sock *jsk;
bool match = false;
- spin_lock_bh(&priv->j1939_socks_lock);
+ read_lock_bh(&priv->j1939_socks_lock);
list_for_each_entry(jsk, &priv->j1939_socks, list) {
match = j1939_sk_recv_match_one(jsk, skcb);
if (match)
break;
}
- spin_unlock_bh(&priv->j1939_socks_lock);
+ read_unlock_bh(&priv->j1939_socks_lock);
return match;
}
@@ -344,11 +356,11 @@ void j1939_sk_recv(struct j1939_priv *priv, struct sk_buff *skb)
{
struct j1939_sock *jsk;
- spin_lock_bh(&priv->j1939_socks_lock);
+ read_lock_bh(&priv->j1939_socks_lock);
list_for_each_entry(jsk, &priv->j1939_socks, list) {
j1939_sk_recv_one(jsk, skb);
}
- spin_unlock_bh(&priv->j1939_socks_lock);
+ read_unlock_bh(&priv->j1939_socks_lock);
}
static void j1939_sk_sock_destruct(struct sock *sk)
@@ -401,6 +413,7 @@ static int j1939_sk_init(struct sock *sk)
atomic_set(&jsk->skb_pending, 0);
spin_lock_init(&jsk->sk_session_queue_lock);
INIT_LIST_HEAD(&jsk->sk_session_queue);
+ spin_lock_init(&jsk->filters_lock);
/* j1939_sk_sock_destruct() depends on SOCK_RCU_FREE flag */
sock_set_flag(sk, SOCK_RCU_FREE);
@@ -427,7 +440,7 @@ static int j1939_sk_sanity_check(struct sockaddr_can *addr, int len)
return 0;
}
-static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+static int j1939_sk_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int len)
{
struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
struct j1939_sock *jsk = j1939_sk(sock->sk);
@@ -508,6 +521,9 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
ret = j1939_local_ecu_get(priv, jsk->addr.src_name, jsk->addr.sa);
if (ret) {
j1939_netdev_stop(priv);
+ jsk->priv = NULL;
+ synchronize_rcu();
+ j1939_priv_put(priv);
goto out_release_sock;
}
@@ -519,7 +535,7 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
return ret;
}
-static int j1939_sk_connect(struct socket *sock, struct sockaddr *uaddr,
+static int j1939_sk_connect(struct socket *sock, struct sockaddr_unsized *uaddr,
int len, int flags)
{
struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
@@ -643,6 +659,7 @@ static int j1939_sk_release(struct socket *sock)
sock->sk = NULL;
release_sock(sk);
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
sock_put(sk);
return 0;
@@ -703,9 +720,11 @@ static int j1939_sk_setsockopt(struct socket *sock, int level, int optname,
}
lock_sock(&jsk->sk);
+ spin_lock_bh(&jsk->filters_lock);
ofilters = jsk->filters;
jsk->filters = filters;
jsk->nfilters = count;
+ spin_unlock_bh(&jsk->filters_lock);
release_sock(&jsk->sk);
kfree(ofilters);
return 0;
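
The writer side mirrors the reader in j1939_sk_match_filter(): publish the new array under filters_lock, free the old one only after unlocking so no reader can still be walking it. A minimal pthread analogue (names illustrative; pthread_spin_init() assumed done at startup):

#include <pthread.h>
#include <stdlib.h>

struct filt { int pgn; };

static pthread_spinlock_t filters_lock;	/* pthread_spin_init() at startup */
static struct filt *filters;
static int nfilters;

static void set_filters(struct filt *new, int count)
{
	struct filt *old;

	pthread_spin_lock(&filters_lock);
	old = filters;		/* unpublish the old array under the lock */
	filters = new;
	nfilters = count;
	pthread_spin_unlock(&filters_lock);

	free(old);		/* safe: no reader can reach it anymore */
}
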
@@ -798,7 +817,7 @@ static int j1939_sk_recvmsg(struct socket *sock, struct msghdr *msg,
struct j1939_sk_buff_cb *skcb;
int ret = 0;
- if (flags & ~(MSG_DONTWAIT | MSG_ERRQUEUE))
+ if (flags & ~(MSG_DONTWAIT | MSG_ERRQUEUE | MSG_CMSG_COMPAT))
return -EINVAL;
if (flags & MSG_ERRQUEUE)
@@ -884,7 +903,7 @@ static struct sk_buff *j1939_sk_alloc_skb(struct net_device *ndev,
skcb = j1939_skb_to_cb(skb);
memset(skcb, 0, sizeof(*skcb));
skcb->addr = jsk->addr;
- skcb->priority = j1939_prio(sk->sk_priority);
+ skcb->priority = j1939_prio(READ_ONCE(sk->sk_priority));
if (msg->msg_name) {
struct sockaddr_can *addr = msg->msg_name;
@@ -974,6 +993,7 @@ static void __j1939_sk_errqueue(struct j1939_session *session, struct sock *sk,
struct sock_exterr_skb *serr;
struct sk_buff *skb;
char *state = "UNK";
+ u32 tsflags;
int err;
jsk = j1939_sk(sk);
@@ -981,13 +1001,14 @@ static void __j1939_sk_errqueue(struct j1939_session *session, struct sock *sk,
if (!(jsk->state & J1939_SOCK_ERRQUEUE))
return;
+ tsflags = READ_ONCE(sk->sk_tsflags);
switch (type) {
case J1939_ERRQUEUE_TX_ACK:
- if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK))
+ if (!(tsflags & SOF_TIMESTAMPING_TX_ACK))
return;
break;
case J1939_ERRQUEUE_TX_SCHED:
- if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_SCHED))
+ if (!(tsflags & SOF_TIMESTAMPING_TX_SCHED))
return;
break;
case J1939_ERRQUEUE_TX_ABORT:
@@ -997,7 +1018,7 @@ static void __j1939_sk_errqueue(struct j1939_session *session, struct sock *sk,
case J1939_ERRQUEUE_RX_DPO:
fallthrough;
case J1939_ERRQUEUE_RX_ABORT:
- if (!(sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE))
+ if (!(tsflags & SOF_TIMESTAMPING_RX_SOFTWARE))
return;
break;
default:
@@ -1054,7 +1075,7 @@ static void __j1939_sk_errqueue(struct j1939_session *session, struct sock *sk,
}
serr->opt_stats = true;
- if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
+ if (tsflags & SOF_TIMESTAMPING_OPT_ID)
serr->ee.ee_data = session->tskey;
netdev_dbg(session->priv->ndev, "%s: 0x%p tskey: %i, state: %s\n",
@@ -1078,16 +1099,21 @@ void j1939_sk_errqueue(struct j1939_session *session,
}
/* spread RX notifications to all sockets subscribed to this session */
- spin_lock_bh(&priv->j1939_socks_lock);
+ read_lock_bh(&priv->j1939_socks_lock);
list_for_each_entry(jsk, &priv->j1939_socks, list) {
if (j1939_sk_recv_match_one(jsk, &session->skcb))
__j1939_sk_errqueue(session, &jsk->sk, type);
}
- spin_unlock_bh(&priv->j1939_socks_lock);
+ read_unlock_bh(&priv->j1939_socks_lock);
};
void j1939_sk_send_loop_abort(struct sock *sk, int err)
{
+ struct j1939_sock *jsk = j1939_sk(sk);
+
+ if (jsk->state & J1939_SOCK_ERRQUEUE)
+ return;
+
sk->sk_err = err;
sk_error_report(sk);
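
These tsflags checks gate which error-queue events a socket sees. A sketch of the matching userspace opt-in via SO_TIMESTAMPING (the helper name is illustrative; the flags are the real UAPI):

#include <sys/socket.h>
#include <linux/net_tstamp.h>

/* Subscribe to the TX/RX notifications gated by the tsflags checks
 * above; SOF_TIMESTAMPING_OPT_ID makes ee_data carry the session tskey.
 */
static int j1939_enable_errqueue_events(int sock)
{
	int flags = SOF_TIMESTAMPING_TX_ACK |
		    SOF_TIMESTAMPING_TX_SCHED |
		    SOF_TIMESTAMPING_RX_SOFTWARE |
		    SOF_TIMESTAMPING_OPT_ID;

	return setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
			  &flags, sizeof(flags));
}
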
@@ -1111,7 +1137,7 @@ static int j1939_sk_send_loop(struct j1939_priv *priv, struct sock *sk,
todo_size = size;
- while (todo_size) {
+ do {
struct j1939_sk_buff_cb *skcb;
segment_size = min_t(size_t, J1939_MAX_TP_PACKET_SIZE,
@@ -1156,7 +1182,7 @@ static int j1939_sk_send_loop(struct j1939_priv *priv, struct sock *sk,
todo_size -= segment_size;
session->total_queued_size += segment_size;
- }
+ } while (todo_size);
switch (ret) {
case 0: /* OK */
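
A standalone toy showing why the loop became do/while: a zero-byte send still has to enter the body once to queue an (empty) segment, which while (todo_size) would skip. The 8-byte segment size is a toy stand-in for J1939_MAX_TP_PACKET_SIZE:

#include <stddef.h>
#include <stdio.h>

static int segments_queued(size_t todo_size)
{
	int segments = 0;

	do {
		segments++;	/* one skb queued per iteration */
		todo_size -= todo_size < 8 ? todo_size : 8;
	} while (todo_size);

	return segments;
}

int main(void)
{
	printf("0 bytes  -> %d segment(s)\n", segments_queued(0));  /* 1 */
	printf("20 bytes -> %d segment(s)\n", segments_queued(20)); /* 3 */
	return 0;
}
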
@@ -1266,7 +1292,7 @@ void j1939_sk_netdev_event_netdown(struct j1939_priv *priv)
struct j1939_sock *jsk;
int error_code = ENETDOWN;
- spin_lock_bh(&priv->j1939_socks_lock);
+ read_lock_bh(&priv->j1939_socks_lock);
list_for_each_entry(jsk, &priv->j1939_socks, list) {
jsk->sk.sk_err = error_code;
if (!sock_flag(&jsk->sk, SOCK_DEAD))
@@ -1274,7 +1300,56 @@ void j1939_sk_netdev_event_netdown(struct j1939_priv *priv)
j1939_sk_queue_drop_all(priv, jsk, error_code);
}
- spin_unlock_bh(&priv->j1939_socks_lock);
+ read_unlock_bh(&priv->j1939_socks_lock);
+}
+
+void j1939_sk_netdev_event_unregister(struct j1939_priv *priv)
+{
+ struct sock *sk;
+ struct j1939_sock *jsk;
+ bool wait_rcu = false;
+
+rescan: /* The caller is holding a ref on this "priv" via j1939_priv_get_by_ndev(). */
+ read_lock_bh(&priv->j1939_socks_lock);
+ list_for_each_entry(jsk, &priv->j1939_socks, list) {
+ /* Skip if j1939_jsk_add() has not been called on this socket. */
+ if (!(jsk->state & J1939_SOCK_BOUND))
+ continue;
+ sk = &jsk->sk;
+ sock_hold(sk);
+ read_unlock_bh(&priv->j1939_socks_lock);
+ /* After taking the socket's lock, re-check that j1939_jsk_del() has not
+ * been called on this socket yet, for both j1939_sk_bind() and
+ * j1939_sk_release() call j1939_jsk_del() with the socket's lock held.
+ */
+ lock_sock(sk);
+ if (jsk->state & J1939_SOCK_BOUND) {
+ /* Neither j1939_sk_bind() nor j1939_sk_release() called j1939_jsk_del().
+ * Make this socket no longer bound, behaving as if j1939_sk_bind()
+ * had dropped its old references but not taken new ones.
+ */
+ j1939_jsk_del(priv, jsk);
+ j1939_local_ecu_put(priv, jsk->addr.src_name, jsk->addr.sa);
+ j1939_netdev_stop(priv);
+ /* Call j1939_priv_put() now and prevent j1939_sk_sock_destruct() from
+ * calling the corresponding j1939_priv_put().
+ *
+ * j1939_sk_sock_destruct() is supposed to call j1939_priv_put() after
+ * an RCU grace period. But since the caller is holding a ref on this
+ * "priv", we can defer synchronize_rcu() until immediately before
+ * the caller calls j1939_priv_put().
+ */
+ j1939_priv_put(priv);
+ jsk->priv = NULL;
+ wait_rcu = true;
+ }
+ release_sock(sk);
+ sock_put(sk);
+ goto rescan;
+ }
+ read_unlock_bh(&priv->j1939_socks_lock);
+ if (wait_rcu)
+ synchronize_rcu();
}
static int j1939_sk_no_ioctlcmd(struct socket *sock, unsigned int cmd,
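
A minimal pthread sketch of the rescan pattern used above: drop the list lock before taking the sleeping per-object lock, re-check the flag, then restart the scan because the list may have changed. The kernel version additionally pins the socket with sock_hold() across the unlock, omitted here for brevity (all names illustrative):

#include <pthread.h>
#include <stdbool.h>

struct node {
	struct node *next;
	pthread_mutex_t lock;	/* stands in for lock_sock() */
	bool bound;		/* stands in for J1939_SOCK_BOUND */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

static void unbind_all(void)
{
	struct node *n;

rescan:
	pthread_mutex_lock(&list_lock);
	for (n = head; n; n = n->next) {
		if (!n->bound)
			continue;
		/* drop the list lock before taking the sleeping per-node
		 * lock, then re-check the flag as it may have changed
		 */
		pthread_mutex_unlock(&list_lock);
		pthread_mutex_lock(&n->lock);
		if (n->bound)
			n->bound = false;	/* the j1939_jsk_del() step */
		pthread_mutex_unlock(&n->lock);
		goto rescan;		/* list may have changed meanwhile */
	}
	pthread_mutex_unlock(&list_lock);
}
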
@@ -1301,7 +1376,6 @@ static const struct proto_ops j1939_ops = {
.sendmsg = j1939_sk_sendmsg,
.recvmsg = j1939_sk_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
};
static struct proto j1939_proto __read_mostly = {
diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
index 5c722b55fe23..fbf5c8001c9d 100644
--- a/net/can/j1939/transport.c
+++ b/net/can/j1939/transport.c
@@ -382,8 +382,9 @@ sk_buff *j1939_session_skb_get_by_offset(struct j1939_session *session,
skb_queue_walk(&session->skb_queue, do_skb) {
do_skcb = j1939_skb_to_cb(do_skb);
- if (offset_start >= do_skcb->offset &&
- offset_start < (do_skcb->offset + do_skb->len)) {
+ if ((offset_start >= do_skcb->offset &&
+ offset_start < (do_skcb->offset + do_skb->len)) ||
+ (offset_start == 0 && do_skcb->offset == 0 && do_skb->len == 0)) {
skb = do_skb;
}
}
@@ -604,7 +605,10 @@ sk_buff *j1939_tp_tx_dat_new(struct j1939_priv *priv,
/* reserve CAN header */
skb_reserve(skb, offsetof(struct can_frame, data));
- memcpy(skb->cb, re_skcb, sizeof(skb->cb));
+ /* skb->cb must be large enough to hold a j1939_sk_buff_cb structure */
+ BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*re_skcb));
+
+ memcpy(skb->cb, re_skcb, sizeof(*re_skcb));
skcb = j1939_skb_to_cb(skb);
if (swap_src_dst)
j1939_skbcb_swap(skcb);
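
The added BUILD_BUG_ON() guards the memcpy() against a control block that outgrows skb->cb. A userspace analogue using C11 _Static_assert (struct names are illustrative; 48 matches the size of skb->cb):

#include <stddef.h>

struct cb_buffer { char cb[48]; };	/* like skb->cb */
struct proto_cb  { long a, b, c; };	/* like j1939_sk_buff_cb */

/* fail the build, not the runtime, if the control block stops fitting */
_Static_assert(sizeof(((struct cb_buffer *)0)->cb) >= sizeof(struct proto_cb),
	       "control block does not fit into cb[]");
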
@@ -1092,10 +1096,6 @@ static bool j1939_session_deactivate(struct j1939_session *session)
bool active;
j1939_session_list_lock(priv);
- /* This function should be called with a session ref-count of at
- * least 2.
- */
- WARN_ON_ONCE(kref_read(&session->kref) < 2);
active = j1939_session_deactivate_locked(session);
j1939_session_list_unlock(priv);
@@ -1128,8 +1128,6 @@ static void __j1939_session_cancel(struct j1939_session *session,
if (session->sk)
j1939_sk_send_loop_abort(session->sk, session->err);
- else
- j1939_sk_errqueue(session, J1939_ERRQUEUE_RX_ABORT);
}
static void j1939_session_cancel(struct j1939_session *session,
@@ -1144,6 +1142,9 @@ static void j1939_session_cancel(struct j1939_session *session,
}
j1939_session_list_unlock(session->priv);
+
+ if (!session->sk)
+ j1939_sk_errqueue(session, J1939_ERRQUEUE_RX_ABORT);
}
static enum hrtimer_restart j1939_tp_txtimer(struct hrtimer *hrtimer)
@@ -1179,10 +1180,10 @@ static enum hrtimer_restart j1939_tp_txtimer(struct hrtimer *hrtimer)
break;
case -ENETDOWN:
/* In this case we should get a netdev_event(), all active
- * sessions will be cleared by
- * j1939_cancel_all_active_sessions(). So handle this as an
- * error, but let j1939_cancel_all_active_sessions() do the
- * cleanup including propagation of the error to user space.
+ * sessions will be cleared by j1939_cancel_active_session().
+ * So handle this as an error, but let
+ * j1939_cancel_active_session() do the cleanup including
+ * propagation of the error to user space.
*/
break;
case -EOVERFLOW:
@@ -1257,6 +1258,9 @@ static enum hrtimer_restart j1939_tp_rxtimer(struct hrtimer *hrtimer)
__j1939_session_cancel(session, J1939_XTP_ABORT_TIMEOUT);
}
j1939_session_list_unlock(session->priv);
+
+ if (!session->sk)
+ j1939_sk_errqueue(session, J1939_ERRQUEUE_RX_ABORT);
}
j1939_session_put(session);
@@ -1502,17 +1506,13 @@ static struct j1939_session *j1939_session_new(struct j1939_priv *priv,
session->state = J1939_SESSION_NEW;
skb_queue_head_init(&session->skb_queue);
- skb_queue_tail(&session->skb_queue, skb);
+ skb_queue_tail(&session->skb_queue, skb_get(skb));
skcb = j1939_skb_to_cb(skb);
memcpy(&session->skcb, skcb, sizeof(session->skcb));
- hrtimer_init(&session->txtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_SOFT);
- session->txtimer.function = j1939_tp_txtimer;
- hrtimer_init(&session->rxtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_SOFT);
- session->rxtimer.function = j1939_tp_rxtimer;
+ hrtimer_setup(&session->txtimer, j1939_tp_txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
+ hrtimer_setup(&session->rxtimer, j1939_tp_rxtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
netdev_dbg(priv->ndev, "%s: 0x%p: sa: %02x, da: %02x\n",
__func__, session, skcb->addr.sa, skcb->addr.da);
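
hrtimer_setup() folds hrtimer_init() plus the open-coded assignment of timer->function into one call, as done for txtimer/rxtimer above. A kernel-style sketch of the new form, assuming a hypothetical struct my_session:

#include <linux/hrtimer.h>

struct my_session {
	struct hrtimer timer;
};

static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
{
	return HRTIMER_NORESTART;
}

static void my_session_init(struct my_session *s)
{
	/* one call sets clock, mode and callback together */
	hrtimer_setup(&s->timer, my_timer_fn, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL_SOFT);
}
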
@@ -1590,8 +1590,8 @@ j1939_session *j1939_xtp_rx_rts_session_new(struct j1939_priv *priv,
struct j1939_sk_buff_cb skcb = *j1939_skb_to_cb(skb);
struct j1939_session *session;
const u8 *dat;
+ int len, ret;
pgn_t pgn;
- int len;
netdev_dbg(priv->ndev, "%s\n", __func__);
@@ -1650,7 +1650,22 @@ j1939_session *j1939_xtp_rx_rts_session_new(struct j1939_priv *priv,
session->tskey = priv->rx_tskey++;
j1939_sk_errqueue(session, J1939_ERRQUEUE_RX_RTS);
- WARN_ON_ONCE(j1939_session_activate(session));
+ ret = j1939_session_activate(session);
+ if (ret) {
+ /* Entering this scope indicates an issue with the J1939 bus.
+ * Possible scenarios include:
+ * - A time lapse occurred, and a new session was initiated
+ * because another packet was sent correctly in the meantime.
+ * This could have been caused by an overly long interrupt, a
+ * debugger, or the task being scheduled out by another task.
+ * - The bus is receiving numerous erroneous packets, either
+ * from a malfunctioning device or during a test scenario.
+ */
+ netdev_alert(priv->ndev, "%s: 0x%p: concurrent session with same addr (%02x %02x) is already active.\n",
+ __func__, session, skcb.addr.sa, skcb.addr.da);
+ j1939_session_put(session);
+ return NULL;
+ }
return session;
}
@@ -1678,6 +1693,8 @@ static int j1939_xtp_rx_rts_session_active(struct j1939_session *session,
j1939_session_timers_cancel(session);
j1939_session_cancel(session, J1939_XTP_ABORT_BUSY);
+ if (session->transmission)
+ j1939_session_deactivate_activate_next(session);
return -EBUSY;
}
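
Both cancel paths now deliver the RX_ABORT notification only after the session list lock has been dropped. A small pthread sketch of that "mutate under lock, notify after unlock" shape (all names illustrative):

#include <pthread.h>

struct session { int aborted; };

static pthread_mutex_t session_list_lock = PTHREAD_MUTEX_INITIALIZER;

static void notify_user(struct session *s)
{
	(void)s;	/* would queue an error message; may take other locks */
}

static void cancel_session(struct session *s)
{
	pthread_mutex_lock(&session_list_lock);
	s->aborted = 1;			/* state change under the lock */
	pthread_mutex_unlock(&session_list_lock);

	notify_user(s);			/* notification after unlock */
}
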
diff --git a/net/can/proc.c b/net/can/proc.c
index bbce97825f13..0938bf7dd646 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -114,10 +114,17 @@ static unsigned long calc_rate(unsigned long oldjif, unsigned long newjif,
void can_stat_update(struct timer_list *t)
{
- struct net *net = from_timer(net, t, can.stattimer);
+ struct net *net = timer_container_of(net, t, can.stattimer);
struct can_pkg_stats *pkg_stats = net->can.pkg_stats;
unsigned long j = jiffies; /* snapshot */
+ long rx_frames = atomic_long_read(&pkg_stats->rx_frames);
+ long tx_frames = atomic_long_read(&pkg_stats->tx_frames);
+ long matches = atomic_long_read(&pkg_stats->matches);
+ long rx_frames_delta = atomic_long_read(&pkg_stats->rx_frames_delta);
+ long tx_frames_delta = atomic_long_read(&pkg_stats->tx_frames_delta);
+ long matches_delta = atomic_long_read(&pkg_stats->matches_delta);
+
/* restart counting in timer context on user request */
if (user_reset)
can_init_stats(net);
@@ -127,35 +134,33 @@ void can_stat_update(struct timer_list *t)
can_init_stats(net);
/* prevent overflow in calc_rate() */
- if (pkg_stats->rx_frames > (ULONG_MAX / HZ))
+ if (rx_frames > (LONG_MAX / HZ))
can_init_stats(net);
/* prevent overflow in calc_rate() */
- if (pkg_stats->tx_frames > (ULONG_MAX / HZ))
+ if (tx_frames > (LONG_MAX / HZ))
can_init_stats(net);
/* matches overflow - very improbable */
- if (pkg_stats->matches > (ULONG_MAX / 100))
+ if (matches > (LONG_MAX / 100))
can_init_stats(net);
/* calc total values */
- if (pkg_stats->rx_frames)
- pkg_stats->total_rx_match_ratio = (pkg_stats->matches * 100) /
- pkg_stats->rx_frames;
+ if (rx_frames)
+ pkg_stats->total_rx_match_ratio = (matches * 100) / rx_frames;
pkg_stats->total_tx_rate = calc_rate(pkg_stats->jiffies_init, j,
- pkg_stats->tx_frames);
+ tx_frames);
pkg_stats->total_rx_rate = calc_rate(pkg_stats->jiffies_init, j,
- pkg_stats->rx_frames);
+ rx_frames);
/* calc current values */
- if (pkg_stats->rx_frames_delta)
+ if (rx_frames_delta)
pkg_stats->current_rx_match_ratio =
- (pkg_stats->matches_delta * 100) /
- pkg_stats->rx_frames_delta;
+ (matches_delta * 100) / rx_frames_delta;
- pkg_stats->current_tx_rate = calc_rate(0, HZ, pkg_stats->tx_frames_delta);
- pkg_stats->current_rx_rate = calc_rate(0, HZ, pkg_stats->rx_frames_delta);
+ pkg_stats->current_tx_rate = calc_rate(0, HZ, tx_frames_delta);
+ pkg_stats->current_rx_rate = calc_rate(0, HZ, rx_frames_delta);
/* check / update maximum values */
if (pkg_stats->max_tx_rate < pkg_stats->current_tx_rate)
@@ -168,9 +173,9 @@ void can_stat_update(struct timer_list *t)
pkg_stats->max_rx_match_ratio = pkg_stats->current_rx_match_ratio;
/* clear values for 'current rate' calculation */
- pkg_stats->tx_frames_delta = 0;
- pkg_stats->rx_frames_delta = 0;
- pkg_stats->matches_delta = 0;
+ atomic_long_set(&pkg_stats->tx_frames_delta, 0);
+ atomic_long_set(&pkg_stats->rx_frames_delta, 0);
+ atomic_long_set(&pkg_stats->matches_delta, 0);
/* restart timer (one second) */
mod_timer(&net->can.stattimer, round_jiffies(jiffies + HZ));
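
can_stat_update() now snapshots each atomic counter exactly once before computing rates and ratios. A compact C11 sketch of the same snapshot-then-compute pattern (names illustrative):

#include <stdatomic.h>

static atomic_long rx_frames;
static atomic_long matches;

/* read each counter once into a local, so the ratio is computed from
 * one consistent pair of values even while other threads keep
 * incrementing the live counters
 */
static long rx_match_ratio(void)
{
	long rx = atomic_load(&rx_frames);
	long m = atomic_load(&matches);

	return rx ? (m * 100) / rx : 0;
}
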
@@ -214,9 +219,12 @@ static int can_stats_proc_show(struct seq_file *m, void *v)
struct can_rcv_lists_stats *rcv_lists_stats = net->can.rcv_lists_stats;
seq_putc(m, '\n');
- seq_printf(m, " %8ld transmitted frames (TXF)\n", pkg_stats->tx_frames);
- seq_printf(m, " %8ld received frames (RXF)\n", pkg_stats->rx_frames);
- seq_printf(m, " %8ld matched frames (RXMF)\n", pkg_stats->matches);
+ seq_printf(m, " %8ld transmitted frames (TXF)\n",
+ atomic_long_read(&pkg_stats->tx_frames));
+ seq_printf(m, " %8ld received frames (RXF)\n",
+ atomic_long_read(&pkg_stats->rx_frames));
+ seq_printf(m, " %8ld matched frames (RXMF)\n",
+ atomic_long_read(&pkg_stats->matches));
seq_putc(m, '\n');
diff --git a/net/can/raw.c b/net/can/raw.c
index 81071cdb0301..be1ef7cf4204 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -75,25 +75,31 @@ MODULE_ALIAS("can-proto-1");
*/
struct uniqframe {
- int skbcnt;
const struct sk_buff *skb;
+ int skbcnt;
unsigned int join_rx_count;
};
struct raw_sock {
struct sock sk;
- int bound;
- int ifindex;
+ struct net_device *dev;
+ netdevice_tracker dev_tracker;
struct list_head notifier;
- int loopback;
- int recv_own_msgs;
- int fd_frames;
- int xl_frames;
- int join_filters;
+ int ifindex;
+ unsigned int bound:1;
+ unsigned int loopback:1;
+ unsigned int recv_own_msgs:1;
+ unsigned int fd_frames:1;
+ unsigned int xl_frames:1;
+ unsigned int join_filters:1;
+ struct can_raw_vcid_options raw_vcid_opts;
+ canid_t tx_vcid_shifted;
+ canid_t rx_vcid_shifted;
+ canid_t rx_vcid_mask_shifted;
+ can_err_mask_t err_mask;
int count; /* number of active filters */
struct can_filter dfilter; /* default/single filter */
struct can_filter *filter; /* pointer to filter(s) */
- can_err_mask_t err_mask;
struct uniqframe __percpu *uniq;
};
@@ -123,6 +129,7 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
{
struct sock *sk = (struct sock *)data;
struct raw_sock *ro = raw_sk(sk);
+ enum skb_drop_reason reason;
struct sockaddr_can *addr;
struct sk_buff *skb;
unsigned int *pflags;
@@ -132,10 +139,29 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
return;
/* make sure to not pass oversized frames to the socket */
- if ((can_is_canfd_skb(oskb) && !ro->fd_frames && !ro->xl_frames) ||
- (can_is_canxl_skb(oskb) && !ro->xl_frames))
+ if (!ro->fd_frames && can_is_canfd_skb(oskb))
return;
+ if (can_is_canxl_skb(oskb)) {
+ struct canxl_frame *cxl = (struct canxl_frame *)oskb->data;
+
+ /* make sure to not pass oversized frames to the socket */
+ if (!ro->xl_frames)
+ return;
+
+ /* filter CAN XL VCID content */
+ if (ro->raw_vcid_opts.flags & CAN_RAW_XL_VCID_RX_FILTER) {
+ /* apply VCID filter if user enabled the filter */
+ if ((cxl->prio & ro->rx_vcid_mask_shifted) !=
+ (ro->rx_vcid_shifted & ro->rx_vcid_mask_shifted))
+ return;
+ } else {
+ /* no filter => do not forward VCID tagged frames */
+ if (cxl->prio & CANXL_VCID_MASK)
+ return;
+ }
+ }
+
/* eliminate multiple filter matches for the same skb */
if (this_cpu_ptr(ro->uniq)->skb == oskb &&
this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) {
@@ -180,8 +206,8 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
if (oskb->sk == sk)
*pflags |= MSG_CONFIRM;
- if (sock_queue_rcv_skb(sk, skb) < 0)
- kfree_skb(skb);
+ if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0)
+ sk_skb_reason_drop(sk, skb, reason);
}
static int raw_enable_filters(struct net *net, struct net_device *dev,
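
The VCID handling above means sockets that do not opt in never see VCID-tagged CAN XL frames. A hedged userspace sketch enabling the RX filter (the sockopt, struct, and flags are the real UAPI; the helper name is illustrative):

#include <string.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

/* Opt in to VCID-tagged CAN XL traffic: without CAN_RAW_XL_VCID_RX_FILTER
 * the raw_rcv() path above drops every tagged frame.
 */
static int raw_enable_vcid_rx(int sock, __u8 vcid, __u8 mask)
{
	struct can_raw_vcid_options vcid_opts;
	int on = 1;

	if (setsockopt(sock, SOL_CAN_RAW, CAN_RAW_XL_FRAMES,
		       &on, sizeof(on)) < 0)
		return -1;

	memset(&vcid_opts, 0, sizeof(vcid_opts));
	vcid_opts.flags = CAN_RAW_XL_VCID_RX_FILTER;
	vcid_opts.rx_vcid = vcid;
	vcid_opts.rx_vcid_mask = mask;

	return setsockopt(sock, SOL_CAN_RAW, CAN_RAW_XL_VCID_OPTS,
			  &vcid_opts, sizeof(vcid_opts));
}
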
@@ -277,21 +303,24 @@ static void raw_notify(struct raw_sock *ro, unsigned long msg,
if (!net_eq(dev_net(dev), sock_net(sk)))
return;
- if (ro->ifindex != dev->ifindex)
+ if (ro->dev != dev)
return;
switch (msg) {
case NETDEV_UNREGISTER:
lock_sock(sk);
/* remove current filters & unregister */
- if (ro->bound)
+ if (ro->bound) {
raw_disable_allfilters(dev_net(dev), dev, sk);
+ netdev_put(dev, &ro->dev_tracker);
+ }
if (ro->count > 1)
kfree(ro->filter);
ro->ifindex = 0;
ro->bound = 0;
+ ro->dev = NULL;
ro->count = 0;
release_sock(sk);
@@ -337,6 +366,7 @@ static int raw_init(struct sock *sk)
ro->bound = 0;
ro->ifindex = 0;
+ ro->dev = NULL;
/* set default filter to single entry dfilter */
ro->dfilter.can_id = 0;
@@ -368,11 +398,13 @@ static int raw_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct raw_sock *ro;
+ struct net *net;
if (!sk)
return 0;
ro = raw_sk(sk);
+ net = sock_net(sk);
spin_lock(&raw_notifier_lock);
while (raw_busy_notifier == ro) {
@@ -383,20 +415,16 @@ static int raw_release(struct socket *sock)
list_del(&ro->notifier);
spin_unlock(&raw_notifier_lock);
+ rtnl_lock();
lock_sock(sk);
/* remove current filters & unregister */
if (ro->bound) {
- if (ro->ifindex) {
- struct net_device *dev;
-
- dev = dev_get_by_index(sock_net(sk), ro->ifindex);
- if (dev) {
- raw_disable_allfilters(dev_net(dev), dev, sk);
- dev_put(dev);
- }
+ if (ro->dev) {
+ raw_disable_allfilters(dev_net(ro->dev), ro->dev, sk);
+ netdev_put(ro->dev, &ro->dev_tracker);
} else {
- raw_disable_allfilters(sock_net(sk), NULL, sk);
+ raw_disable_allfilters(net, NULL, sk);
}
}
@@ -405,6 +433,7 @@ static int raw_release(struct socket *sock)
ro->ifindex = 0;
ro->bound = 0;
+ ro->dev = NULL;
ro->count = 0;
free_percpu(ro->uniq);
@@ -412,16 +441,20 @@ static int raw_release(struct socket *sock)
sock->sk = NULL;
release_sock(sk);
+ rtnl_unlock();
+
+ sock_prot_inuse_add(net, sk->sk_prot, -1);
sock_put(sk);
return 0;
}
-static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+static int raw_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int len)
{
struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
struct sock *sk = sock->sk;
struct raw_sock *ro = raw_sk(sk);
+ struct net_device *dev = NULL;
int ifindex;
int err = 0;
int notify_enetdown = 0;
@@ -431,24 +464,23 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
if (addr->can_family != AF_CAN)
return -EINVAL;
+ rtnl_lock();
lock_sock(sk);
if (ro->bound && addr->can_ifindex == ro->ifindex)
goto out;
if (addr->can_ifindex) {
- struct net_device *dev;
-
dev = dev_get_by_index(sock_net(sk), addr->can_ifindex);
if (!dev) {
err = -ENODEV;
goto out;
}
if (dev->type != ARPHRD_CAN) {
- dev_put(dev);
err = -ENODEV;
- goto out;
+ goto out_put_dev;
}
+
if (!(dev->flags & IFF_UP))
notify_enetdown = 1;
@@ -456,7 +488,9 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
/* filters set by default/setsockopt */
err = raw_enable_allfilters(sock_net(sk), dev, sk);
- dev_put(dev);
+ if (err)
+ goto out_put_dev;
+
} else {
ifindex = 0;
@@ -467,26 +501,29 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
if (!err) {
if (ro->bound) {
/* unregister old filters */
- if (ro->ifindex) {
- struct net_device *dev;
-
- dev = dev_get_by_index(sock_net(sk),
- ro->ifindex);
- if (dev) {
- raw_disable_allfilters(dev_net(dev),
- dev, sk);
- dev_put(dev);
- }
+ if (ro->dev) {
+ raw_disable_allfilters(dev_net(ro->dev),
+ ro->dev, sk);
+ /* drop reference to old ro->dev */
+ netdev_put(ro->dev, &ro->dev_tracker);
} else {
raw_disable_allfilters(sock_net(sk), NULL, sk);
}
}
ro->ifindex = ifindex;
ro->bound = 1;
+ /* bind() ok -> hold a reference for new ro->dev */
+ ro->dev = dev;
+ if (ro->dev)
+ netdev_hold(ro->dev, &ro->dev_tracker, GFP_KERNEL);
}
- out:
+out_put_dev:
+ /* remove potential reference from dev_get_by_index() */
+ dev_put(dev);
+out:
release_sock(sk);
+ rtnl_unlock();
if (notify_enetdown) {
sk->sk_err = ENETDOWN;
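
A minimal userspace sketch of the bind() path exercised above (raw_open is an illustrative helper):

#include <string.h>
#include <net/if.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/can.h>

static int raw_open(const char *ifname)
{
	struct sockaddr_can addr;
	int sk;

	sk = socket(PF_CAN, SOCK_RAW, CAN_RAW);
	if (sk < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.can_family = AF_CAN;
	addr.can_ifindex = (int)if_nametoindex(ifname);	/* 0 = bind to all */

	if (bind(sk, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(sk);
		return -1;
	}
	return sk;
}
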
@@ -524,6 +561,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
struct net_device *dev = NULL;
can_err_mask_t err_mask = 0;
int count = 0;
+ int flag;
int err = 0;
if (level != SOL_CAN_RAW)
@@ -552,9 +590,9 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
rtnl_lock();
lock_sock(sk);
- if (ro->bound && ro->ifindex) {
- dev = dev_get_by_index(sock_net(sk), ro->ifindex);
- if (!dev) {
+ dev = ro->dev;
+ if (ro->bound && dev) {
+ if (dev->reg_state != NETREG_REGISTERED) {
if (count > 1)
kfree(filter);
err = -ENODEV;
@@ -595,7 +633,6 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
ro->count = count;
out_fil:
- dev_put(dev);
release_sock(sk);
rtnl_unlock();
@@ -613,9 +650,9 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
rtnl_lock();
lock_sock(sk);
- if (ro->bound && ro->ifindex) {
- dev = dev_get_by_index(sock_net(sk), ro->ifindex);
- if (!dev) {
+ dev = ro->dev;
+ if (ro->bound && dev) {
+ if (dev->reg_state != NETREG_REGISTERED) {
err = -ENODEV;
goto out_err;
}
@@ -639,55 +676,80 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
ro->err_mask = err_mask;
out_err:
- dev_put(dev);
release_sock(sk);
rtnl_unlock();
break;
case CAN_RAW_LOOPBACK:
- if (optlen != sizeof(ro->loopback))
+ if (optlen != sizeof(flag))
return -EINVAL;
- if (copy_from_sockptr(&ro->loopback, optval, optlen))
+ if (copy_from_sockptr(&flag, optval, optlen))
return -EFAULT;
+ ro->loopback = !!flag;
break;
case CAN_RAW_RECV_OWN_MSGS:
- if (optlen != sizeof(ro->recv_own_msgs))
+ if (optlen != sizeof(flag))
return -EINVAL;
- if (copy_from_sockptr(&ro->recv_own_msgs, optval, optlen))
+ if (copy_from_sockptr(&flag, optval, optlen))
return -EFAULT;
+ ro->recv_own_msgs = !!flag;
break;
case CAN_RAW_FD_FRAMES:
- if (optlen != sizeof(ro->fd_frames))
+ if (optlen != sizeof(flag))
return -EINVAL;
- if (copy_from_sockptr(&ro->fd_frames, optval, optlen))
+ if (copy_from_sockptr(&flag, optval, optlen))
return -EFAULT;
+ /* Enabling CAN XL includes CAN FD */
+ if (ro->xl_frames && !flag)
+ return -EINVAL;
+
+ ro->fd_frames = !!flag;
break;
case CAN_RAW_XL_FRAMES:
- if (optlen != sizeof(ro->xl_frames))
+ if (optlen != sizeof(flag))
return -EINVAL;
- if (copy_from_sockptr(&ro->xl_frames, optval, optlen))
+ if (copy_from_sockptr(&flag, optval, optlen))
return -EFAULT;
+ ro->xl_frames = !!flag;
+
+ /* Enabling CAN XL includes CAN FD */
+ if (ro->xl_frames)
+ ro->fd_frames = ro->xl_frames;
+ break;
+
+ case CAN_RAW_XL_VCID_OPTS:
+ if (optlen != sizeof(ro->raw_vcid_opts))
+ return -EINVAL;
+
+ if (copy_from_sockptr(&ro->raw_vcid_opts, optval, optlen))
+ return -EFAULT;
+
+ /* prepare 32 bit values for handling in the hot path */
+ ro->tx_vcid_shifted = ro->raw_vcid_opts.tx_vcid << CANXL_VCID_OFFSET;
+ ro->rx_vcid_shifted = ro->raw_vcid_opts.rx_vcid << CANXL_VCID_OFFSET;
+ ro->rx_vcid_mask_shifted = ro->raw_vcid_opts.rx_vcid_mask << CANXL_VCID_OFFSET;
break;
case CAN_RAW_JOIN_FILTERS:
- if (optlen != sizeof(ro->join_filters))
+ if (optlen != sizeof(flag))
return -EINVAL;
- if (copy_from_sockptr(&ro->join_filters, optval, optlen))
+ if (copy_from_sockptr(&flag, optval, optlen))
return -EFAULT;
+ ro->join_filters = !!flag;
break;
default:
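
As the handlers above encode, enabling CAN XL implies CAN FD, and FD cannot be switched off again while XL stays on. A hedged sketch that verifies the implication by reading the FD flag back (helper name illustrative):

#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

static int canxl_implies_fd(int sock)
{
	int on = 1, fd_flag = 0;
	socklen_t len = sizeof(fd_flag);

	if (setsockopt(sock, SOL_CAN_RAW, CAN_RAW_XL_FRAMES,
		       &on, sizeof(on)) < 0)
		return -1;

	/* reads back 1: enabling XL switched FD support on as well */
	if (getsockopt(sock, SOL_CAN_RAW, CAN_RAW_FD_FRAMES,
		       &fd_flag, &len) < 0)
		return -1;

	return fd_flag;
}
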
@@ -701,9 +763,9 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
{
struct sock *sk = sock->sk;
struct raw_sock *ro = raw_sk(sk);
+ int flag;
int len;
void *val;
- int err = 0;
if (level != SOL_CAN_RAW)
return -EINVAL;
@@ -713,7 +775,9 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
return -EINVAL;
switch (optname) {
- case CAN_RAW_FILTER:
+ case CAN_RAW_FILTER: {
+ int err = 0;
+
lock_sock(sk);
if (ro->count > 0) {
int fsize = ro->count * sizeof(struct can_filter);
@@ -738,7 +802,7 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
if (!err)
err = put_user(len, optlen);
return err;
-
+ }
case CAN_RAW_ERR_FILTER:
if (len > sizeof(can_err_mask_t))
len = sizeof(can_err_mask_t);
@@ -748,31 +812,55 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
case CAN_RAW_LOOPBACK:
if (len > sizeof(int))
len = sizeof(int);
- val = &ro->loopback;
+ flag = ro->loopback;
+ val = &flag;
break;
case CAN_RAW_RECV_OWN_MSGS:
if (len > sizeof(int))
len = sizeof(int);
- val = &ro->recv_own_msgs;
+ flag = ro->recv_own_msgs;
+ val = &flag;
break;
case CAN_RAW_FD_FRAMES:
if (len > sizeof(int))
len = sizeof(int);
- val = &ro->fd_frames;
+ flag = ro->fd_frames;
+ val = &flag;
break;
case CAN_RAW_XL_FRAMES:
if (len > sizeof(int))
len = sizeof(int);
- val = &ro->xl_frames;
+ flag = ro->xl_frames;
+ val = &flag;
break;
+ case CAN_RAW_XL_VCID_OPTS: {
+ int err = 0;
+
+ /* user space buffer too small for VCID opts? */
+ if (len < sizeof(ro->raw_vcid_opts)) {
+ /* return -ERANGE and needed space in optlen */
+ err = -ERANGE;
+ if (put_user(sizeof(ro->raw_vcid_opts), optlen))
+ err = -EFAULT;
+ } else {
+ if (len > sizeof(ro->raw_vcid_opts))
+ len = sizeof(ro->raw_vcid_opts);
+ if (copy_to_user(optval, &ro->raw_vcid_opts, len))
+ err = -EFAULT;
+ }
+ if (!err)
+ err = put_user(len, optlen);
+ return err;
+ }
case CAN_RAW_JOIN_FILTERS:
if (len > sizeof(int))
len = sizeof(int);
- val = &ro->join_filters;
+ flag = ro->join_filters;
+ val = &flag;
break;
default:
@@ -786,6 +874,81 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
return 0;
}
+static void raw_put_canxl_vcid(struct raw_sock *ro, struct sk_buff *skb)
+{
+ struct canxl_frame *cxl = (struct canxl_frame *)skb->data;
+
+ /* sanitize non CAN XL bits */
+ cxl->prio &= (CANXL_PRIO_MASK | CANXL_VCID_MASK);
+
+ /* clear VCID in CAN XL frame if pass through is disabled */
+ if (!(ro->raw_vcid_opts.flags & CAN_RAW_XL_VCID_TX_PASS))
+ cxl->prio &= CANXL_PRIO_MASK;
+
+ /* set VCID in CAN XL frame if enabled */
+ if (ro->raw_vcid_opts.flags & CAN_RAW_XL_VCID_TX_SET) {
+ cxl->prio &= CANXL_PRIO_MASK;
+ cxl->prio |= ro->tx_vcid_shifted;
+ }
+}
+
+static inline bool raw_dev_cc_enabled(struct net_device *dev,
+ struct can_priv *priv)
+{
+ /* The CANXL-only mode disables error-signalling on the CAN bus,
+ * which is needed to send CAN CC/FD frames.
+ */
+ if (priv)
+ return !can_dev_in_xl_only_mode(priv);
+
+ /* virtual CAN interfaces always support CAN CC */
+ return true;
+}
+
+static inline bool raw_dev_fd_enabled(struct net_device *dev,
+ struct can_priv *priv)
+{
+ /* check FD ctrlmode on real CAN interfaces */
+ if (priv)
+ return (priv->ctrlmode & CAN_CTRLMODE_FD);
+
+ /* check MTU for virtual CAN FD interfaces */
+ return (READ_ONCE(dev->mtu) >= CANFD_MTU);
+}
+
+static inline bool raw_dev_xl_enabled(struct net_device *dev,
+ struct can_priv *priv)
+{
+ /* check XL ctrlmode on real CAN interfaces */
+ if (priv)
+ return (priv->ctrlmode & CAN_CTRLMODE_XL);
+
+ /* check MTU for virtual CAN XL interfaces */
+ return can_is_canxl_dev_mtu(READ_ONCE(dev->mtu));
+}
+
+static unsigned int raw_check_txframe(struct raw_sock *ro, struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct can_priv *priv = safe_candev_priv(dev);
+
+ /* Classical CAN */
+ if (can_is_can_skb(skb) && raw_dev_cc_enabled(dev, priv))
+ return CAN_MTU;
+
+ /* CAN FD */
+ if (ro->fd_frames && can_is_canfd_skb(skb) &&
+ raw_dev_fd_enabled(dev, priv))
+ return CANFD_MTU;
+
+ /* CAN XL */
+ if (ro->xl_frames && can_is_canxl_skb(skb) &&
+ raw_dev_xl_enabled(dev, priv))
+ return CANXL_MTU;
+
+ return 0;
+}
+
static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
struct sock *sk = sock->sk;
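
raw_check_txframe() classifies an outgoing skb purely by its frame layout, so userspace must write exactly CAN_MTU bytes for a Classical CAN frame. A hedged sketch (send_cc_frame is an illustrative helper):

#include <string.h>
#include <unistd.h>
#include <linux/can.h>

static int send_cc_frame(int sk, canid_t id, const void *data, __u8 len)
{
	struct can_frame cf;

	if (len > CAN_MAX_DLEN)
		return -1;

	memset(&cf, 0, sizeof(cf));
	cf.can_id = id;
	cf.len = len;
	memcpy(cf.data, data, len);

	/* CAN_MTU bytes -> can_is_can_skb() matches in the kernel */
	return write(sk, &cf, CAN_MTU) == CAN_MTU ? 0 : -1;
}
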
@@ -793,6 +956,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
struct sockcm_cookie sockc;
struct sk_buff *skb;
struct net_device *dev;
+ unsigned int txmtu;
int ifindex;
int err = -EINVAL;
@@ -833,20 +997,15 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
goto free_skb;
err = -EINVAL;
- if (ro->xl_frames && can_is_canxl_dev_mtu(dev->mtu)) {
- /* CAN XL, CAN FD and Classical CAN */
- if (!can_is_canxl_skb(skb) && !can_is_canfd_skb(skb) &&
- !can_is_can_skb(skb))
- goto free_skb;
- } else if (ro->fd_frames && dev->mtu == CANFD_MTU) {
- /* CAN FD and Classical CAN */
- if (!can_is_canfd_skb(skb) && !can_is_can_skb(skb))
- goto free_skb;
- } else {
- /* Classical CAN */
- if (!can_is_can_skb(skb))
- goto free_skb;
- }
+
+ /* check for valid CAN (CC/FD/XL) frame content */
+ txmtu = raw_check_txframe(ro, skb, dev);
+ if (!txmtu)
+ goto free_skb;
+
+ /* only CANXL: clear/forward/set VCID value */
+ if (txmtu == CANXL_MTU)
+ raw_put_canxl_vcid(ro, skb);
sockcm_init(&sockc, sk);
if (msg->msg_controllen) {
@@ -856,11 +1015,11 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
}
skb->dev = dev;
- skb->priority = sk->sk_priority;
- skb->mark = sk->sk_mark;
+ skb->priority = sockc.priority;
+ skb->mark = sockc.mark;
skb->tstamp = sockc.transmit_time;
- skb_setup_tx_timestamp(skb, sockc.tsflags);
+ skb_setup_tx_timestamp(skb, &sockc);
err = can_send(skb, ro->loopback);
@@ -946,7 +1105,6 @@ static const struct proto_ops raw_ops = {
.sendmsg = raw_sendmsg,
.recvmsg = raw_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
};
static struct proto raw_proto __read_mostly = {