author    Oleksij Rempel <o.rempel@pengutronix.de>    2018-09-18 11:40:38 +0200
committer Marc Kleine-Budde <mkl@pengutronix.de>      2018-11-09 17:20:49 +0100
commit    55059f2b7f868cd43b3ad30e28e18347e1b46ace (patch)
tree      7393fec4b700cd4ad1c917d903441f2dbd7b8490 /drivers/net/can
parent    7da11ba5c5066dadc2e96835a6233d56d7b7764a (diff)
can: rx-offload: introduce can_rx_offload_get_echo_skb() and can_rx_offload_queue_sorted() functions
The current CAN framework can't guarantee a proper/chronological order of RX and TX-ECHO messages. To make this possible, drivers should use these functions instead of can_get_echo_skb().

Signed-off-by: Oleksij Rempel <o.rempel@pengutronix.de>
Cc: linux-stable <stable@vger.kernel.org>
Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
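As a minimal, hypothetical sketch (not part of this patch) of how a driver's TX-complete interrupt path could adopt the new helper instead of can_get_echo_skb(): struct my_priv, my_handle_tx_done(), the mailbox index mb and the hw_ts value are illustrative names only.

#include <linux/netdevice.h>
#include <linux/can/rx-offload.h>

/* Illustrative driver private data embedding the rx-offload context. */
struct my_priv {
	struct can_rx_offload offload;
	struct net_device *ndev;
};

/* Called from the TX-done interrupt; hw_ts is the hardware timestamp
 * latched for the completed mailbox, so the echo skb is delivered in
 * chronological order relative to received frames.
 */
static void my_handle_tx_done(struct my_priv *priv, unsigned int mb, u32 hw_ts)
{
	struct net_device_stats *stats = &priv->ndev->stats;

	stats->tx_bytes += can_rx_offload_get_echo_skb(&priv->offload, mb, hw_ts);
	stats->tx_packets++;
}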
Diffstat (limited to 'drivers/net/can')
-rw-r--r--  drivers/net/can/rx-offload.c  |  46
1 file changed, 46 insertions(+), 0 deletions(-)
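Similarly, a locally generated error or state-change frame can be kept in order with RX and TX-ECHO traffic by queueing it with its timestamp through can_rx_offload_queue_sorted(). This is a hypothetical sketch only (my_report_bus_error() and hw_ts are illustrative); note that in this version the caller still owns the skb if queueing fails.

#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/can/rx-offload.h>

static void my_report_bus_error(struct can_rx_offload *offload, u32 hw_ts)
{
	struct net_device *ndev = offload->dev;
	struct can_frame *cf;
	struct sk_buff *skb;

	skb = alloc_can_err_skb(ndev, &cf);
	if (!skb)
		return;

	cf->can_id |= CAN_ERR_BUSERROR;

	/* Queue with the hardware timestamp; on failure the skb is not
	 * consumed by can_rx_offload_queue_sorted() in this version, so
	 * free it here and account the drop.
	 */
	if (can_rx_offload_queue_sorted(offload, skb, hw_ts)) {
		kfree_skb(skb);
		ndev->stats.rx_dropped++;
	}
}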
diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c
index c7d05027a7a0..c368686e2164 100644
--- a/drivers/net/can/rx-offload.c
+++ b/drivers/net/can/rx-offload.c
@@ -211,6 +211,52 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
+int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
+ struct sk_buff *skb, u32 timestamp)
+{
+ struct can_rx_offload_cb *cb;
+ unsigned long flags;
+
+ if (skb_queue_len(&offload->skb_queue) >
+ offload->skb_queue_len_max)
+ return -ENOMEM;
+
+ cb = can_rx_offload_get_cb(skb);
+ cb->timestamp = timestamp;
+
+ spin_lock_irqsave(&offload->skb_queue.lock, flags);
+ __skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare);
+ spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
+
+ can_rx_offload_schedule(offload);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);
+
+unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
+ unsigned int idx, u32 timestamp)
+{
+ struct net_device *dev = offload->dev;
+ struct net_device_stats *stats = &dev->stats;
+ struct sk_buff *skb;
+ u8 len;
+ int err;
+
+ skb = __can_get_echo_skb(dev, idx, &len);
+ if (!skb)
+ return 0;
+
+ err = can_rx_offload_queue_sorted(offload, skb, timestamp);
+ if (err) {
+ stats->rx_errors++;
+ stats->tx_fifo_errors++;
+ }
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb);
+
int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb)
{
if (skb_queue_len(&offload->skb_queue) >