Diffstat (limited to 'drivers/net/mctp')
-rw-r--r--drivers/net/mctp/Kconfig62
-rw-r--r--drivers/net/mctp/Makefile4
-rw-r--r--drivers/net/mctp/mctp-i2c.c1146
-rw-r--r--drivers/net/mctp/mctp-i3c.c767
-rw-r--r--drivers/net/mctp/mctp-serial.c634
-rw-r--r--drivers/net/mctp/mctp-usb.c390
6 files changed, 3003 insertions, 0 deletions
diff --git a/drivers/net/mctp/Kconfig b/drivers/net/mctp/Kconfig
new file mode 100644
index 000000000000..cf325ab0b1ef
--- /dev/null
+++ b/drivers/net/mctp/Kconfig
@@ -0,0 +1,62 @@
+
+if MCTP
+
+menu "MCTP Device Drivers"
+
+config MCTP_SERIAL
+ tristate "MCTP serial transport"
+ depends on TTY
+ select CRC_CCITT
+ help
+ This driver provides an MCTP-over-serial interface, through a
+ serial line-discipline, as defined by DMTF specification "DSP0253 -
+ MCTP Serial Transport Binding". By attaching the ldisc to a serial
+ device, we get a new net device to transport MCTP packets.
+
+ This allows communication with external MCTP endpoints which use
+ serial as their transport. It can also be used as an easy way to
+ provide MCTP connectivity between virtual machines, by forwarding
+ data between simple virtual serial devices.
+
+ Say y here if you need to connect to MCTP endpoints over serial. To
+ compile as a module, use m; the module will be called mctp-serial.
+
+config MCTP_SERIAL_TEST
+ bool "MCTP serial tests" if !KUNIT_ALL_TESTS
+ depends on MCTP_SERIAL=y && KUNIT=y
+ default KUNIT_ALL_TESTS
+
+config MCTP_TRANSPORT_I2C
+ tristate "MCTP SMBus/I2C transport"
+ # i2c-mux is optional, but we must build as a module if i2c-mux is a module
+ depends on I2C_MUX || !I2C_MUX
+ depends on I2C
+ depends on I2C_SLAVE
+ select MCTP_FLOWS
+ help
+ Provides a driver to access MCTP devices over SMBus/I2C transport,
+ from DMTF specification DSP0237. An MCTP protocol network device is
+ created for each I2C bus that has been assigned an mctp-i2c device.
+
+config MCTP_TRANSPORT_I3C
+ tristate "MCTP I3C transport"
+ depends on I3C
+ help
+ Provides a driver to access MCTP devices over I3C transport,
+ from DMTF specification DSP0233.
+ An MCTP protocol network device is created for each I3C bus
+ having a "mctp-controller" devicetree property.
+
+config MCTP_TRANSPORT_USB
+ tristate "MCTP USB transport"
+ depends on USB
+ help
+ Provides a driver to access MCTP devices over USB transport,
+ defined by DMTF specification DSP0283.
+
+ MCTP-over-USB interfaces are peer-to-peer, so each interface
+ represents a physical connection to one remote MCTP endpoint.
+
+endmenu
+
+endif
diff --git a/drivers/net/mctp/Makefile b/drivers/net/mctp/Makefile
new file mode 100644
index 000000000000..c36006849a1e
--- /dev/null
+++ b/drivers/net/mctp/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_MCTP_SERIAL) += mctp-serial.o
+obj-$(CONFIG_MCTP_TRANSPORT_I2C) += mctp-i2c.o
+obj-$(CONFIG_MCTP_TRANSPORT_I3C) += mctp-i3c.o
+obj-$(CONFIG_MCTP_TRANSPORT_USB) += mctp-usb.o
diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c
new file mode 100644
index 000000000000..f782d93f826e
--- /dev/null
+++ b/drivers/net/mctp/mctp-i2c.c
@@ -0,0 +1,1146 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Management Component Transport Protocol (MCTP)
+ * Implements DMTF specification
+ * "DSP0237 Management Component Transport Protocol (MCTP) SMBus/I2C
+ * Transport Binding"
+ * https://www.dmtf.org/sites/default/files/standards/documents/DSP0237_1.2.0.pdf
+ *
+ * A netdev is created for each I2C bus that handles MCTP. In the case of an I2C
+ * mux topology a single I2C client is attached to the root of the mux topology,
+ * shared between all mux I2C busses underneath. For non-mux cases an I2C client
+ * is attached per netdev.
+ *
+ * mctp-i2c-controller.yml devicetree binding has further details.
+ *
+ * Copyright (c) 2022 Code Construct
+ * Copyright (c) 2022 Google
+ */
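+
+/* As an illustrative sketch (adapter numbers and the client address are
+ * arbitrary), a muxed topology handled by this driver might look like:
+ *
+ *   i2c-1 (root adapter, mctp-i2c-controller client at 0x10)
+ *   `-- i2c mux
+ *       |-- i2c-5 ("mctp-controller" property) -> netdev mctpi2c5
+ *       `-- i2c-6 ("mctp-controller" property) -> netdev mctpi2c6
+ *
+ * One mctp_i2c_client is created for the root adapter, and one mctp_i2c_dev
+ * plus netdev (named from the adapter number) per adapter carrying the
+ * "mctp-controller" property.
+ */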
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/if_arp.h>
+#include <net/mctp.h>
+#include <net/mctpdevice.h>
+
+/* byte_count is limited to u8 */
+#define MCTP_I2C_MAXBLOCK 255
+/* One byte is taken by source_slave */
+#define MCTP_I2C_MAXMTU (MCTP_I2C_MAXBLOCK - 1)
+#define MCTP_I2C_MINMTU (64 + 4)
+/* Allow space for dest_address, command, byte_count, data, PEC */
+#define MCTP_I2C_BUFSZ (3 + MCTP_I2C_MAXBLOCK + 1)
+#define MCTP_I2C_MINLEN 8
+#define MCTP_I2C_COMMANDCODE 0x0f
+#define MCTP_I2C_TX_WORK_LEN 100
+/* Sufficient for 64kB at min mtu */
+#define MCTP_I2C_TX_QUEUE_LEN 1100
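+
+/* For reference, the SMBus block-write framing that the sizes above account
+ * for (a sketch; only the 0x0f command code is fixed, the other values are
+ * per-packet):
+ *
+ *   [dest_slave][command 0x0f][byte_count][source_slave][MCTP packet][PEC]
+ *
+ * byte_count covers source_slave plus the MCTP packet, hence MAXMTU is
+ * MCTP_I2C_MAXBLOCK - 1; BUFSZ adds the three leading bytes and the PEC.
+ * MINMTU is the 64-byte MCTP baseline payload plus the 4-byte MCTP header.
+ */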
+
+#define MCTP_I2C_OF_PROP "mctp-controller"
+
+enum {
+ MCTP_I2C_FLOW_STATE_NEW = 0,
+ MCTP_I2C_FLOW_STATE_ACTIVE,
+ MCTP_I2C_FLOW_STATE_INVALID,
+};
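+
+/* Rough lifecycle of the per-key device flow state above, summarising
+ * mctp_i2c_get_tx_flow_state() and mctp_i2c_release_flow() below:
+ *
+ *   NEW    -> ACTIVE   on the first transmit of a flow; the i2c bus lock
+ *                      is taken and held for the lifetime of the flow
+ *   ACTIVE -> INVALID  on flow release or a transmit error; a matching bus
+ *                      unlock is performed (via the unlock_marker skb in
+ *                      the release case)
+ */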
+
+/* List of all struct mctp_i2c_client
+ * Lock protects driver_clients and also prevents adding/removing adapters
+ * during mctp_i2c_client probe/remove.
+ */
+static DEFINE_MUTEX(driver_clients_lock);
+static LIST_HEAD(driver_clients);
+
+struct mctp_i2c_client;
+
+/* The netdev structure. One of these per I2C adapter. */
+struct mctp_i2c_dev {
+ struct net_device *ndev;
+ struct i2c_adapter *adapter;
+ struct mctp_i2c_client *client;
+ struct list_head list; /* For mctp_i2c_client.devs */
+
+ size_t rx_pos;
+ u8 rx_buffer[MCTP_I2C_BUFSZ];
+ struct completion rx_done;
+
+ struct task_struct *tx_thread;
+ wait_queue_head_t tx_wq;
+ struct sk_buff_head tx_queue;
+ u8 tx_scratch[MCTP_I2C_BUFSZ];
+
+ /* A fake entry in our tx queue to perform an unlock operation */
+ struct sk_buff unlock_marker;
+
+ /* Spinlock protects i2c_lock_count, release_count, allow_rx */
+ spinlock_t lock;
+ int i2c_lock_count;
+ int release_count;
+ /* Indicates that the netif is ready to receive incoming packets */
+ bool allow_rx;
+
+};
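+
+/* rx_done pairs with allow_rx above: the receive path holds rx_done
+ * (reinit_completion()/complete()) around netif_rx() while allow_rx is set,
+ * and unregister clears allow_rx then waits on rx_done, so no packet is
+ * delivered to a netdev that is being torn down. See mctp_i2c_recv() and
+ * mctp_i2c_unregister().
+ */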
+
+/* The i2c client structure. One per hardware i2c bus at the top of the
+ * mux tree, shared by multiple netdevs
+ */
+struct mctp_i2c_client {
+ struct i2c_client *client;
+ u8 lladdr;
+
+ struct mctp_i2c_dev *sel;
+ struct list_head devs;
+ spinlock_t sel_lock; /* Protects sel and devs */
+
+ struct list_head list; /* For driver_clients */
+};
+
+/* Header on the wire. */
+struct mctp_i2c_hdr {
+ u8 dest_slave;
+ u8 command;
+ /* Count of bytes following byte_count, excluding PEC */
+ u8 byte_count;
+ u8 source_slave;
+};
+
+static int mctp_i2c_recv(struct mctp_i2c_dev *midev);
+static int mctp_i2c_slave_cb(struct i2c_client *client,
+ enum i2c_slave_event event, u8 *val);
+static void mctp_i2c_ndo_uninit(struct net_device *dev);
+static int mctp_i2c_ndo_open(struct net_device *dev);
+
+static struct i2c_adapter *mux_root_adapter(struct i2c_adapter *adap)
+{
+#if IS_ENABLED(CONFIG_I2C_MUX)
+ return i2c_root_adapter(&adap->dev);
+#else
+ /* In non-mux config all i2c adapters are root adapters */
+ return adap;
+#endif
+}
+
+/* Creates a new i2c slave device attached to the root adapter.
+ * Sets up the slave callback.
+ * Must be called with a client on a root adapter.
+ */
+static struct mctp_i2c_client *mctp_i2c_new_client(struct i2c_client *client)
+{
+ struct mctp_i2c_client *mcli = NULL;
+ struct i2c_adapter *root = NULL;
+ int rc;
+
+ if (client->flags & I2C_CLIENT_TEN) {
+ dev_err(&client->dev, "failed, MCTP requires a 7-bit I2C address, addr=0x%x\n",
+ client->addr);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ root = mux_root_adapter(client->adapter);
+ if (!root) {
+ dev_err(&client->dev, "failed to find root adapter\n");
+ rc = -ENOENT;
+ goto err;
+ }
+ if (root != client->adapter) {
+ dev_err(&client->dev,
+ "A mctp-i2c-controller client cannot be placed on an I2C mux adapter.\n"
+ " It should be placed on the mux tree root adapter\n"
+ " then set mctp-controller property on adapters to attach\n");
+ rc = -EINVAL;
+ goto err;
+ }
+
+ mcli = kzalloc(sizeof(*mcli), GFP_KERNEL);
+ if (!mcli) {
+ rc = -ENOMEM;
+ goto err;
+ }
+ spin_lock_init(&mcli->sel_lock);
+ INIT_LIST_HEAD(&mcli->devs);
+ INIT_LIST_HEAD(&mcli->list);
+ mcli->lladdr = client->addr & 0xff;
+ mcli->client = client;
+ i2c_set_clientdata(client, mcli);
+
+ rc = i2c_slave_register(mcli->client, mctp_i2c_slave_cb);
+ if (rc < 0) {
+ dev_err(&client->dev, "i2c register failed %d\n", rc);
+ mcli->client = NULL;
+ i2c_set_clientdata(client, NULL);
+ goto err;
+ }
+
+ return mcli;
+err:
+ if (mcli) {
+ i2c_unregister_device(mcli->client);
+ kfree(mcli);
+ }
+ return ERR_PTR(rc);
+}
+
+static void mctp_i2c_free_client(struct mctp_i2c_client *mcli)
+{
+ int rc;
+
+ WARN_ON(!mutex_is_locked(&driver_clients_lock));
+ WARN_ON(!list_empty(&mcli->devs));
+ WARN_ON(mcli->sel); /* sanity check, no locking */
+
+ rc = i2c_slave_unregister(mcli->client);
+ /* Leak if it fails, we can't propagate errors upwards */
+ if (rc < 0)
+ dev_err(&mcli->client->dev, "i2c unregister failed %d\n", rc);
+ else
+ kfree(mcli);
+}
+
+/* Switch the mctp i2c device to receive responses.
+ * Call with sel_lock held
+ */
+static void __mctp_i2c_device_select(struct mctp_i2c_client *mcli,
+ struct mctp_i2c_dev *midev)
+{
+ assert_spin_locked(&mcli->sel_lock);
+ if (midev)
+ dev_hold(midev->ndev);
+ if (mcli->sel)
+ dev_put(mcli->sel->ndev);
+ mcli->sel = midev;
+}
+
+/* Switch the mctp i2c device to receive responses */
+static void mctp_i2c_device_select(struct mctp_i2c_client *mcli,
+ struct mctp_i2c_dev *midev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mcli->sel_lock, flags);
+ __mctp_i2c_device_select(mcli, midev);
+ spin_unlock_irqrestore(&mcli->sel_lock, flags);
+}
+
+static int mctp_i2c_slave_cb(struct i2c_client *client,
+ enum i2c_slave_event event, u8 *val)
+{
+ struct mctp_i2c_client *mcli = i2c_get_clientdata(client);
+ struct mctp_i2c_dev *midev = NULL;
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(&mcli->sel_lock, flags);
+ midev = mcli->sel;
+ if (midev)
+ dev_hold(midev->ndev);
+ spin_unlock_irqrestore(&mcli->sel_lock, flags);
+
+ if (!midev)
+ return 0;
+
+ switch (event) {
+ case I2C_SLAVE_WRITE_RECEIVED:
+ if (midev->rx_pos < MCTP_I2C_BUFSZ) {
+ midev->rx_buffer[midev->rx_pos] = *val;
+ midev->rx_pos++;
+ } else {
+ midev->ndev->stats.rx_over_errors++;
+ }
+
+ break;
+ case I2C_SLAVE_WRITE_REQUESTED:
+ /* dest_slave as first byte */
+ midev->rx_buffer[0] = mcli->lladdr << 1;
+ midev->rx_pos = 1;
+ break;
+ case I2C_SLAVE_STOP:
+ rc = mctp_i2c_recv(midev);
+ break;
+ default:
+ break;
+ }
+
+ dev_put(midev->ndev);
+ return rc;
+}
+
+/* Processes incoming data that has been accumulated by the slave cb */
+static int mctp_i2c_recv(struct mctp_i2c_dev *midev)
+{
+ struct net_device *ndev = midev->ndev;
+ struct mctp_i2c_hdr *hdr;
+ struct mctp_skb_cb *cb;
+ struct sk_buff *skb;
+ unsigned long flags;
+ u8 pec, calc_pec;
+ size_t recvlen;
+ int status;
+
+ /* + 1 for the PEC */
+ if (midev->rx_pos < MCTP_I2C_MINLEN + 1) {
+ ndev->stats.rx_length_errors++;
+ return -EINVAL;
+ }
+ /* recvlen excludes PEC */
+ recvlen = midev->rx_pos - 1;
+
+ hdr = (void *)midev->rx_buffer;
+ if (hdr->command != MCTP_I2C_COMMANDCODE) {
+ ndev->stats.rx_dropped++;
+ return -EINVAL;
+ }
+
+ if (hdr->byte_count + offsetof(struct mctp_i2c_hdr, source_slave) != recvlen) {
+ ndev->stats.rx_length_errors++;
+ return -EINVAL;
+ }
+
+ pec = midev->rx_buffer[midev->rx_pos - 1];
+ calc_pec = i2c_smbus_pec(0, midev->rx_buffer, recvlen);
+ if (pec != calc_pec) {
+ ndev->stats.rx_crc_errors++;
+ return -EINVAL;
+ }
+
+ skb = netdev_alloc_skb(ndev, recvlen);
+ if (!skb) {
+ ndev->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+
+ skb->protocol = htons(ETH_P_MCTP);
+ skb_put_data(skb, midev->rx_buffer, recvlen);
+ skb_reset_mac_header(skb);
+ skb_pull(skb, sizeof(struct mctp_i2c_hdr));
+ skb_reset_network_header(skb);
+
+ cb = __mctp_cb(skb);
+ cb->halen = 1;
+ cb->haddr[0] = hdr->source_slave >> 1;
+
+ /* We need to ensure that the netif is not used once netdev
+ * unregister occurs
+ */
+ spin_lock_irqsave(&midev->lock, flags);
+ if (midev->allow_rx) {
+ reinit_completion(&midev->rx_done);
+ spin_unlock_irqrestore(&midev->lock, flags);
+
+ status = netif_rx(skb);
+ complete(&midev->rx_done);
+ } else {
+ status = NET_RX_DROP;
+ spin_unlock_irqrestore(&midev->lock, flags);
+ }
+
+ if (status == NET_RX_SUCCESS) {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += recvlen;
+ } else {
+ ndev->stats.rx_dropped++;
+ }
+ return 0;
+}
+
+enum mctp_i2c_flow_state {
+ MCTP_I2C_TX_FLOW_INVALID,
+ MCTP_I2C_TX_FLOW_NONE,
+ MCTP_I2C_TX_FLOW_NEW,
+ MCTP_I2C_TX_FLOW_EXISTING,
+};
+
+static enum mctp_i2c_flow_state
+mctp_i2c_get_tx_flow_state(struct mctp_i2c_dev *midev, struct sk_buff *skb)
+{
+ enum mctp_i2c_flow_state state;
+ struct mctp_sk_key *key;
+ struct mctp_flow *flow;
+ unsigned long flags;
+
+ flow = skb_ext_find(skb, SKB_EXT_MCTP);
+ if (!flow)
+ return MCTP_I2C_TX_FLOW_NONE;
+
+ key = flow->key;
+ if (!key)
+ return MCTP_I2C_TX_FLOW_NONE;
+
+ spin_lock_irqsave(&key->lock, flags);
+ /* If the key is present but invalid, we're unlikely to be able
+ * to handle the flow at all; just drop now
+ */
+ if (!key->valid) {
+ state = MCTP_I2C_TX_FLOW_INVALID;
+ } else {
+ switch (key->dev_flow_state) {
+ case MCTP_I2C_FLOW_STATE_NEW:
+ key->dev_flow_state = MCTP_I2C_FLOW_STATE_ACTIVE;
+ state = MCTP_I2C_TX_FLOW_NEW;
+ break;
+ case MCTP_I2C_FLOW_STATE_ACTIVE:
+ state = MCTP_I2C_TX_FLOW_EXISTING;
+ break;
+ default:
+ state = MCTP_I2C_TX_FLOW_INVALID;
+ }
+ }
+
+ spin_unlock_irqrestore(&key->lock, flags);
+
+ return state;
+}
+
+/* We're not contending with ourselves here; we only need to exclude other
+ * i2c clients from using the bus. refcounts are simply to prevent
+ * recursive locking.
+ */
+static void mctp_i2c_lock_nest(struct mctp_i2c_dev *midev)
+{
+ unsigned long flags;
+ bool lock;
+
+ spin_lock_irqsave(&midev->lock, flags);
+ lock = midev->i2c_lock_count == 0;
+ midev->i2c_lock_count++;
+ spin_unlock_irqrestore(&midev->lock, flags);
+
+ if (lock)
+ i2c_lock_bus(midev->adapter, I2C_LOCK_SEGMENT);
+}
+
+static void mctp_i2c_unlock_nest(struct mctp_i2c_dev *midev)
+{
+ unsigned long flags;
+ bool unlock;
+
+ spin_lock_irqsave(&midev->lock, flags);
+ if (!WARN_ONCE(midev->i2c_lock_count == 0, "lock count underflow!"))
+ midev->i2c_lock_count--;
+ unlock = midev->i2c_lock_count == 0;
+ spin_unlock_irqrestore(&midev->lock, flags);
+
+ if (unlock)
+ i2c_unlock_bus(midev->adapter, I2C_LOCK_SEGMENT);
+}
+
+/* Unlocks the bus if it was previously locked; used for cleanup */
+static void mctp_i2c_unlock_reset(struct mctp_i2c_dev *midev)
+{
+ unsigned long flags;
+ bool unlock;
+
+ spin_lock_irqsave(&midev->lock, flags);
+ unlock = midev->i2c_lock_count > 0;
+ midev->i2c_lock_count = 0;
+ spin_unlock_irqrestore(&midev->lock, flags);
+
+ if (unlock)
+ i2c_unlock_bus(midev->adapter, I2C_LOCK_SEGMENT);
+}
+
+static void mctp_i2c_invalidate_tx_flow(struct mctp_i2c_dev *midev,
+ struct sk_buff *skb)
+{
+ struct mctp_sk_key *key;
+ struct mctp_flow *flow;
+ unsigned long flags;
+ bool release;
+
+ flow = skb_ext_find(skb, SKB_EXT_MCTP);
+ if (!flow)
+ return;
+
+ key = flow->key;
+ if (!key)
+ return;
+
+ spin_lock_irqsave(&key->lock, flags);
+ if (key->manual_alloc) {
+ /* we don't have control over lifetimes for manually-allocated
+ * keys, so cannot assume we can invalidate all future flows
+ * that would use this key.
+ */
+ release = false;
+ } else {
+ release = key->dev_flow_state == MCTP_I2C_FLOW_STATE_ACTIVE;
+ key->dev_flow_state = MCTP_I2C_FLOW_STATE_INVALID;
+ }
+ spin_unlock_irqrestore(&key->lock, flags);
+
+ /* if we have changed state from active, the flow held a reference on
+ * the lock; release that now.
+ */
+ if (release)
+ mctp_i2c_unlock_nest(midev);
+}
+
+static void mctp_i2c_xmit(struct mctp_i2c_dev *midev, struct sk_buff *skb)
+{
+ struct net_device_stats *stats = &midev->ndev->stats;
+ enum mctp_i2c_flow_state fs;
+ struct mctp_i2c_hdr *hdr;
+ struct i2c_msg msg = {0};
+ u8 *pecp;
+ int rc;
+
+ fs = mctp_i2c_get_tx_flow_state(midev, skb);
+
+ hdr = (void *)skb_mac_header(skb);
+ /* Sanity check that packet contents matches skb length,
+ * and can't exceed MCTP_I2C_BUFSZ
+ */
+ if (skb->len != hdr->byte_count + 3) {
+ dev_warn_ratelimited(&midev->adapter->dev,
+ "Bad tx length %d vs skb %u\n",
+ hdr->byte_count + 3, skb->len);
+ return;
+ }
+
+ if (skb_tailroom(skb) >= 1) {
+ /* Linear case with space, we can just append the PEC */
+ skb_put(skb, 1);
+ } else {
+ /* Otherwise need to copy the buffer */
+ skb_copy_bits(skb, 0, midev->tx_scratch, skb->len);
+ hdr = (void *)midev->tx_scratch;
+ }
+
+ pecp = (void *)&hdr->source_slave + hdr->byte_count;
+ *pecp = i2c_smbus_pec(0, (u8 *)hdr, hdr->byte_count + 3);
+ msg.buf = (void *)&hdr->command;
+ /* command, bytecount, data, pec */
+ msg.len = 2 + hdr->byte_count + 1;
+ msg.addr = hdr->dest_slave >> 1;
+
+ switch (fs) {
+ case MCTP_I2C_TX_FLOW_NONE:
+ /* no flow: full lock & unlock */
+ mctp_i2c_lock_nest(midev);
+ mctp_i2c_device_select(midev->client, midev);
+ rc = __i2c_transfer(midev->adapter, &msg, 1);
+ mctp_i2c_unlock_nest(midev);
+ break;
+
+ case MCTP_I2C_TX_FLOW_NEW:
+ /* new flow: lock, tx, but don't unlock; that will happen
+ * on flow release
+ */
+ mctp_i2c_lock_nest(midev);
+ mctp_i2c_device_select(midev->client, midev);
+ fallthrough;
+
+ case MCTP_I2C_TX_FLOW_EXISTING:
+ /* existing flow: we already have the lock; just tx */
+ rc = __i2c_transfer(midev->adapter, &msg, 1);
+
+ /* on tx errors, the flow can no longer be considered valid */
+ if (rc < 0)
+ mctp_i2c_invalidate_tx_flow(midev, skb);
+
+ break;
+
+ case MCTP_I2C_TX_FLOW_INVALID:
+ return;
+ }
+
+ if (rc < 0) {
+ dev_warn_ratelimited(&midev->adapter->dev,
+ "__i2c_transfer failed %d\n", rc);
+ stats->tx_errors++;
+ } else {
+ stats->tx_bytes += skb->len;
+ stats->tx_packets++;
+ }
+}
+
+static void mctp_i2c_flow_release(struct mctp_i2c_dev *midev)
+{
+ unsigned long flags;
+ bool unlock;
+
+ spin_lock_irqsave(&midev->lock, flags);
+ if (midev->release_count > midev->i2c_lock_count) {
+ WARN_ONCE(1, "release count overflow");
+ midev->release_count = midev->i2c_lock_count;
+ }
+
+ midev->i2c_lock_count -= midev->release_count;
+ unlock = midev->i2c_lock_count == 0 && midev->release_count > 0;
+ midev->release_count = 0;
+ spin_unlock_irqrestore(&midev->lock, flags);
+
+ if (unlock)
+ i2c_unlock_bus(midev->adapter, I2C_LOCK_SEGMENT);
+}
+
+static int mctp_i2c_header_create(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, const void *daddr,
+ const void *saddr, unsigned int len)
+{
+ struct mctp_i2c_hdr *hdr;
+ struct mctp_hdr *mhdr;
+ u8 lldst, llsrc;
+ int rc;
+
+ if (len > MCTP_I2C_MAXMTU)
+ return -EMSGSIZE;
+
+ if (!daddr || !saddr)
+ return -EINVAL;
+
+ lldst = *((u8 *)daddr);
+ llsrc = *((u8 *)saddr);
+
+ rc = skb_cow_head(skb, sizeof(struct mctp_i2c_hdr));
+ if (rc)
+ return rc;
+
+ skb_push(skb, sizeof(struct mctp_i2c_hdr));
+ skb_reset_mac_header(skb);
+ hdr = (void *)skb_mac_header(skb);
+ mhdr = mctp_hdr(skb);
+ hdr->dest_slave = (lldst << 1) & 0xff;
+ hdr->command = MCTP_I2C_COMMANDCODE;
+ hdr->byte_count = len + 1;
+ hdr->source_slave = ((llsrc << 1) & 0xff) | 0x01;
+ mhdr->ver = 0x01;
+
+ return sizeof(struct mctp_i2c_hdr);
+}
+
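+/* Transmission runs from a dedicated kthread: __i2c_transfer() may sleep,
+ * which is not permitted in the ndo_start_xmit() path, so packets are
+ * queued there and drained here.
+ */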
+static int mctp_i2c_tx_thread(void *data)
+{
+ struct mctp_i2c_dev *midev = data;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ for (;;) {
+ if (kthread_should_stop())
+ break;
+
+ spin_lock_irqsave(&midev->tx_queue.lock, flags);
+ skb = __skb_dequeue(&midev->tx_queue);
+ if (netif_queue_stopped(midev->ndev))
+ netif_wake_queue(midev->ndev);
+ spin_unlock_irqrestore(&midev->tx_queue.lock, flags);
+
+ if (skb == &midev->unlock_marker) {
+ mctp_i2c_flow_release(midev);
+
+ } else if (skb) {
+ mctp_i2c_xmit(midev, skb);
+ kfree_skb(skb);
+
+ } else {
+ wait_event_idle(midev->tx_wq,
+ !skb_queue_empty(&midev->tx_queue) ||
+ kthread_should_stop());
+ }
+ }
+
+ return 0;
+}
+
+static netdev_tx_t mctp_i2c_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct mctp_i2c_dev *midev = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&midev->tx_queue.lock, flags);
+ if (skb_queue_len(&midev->tx_queue) >= MCTP_I2C_TX_WORK_LEN) {
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&midev->tx_queue.lock, flags);
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ __skb_queue_tail(&midev->tx_queue, skb);
+ if (skb_queue_len(&midev->tx_queue) == MCTP_I2C_TX_WORK_LEN)
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&midev->tx_queue.lock, flags);
+
+ wake_up(&midev->tx_wq);
+ return NETDEV_TX_OK;
+}
+
+static void mctp_i2c_release_flow(struct mctp_dev *mdev,
+ struct mctp_sk_key *key)
+
+{
+ struct mctp_i2c_dev *midev = netdev_priv(mdev->dev);
+ bool queue_release = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&midev->lock, flags);
+ /* if we have seen the flow/key previously, we need to pair the
+ * original lock with a release
+ */
+ if (key->dev_flow_state == MCTP_I2C_FLOW_STATE_ACTIVE) {
+ midev->release_count++;
+ queue_release = true;
+ }
+ key->dev_flow_state = MCTP_I2C_FLOW_STATE_INVALID;
+ spin_unlock_irqrestore(&midev->lock, flags);
+
+ if (queue_release) {
+ /* Ensure we have a release operation queued, through the fake
+ * marker skb
+ */
+ spin_lock(&midev->tx_queue.lock);
+ if (!midev->unlock_marker.next)
+ __skb_queue_tail(&midev->tx_queue,
+ &midev->unlock_marker);
+ spin_unlock(&midev->tx_queue.lock);
+ wake_up(&midev->tx_wq);
+ }
+}
+
+static const struct net_device_ops mctp_i2c_ops = {
+ .ndo_start_xmit = mctp_i2c_start_xmit,
+ .ndo_uninit = mctp_i2c_ndo_uninit,
+ .ndo_open = mctp_i2c_ndo_open,
+};
+
+static const struct header_ops mctp_i2c_headops = {
+ .create = mctp_i2c_header_create,
+};
+
+static const struct mctp_netdev_ops mctp_i2c_mctp_ops = {
+ .release_flow = mctp_i2c_release_flow,
+};
+
+static void mctp_i2c_net_setup(struct net_device *dev)
+{
+ dev->type = ARPHRD_MCTP;
+
+ dev->mtu = MCTP_I2C_MAXMTU;
+ dev->min_mtu = MCTP_I2C_MINMTU;
+ dev->max_mtu = MCTP_I2C_MAXMTU;
+ dev->tx_queue_len = MCTP_I2C_TX_QUEUE_LEN;
+
+ dev->hard_header_len = sizeof(struct mctp_i2c_hdr);
+ dev->addr_len = 1;
+
+ dev->netdev_ops = &mctp_i2c_ops;
+ dev->header_ops = &mctp_i2c_headops;
+}
+
+/* Populates the mctp_i2c_dev priv struct for a netdev.
+ * Returns an error pointer on failure.
+ */
+static struct mctp_i2c_dev *mctp_i2c_midev_init(struct net_device *dev,
+ struct mctp_i2c_client *mcli,
+ struct i2c_adapter *adap)
+{
+ struct mctp_i2c_dev *midev = netdev_priv(dev);
+ unsigned long flags;
+
+ midev->tx_thread = kthread_create(mctp_i2c_tx_thread, midev,
+ "%s/tx", dev->name);
+ if (IS_ERR(midev->tx_thread))
+ return ERR_CAST(midev->tx_thread);
+
+ midev->ndev = dev;
+ get_device(&adap->dev);
+ midev->adapter = adap;
+ get_device(&mcli->client->dev);
+ midev->client = mcli;
+ INIT_LIST_HEAD(&midev->list);
+ spin_lock_init(&midev->lock);
+ midev->i2c_lock_count = 0;
+ midev->release_count = 0;
+ init_completion(&midev->rx_done);
+ complete(&midev->rx_done);
+ init_waitqueue_head(&midev->tx_wq);
+ skb_queue_head_init(&midev->tx_queue);
+
+ /* Add to the parent mcli */
+ spin_lock_irqsave(&mcli->sel_lock, flags);
+ list_add(&midev->list, &mcli->devs);
+ /* Select a device by default */
+ if (!mcli->sel)
+ __mctp_i2c_device_select(mcli, midev);
+ spin_unlock_irqrestore(&mcli->sel_lock, flags);
+
+ /* Start the worker thread */
+ wake_up_process(midev->tx_thread);
+
+ return midev;
+}
+
+/* Counterpart of mctp_i2c_midev_init */
+static void mctp_i2c_midev_free(struct mctp_i2c_dev *midev)
+{
+ struct mctp_i2c_client *mcli = midev->client;
+ unsigned long flags;
+
+ if (midev->tx_thread) {
+ kthread_stop(midev->tx_thread);
+ midev->tx_thread = NULL;
+ }
+
+ /* Unconditionally unlock on close */
+ mctp_i2c_unlock_reset(midev);
+
+ /* Remove the netdev from the parent i2c client. */
+ spin_lock_irqsave(&mcli->sel_lock, flags);
+ list_del(&midev->list);
+ if (mcli->sel == midev) {
+ struct mctp_i2c_dev *first;
+
+ first = list_first_entry_or_null(&mcli->devs, struct mctp_i2c_dev, list);
+ __mctp_i2c_device_select(mcli, first);
+ }
+ spin_unlock_irqrestore(&mcli->sel_lock, flags);
+
+ skb_queue_purge(&midev->tx_queue);
+ put_device(&midev->adapter->dev);
+ put_device(&mcli->client->dev);
+}
+
+/* Stops, unregisters, and frees midev */
+static void mctp_i2c_unregister(struct mctp_i2c_dev *midev)
+{
+ unsigned long flags;
+
+ /* Stop tx thread prior to unregister, it uses netif_() functions */
+ kthread_stop(midev->tx_thread);
+ midev->tx_thread = NULL;
+
+ /* Prevent any new rx in mctp_i2c_recv(), let any pending work finish */
+ spin_lock_irqsave(&midev->lock, flags);
+ midev->allow_rx = false;
+ spin_unlock_irqrestore(&midev->lock, flags);
+ wait_for_completion(&midev->rx_done);
+
+ mctp_unregister_netdev(midev->ndev);
+ /* midev has been freed now by mctp_i2c_ndo_uninit callback */
+
+ free_netdev(midev->ndev);
+}
+
+static void mctp_i2c_ndo_uninit(struct net_device *dev)
+{
+ struct mctp_i2c_dev *midev = netdev_priv(dev);
+
+ /* Perform cleanup here to ensure that mcli->sel isn't holding
+ * a reference that would prevent unregister_netdevice()
+ * from completing.
+ */
+ mctp_i2c_midev_free(midev);
+}
+
+static int mctp_i2c_ndo_open(struct net_device *dev)
+{
+ struct mctp_i2c_dev *midev = netdev_priv(dev);
+ unsigned long flags;
+
+ /* i2c rx handler can only pass packets once the netdev is registered */
+ spin_lock_irqsave(&midev->lock, flags);
+ midev->allow_rx = true;
+ spin_unlock_irqrestore(&midev->lock, flags);
+
+ return 0;
+}
+
+static int mctp_i2c_add_netdev(struct mctp_i2c_client *mcli,
+ struct i2c_adapter *adap)
+{
+ struct mctp_i2c_dev *midev = NULL;
+ struct net_device *ndev = NULL;
+ struct i2c_adapter *root;
+ unsigned long flags;
+ char namebuf[30];
+ int rc;
+
+ root = mux_root_adapter(adap);
+ if (root != mcli->client->adapter) {
+ dev_err(&mcli->client->dev,
+ "I2C adapter %s is not a child bus of %s\n",
+ mcli->client->adapter->name, root->name);
+ return -EINVAL;
+ }
+
+ WARN_ON(!mutex_is_locked(&driver_clients_lock));
+ snprintf(namebuf, sizeof(namebuf), "mctpi2c%d", adap->nr);
+ ndev = alloc_netdev(sizeof(*midev), namebuf, NET_NAME_ENUM, mctp_i2c_net_setup);
+ if (!ndev) {
+ dev_err(&mcli->client->dev, "alloc netdev failed\n");
+ rc = -ENOMEM;
+ goto err;
+ }
+ dev_net_set(ndev, current->nsproxy->net_ns);
+ SET_NETDEV_DEV(ndev, &adap->dev);
+ dev_addr_set(ndev, &mcli->lladdr);
+
+ midev = mctp_i2c_midev_init(ndev, mcli, adap);
+ if (IS_ERR(midev)) {
+ rc = PTR_ERR(midev);
+ midev = NULL;
+ goto err;
+ }
+
+ rc = mctp_register_netdev(ndev, &mctp_i2c_mctp_ops,
+ MCTP_PHYS_BINDING_SMBUS);
+ if (rc < 0) {
+ dev_err(&mcli->client->dev,
+ "register netdev \"%s\" failed %d\n",
+ ndev->name, rc);
+ goto err;
+ }
+
+ spin_lock_irqsave(&midev->lock, flags);
+ midev->allow_rx = false;
+ spin_unlock_irqrestore(&midev->lock, flags);
+
+ return 0;
+err:
+ if (midev)
+ mctp_i2c_midev_free(midev);
+ if (ndev)
+ free_netdev(ndev);
+ return rc;
+}
+
+/* Removes any netdev for adap. mcli is the parent root i2c client */
+static void mctp_i2c_remove_netdev(struct mctp_i2c_client *mcli,
+ struct i2c_adapter *adap)
+{
+ struct mctp_i2c_dev *midev = NULL, *m = NULL;
+ unsigned long flags;
+
+ WARN_ON(!mutex_is_locked(&driver_clients_lock));
+ spin_lock_irqsave(&mcli->sel_lock, flags);
+ /* List size is limited by number of MCTP netdevs on a single hardware bus */
+ list_for_each_entry(m, &mcli->devs, list)
+ if (m->adapter == adap) {
+ midev = m;
+ break;
+ }
+ spin_unlock_irqrestore(&mcli->sel_lock, flags);
+
+ if (midev)
+ mctp_i2c_unregister(midev);
+}
+
+/* Determines whether a device is an i2c adapter.
+ * Optionally returns the root i2c_adapter
+ */
+static struct i2c_adapter *mctp_i2c_get_adapter(struct device *dev,
+ struct i2c_adapter **ret_root)
+{
+ struct i2c_adapter *root, *adap;
+
+ if (dev->type != &i2c_adapter_type)
+ return NULL;
+ adap = to_i2c_adapter(dev);
+ root = mux_root_adapter(adap);
+ WARN_ONCE(!root, "MCTP I2C failed to find root adapter for %s\n",
+ dev_name(dev));
+ if (!root)
+ return NULL;
+ if (ret_root)
+ *ret_root = root;
+ return adap;
+}
+
+/* Determines whether a device is an i2c adapter with the "mctp-controller"
+ * devicetree property set. If adap is not an OF node, returns match_no_of
+ */
+static bool mctp_i2c_adapter_match(struct i2c_adapter *adap, bool match_no_of)
+{
+ if (!adap->dev.of_node)
+ return match_no_of;
+ return of_property_read_bool(adap->dev.of_node, MCTP_I2C_OF_PROP);
+}
+
+/* Called for each existing i2c device (adapter or client) when a
+ * new mctp-i2c client is probed.
+ */
+static int mctp_i2c_client_try_attach(struct device *dev, void *data)
+{
+ struct i2c_adapter *adap = NULL, *root = NULL;
+ struct mctp_i2c_client *mcli = data;
+
+ adap = mctp_i2c_get_adapter(dev, &root);
+ if (!adap)
+ return 0;
+ if (mcli->client->adapter != root)
+ return 0;
+ /* Must either have mctp-controller property on the adapter, or
+ * be a root adapter if it's non-devicetree
+ */
+ if (!mctp_i2c_adapter_match(adap, adap == root))
+ return 0;
+
+ return mctp_i2c_add_netdev(mcli, adap);
+}
+
+static void mctp_i2c_notify_add(struct device *dev)
+{
+ struct mctp_i2c_client *mcli = NULL, *m = NULL;
+ struct i2c_adapter *root = NULL, *adap = NULL;
+ int rc;
+
+ adap = mctp_i2c_get_adapter(dev, &root);
+ if (!adap)
+ return;
+ /* Check for mctp-controller property on the adapter */
+ if (!mctp_i2c_adapter_match(adap, false))
+ return;
+
+ /* Find an existing mcli for adap's root */
+ mutex_lock(&driver_clients_lock);
+ list_for_each_entry(m, &driver_clients, list) {
+ if (m->client->adapter == root) {
+ mcli = m;
+ break;
+ }
+ }
+
+ if (mcli) {
+ rc = mctp_i2c_add_netdev(mcli, adap);
+ if (rc < 0)
+ dev_warn(dev, "Failed adding mctp-i2c net device\n");
+ }
+ mutex_unlock(&driver_clients_lock);
+}
+
+static void mctp_i2c_notify_del(struct device *dev)
+{
+ struct i2c_adapter *root = NULL, *adap = NULL;
+ struct mctp_i2c_client *mcli = NULL;
+
+ adap = mctp_i2c_get_adapter(dev, &root);
+ if (!adap)
+ return;
+
+ mutex_lock(&driver_clients_lock);
+ list_for_each_entry(mcli, &driver_clients, list) {
+ if (mcli->client->adapter == root) {
+ mctp_i2c_remove_netdev(mcli, adap);
+ break;
+ }
+ }
+ mutex_unlock(&driver_clients_lock);
+}
+
+static int mctp_i2c_probe(struct i2c_client *client)
+{
+ struct mctp_i2c_client *mcli = NULL;
+ int rc;
+
+ mutex_lock(&driver_clients_lock);
+ mcli = mctp_i2c_new_client(client);
+ if (IS_ERR(mcli)) {
+ rc = PTR_ERR(mcli);
+ mcli = NULL;
+ goto out;
+ } else {
+ list_add(&mcli->list, &driver_clients);
+ }
+
+ /* Add a netdev for adapters that have a 'mctp-controller' property */
+ i2c_for_each_dev(mcli, mctp_i2c_client_try_attach);
+ rc = 0;
+out:
+ mutex_unlock(&driver_clients_lock);
+ return rc;
+}
+
+static void mctp_i2c_remove(struct i2c_client *client)
+{
+ struct mctp_i2c_client *mcli = i2c_get_clientdata(client);
+ struct mctp_i2c_dev *midev = NULL, *tmp = NULL;
+
+ mutex_lock(&driver_clients_lock);
+ list_del(&mcli->list);
+ /* Remove all child adapter netdevs */
+ list_for_each_entry_safe(midev, tmp, &mcli->devs, list)
+ mctp_i2c_unregister(midev);
+
+ mctp_i2c_free_client(mcli);
+ mutex_unlock(&driver_clients_lock);
+}
+
+/* We look for a 'mctp-controller' property on I2C busses as they are
+ * added/deleted, creating/removing netdevs as required.
+ */
+static int mctp_i2c_notifier_call(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ mctp_i2c_notify_add(dev);
+ break;
+ case BUS_NOTIFY_DEL_DEVICE:
+ mctp_i2c_notify_del(dev);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block mctp_i2c_notifier = {
+ .notifier_call = mctp_i2c_notifier_call,
+};
+
+static const struct i2c_device_id mctp_i2c_id[] = {
+ { "mctp-i2c-interface" },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, mctp_i2c_id);
+
+static const struct of_device_id mctp_i2c_of_match[] = {
+ { .compatible = "mctp-i2c-controller" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mctp_i2c_of_match);
+
+static struct i2c_driver mctp_i2c_driver = {
+ .driver = {
+ .name = "mctp-i2c-interface",
+ .of_match_table = mctp_i2c_of_match,
+ },
+ .probe = mctp_i2c_probe,
+ .remove = mctp_i2c_remove,
+ .id_table = mctp_i2c_id,
+};
+
+static __init int mctp_i2c_mod_init(void)
+{
+ int rc;
+
+ pr_info("MCTP I2C interface driver\n");
+ rc = i2c_add_driver(&mctp_i2c_driver);
+ if (rc < 0)
+ return rc;
+ rc = bus_register_notifier(&i2c_bus_type, &mctp_i2c_notifier);
+ if (rc < 0) {
+ i2c_del_driver(&mctp_i2c_driver);
+ return rc;
+ }
+ return 0;
+}
+
+static __exit void mctp_i2c_mod_exit(void)
+{
+ int rc;
+
+ rc = bus_unregister_notifier(&i2c_bus_type, &mctp_i2c_notifier);
+ if (rc < 0)
+ pr_warn("MCTP I2C could not unregister notifier, %d\n", rc);
+ i2c_del_driver(&mctp_i2c_driver);
+}
+
+module_init(mctp_i2c_mod_init);
+module_exit(mctp_i2c_mod_exit);
+
+MODULE_DESCRIPTION("MCTP I2C device");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Matt Johnston <matt@codeconstruct.com.au>");
diff --git a/drivers/net/mctp/mctp-i3c.c b/drivers/net/mctp/mctp-i3c.c
new file mode 100644
index 000000000000..36c2405677c2
--- /dev/null
+++ b/drivers/net/mctp/mctp-i3c.c
@@ -0,0 +1,767 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Implements DMTF specification
+ * "DSP0233 Management Component Transport Protocol (MCTP) I3C Transport
+ * Binding"
+ * https://www.dmtf.org/sites/default/files/standards/documents/DSP0233_1.0.0.pdf
+ *
+ * Copyright (c) 2023 Code Construct
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/i3c/device.h>
+#include <linux/i3c/master.h>
+#include <linux/if_arp.h>
+#include <linux/unaligned.h>
+#include <net/mctp.h>
+#include <net/mctpdevice.h>
+
+#define MCTP_I3C_MAXBUF 65536
+/* 48 bit Provisioned Id */
+#define PID_SIZE 6
+
+/* 64 byte payload, 4 byte MCTP header */
+static const int MCTP_I3C_MINMTU = 64 + 4;
+/* One byte less to allow for the PEC */
+static const int MCTP_I3C_MAXMTU = MCTP_I3C_MAXBUF - 1;
+/* 4 byte MCTP header, no data, 1 byte PEC */
+static const int MCTP_I3C_MINLEN = 4 + 1;
+
+/* Sufficient for 64kB at min mtu */
+static const int MCTP_I3C_TX_QUEUE_LEN = 1100;
+
+/* Somewhat arbitrary */
+static const int MCTP_I3C_IBI_SLOTS = 8;
+
+/* Mandatory Data Byte in an IBI, from DSP0233 */
+#define I3C_MDB_MCTP 0xAE
+/* From MIPI Device Characteristics Register (DCR) Assignments */
+#define I3C_DCR_MCTP 0xCC
+
+static const char *MCTP_I3C_OF_PROP = "mctp-controller";
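+
+/* Receive path in brief (see mctp_i3c_ibi_handler() and mctp_i3c_read()
+ * below): the target raises an IBI, optionally carrying the 0xAE MCTP
+ * Mandatory Data Byte, and the host responds with a private read of up to
+ * the target's maximum read length. The trailing PEC is checked over the
+ * read address byte and the returned data, excluding the PEC byte itself.
+ */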
+
+/* List of mctp_i3c_busdev */
+static LIST_HEAD(busdevs);
+/* Protects busdevs, as well as mctp_i3c_bus.devs lists */
+static DEFINE_MUTEX(busdevs_lock);
+
+struct mctp_i3c_bus {
+ struct net_device *ndev;
+
+ struct task_struct *tx_thread;
+ wait_queue_head_t tx_wq;
+ /* tx_lock protects tx_skb and devs */
+ spinlock_t tx_lock;
+ /* Next skb to transmit */
+ struct sk_buff *tx_skb;
+ /* Scratch buffer for xmit */
+ u8 tx_scratch[MCTP_I3C_MAXBUF];
+
+ /* Element of busdevs */
+ struct list_head list;
+
+ /* Provisioned ID of our controller */
+ u64 pid;
+
+ struct i3c_bus *bus;
+ /* Head of mctp_i3c_device.list. Protected by busdevs_lock */
+ struct list_head devs;
+};
+
+struct mctp_i3c_device {
+ struct i3c_device *i3c;
+ struct mctp_i3c_bus *mbus;
+ struct list_head list; /* Element of mctp_i3c_bus.devs */
+
+ /* Held while tx_thread is using this device */
+ struct mutex lock;
+
+ /* Whether BCR indicates MDB is present in IBI */
+ bool have_mdb;
+ /* I3C dynamic address */
+ u8 addr;
+ /* Maximum read length */
+ u16 mrl;
+ /* Maximum write length */
+ u16 mwl;
+ /* Provisioned ID */
+ u64 pid;
+};
+
+/* We synthesise a mac header using the Provisioned ID.
+ * Used to pass dest to mctp_i3c_start_xmit.
+ */
+struct mctp_i3c_internal_hdr {
+ u8 dest[PID_SIZE];
+ u8 source[PID_SIZE];
+} __packed;
+
+static int mctp_i3c_read(struct mctp_i3c_device *mi)
+{
+ struct i3c_xfer xfer = { .rnw = 1, .len = mi->mrl };
+ struct net_device_stats *stats = &mi->mbus->ndev->stats;
+ struct mctp_i3c_internal_hdr *ihdr = NULL;
+ struct sk_buff *skb = NULL;
+ struct mctp_skb_cb *cb;
+ int net_status, rc;
+ u8 pec, addr;
+
+ skb = netdev_alloc_skb(mi->mbus->ndev,
+ mi->mrl + sizeof(struct mctp_i3c_internal_hdr));
+ if (!skb) {
+ stats->rx_dropped++;
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ skb->protocol = htons(ETH_P_MCTP);
+ /* Create a header for internal use */
+ skb_reset_mac_header(skb);
+ ihdr = skb_put(skb, sizeof(struct mctp_i3c_internal_hdr));
+ put_unaligned_be48(mi->pid, ihdr->source);
+ put_unaligned_be48(mi->mbus->pid, ihdr->dest);
+ skb_pull(skb, sizeof(struct mctp_i3c_internal_hdr));
+
+ xfer.data.in = skb_put(skb, mi->mrl);
+
+ /* Ensure netif_rx() is called in the same order as the i3c reads. */
+ mutex_lock(&mi->lock);
+ rc = i3c_device_do_xfers(mi->i3c, &xfer, 1, I3C_SDR);
+ if (rc < 0)
+ goto err;
+
+ if (WARN_ON_ONCE(xfer.len > mi->mrl)) {
+ /* Bad i3c bus driver */
+ rc = -EIO;
+ goto err;
+ }
+ if (xfer.len < MCTP_I3C_MINLEN) {
+ stats->rx_length_errors++;
+ rc = -EIO;
+ goto err;
+ }
+
+ /* check PEC, including address byte */
+ addr = mi->addr << 1 | 1;
+ pec = i2c_smbus_pec(0, &addr, 1);
+ pec = i2c_smbus_pec(pec, xfer.data.in, xfer.len - 1);
+ if (pec != ((u8 *)xfer.data.in)[xfer.len - 1]) {
+ stats->rx_crc_errors++;
+ rc = -EINVAL;
+ goto err;
+ }
+
+ /* Remove PEC */
+ skb_trim(skb, xfer.len - 1);
+
+ cb = __mctp_cb(skb);
+ cb->halen = PID_SIZE;
+ put_unaligned_be48(mi->pid, cb->haddr);
+
+ net_status = netif_rx(skb);
+
+ if (net_status == NET_RX_SUCCESS) {
+ stats->rx_packets++;
+ stats->rx_bytes += xfer.len - 1;
+ } else {
+ stats->rx_dropped++;
+ }
+
+ mutex_unlock(&mi->lock);
+ return 0;
+err:
+ mutex_unlock(&mi->lock);
+ kfree_skb(skb);
+ return rc;
+}
+
+static void mctp_i3c_ibi_handler(struct i3c_device *i3c,
+ const struct i3c_ibi_payload *payload)
+{
+ struct mctp_i3c_device *mi = i3cdev_get_drvdata(i3c);
+
+ if (WARN_ON_ONCE(!mi))
+ return;
+
+ if (mi->have_mdb) {
+ if (payload->len > 0) {
+ if (((u8 *)payload->data)[0] != I3C_MDB_MCTP) {
+ /* Not a mctp-i3c interrupt, ignore it */
+ return;
+ }
+ } else {
+ /* The BCR advertised a Mandatory Data Byte but the
+ * device didn't send one.
+ */
+ dev_warn_once(i3cdev_to_dev(i3c), "IBI with missing MDB");
+ }
+ }
+
+ mctp_i3c_read(mi);
+}
+
+static int mctp_i3c_setup(struct mctp_i3c_device *mi)
+{
+ const struct i3c_ibi_setup ibi = {
+ .max_payload_len = 1,
+ .num_slots = MCTP_I3C_IBI_SLOTS,
+ .handler = mctp_i3c_ibi_handler,
+ };
+ struct i3c_device_info info;
+ int rc;
+
+ i3c_device_get_info(mi->i3c, &info);
+ mi->have_mdb = info.bcr & BIT(2);
+ mi->addr = info.dyn_addr;
+ mi->mwl = info.max_write_len;
+ mi->mrl = info.max_read_len;
+ mi->pid = info.pid;
+
+ rc = i3c_device_request_ibi(mi->i3c, &ibi);
+ if (rc == -ENOTSUPP) {
+ /* This driver only supports In-Band Interrupt mode.
+ * Support for Polling Mode could be added if required.
+ * (ENOTSUPP is from the i3c layer, not EOPNOTSUPP).
+ */
+ dev_warn(i3cdev_to_dev(mi->i3c),
+ "Failed, bus driver doesn't support In-Band Interrupts");
+ goto err;
+ } else if (rc < 0) {
+ dev_err(i3cdev_to_dev(mi->i3c),
+ "Failed requesting IBI (%d)\n", rc);
+ goto err;
+ }
+
+ rc = i3c_device_enable_ibi(mi->i3c);
+ if (rc < 0) {
+ /* Assume a driver supporting request_ibi also
+ * supports enable_ibi.
+ */
+ dev_err(i3cdev_to_dev(mi->i3c), "Failed enabling IBI (%d)\n", rc);
+ goto err_free_ibi;
+ }
+
+ return 0;
+
+err_free_ibi:
+ i3c_device_free_ibi(mi->i3c);
+
+err:
+ return rc;
+}
+
+/* Adds a new MCTP i3c_device to a bus */
+static int mctp_i3c_add_device(struct mctp_i3c_bus *mbus,
+ struct i3c_device *i3c)
+__must_hold(&busdevs_lock)
+{
+ struct mctp_i3c_device *mi = NULL;
+ int rc;
+
+ mi = kzalloc(sizeof(*mi), GFP_KERNEL);
+ if (!mi) {
+ rc = -ENOMEM;
+ goto err;
+ }
+ mi->mbus = mbus;
+ mi->i3c = i3c;
+ mutex_init(&mi->lock);
+ list_add(&mi->list, &mbus->devs);
+
+ i3cdev_set_drvdata(i3c, mi);
+ rc = mctp_i3c_setup(mi);
+ if (rc < 0)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ list_del(&mi->list);
+ kfree(mi);
+
+err:
+ dev_warn(i3cdev_to_dev(i3c), "Error adding mctp-i3c device, %d\n", rc);
+ return rc;
+}
+
+static int mctp_i3c_probe(struct i3c_device *i3c)
+{
+ struct mctp_i3c_bus *b = NULL, *mbus = NULL;
+
+ /* Look for a known bus */
+ mutex_lock(&busdevs_lock);
+ list_for_each_entry(b, &busdevs, list)
+ if (b->bus == i3c->bus) {
+ mbus = b;
+ break;
+ }
+ mutex_unlock(&busdevs_lock);
+
+ if (!mbus) {
+ /* probably no "mctp-controller" property on the i3c bus */
+ return -ENODEV;
+ }
+
+ return mctp_i3c_add_device(mbus, i3c);
+}
+
+static void mctp_i3c_remove_device(struct mctp_i3c_device *mi)
+__must_hold(&busdevs_lock)
+{
+ /* Ensure the tx thread isn't using the device */
+ mutex_lock(&mi->lock);
+
+ /* Counterpart of mctp_i3c_setup */
+ i3c_device_disable_ibi(mi->i3c);
+ i3c_device_free_ibi(mi->i3c);
+
+ /* Counterpart of mctp_i3c_add_device */
+ i3cdev_set_drvdata(mi->i3c, NULL);
+ list_del(&mi->list);
+
+ /* Safe to unlock after removing from the list */
+ mutex_unlock(&mi->lock);
+ kfree(mi);
+}
+
+static void mctp_i3c_remove(struct i3c_device *i3c)
+{
+ struct mctp_i3c_device *mi = i3cdev_get_drvdata(i3c);
+
+ /* We may have received a Bus Remove notification prior to device remove,
+ * so mi may already have been removed.
+ */
+ if (!mi)
+ return;
+
+ mutex_lock(&busdevs_lock);
+ mctp_i3c_remove_device(mi);
+ mutex_unlock(&busdevs_lock);
+}
+
+/* Returns the device matching a Provisioned ID, with mi->lock held */
+static struct mctp_i3c_device *
+mctp_i3c_lookup(struct mctp_i3c_bus *mbus, u64 pid)
+{
+ struct mctp_i3c_device *mi = NULL, *ret = NULL;
+
+ mutex_lock(&busdevs_lock);
+ list_for_each_entry(mi, &mbus->devs, list)
+ if (mi->pid == pid) {
+ ret = mi;
+ mutex_lock(&mi->lock);
+ break;
+ }
+ mutex_unlock(&busdevs_lock);
+ return ret;
+}
+
+static void mctp_i3c_xmit(struct mctp_i3c_bus *mbus, struct sk_buff *skb)
+{
+ struct net_device_stats *stats = &mbus->ndev->stats;
+ struct i3c_xfer xfer = { .rnw = false };
+ struct mctp_i3c_internal_hdr *ihdr = NULL;
+ struct mctp_i3c_device *mi = NULL;
+ unsigned int data_len;
+ u8 *data = NULL;
+ u8 addr, pec;
+ int rc = 0;
+ u64 pid;
+
+ skb_pull(skb, sizeof(struct mctp_i3c_internal_hdr));
+ data_len = skb->len;
+
+ ihdr = (void *)skb_mac_header(skb);
+
+ pid = get_unaligned_be48(ihdr->dest);
+ mi = mctp_i3c_lookup(mbus, pid);
+ if (!mi) {
+ /* I3C endpoint went away after the packet was enqueued? */
+ stats->tx_dropped++;
+ goto out;
+ }
+
+ if (WARN_ON_ONCE(data_len + 1 > MCTP_I3C_MAXBUF))
+ goto out;
+
+ if (data_len + 1 > (unsigned int)mi->mwl) {
+ /* Route MTU was larger than supported by the endpoint */
+ stats->tx_dropped++;
+ goto out;
+ }
+
+ /* Need a linear buffer with space for the PEC */
+ xfer.len = data_len + 1;
+ if (skb_tailroom(skb) >= 1) {
+ skb_put(skb, 1);
+ data = skb->data;
+ } else {
+ /* Otherwise need to copy the buffer */
+ skb_copy_bits(skb, 0, mbus->tx_scratch, skb->len);
+ data = mbus->tx_scratch;
+ }
+
+ /* PEC calculation */
+ addr = mi->addr << 1;
+ pec = i2c_smbus_pec(0, &addr, 1);
+ pec = i2c_smbus_pec(pec, data, data_len);
+ data[data_len] = pec;
+
+ xfer.data.out = data;
+ rc = i3c_device_do_xfers(mi->i3c, &xfer, 1, I3C_SDR);
+ if (rc == 0) {
+ stats->tx_bytes += data_len;
+ stats->tx_packets++;
+ } else {
+ stats->tx_errors++;
+ }
+
+out:
+ if (mi)
+ mutex_unlock(&mi->lock);
+}
+
+static int mctp_i3c_tx_thread(void *data)
+{
+ struct mctp_i3c_bus *mbus = data;
+ struct sk_buff *skb;
+
+ for (;;) {
+ if (kthread_should_stop())
+ break;
+
+ spin_lock_bh(&mbus->tx_lock);
+ skb = mbus->tx_skb;
+ mbus->tx_skb = NULL;
+ spin_unlock_bh(&mbus->tx_lock);
+
+ if (netif_queue_stopped(mbus->ndev))
+ netif_wake_queue(mbus->ndev);
+
+ if (skb) {
+ mctp_i3c_xmit(mbus, skb);
+ kfree_skb(skb);
+ } else {
+ wait_event_idle(mbus->tx_wq,
+ mbus->tx_skb || kthread_should_stop());
+ }
+ }
+
+ return 0;
+}
+
+static netdev_tx_t mctp_i3c_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct mctp_i3c_bus *mbus = netdev_priv(ndev);
+ netdev_tx_t ret;
+
+ spin_lock(&mbus->tx_lock);
+ netif_stop_queue(ndev);
+ if (mbus->tx_skb) {
+ dev_warn_ratelimited(&ndev->dev, "TX with queue stopped");
+ ret = NETDEV_TX_BUSY;
+ } else {
+ mbus->tx_skb = skb;
+ ret = NETDEV_TX_OK;
+ }
+ spin_unlock(&mbus->tx_lock);
+
+ if (ret == NETDEV_TX_OK)
+ wake_up(&mbus->tx_wq);
+
+ return ret;
+}
+
+static void mctp_i3c_bus_free(struct mctp_i3c_bus *mbus)
+__must_hold(&busdevs_lock)
+{
+ struct mctp_i3c_device *mi = NULL, *tmp = NULL;
+
+ if (mbus->tx_thread) {
+ kthread_stop(mbus->tx_thread);
+ mbus->tx_thread = NULL;
+ }
+
+ /* Remove any child devices */
+ list_for_each_entry_safe(mi, tmp, &mbus->devs, list) {
+ mctp_i3c_remove_device(mi);
+ }
+
+ kfree_skb(mbus->tx_skb);
+ list_del(&mbus->list);
+}
+
+static void mctp_i3c_ndo_uninit(struct net_device *ndev)
+{
+ struct mctp_i3c_bus *mbus = netdev_priv(ndev);
+
+ /* Perform cleanup here to ensure there are no remaining references */
+ mctp_i3c_bus_free(mbus);
+}
+
+static int mctp_i3c_header_create(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, const void *daddr,
+ const void *saddr, unsigned int len)
+{
+ struct mctp_i3c_internal_hdr *ihdr;
+ int rc;
+
+ if (!daddr || !saddr)
+ return -EINVAL;
+
+ rc = skb_cow_head(skb, sizeof(struct mctp_i3c_internal_hdr));
+ if (rc)
+ return rc;
+
+ skb_push(skb, sizeof(struct mctp_i3c_internal_hdr));
+ skb_reset_mac_header(skb);
+ ihdr = (void *)skb_mac_header(skb);
+ memcpy(ihdr->dest, daddr, PID_SIZE);
+ memcpy(ihdr->source, saddr, PID_SIZE);
+ return 0;
+}
+
+static const struct net_device_ops mctp_i3c_ops = {
+ .ndo_start_xmit = mctp_i3c_start_xmit,
+ .ndo_uninit = mctp_i3c_ndo_uninit,
+};
+
+static const struct header_ops mctp_i3c_headops = {
+ .create = mctp_i3c_header_create,
+};
+
+static void mctp_i3c_net_setup(struct net_device *dev)
+{
+ dev->type = ARPHRD_MCTP;
+
+ dev->mtu = MCTP_I3C_MAXMTU;
+ dev->min_mtu = MCTP_I3C_MINMTU;
+ dev->max_mtu = MCTP_I3C_MAXMTU;
+ dev->tx_queue_len = MCTP_I3C_TX_QUEUE_LEN;
+
+ dev->hard_header_len = sizeof(struct mctp_i3c_internal_hdr);
+ dev->addr_len = PID_SIZE;
+
+ dev->netdev_ops = &mctp_i3c_ops;
+ dev->header_ops = &mctp_i3c_headops;
+}
+
+static bool mctp_i3c_is_mctp_controller(struct i3c_bus *bus)
+{
+ struct i3c_dev_desc *master = bus->cur_master;
+
+ if (!master)
+ return false;
+
+ return of_property_read_bool(master->common.master->dev.of_node,
+ MCTP_I3C_OF_PROP);
+}
+
+/* Returns the Provisioned Id of a local bus master */
+static int mctp_i3c_bus_local_pid(struct i3c_bus *bus, u64 *ret_pid)
+{
+ struct i3c_dev_desc *master;
+
+ master = bus->cur_master;
+ if (WARN_ON_ONCE(!master))
+ return -ENOENT;
+ *ret_pid = master->info.pid;
+
+ return 0;
+}
+
+/* Returns an ERR_PTR on failure */
+static struct mctp_i3c_bus *mctp_i3c_bus_add(struct i3c_bus *bus)
+__must_hold(&busdevs_lock)
+{
+ struct mctp_i3c_bus *mbus = NULL;
+ struct net_device *ndev = NULL;
+ char namebuf[IFNAMSIZ];
+ u8 addr[PID_SIZE];
+ int rc;
+
+ if (!mctp_i3c_is_mctp_controller(bus))
+ return ERR_PTR(-ENOENT);
+
+ snprintf(namebuf, sizeof(namebuf), "mctpi3c%d", bus->id);
+ ndev = alloc_netdev(sizeof(*mbus), namebuf, NET_NAME_ENUM,
+ mctp_i3c_net_setup);
+ if (!ndev) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ mbus = netdev_priv(ndev);
+ mbus->ndev = ndev;
+ mbus->bus = bus;
+ INIT_LIST_HEAD(&mbus->devs);
+ list_add(&mbus->list, &busdevs);
+
+ rc = mctp_i3c_bus_local_pid(bus, &mbus->pid);
+ if (rc < 0) {
+ dev_err(&ndev->dev, "No I3C PID available\n");
+ goto err_free_uninit;
+ }
+ put_unaligned_be48(mbus->pid, addr);
+ dev_addr_set(ndev, addr);
+
+ init_waitqueue_head(&mbus->tx_wq);
+ spin_lock_init(&mbus->tx_lock);
+ mbus->tx_thread = kthread_run(mctp_i3c_tx_thread, mbus,
+ "%s/tx", ndev->name);
+ if (IS_ERR(mbus->tx_thread)) {
+ dev_warn(&ndev->dev, "Error creating thread: %pe\n",
+ mbus->tx_thread);
+ rc = PTR_ERR(mbus->tx_thread);
+ mbus->tx_thread = NULL;
+ goto err_free_uninit;
+ }
+
+ rc = mctp_register_netdev(ndev, NULL, MCTP_PHYS_BINDING_I3C);
+ if (rc < 0) {
+ dev_warn(&ndev->dev, "netdev register failed: %d\n", rc);
+ goto err_free_netdev;
+ }
+ return mbus;
+
+err_free_uninit:
+ /* uninit will not get called if a netdev has not been registered,
+ * so we perform the same mbus cleanup manually.
+ */
+ mctp_i3c_bus_free(mbus);
+
+err_free_netdev:
+ free_netdev(ndev);
+
+err:
+ return ERR_PTR(rc);
+}
+
+static void mctp_i3c_bus_remove(struct mctp_i3c_bus *mbus)
+__must_hold(&busdevs_lock)
+{
+ /* Unregister calls through to ndo_uninit -> mctp_i3c_bus_free() */
+ mctp_unregister_netdev(mbus->ndev);
+
+ free_netdev(mbus->ndev);
+ /* mbus is deallocated */
+}
+
+/* Removes all mctp-i3c busses */
+static void mctp_i3c_bus_remove_all(void)
+{
+ struct mctp_i3c_bus *mbus = NULL, *tmp = NULL;
+
+ mutex_lock(&busdevs_lock);
+ list_for_each_entry_safe(mbus, tmp, &busdevs, list) {
+ mctp_i3c_bus_remove(mbus);
+ }
+ mutex_unlock(&busdevs_lock);
+}
+
+/* Adds a i3c_bus if it isn't already in the busdevs list.
+ * Suitable as an i3c_for_each_bus_locked callback.
+ */
+static int mctp_i3c_bus_add_new(struct i3c_bus *bus, void *data)
+{
+ struct mctp_i3c_bus *mbus = NULL, *tmp = NULL;
+ bool exists = false;
+
+ mutex_lock(&busdevs_lock);
+ list_for_each_entry_safe(mbus, tmp, &busdevs, list)
+ if (mbus->bus == bus)
+ exists = true;
+
+ /* It is OK for a bus to already exist. That can occur due to
+ * the race in mod_init between notifier and for_each_bus
+ */
+ if (!exists)
+ mctp_i3c_bus_add(bus);
+ mutex_unlock(&busdevs_lock);
+ return 0;
+}
+
+static void mctp_i3c_notify_bus_remove(struct i3c_bus *bus)
+{
+ struct mctp_i3c_bus *mbus = NULL, *tmp;
+
+ mutex_lock(&busdevs_lock);
+ list_for_each_entry_safe(mbus, tmp, &busdevs, list)
+ if (mbus->bus == bus)
+ mctp_i3c_bus_remove(mbus);
+ mutex_unlock(&busdevs_lock);
+}
+
+static int mctp_i3c_notifier_call(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ switch (action) {
+ case I3C_NOTIFY_BUS_ADD:
+ mctp_i3c_bus_add_new((struct i3c_bus *)data, NULL);
+ break;
+ case I3C_NOTIFY_BUS_REMOVE:
+ mctp_i3c_notify_bus_remove((struct i3c_bus *)data);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block mctp_i3c_notifier = {
+ .notifier_call = mctp_i3c_notifier_call,
+};
+
+static const struct i3c_device_id mctp_i3c_ids[] = {
+ I3C_CLASS(I3C_DCR_MCTP, NULL),
+ { 0 },
+};
+
+static struct i3c_driver mctp_i3c_driver = {
+ .driver = {
+ .name = "mctp-i3c",
+ },
+ .probe = mctp_i3c_probe,
+ .remove = mctp_i3c_remove,
+ .id_table = mctp_i3c_ids,
+};
+
+static __init int mctp_i3c_mod_init(void)
+{
+ int rc;
+
+ rc = i3c_register_notifier(&mctp_i3c_notifier);
+ if (rc < 0)
+ return rc;
+
+ i3c_for_each_bus_locked(mctp_i3c_bus_add_new, NULL);
+
+ rc = i3c_driver_register(&mctp_i3c_driver);
+ if (rc < 0) {
+ /* Undo the notifier registration and any busses already added */
+ i3c_unregister_notifier(&mctp_i3c_notifier);
+ mctp_i3c_bus_remove_all();
+ return rc;
+ }
+
+ return 0;
+}
+
+static __exit void mctp_i3c_mod_exit(void)
+{
+ int rc;
+
+ i3c_driver_unregister(&mctp_i3c_driver);
+
+ rc = i3c_unregister_notifier(&mctp_i3c_notifier);
+ if (rc < 0)
+ pr_warn("MCTP I3C could not unregister notifier, %d\n", rc);
+
+ mctp_i3c_bus_remove_all();
+}
+
+module_init(mctp_i3c_mod_init);
+module_exit(mctp_i3c_mod_exit);
+
+MODULE_DEVICE_TABLE(i3c, mctp_i3c_ids);
+MODULE_DESCRIPTION("MCTP I3C device");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Matt Johnston <matt@codeconstruct.com.au>");
diff --git a/drivers/net/mctp/mctp-serial.c b/drivers/net/mctp/mctp-serial.c
new file mode 100644
index 000000000000..26c9a33fd636
--- /dev/null
+++ b/drivers/net/mctp/mctp-serial.c
@@ -0,0 +1,634 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Management Component Transport Protocol (MCTP) - serial transport
+ * binding. This driver is an implementation of the DMTF specification
+ * "DSP0253 - Management Component Transport Protocol (MCTP) Serial Transport
+ * Binding", available at:
+ *
+ * https://www.dmtf.org/sites/default/files/standards/documents/DSP0253_1.0.0.pdf
+ *
+ * This driver provides DSP0253-type MCTP-over-serial transport using a Linux
+ * tty device, by setting the N_MCTP line discipline on the tty.
+ *
+ * Copyright (c) 2021 Code Construct
+ */
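+
+/* A minimal user-space sketch of attaching the line discipline (the tty
+ * path here is purely illustrative; N_MCTP and TIOCSETD come from the
+ * uapi headers):
+ *
+ *	#include <fcntl.h>
+ *	#include <unistd.h>
+ *	#include <sys/ioctl.h>
+ *	#include <linux/tty.h>
+ *
+ *	int main(void)
+ *	{
+ *		int ldisc = N_MCTP;
+ *		int fd = open("/dev/ttyS1", O_RDWR | O_NOCTTY);
+ *
+ *		if (fd < 0 || ioctl(fd, TIOCSETD, &ldisc) < 0)
+ *			return 1;
+ *		for (;;)
+ *			pause();
+ *	}
+ *
+ * While the fd remains open, a new MCTP net device exists and can be
+ * brought up and addressed like any other MCTP interface.
+ */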
+
+#include <linux/idr.h>
+#include <linux/if_arp.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/tty.h>
+#include <linux/workqueue.h>
+#include <linux/crc-ccitt.h>
+
+#include <linux/mctp.h>
+#include <net/mctp.h>
+#include <net/mctpdevice.h>
+#include <net/pkt_sched.h>
+
+#define MCTP_SERIAL_MTU 68 /* base mtu (64) + mctp header */
+#define MCTP_SERIAL_FRAME_MTU (MCTP_SERIAL_MTU + 6) /* + serial framing */
+
+#define MCTP_SERIAL_VERSION 0x1 /* DSP0253 defines a single version: 1 */
+
+#define BUFSIZE MCTP_SERIAL_FRAME_MTU
+
+#define BYTE_FRAME 0x7e
+#define BYTE_ESC 0x7d
+
+#define FCS_INIT 0xffff
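+
+/* A worked example of the framing implemented below, for a hypothetical
+ * 4-byte MCTP packet whose first bytes are 0x01 0x7e:
+ *
+ *   0x7e                  frame flag
+ *   0x01 0x04             protocol version, byte count
+ *   0x01 0x7d 0x5e ...    payload; 0x7e is escaped as 0x7d, 0x7e & ~0x20
+ *   fcs_hi fcs_lo         CRC-CCITT over version, count and the unescaped
+ *                         payload, seeded with FCS_INIT
+ *   0x7e                  frame flag
+ */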
+
+static DEFINE_IDA(mctp_serial_ida);
+
+enum mctp_serial_state {
+ STATE_IDLE,
+ STATE_START,
+ STATE_HEADER,
+ STATE_DATA,
+ STATE_ESCAPE,
+ STATE_TRAILER,
+ STATE_DONE,
+ STATE_ERR,
+};
+
+struct mctp_serial {
+ struct net_device *netdev;
+ struct tty_struct *tty;
+
+ int idx;
+
+ /* protects our rx & tx state machines; held during both paths */
+ spinlock_t lock;
+
+ struct work_struct tx_work;
+ enum mctp_serial_state txstate, rxstate;
+ u16 txfcs, rxfcs, rxfcs_rcvd;
+ unsigned int txlen, rxlen;
+ unsigned int txpos, rxpos;
+ u8 txbuf[BUFSIZE],
+ rxbuf[BUFSIZE];
+};
+
+static bool needs_escape(u8 c)
+{
+ return c == BYTE_ESC || c == BYTE_FRAME;
+}
+
+static unsigned int next_chunk_len(struct mctp_serial *dev)
+{
+ unsigned int i;
+
+ /* either we have no bytes to send ... */
+ if (dev->txpos == dev->txlen)
+ return 0;
+
+ /* ... or the next byte to send is an escaped byte; requiring a
+ * single-byte chunk...
+ */
+ if (needs_escape(dev->txbuf[dev->txpos]))
+ return 1;
+
+ /* ... or we have one or more bytes up to the next escape - this chunk
+ * will be those non-escaped bytes, and does not include the escaped
+ * byte.
+ */
+ for (i = 1; i + dev->txpos < dev->txlen; i++) {
+ if (needs_escape(dev->txbuf[dev->txpos + i]))
+ break;
+ }
+
+ return i;
+}
+
+static ssize_t write_chunk(struct mctp_serial *dev, u8 *buf, size_t len)
+{
+ return dev->tty->ops->write(dev->tty, buf, len);
+}
+
+static void mctp_serial_tx_work(struct work_struct *work)
+{
+ struct mctp_serial *dev = container_of(work, struct mctp_serial,
+ tx_work);
+ unsigned long flags;
+ ssize_t txlen;
+ unsigned int len;
+ u8 c, buf[3];
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ /* txstate represents the next thing to send */
+ switch (dev->txstate) {
+ case STATE_START:
+ dev->txpos = 0;
+ fallthrough;
+ case STATE_HEADER:
+ buf[0] = BYTE_FRAME;
+ buf[1] = MCTP_SERIAL_VERSION;
+ buf[2] = dev->txlen;
+
+ if (!dev->txpos)
+ dev->txfcs = crc_ccitt(FCS_INIT, buf + 1, 2);
+
+ txlen = write_chunk(dev, buf + dev->txpos, 3 - dev->txpos);
+ if (txlen <= 0) {
+ dev->txstate = STATE_ERR;
+ } else {
+ dev->txpos += txlen;
+ if (dev->txpos == 3) {
+ dev->txstate = STATE_DATA;
+ dev->txpos = 0;
+ }
+ }
+ break;
+
+ case STATE_ESCAPE:
+ buf[0] = dev->txbuf[dev->txpos] & ~0x20;
+ txlen = write_chunk(dev, buf, 1);
+ if (txlen <= 0) {
+ dev->txstate = STATE_ERR;
+ } else {
+ dev->txpos += txlen;
+ if (dev->txpos == dev->txlen) {
+ dev->txstate = STATE_TRAILER;
+ dev->txpos = 0;
+ }
+ }
+
+ break;
+
+ case STATE_DATA:
+ len = next_chunk_len(dev);
+ if (len) {
+ c = dev->txbuf[dev->txpos];
+ if (len == 1 && needs_escape(c)) {
+ buf[0] = BYTE_ESC;
+ buf[1] = c & ~0x20;
+ dev->txfcs = crc_ccitt_byte(dev->txfcs, c);
+ txlen = write_chunk(dev, buf, 2);
+ if (txlen == 2)
+ dev->txpos++;
+ else if (txlen == 1)
+ dev->txstate = STATE_ESCAPE;
+ else
+ dev->txstate = STATE_ERR;
+ } else {
+ txlen = write_chunk(dev,
+ dev->txbuf + dev->txpos,
+ len);
+ if (txlen <= 0) {
+ dev->txstate = STATE_ERR;
+ } else {
+ dev->txfcs = crc_ccitt(dev->txfcs,
+ dev->txbuf +
+ dev->txpos,
+ txlen);
+ dev->txpos += txlen;
+ }
+ }
+ if (dev->txstate == STATE_DATA &&
+ dev->txpos == dev->txlen) {
+ dev->txstate = STATE_TRAILER;
+ dev->txpos = 0;
+ }
+ break;
+ }
+ dev->txstate = STATE_TRAILER;
+ dev->txpos = 0;
+ fallthrough;
+
+ case STATE_TRAILER:
+ buf[0] = dev->txfcs >> 8;
+ buf[1] = dev->txfcs & 0xff;
+ buf[2] = BYTE_FRAME;
+ txlen = write_chunk(dev, buf + dev->txpos, 3 - dev->txpos);
+ if (txlen <= 0) {
+ dev->txstate = STATE_ERR;
+ } else {
+ dev->txpos += txlen;
+ if (dev->txpos == 3) {
+ dev->txstate = STATE_DONE;
+ dev->txpos = 0;
+ }
+ }
+ break;
+ default:
+ netdev_err_once(dev->netdev, "invalid tx state %d\n",
+ dev->txstate);
+ }
+
+ if (dev->txstate == STATE_DONE) {
+ dev->netdev->stats.tx_packets++;
+ dev->netdev->stats.tx_bytes += dev->txlen;
+ dev->txlen = 0;
+ dev->txpos = 0;
+ clear_bit(TTY_DO_WRITE_WAKEUP, &dev->tty->flags);
+ dev->txstate = STATE_IDLE;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ netif_wake_queue(dev->netdev);
+ } else {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ }
+}
+
+static netdev_tx_t mctp_serial_tx(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct mctp_serial *dev = netdev_priv(ndev);
+ unsigned long flags;
+
+ WARN_ON(dev->txstate != STATE_IDLE);
+
+ if (skb->len > MCTP_SERIAL_MTU) {
+ dev->netdev->stats.tx_dropped++;
+ goto out;
+ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+ netif_stop_queue(dev->netdev);
+ skb_copy_bits(skb, 0, dev->txbuf, skb->len);
+ dev->txpos = 0;
+ dev->txlen = skb->len;
+ dev->txstate = STATE_START;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ set_bit(TTY_DO_WRITE_WAKEUP, &dev->tty->flags);
+ schedule_work(&dev->tx_work);
+
+out:
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static void mctp_serial_tty_write_wakeup(struct tty_struct *tty)
+{
+ struct mctp_serial *dev = tty->disc_data;
+
+ schedule_work(&dev->tx_work);
+}
+
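+/* Deliver a fully-received frame to the stack: check the received FCS
+ * against the locally-computed one, then copy the de-escaped payload
+ * from rxbuf into a fresh skb and pass it to netif_rx(). Mismatched
+ * FCS values are counted as CRC errors and dropped.
+ */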
+static void mctp_serial_rx(struct mctp_serial *dev)
+{
+ struct mctp_skb_cb *cb;
+ struct sk_buff *skb;
+
+ if (dev->rxfcs != dev->rxfcs_rcvd) {
+ dev->netdev->stats.rx_dropped++;
+ dev->netdev->stats.rx_crc_errors++;
+ return;
+ }
+
+ skb = netdev_alloc_skb(dev->netdev, dev->rxlen);
+ if (!skb) {
+ dev->netdev->stats.rx_dropped++;
+ return;
+ }
+
+ skb->protocol = htons(ETH_P_MCTP);
+ skb_put_data(skb, dev->rxbuf, dev->rxlen);
+ skb_reset_network_header(skb);
+
+ cb = __mctp_cb(skb);
+ cb->halen = 0;
+
+ netif_rx(skb);
+ dev->netdev->stats.rx_packets++;
+ dev->netdev->stats.rx_bytes += dev->rxlen;
+}
+
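+/* Header bytes: framing flag (0x7e), protocol version, then the byte
+ * count of the (unescaped) MCTP packet that follows. The version and
+ * count bytes are folded into the running FCS.
+ */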
+static void mctp_serial_push_header(struct mctp_serial *dev, u8 c)
+{
+ switch (dev->rxpos) {
+ case 0:
+ if (c == BYTE_FRAME)
+ dev->rxpos++;
+ else
+ dev->rxstate = STATE_ERR;
+ break;
+ case 1:
+ if (c == MCTP_SERIAL_VERSION) {
+ dev->rxpos++;
+ dev->rxfcs = crc_ccitt_byte(FCS_INIT, c);
+ } else {
+ dev->rxstate = STATE_ERR;
+ }
+ break;
+ case 2:
+ if (c > MCTP_SERIAL_FRAME_MTU) {
+ dev->rxstate = STATE_ERR;
+ } else {
+ dev->rxlen = c;
+ dev->rxpos = 0;
+ dev->rxstate = STATE_DATA;
+ dev->rxfcs = crc_ccitt_byte(dev->rxfcs, c);
+ }
+ break;
+ }
+}
+
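+/* Trailer bytes: 16-bit FCS, most-significant byte first, then the
+ * closing framing flag. A matching flag completes the frame and hands
+ * it to mctp_serial_rx().
+ */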
+static void mctp_serial_push_trailer(struct mctp_serial *dev, u8 c)
+{
+ switch (dev->rxpos) {
+ case 0:
+ dev->rxfcs_rcvd = c << 8;
+ dev->rxpos++;
+ break;
+ case 1:
+ dev->rxfcs_rcvd |= c;
+ dev->rxpos++;
+ break;
+ case 2:
+ if (c != BYTE_FRAME) {
+ dev->rxstate = STATE_ERR;
+ } else {
+ mctp_serial_rx(dev);
+ dev->rxlen = 0;
+ dev->rxpos = 0;
+ dev->rxstate = STATE_IDLE;
+ }
+ break;
+ }
+}
+
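+/* Receive state machine, fed one byte at a time from the tty.
+ * IDLE/HEADER consume the opening flag, version and length bytes;
+ * DATA un-escapes payload bytes into rxbuf; TRAILER consumes the FCS
+ * and closing flag; ERR discards input until a flag byte (0x7e) is
+ * seen.
+ */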
+static void mctp_serial_push(struct mctp_serial *dev, u8 c)
+{
+ switch (dev->rxstate) {
+ case STATE_IDLE:
+ dev->rxstate = STATE_HEADER;
+ fallthrough;
+ case STATE_HEADER:
+ mctp_serial_push_header(dev, c);
+ break;
+
+ case STATE_ESCAPE:
+ c |= 0x20;
+ fallthrough;
+ case STATE_DATA:
+ if (dev->rxstate != STATE_ESCAPE && c == BYTE_ESC) {
+ dev->rxstate = STATE_ESCAPE;
+ } else {
+ dev->rxfcs = crc_ccitt_byte(dev->rxfcs, c);
+ dev->rxbuf[dev->rxpos] = c;
+ dev->rxpos++;
+ dev->rxstate = STATE_DATA;
+ if (dev->rxpos == dev->rxlen) {
+ dev->rxpos = 0;
+ dev->rxstate = STATE_TRAILER;
+ }
+ }
+ break;
+
+ case STATE_TRAILER:
+ mctp_serial_push_trailer(dev, c);
+ break;
+
+ case STATE_ERR:
+ if (c == BYTE_FRAME)
+ dev->rxstate = STATE_IDLE;
+ break;
+
+ default:
+ netdev_err_once(dev->netdev, "invalid rx state %d\n",
+ dev->rxstate);
+ }
+}
+
+static void mctp_serial_tty_receive_buf(struct tty_struct *tty, const u8 *c,
+ const u8 *f, size_t len)
+{
+ struct mctp_serial *dev = tty->disc_data;
+ size_t i;
+
+ if (!netif_running(dev->netdev))
+ return;
+
+ /* we don't (currently) use the flag bytes, just data. */
+ for (i = 0; i < len; i++)
+ mctp_serial_push(dev, c[i]);
+}
+
+static void mctp_serial_uninit(struct net_device *ndev)
+{
+ struct mctp_serial *dev = netdev_priv(ndev);
+
+ cancel_work_sync(&dev->tx_work);
+}
+
+static const struct net_device_ops mctp_serial_netdev_ops = {
+ .ndo_start_xmit = mctp_serial_tx,
+ .ndo_uninit = mctp_serial_uninit,
+};
+
+static void mctp_serial_setup(struct net_device *ndev)
+{
+ ndev->type = ARPHRD_MCTP;
+
+ /* we use a fixed MTU, which is also the MCTP-standard baseline
+ * MTU, so it is our minimum as well
+ */
+ ndev->mtu = MCTP_SERIAL_MTU;
+ ndev->max_mtu = MCTP_SERIAL_MTU;
+ ndev->min_mtu = MCTP_SERIAL_MTU;
+
+ ndev->hard_header_len = 0;
+ ndev->addr_len = 0;
+ ndev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
+ ndev->flags = IFF_NOARP;
+ ndev->netdev_ops = &mctp_serial_netdev_ops;
+ ndev->needs_free_netdev = true;
+}
+
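+/* ldisc open: allocate and register a new "mctpserialN" MCTP netdev
+ * backed by this tty. Attachment is normally done from userspace by
+ * setting the N_MCTP line discipline on an open serial device, for
+ * example (sketch, error handling omitted; device path is arbitrary):
+ *
+ *   int ldisc = N_MCTP;
+ *   int fd = open("/dev/ttyS0", O_RDWR);
+ *   ioctl(fd, TIOCSETD, &ldisc);
+ *
+ * after which the new mctpserialN network interface appears.
+ */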
+static int mctp_serial_open(struct tty_struct *tty)
+{
+ struct mctp_serial *dev;
+ struct net_device *ndev;
+ char name[32];
+ int idx, rc;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (!tty->ops->write)
+ return -EOPNOTSUPP;
+
+ idx = ida_alloc(&mctp_serial_ida, GFP_KERNEL);
+ if (idx < 0)
+ return idx;
+
+ snprintf(name, sizeof(name), "mctpserial%d", idx);
+ ndev = alloc_netdev(sizeof(*dev), name, NET_NAME_ENUM,
+ mctp_serial_setup);
+ if (!ndev) {
+ rc = -ENOMEM;
+ goto free_ida;
+ }
+
+ dev = netdev_priv(ndev);
+ dev->idx = idx;
+ dev->tty = tty;
+ dev->netdev = ndev;
+ dev->txstate = STATE_IDLE;
+ dev->rxstate = STATE_IDLE;
+ spin_lock_init(&dev->lock);
+ INIT_WORK(&dev->tx_work, mctp_serial_tx_work);
+
+ rc = mctp_register_netdev(ndev, NULL, MCTP_PHYS_BINDING_SERIAL);
+ if (rc)
+ goto free_netdev;
+
+ tty->receive_room = 64 * 1024;
+ tty->disc_data = dev;
+
+ return 0;
+
+free_netdev:
+ free_netdev(ndev);
+
+free_ida:
+ ida_free(&mctp_serial_ida, idx);
+ return rc;
+}
+
+static void mctp_serial_close(struct tty_struct *tty)
+{
+ struct mctp_serial *dev = tty->disc_data;
+ int idx = dev->idx;
+
+ mctp_unregister_netdev(dev->netdev);
+ ida_free(&mctp_serial_ida, idx);
+}
+
+static struct tty_ldisc_ops mctp_ldisc = {
+ .owner = THIS_MODULE,
+ .num = N_MCTP,
+ .name = "mctp",
+ .open = mctp_serial_open,
+ .close = mctp_serial_close,
+ .receive_buf = mctp_serial_tty_receive_buf,
+ .write_wakeup = mctp_serial_tty_write_wakeup,
+};
+
+static int __init mctp_serial_init(void)
+{
+ return tty_register_ldisc(&mctp_ldisc);
+}
+
+static void __exit mctp_serial_exit(void)
+{
+ tty_unregister_ldisc(&mctp_ldisc);
+}
+
+module_init(mctp_serial_init);
+module_exit(mctp_serial_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jeremy Kerr <jk@codeconstruct.com.au>");
+MODULE_DESCRIPTION("MCTP Serial transport");
+
+#if IS_ENABLED(CONFIG_MCTP_SERIAL_TEST)
+#include <kunit/test.h>
+
+#define MAX_CHUNKS 6
+struct test_chunk_tx {
+ u8 input_len;
+ u8 input[MCTP_SERIAL_MTU];
+ u8 chunks[MAX_CHUNKS];
+};
+
+static void test_next_chunk_len(struct kunit *test)
+{
+ struct mctp_serial devx;
+ struct mctp_serial *dev = &devx;
+ int next;
+
+ const struct test_chunk_tx *params = test->param_value;
+
+ memset(dev, 0x0, sizeof(*dev));
+ memcpy(dev->txbuf, params->input, params->input_len);
+ dev->txlen = params->input_len;
+
+ for (size_t i = 0; i < MAX_CHUNKS; i++) {
+ next = next_chunk_len(dev);
+ dev->txpos += next;
+ KUNIT_EXPECT_EQ(test, next, params->chunks[i]);
+
+ if (next == 0) {
+ KUNIT_EXPECT_EQ(test, dev->txpos, dev->txlen);
+ return;
+ }
+ }
+
+ KUNIT_FAIL_AND_ABORT(test, "Ran out of chunks");
+}
+
+static struct test_chunk_tx chunk_tx_tests[] = {
+ {
+ .input_len = 5,
+ .input = { 0x00, 0x11, 0x22, 0x7e, 0x80 },
+ .chunks = { 3, 1, 1, 0},
+ },
+ {
+ .input_len = 5,
+ .input = { 0x00, 0x11, 0x22, 0x7e, 0x7d },
+ .chunks = { 3, 1, 1, 0},
+ },
+ {
+ .input_len = 3,
+ .input = { 0x7e, 0x11, 0x22, },
+ .chunks = { 1, 2, 0},
+ },
+ {
+ .input_len = 3,
+ .input = { 0x7e, 0x7e, 0x7d, },
+ .chunks = { 1, 1, 1, 0},
+ },
+ {
+ .input_len = 4,
+ .input = { 0x7e, 0x7e, 0x00, 0x7d, },
+ .chunks = { 1, 1, 1, 1, 0},
+ },
+ {
+ .input_len = 6,
+ .input = { 0x7e, 0x7e, 0x00, 0x7d, 0x10, 0x10},
+ .chunks = { 1, 1, 1, 1, 2, 0},
+ },
+ {
+ .input_len = 1,
+ .input = { 0x7e },
+ .chunks = { 1, 0 },
+ },
+ {
+ .input_len = 1,
+ .input = { 0x80 },
+ .chunks = { 1, 0 },
+ },
+ {
+ .input_len = 3,
+ .input = { 0x80, 0x80, 0x00 },
+ .chunks = { 3, 0 },
+ },
+ {
+ .input_len = 7,
+ .input = { 0x01, 0x00, 0x08, 0xc8, 0x00, 0x80, 0x02 },
+ .chunks = { 7, 0 },
+ },
+ {
+ .input_len = 7,
+ .input = { 0x01, 0x00, 0x08, 0xc8, 0x7e, 0x80, 0x02 },
+ .chunks = { 4, 1, 2, 0 },
+ },
+};
+
+KUNIT_ARRAY_PARAM(chunk_tx, chunk_tx_tests, NULL);
+
+static struct kunit_case mctp_serial_test_cases[] = {
+ KUNIT_CASE_PARAM(test_next_chunk_len, chunk_tx_gen_params),
+ {}
+};
+
+static struct kunit_suite mctp_serial_test_suite = {
+ .name = "mctp_serial",
+ .test_cases = mctp_serial_test_cases,
+};
+
+kunit_test_suite(mctp_serial_test_suite);
+
+#endif /* CONFIG_MCTP_SERIAL_TEST */
diff --git a/drivers/net/mctp/mctp-usb.c b/drivers/net/mctp/mctp-usb.c
new file mode 100644
index 000000000000..ef860cfc629f
--- /dev/null
+++ b/drivers/net/mctp/mctp-usb.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * mctp-usb.c - MCTP-over-USB (DMTF DSP0283) transport binding driver.
+ *
+ * DSP0283 is available at:
+ * https://www.dmtf.org/sites/default/files/standards/documents/DSP0283_1.0.1.pdf
+ *
+ * Copyright (C) 2024-2025 Code Construct Pty Ltd
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/usb.h>
+#include <linux/usb/mctp-usb.h>
+
+#include <net/mctp.h>
+#include <net/mctpdevice.h>
+#include <net/pkt_sched.h>
+
+#include <uapi/linux/if_arp.h>
+
+struct mctp_usb {
+ struct usb_device *usbdev;
+ struct usb_interface *intf;
+ bool stopped;
+
+ struct net_device *netdev;
+
+ u8 ep_in;
+ u8 ep_out;
+
+ struct urb *tx_urb;
+ struct urb *rx_urb;
+
+ struct delayed_work rx_retry_work;
+};
+
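+/* TX URB completion: on success, account the transmitted bytes, wake
+ * the queue stopped by mctp_usb_start_xmit() and consume the skb;
+ * otherwise count a TX drop and free the skb.
+ */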
+static void mctp_usb_out_complete(struct urb *urb)
+{
+ struct sk_buff *skb = urb->context;
+ struct net_device *netdev = skb->dev;
+ int status;
+
+ status = urb->status;
+
+ switch (status) {
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ case -EPROTO:
+ dev_dstats_tx_dropped(netdev);
+ break;
+ case 0:
+ dev_dstats_tx_add(netdev, skb->len);
+ netif_wake_queue(netdev);
+ consume_skb(skb);
+ return;
+ default:
+ netdev_dbg(netdev, "unexpected tx urb status: %d\n", status);
+ dev_dstats_tx_dropped(netdev);
+ }
+
+ kfree_skb(skb);
+}
+
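+/* Transmit one MCTP packet as a single bulk OUT transfer: prepend the
+ * 4-byte MCTP-over-USB header (DMTF ID, reserved byte, total length
+ * including the header) and submit the shared tx_urb. The netif queue
+ * is stopped across the submission and re-woken from the completion
+ * handler on success.
+ */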
+static netdev_tx_t mctp_usb_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct mctp_usb *mctp_usb = netdev_priv(dev);
+ struct mctp_usb_hdr *hdr;
+ unsigned int plen;
+ struct urb *urb;
+ int rc;
+
+ plen = skb->len;
+
+ if (plen + sizeof(*hdr) > MCTP_USB_XFER_SIZE)
+ goto err_drop;
+
+ rc = skb_cow_head(skb, sizeof(*hdr));
+ if (rc)
+ goto err_drop;
+
+ hdr = skb_push(skb, sizeof(*hdr));
+ if (!hdr)
+ goto err_drop;
+
+ hdr->id = cpu_to_be16(MCTP_USB_DMTF_ID);
+ hdr->rsvd = 0;
+ hdr->len = plen + sizeof(*hdr);
+
+ urb = mctp_usb->tx_urb;
+
+ usb_fill_bulk_urb(urb, mctp_usb->usbdev,
+ usb_sndbulkpipe(mctp_usb->usbdev, mctp_usb->ep_out),
+ skb->data, skb->len,
+ mctp_usb_out_complete, skb);
+
+ /* Stop the TX queue before submitting, to avoid racing with the
+ * URB completion handler that re-wakes it.
+ */
+ netif_stop_queue(dev);
+ rc = usb_submit_urb(urb, GFP_ATOMIC);
+ if (rc) {
+ netif_wake_queue(dev);
+ goto err_drop;
+ }
+
+ return NETDEV_TX_OK;
+
+err_drop:
+ dev_dstats_tx_dropped(dev);
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static void mctp_usb_in_complete(struct urb *urb);
+
+/* If we fail to queue an RX urb from atomic context (either because skb
+ * allocation or urb submission failed), retry from non-atomic context
+ * after this delay, specified in jiffies.
+ */
+static const unsigned long RX_RETRY_DELAY = HZ / 4;
+
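+/* Allocate a full-size receive skb and submit the single RX URB.
+ * Failed skb allocations and -ENOMEM submission failures are retried
+ * later via rx_retry_work; other submission errors are only reported
+ * through the return value.
+ */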
+static int mctp_usb_rx_queue(struct mctp_usb *mctp_usb, gfp_t gfp)
+{
+ struct sk_buff *skb;
+ int rc;
+
+ skb = __netdev_alloc_skb(mctp_usb->netdev, MCTP_USB_XFER_SIZE, gfp);
+ if (!skb) {
+ rc = -ENOMEM;
+ goto err_retry;
+ }
+
+ usb_fill_bulk_urb(mctp_usb->rx_urb, mctp_usb->usbdev,
+ usb_rcvbulkpipe(mctp_usb->usbdev, mctp_usb->ep_in),
+ skb->data, MCTP_USB_XFER_SIZE,
+ mctp_usb_in_complete, skb);
+
+ rc = usb_submit_urb(mctp_usb->rx_urb, gfp);
+ if (rc) {
+ netdev_dbg(mctp_usb->netdev, "rx urb submit failure: %d\n", rc);
+ kfree_skb(skb);
+ if (rc == -ENOMEM)
+ goto err_retry;
+ }
+
+ return rc;
+
+err_retry:
+ schedule_delayed_work(&mctp_usb->rx_retry_work, RX_RETRY_DELAY);
+ return rc;
+}
+
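+/* RX URB completion: one bulk IN transfer may carry several
+ * MCTP-over-USB packets back to back. Walk the buffer, validating the
+ * DMTF ID and length of each header, clone the remainder when more
+ * data follows, and deliver each MCTP packet via netif_rx(). A fresh
+ * RX URB is then requeued.
+ */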
+static void mctp_usb_in_complete(struct urb *urb)
+{
+ struct sk_buff *skb = urb->context;
+ struct net_device *netdev = skb->dev;
+ struct mctp_usb *mctp_usb = netdev_priv(netdev);
+ struct mctp_skb_cb *cb;
+ unsigned int len;
+ int status;
+
+ status = urb->status;
+
+ switch (status) {
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ case -EPROTO:
+ kfree_skb(skb);
+ return;
+ case 0:
+ break;
+ default:
+ netdev_dbg(netdev, "unexpected rx urb status: %d\n", status);
+ kfree_skb(skb);
+ return;
+ }
+
+ len = urb->actual_length;
+ __skb_put(skb, len);
+
+ while (skb) {
+ struct sk_buff *skb2 = NULL;
+ struct mctp_usb_hdr *hdr;
+ u8 pkt_len; /* length of MCTP packet, no USB header */
+
+ skb_reset_mac_header(skb);
+ hdr = skb_pull_data(skb, sizeof(*hdr));
+ if (!hdr)
+ break;
+
+ if (be16_to_cpu(hdr->id) != MCTP_USB_DMTF_ID) {
+ netdev_dbg(netdev, "rx: invalid id %04x\n",
+ be16_to_cpu(hdr->id));
+ break;
+ }
+
+ if (hdr->len <
+ sizeof(struct mctp_hdr) + sizeof(struct mctp_usb_hdr)) {
+ netdev_dbg(netdev, "rx: short packet (hdr) %d\n",
+ hdr->len);
+ break;
+ }
+
+ /* hdr->len is at least sizeof(struct mctp_usb_hdr) here, so this
+ * subtraction cannot underflow
+ */
+ pkt_len = hdr->len - sizeof(struct mctp_usb_hdr);
+ if (pkt_len > skb->len) {
+ netdev_dbg(netdev,
+ "rx: short packet (xfer) %d, actual %d\n",
+ hdr->len, skb->len);
+ break;
+ }
+
+ if (pkt_len < skb->len) {
+ /* more packets may follow - clone to a new
+ * skb to use on the next iteration
+ */
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (skb2) {
+ if (!skb_pull(skb2, pkt_len)) {
+ kfree_skb(skb2);
+ skb2 = NULL;
+ }
+ }
+ skb_trim(skb, pkt_len);
+ }
+
+ dev_dstats_rx_add(netdev, skb->len);
+
+ skb->protocol = htons(ETH_P_MCTP);
+ skb_reset_network_header(skb);
+ cb = __mctp_cb(skb);
+ cb->halen = 0;
+ netif_rx(skb);
+
+ skb = skb2;
+ }
+
+ if (skb)
+ kfree_skb(skb);
+
+ mctp_usb_rx_queue(mctp_usb, GFP_ATOMIC);
+}
+
+static void mctp_usb_rx_retry_work(struct work_struct *work)
+{
+ struct mctp_usb *mctp_usb = container_of(work, struct mctp_usb,
+ rx_retry_work.work);
+
+ if (READ_ONCE(mctp_usb->stopped))
+ return;
+
+ mctp_usb_rx_queue(mctp_usb, GFP_KERNEL);
+}
+
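+/* ndo_open: clear the stopped flag, start the TX queue and submit the
+ * first RX URB; the RX completion path keeps the receive side running
+ * from then on.
+ */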
+static int mctp_usb_open(struct net_device *dev)
+{
+ struct mctp_usb *mctp_usb = netdev_priv(dev);
+
+ WRITE_ONCE(mctp_usb->stopped, false);
+
+ netif_start_queue(dev);
+
+ return mctp_usb_rx_queue(mctp_usb, GFP_KERNEL);
+}
+
+static int mctp_usb_stop(struct net_device *dev)
+{
+ struct mctp_usb *mctp_usb = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+
+ /* prevent RX submission retry */
+ WRITE_ONCE(mctp_usb->stopped, true);
+
+ usb_kill_urb(mctp_usb->rx_urb);
+ usb_kill_urb(mctp_usb->tx_urb);
+
+ cancel_delayed_work_sync(&mctp_usb->rx_retry_work);
+
+ return 0;
+}
+
+static const struct net_device_ops mctp_usb_netdev_ops = {
+ .ndo_start_xmit = mctp_usb_start_xmit,
+ .ndo_open = mctp_usb_open,
+ .ndo_stop = mctp_usb_stop,
+};
+
+static void mctp_usb_netdev_setup(struct net_device *dev)
+{
+ dev->type = ARPHRD_MCTP;
+
+ dev->mtu = MCTP_USB_MTU_MIN;
+ dev->min_mtu = MCTP_USB_MTU_MIN;
+ dev->max_mtu = MCTP_USB_MTU_MAX;
+
+ dev->hard_header_len = sizeof(struct mctp_usb_hdr);
+ dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
+ dev->flags = IFF_NOARP;
+ dev->netdev_ops = &mctp_usb_netdev_ops;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
+}
+
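+/* Bind to an MCTP-over-USB interface: locate the bulk IN/OUT endpoint
+ * pair, allocate one URB per direction, and register an "mctpusbN"
+ * MCTP netdev representing the connection to the remote endpoint.
+ */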
+static int mctp_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_endpoint_descriptor *ep_in, *ep_out;
+ struct usb_host_interface *iface_desc;
+ struct net_device *netdev;
+ struct mctp_usb *dev;
+ int rc;
+
+ /* only one alternate */
+ iface_desc = intf->cur_altsetting;
+
+ rc = usb_find_common_endpoints(iface_desc, &ep_in, &ep_out, NULL, NULL);
+ if (rc) {
+ dev_err(&intf->dev, "invalid endpoints on device?\n");
+ return rc;
+ }
+
+ netdev = alloc_netdev(sizeof(*dev), "mctpusb%d", NET_NAME_ENUM,
+ mctp_usb_netdev_setup);
+ if (!netdev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(netdev, &intf->dev);
+ dev = netdev_priv(netdev);
+ dev->netdev = netdev;
+ dev->usbdev = usb_get_dev(interface_to_usbdev(intf));
+ dev->intf = intf;
+ usb_set_intfdata(intf, dev);
+
+ dev->ep_in = ep_in->bEndpointAddress;
+ dev->ep_out = ep_out->bEndpointAddress;
+
+ dev->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ dev->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!dev->tx_urb || !dev->rx_urb) {
+ rc = -ENOMEM;
+ goto err_free_urbs;
+ }
+
+ INIT_DELAYED_WORK(&dev->rx_retry_work, mctp_usb_rx_retry_work);
+
+ rc = mctp_register_netdev(netdev, NULL, MCTP_PHYS_BINDING_USB);
+ if (rc)
+ goto err_free_urbs;
+
+ return 0;
+
+err_free_urbs:
+ usb_free_urb(dev->tx_urb);
+ usb_free_urb(dev->rx_urb);
+ free_netdev(netdev);
+ return rc;
+}
+
+static void mctp_usb_disconnect(struct usb_interface *intf)
+{
+ struct mctp_usb *dev = usb_get_intfdata(intf);
+
+ mctp_unregister_netdev(dev->netdev);
+ usb_free_urb(dev->tx_urb);
+ usb_free_urb(dev->rx_urb);
+ usb_put_dev(dev->usbdev);
+ free_netdev(dev->netdev);
+}
+
+static const struct usb_device_id mctp_usb_devices[] = {
+ { USB_INTERFACE_INFO(USB_CLASS_MCTP, 0x0, 0x1) },
+ { 0 },
+};
+
+MODULE_DEVICE_TABLE(usb, mctp_usb_devices);
+
+static struct usb_driver mctp_usb_driver = {
+ .name = "mctp-usb",
+ .id_table = mctp_usb_devices,
+ .probe = mctp_usb_probe,
+ .disconnect = mctp_usb_disconnect,
+};
+
+module_usb_driver(mctp_usb_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jeremy Kerr <jk@codeconstruct.com.au>");
+MODULE_DESCRIPTION("MCTP USB transport");