Diffstat (limited to 'drivers/i3c')
-rw-r--r--drivers/i3c/device.c48
-rw-r--r--drivers/i3c/internals.h54
-rw-r--r--drivers/i3c/master.c496
-rw-r--r--drivers/i3c/master/Kconfig50
-rw-r--r--drivers/i3c/master/Makefile3
-rw-r--r--drivers/i3c/master/adi-i3c-master.c1019
-rw-r--r--drivers/i3c/master/ast2600-i3c-master.c187
-rw-r--r--drivers/i3c/master/dw-i3c-master.c848
-rw-r--r--drivers/i3c/master/dw-i3c-master.h97
-rw-r--r--drivers/i3c/master/i3c-master-cdns.c122
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/Makefile4
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/cmd_v1.c31
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/cmd_v2.c7
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/core.c154
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/dat_v1.c40
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/dma.c180
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/ext_caps.c11
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/hci.h17
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/hci_quirks.c44
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c296
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/pio.c77
-rw-r--r--drivers/i3c/master/renesas-i3c.c1404
-rw-r--r--drivers/i3c/master/svc-i3c-master.c916
23 files changed, 5364 insertions, 741 deletions
diff --git a/drivers/i3c/device.c b/drivers/i3c/device.c
index 9762630b917e..8a156f5ad692 100644
--- a/drivers/i3c/device.c
+++ b/drivers/i3c/device.c
@@ -15,22 +15,26 @@
#include "internals.h"
/**
- * i3c_device_do_priv_xfers() - do I3C SDR private transfers directed to a
- * specific device
+ * i3c_device_do_xfers() - do I3C transfers directed to a specific device
*
* @dev: device with which the transfers should be done
* @xfers: array of transfers
* @nxfers: number of transfers
+ * @mode: transfer mode
*
* Initiate one or several private SDR transfers with @dev.
*
* This function can sleep and thus cannot be called in atomic context.
*
- * Return: 0 in case of success, a negative error core otherwise.
+ * Return:
+ * * 0 in case of success, a negative error code otherwise.
+ * * -EAGAIN: controller lost address arbitration. A target (IBI, HJ or
+ * controller role request) won the bus. The client driver needs to resend
+ * the 'xfers' some time later. See I3C spec ver 1.1.1 09-Jun-2021,
+ * Section 5.1.2.2.3.
*/
-int i3c_device_do_priv_xfers(struct i3c_device *dev,
- struct i3c_priv_xfer *xfers,
- int nxfers)
+int i3c_device_do_xfers(struct i3c_device *dev, struct i3c_xfer *xfers,
+ int nxfers, enum i3c_xfer_mode mode)
{
int ret, i;
@@ -43,12 +47,12 @@ int i3c_device_do_priv_xfers(struct i3c_device *dev,
}
i3c_bus_normaluse_lock(dev->bus);
- ret = i3c_dev_do_priv_xfers_locked(dev->desc, xfers, nxfers);
+ ret = i3c_dev_do_xfers_locked(dev->desc, xfers, nxfers, mode);
i3c_bus_normaluse_unlock(dev->bus);
return ret;
}
-EXPORT_SYMBOL_GPL(i3c_device_do_priv_xfers);
+EXPORT_SYMBOL_GPL(i3c_device_do_xfers);
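As a usage sketch (not part of this patch): a client driver issuing a register read through the renamed API could look like the snippet below. The rnw/len/data fields of struct i3c_xfer are assumed to carry over from the old struct i3c_priv_xfer, and the -EAGAIN handling follows the kernel-doc above.

static int example_read_reg(struct i3c_device *dev, u8 reg, u8 *val)
{
	/* Assumed layout: struct i3c_xfer keeps the rnw/len/data fields. */
	struct i3c_xfer xfers[2] = {
		{ .rnw = false, .len = 1, .data.out = &reg },
		{ .rnw = true,  .len = 1, .data.in  = val },
	};
	int ret;

	ret = i3c_device_do_xfers(dev, xfers, ARRAY_SIZE(xfers), I3C_SDR);
	if (ret == -EAGAIN) {
		/* Lost arbitration (IBI/HJ/CR won the bus): resend later. */
		ret = i3c_device_do_xfers(dev, xfers, ARRAY_SIZE(xfers), I3C_SDR);
	}

	return ret;
}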
/**
* i3c_device_do_setdasa() - do I3C dynamic address assignement with
@@ -78,7 +82,7 @@ EXPORT_SYMBOL_GPL(i3c_device_do_setdasa);
*
* Retrieve I3C dev info.
*/
-void i3c_device_get_info(struct i3c_device *dev,
+void i3c_device_get_info(const struct i3c_device *dev,
struct i3c_device_info *info)
{
if (!info)
@@ -209,18 +213,6 @@ struct device *i3cdev_to_dev(struct i3c_device *i3cdev)
EXPORT_SYMBOL_GPL(i3cdev_to_dev);
/**
- * dev_to_i3cdev() - Returns the I3C device containing @dev
- * @dev: device object
- *
- * Return: a pointer to an I3C device object.
- */
-struct i3c_device *dev_to_i3cdev(struct device *dev)
-{
- return container_of(dev, struct i3c_device, dev);
-}
-EXPORT_SYMBOL_GPL(dev_to_i3cdev);
-
-/**
* i3c_device_match_id() - Returns the i3c_device_id entry matching @i3cdev
* @i3cdev: I3C device
* @id_table: I3C device match table
@@ -268,6 +260,20 @@ i3c_device_match_id(struct i3c_device *i3cdev,
EXPORT_SYMBOL_GPL(i3c_device_match_id);
/**
+ * i3c_device_get_supported_xfer_mode() - Returns the transfer modes supported
+ * by the connected master controller.
+ * @dev: I3C device
+ *
+ * Return: a bit mask of the supported transfer modes; bit positions are
+ * defined by enum i3c_hdr_mode
+ */
+u32 i3c_device_get_supported_xfer_mode(struct i3c_device *dev)
+{
+ return i3c_dev_get_master(dev->desc)->this->info.hdr_cap | BIT(I3C_SDR);
+}
+EXPORT_SYMBOL_GPL(i3c_device_get_supported_xfer_mode);
+
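A hedged sketch of how a client might use the new helper; it only assumes that the mode values passed to i3c_device_do_xfers() follow the bit positions returned here, as the kernel-doc above states.

static enum i3c_xfer_mode example_pick_mode(struct i3c_device *dev,
					    enum i3c_xfer_mode wanted)
{
	/* Fall back to SDR when the controller does not advertise 'wanted'. */
	if (i3c_device_get_supported_xfer_mode(dev) & BIT(wanted))
		return wanted;

	return I3C_SDR;
}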
+/**
* i3c_driver_register_with_owner() - register an I3C device driver
*
* @drv: driver to register
diff --git a/drivers/i3c/internals.h b/drivers/i3c/internals.h
index 908a807badaf..f609e5098137 100644
--- a/drivers/i3c/internals.h
+++ b/drivers/i3c/internals.h
@@ -9,19 +9,63 @@
#define I3C_INTERNALS_H
#include <linux/i3c/master.h>
-
-extern struct bus_type i3c_bus_type;
+#include <linux/io.h>
void i3c_bus_normaluse_lock(struct i3c_bus *bus);
void i3c_bus_normaluse_unlock(struct i3c_bus *bus);
int i3c_dev_setdasa_locked(struct i3c_dev_desc *dev);
-int i3c_dev_do_priv_xfers_locked(struct i3c_dev_desc *dev,
- struct i3c_priv_xfer *xfers,
- int nxfers);
+int i3c_dev_do_xfers_locked(struct i3c_dev_desc *dev,
+ struct i3c_xfer *xfers,
+ int nxfers, enum i3c_xfer_mode mode);
int i3c_dev_disable_ibi_locked(struct i3c_dev_desc *dev);
int i3c_dev_enable_ibi_locked(struct i3c_dev_desc *dev);
int i3c_dev_request_ibi_locked(struct i3c_dev_desc *dev,
const struct i3c_ibi_setup *req);
void i3c_dev_free_ibi_locked(struct i3c_dev_desc *dev);
+
+/**
+ * i3c_writel_fifo - Write data buffer to 32bit FIFO
+ * @addr: FIFO Address to write to
+ * @buf: Pointer to the data bytes to write
+ * @nbytes: Number of bytes to write
+ */
+static inline void i3c_writel_fifo(void __iomem *addr, const void *buf,
+ int nbytes)
+{
+ writesl(addr, buf, nbytes / 4);
+ if (nbytes & 3) {
+ u32 tmp = 0;
+
+ memcpy(&tmp, buf + (nbytes & ~3), nbytes & 3);
+ /*
+ * writesl() instead of writel() to keep FIFO
+ * byteorder on big-endian targets
+ */
+ writesl(addr, &tmp, 1);
+ }
+}
+
+/**
+ * i3c_readl_fifo - Read data buffer from 32bit FIFO
+ * @addr: FIFO Address to read from
+ * @buf: Pointer to the buffer to store read bytes
+ * @nbytes: Number of bytes to read
+ */
+static inline void i3c_readl_fifo(const void __iomem *addr, void *buf,
+ int nbytes)
+{
+ readsl(addr, buf, nbytes / 4);
+ if (nbytes & 3) {
+ u32 tmp;
+
+ /*
+ * readsl() instead of readl() to keep FIFO
+ * byteorder on big-endian targets
+ */
+ readsl(addr, &tmp, 1);
+ memcpy(buf + (nbytes & ~3), &tmp, nbytes & 3);
+ }
+}
+
#endif /* I3C_INTERNAL_H */
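For illustration, a controller driver would use these shared helpers roughly as below; the function name and the 0x100/0x104 FIFO offsets are made up for the example. The helpers move full 32-bit words with writesl()/readsl() and pack any 1-3 remaining tail bytes into a single word, which keeps the FIFO byte order stable on big-endian targets.

static void example_fifo_xfer(void __iomem *regs, const u8 *out, u8 *in,
			      int nbytes)
{
	/* e.g. nbytes == 11: two full words plus one packed 3-byte tail word */
	i3c_writel_fifo(regs + 0x100, out, nbytes);
	i3c_readl_fifo(regs + 0x104, in, nbytes);
}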
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index d7e6f6c99aea..f88f7e19203a 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -8,11 +8,13 @@
#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/of.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
@@ -21,6 +23,8 @@
static DEFINE_IDR(i3c_bus_idr);
static DEFINE_MUTEX(i3c_core_lock);
+static int __i3c_first_dynamic_bus_num;
+static BLOCKING_NOTIFIER_HEAD(i3c_bus_notifier);
/**
* i3c_bus_maintenance_lock - Lock the bus for a maintenance operation
@@ -138,7 +142,7 @@ static ssize_t bcr_show(struct device *dev,
i3c_bus_normaluse_lock(bus);
desc = dev_to_i3cdesc(dev);
- ret = sprintf(buf, "%x\n", desc->info.bcr);
+ ret = sprintf(buf, "0x%02x\n", desc->info.bcr);
i3c_bus_normaluse_unlock(bus);
return ret;
@@ -155,7 +159,7 @@ static ssize_t dcr_show(struct device *dev,
i3c_bus_normaluse_lock(bus);
desc = dev_to_i3cdesc(dev);
- ret = sprintf(buf, "%x\n", desc->info.dcr);
+ ret = sprintf(buf, "0x%02x\n", desc->info.dcr);
i3c_bus_normaluse_unlock(bus);
return ret;
@@ -273,13 +277,14 @@ static struct attribute *i3c_device_attrs[] = {
};
ATTRIBUTE_GROUPS(i3c_device);
-static int i3c_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int i3c_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct i3c_device *i3cdev = dev_to_i3cdev(dev);
+ const struct i3c_device *i3cdev = dev_to_i3cdev(dev);
struct i3c_device_info devinfo;
u16 manuf, part, ext;
- i3c_device_get_info(i3cdev, &devinfo);
+ if (i3cdev->desc)
+ devinfo = i3cdev->desc->info;
manuf = I3C_PID_MANUF_ID(devinfo.pid);
part = I3C_PID_PART_ID(devinfo.pid);
ext = I3C_PID_EXTRA_INFO(devinfo.pid);
@@ -298,10 +303,10 @@ static const struct device_type i3c_device_type = {
.uevent = i3c_device_uevent,
};
-static int i3c_device_match(struct device *dev, struct device_driver *drv)
+static int i3c_device_match(struct device *dev, const struct device_driver *drv)
{
struct i3c_device *i3cdev;
- struct i3c_driver *i3cdrv;
+ const struct i3c_driver *i3cdrv;
if (dev->type != &i3c_device_type)
return 0;
@@ -329,22 +334,21 @@ static void i3c_device_remove(struct device *dev)
if (driver->remove)
driver->remove(i3cdev);
-
- i3c_device_free_ibi(i3cdev);
}
-struct bus_type i3c_bus_type = {
+const struct bus_type i3c_bus_type = {
.name = "i3c",
.match = i3c_device_match,
.probe = i3c_device_probe,
.remove = i3c_device_remove,
};
+EXPORT_SYMBOL_GPL(i3c_bus_type);
static enum i3c_addr_slot_status
-i3c_bus_get_addr_slot_status(struct i3c_bus *bus, u16 addr)
+i3c_bus_get_addr_slot_status_mask(struct i3c_bus *bus, u16 addr, u32 mask)
{
unsigned long status;
- int bitpos = addr * 2;
+ int bitpos = addr * I3C_ADDR_SLOT_STATUS_BITS;
if (addr > I2C_MAX_ADDR)
return I3C_ADDR_SLOT_RSVD;
@@ -352,22 +356,33 @@ i3c_bus_get_addr_slot_status(struct i3c_bus *bus, u16 addr)
status = bus->addrslots[bitpos / BITS_PER_LONG];
status >>= bitpos % BITS_PER_LONG;
- return status & I3C_ADDR_SLOT_STATUS_MASK;
+ return status & mask;
}
-static void i3c_bus_set_addr_slot_status(struct i3c_bus *bus, u16 addr,
- enum i3c_addr_slot_status status)
+static enum i3c_addr_slot_status
+i3c_bus_get_addr_slot_status(struct i3c_bus *bus, u16 addr)
+{
+ return i3c_bus_get_addr_slot_status_mask(bus, addr, I3C_ADDR_SLOT_STATUS_MASK);
+}
+
+static void i3c_bus_set_addr_slot_status_mask(struct i3c_bus *bus, u16 addr,
+ enum i3c_addr_slot_status status, u32 mask)
{
- int bitpos = addr * 2;
+ int bitpos = addr * I3C_ADDR_SLOT_STATUS_BITS;
unsigned long *ptr;
if (addr > I2C_MAX_ADDR)
return;
ptr = bus->addrslots + (bitpos / BITS_PER_LONG);
- *ptr &= ~((unsigned long)I3C_ADDR_SLOT_STATUS_MASK <<
- (bitpos % BITS_PER_LONG));
- *ptr |= (unsigned long)status << (bitpos % BITS_PER_LONG);
+ *ptr &= ~((unsigned long)mask << (bitpos % BITS_PER_LONG));
+ *ptr |= ((unsigned long)status & mask) << (bitpos % BITS_PER_LONG);
+}
+
+static void i3c_bus_set_addr_slot_status(struct i3c_bus *bus, u16 addr,
+ enum i3c_addr_slot_status status)
+{
+ i3c_bus_set_addr_slot_status_mask(bus, addr, status, I3C_ADDR_SLOT_STATUS_MASK);
}
static bool i3c_bus_dev_addr_is_avail(struct i3c_bus *bus, u8 addr)
@@ -379,13 +394,44 @@ static bool i3c_bus_dev_addr_is_avail(struct i3c_bus *bus, u8 addr)
return status == I3C_ADDR_SLOT_FREE;
}
+/*
+ * ┌────┬─────────────┬───┬─────────┬───┐
+ * │S/Sr│ 7'h7E RnW=0 │ACK│ ENTDAA │ T ├────┐
+ * └────┴─────────────┴───┴─────────┴───┘ │
+ * ┌─────────────────────────────────────────┘
+ * │ ┌──┬─────────────┬───┬─────────────────┬────────────────┬───┬─────────┐
+ * └─►│Sr│7'h7E RnW=1 │ACK│48bit UID BCR DCR│Assign 7bit Addr│PAR│ ACK/NACK│
+ * └──┴─────────────┴───┴─────────────────┴────────────────┴───┴─────────┘
+ * Some master controllers (such as HCI) need to prepare the entire above transaction before
+ * sending it out to the I3C bus. This means that a 7-bit dynamic address needs to be allocated
+ * before knowing the target device's UID information.
+ *
+ * However, some I3C targets may request specific addresses (known as "init_dyn_addr"), typically
+ * specified by the DT "assigned-address" property. Lower addresses have higher IBI priority.
+ * When possible, i3c_bus_get_free_addr() preferably returns a free address that is not in the
+ * list of desired addresses ("init_dyn_addr"). This allows a device with an "init_dyn_addr" to
+ * switch to that address when it hot-joins the I3C bus. Otherwise, if the "init_dyn_addr" is
+ * already in use by another I3C device, the target device will not be able to switch to its
+ * desired address.
+ *
+ * If the previous step fails, fall back to returning one of the remaining unassigned addresses,
+ * regardless of its state in the desired list.
+ */
static int i3c_bus_get_free_addr(struct i3c_bus *bus, u8 start_addr)
{
enum i3c_addr_slot_status status;
u8 addr;
for (addr = start_addr; addr < I3C_MAX_ADDR; addr++) {
- status = i3c_bus_get_addr_slot_status(bus, addr);
+ status = i3c_bus_get_addr_slot_status_mask(bus, addr,
+ I3C_ADDR_SLOT_EXT_STATUS_MASK);
+ if (status == I3C_ADDR_SLOT_FREE)
+ return addr;
+ }
+
+ for (addr = start_addr; addr < I3C_MAX_ADDR; addr++) {
+ status = i3c_bus_get_addr_slot_status_mask(bus, addr,
+ I3C_ADDR_SLOT_STATUS_MASK);
if (status == I3C_ADDR_SLOT_FREE)
return addr;
}
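To make the two-pass search concrete, here is an illustrative sequence; these helpers are static to master.c, so this is only a walk-through of the intended behaviour, not client code.

static int example_reserve_and_pick(struct i3c_bus *bus)
{
	/*
	 * 0x09 is listed as some device's init_dyn_addr: mark it as desired
	 * but not occupied.  The first pass of i3c_bus_get_free_addr() skips
	 * desired slots and hands out 0x0a; only when every non-desired slot
	 * is taken does the second pass return 0x09 itself.
	 */
	i3c_bus_set_addr_slot_status_mask(bus, 0x09, I3C_ADDR_SLOT_EXT_DESIRED,
					  I3C_ADDR_SLOT_EXT_STATUS_MASK);
	return i3c_bus_get_free_addr(bus, 0x09);	/* returns 0x0a */
}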
@@ -419,9 +465,9 @@ static void i3c_bus_cleanup(struct i3c_bus *i3cbus)
mutex_unlock(&i3c_core_lock);
}
-static int i3c_bus_init(struct i3c_bus *i3cbus)
+static int i3c_bus_init(struct i3c_bus *i3cbus, struct device_node *np)
{
- int ret;
+ int ret, start, end, id = -1;
init_rwsem(&i3cbus->lock);
INIT_LIST_HEAD(&i3cbus->devs.i2c);
@@ -429,8 +475,19 @@ static int i3c_bus_init(struct i3c_bus *i3cbus)
i3c_bus_init_addrslots(i3cbus);
i3cbus->mode = I3C_BUS_MODE_PURE;
+ if (np)
+ id = of_alias_get_id(np, "i3c");
+
mutex_lock(&i3c_core_lock);
- ret = idr_alloc(&i3c_bus_idr, i3cbus, 0, 0, GFP_KERNEL);
+ if (id >= 0) {
+ start = id;
+ end = start + 1;
+ } else {
+ start = __i3c_first_dynamic_bus_num;
+ end = 0;
+ }
+
+ ret = idr_alloc(&i3c_bus_idr, i3cbus, start, end, GFP_KERNEL);
mutex_unlock(&i3c_core_lock);
if (ret < 0)
@@ -441,6 +498,36 @@ static int i3c_bus_init(struct i3c_bus *i3cbus)
return 0;
}
+void i3c_for_each_bus_locked(int (*fn)(struct i3c_bus *bus, void *data),
+ void *data)
+{
+ struct i3c_bus *bus;
+ int id;
+
+ mutex_lock(&i3c_core_lock);
+ idr_for_each_entry(&i3c_bus_idr, bus, id)
+ fn(bus, data);
+ mutex_unlock(&i3c_core_lock);
+}
+EXPORT_SYMBOL_GPL(i3c_for_each_bus_locked);
+
+int i3c_register_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&i3c_bus_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(i3c_register_notifier);
+
+int i3c_unregister_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&i3c_bus_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(i3c_unregister_notifier);
+
+static void i3c_bus_notify(struct i3c_bus *bus, unsigned int action)
+{
+ blocking_notifier_call_chain(&i3c_bus_notifier, action, bus);
+}
+
static const char * const i3c_bus_mode_strings[] = {
[I3C_BUS_MODE_PURE] = "pure",
[I3C_BUS_MODE_MIXED_FAST] = "mixed-fast",
@@ -514,6 +601,88 @@ static ssize_t i2c_scl_frequency_show(struct device *dev,
}
static DEVICE_ATTR_RO(i2c_scl_frequency);
+static int i3c_set_hotjoin(struct i3c_master_controller *master, bool enable)
+{
+ int ret;
+
+ if (!master || !master->ops)
+ return -EINVAL;
+
+ if (!master->ops->enable_hotjoin || !master->ops->disable_hotjoin)
+ return -EINVAL;
+
+ i3c_bus_normaluse_lock(&master->bus);
+
+ if (enable)
+ ret = master->ops->enable_hotjoin(master);
+ else
+ ret = master->ops->disable_hotjoin(master);
+
+ master->hotjoin = enable;
+
+ i3c_bus_normaluse_unlock(&master->bus);
+
+ return ret;
+}
+
+static ssize_t hotjoin_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i3c_bus *i3cbus = dev_to_i3cbus(dev);
+ int ret;
+ bool res;
+
+ if (!i3cbus->cur_master)
+ return -EINVAL;
+
+ if (kstrtobool(buf, &res))
+ return -EINVAL;
+
+ ret = i3c_set_hotjoin(i3cbus->cur_master->common.master, res);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+/*
+ * i3c_master_enable_hotjoin - Enable hotjoin
+ * @master: I3C master object
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_master_enable_hotjoin(struct i3c_master_controller *master)
+{
+ return i3c_set_hotjoin(master, true);
+}
+EXPORT_SYMBOL_GPL(i3c_master_enable_hotjoin);
+
+/*
+ * i3c_master_disable_hotjoin - Disable hotjoin
+ * @master: I3C master object
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_master_disable_hotjoin(struct i3c_master_controller *master)
+{
+ return i3c_set_hotjoin(master, false);
+}
+EXPORT_SYMBOL_GPL(i3c_master_disable_hotjoin);
+
+static ssize_t hotjoin_show(struct device *dev, struct device_attribute *da, char *buf)
+{
+ struct i3c_bus *i3cbus = dev_to_i3cbus(dev);
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(i3cbus);
+ ret = sysfs_emit(buf, "%d\n", i3cbus->cur_master->common.master->hotjoin);
+ i3c_bus_normaluse_unlock(i3cbus);
+
+ return ret;
+}
+
+static DEVICE_ATTR_RW(hotjoin);
+
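Besides the sysfs attribute, controller drivers that implement the enable_hotjoin/disable_hotjoin hooks can toggle the feature from code. A minimal sketch follows; placing it in a resume path is an assumption made for illustration.

static int example_resume(struct i3c_master_controller *master)
{
	int ret;

	/* Re-arm Hot-Join handling after the controller comes back up. */
	ret = i3c_master_enable_hotjoin(master);
	if (ret)
		dev_warn(&master->dev,
			 "failed to re-enable Hot-Join: %d\n", ret);

	return ret;
}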
static struct attribute *i3c_masterdev_attrs[] = {
&dev_attr_mode.attr,
&dev_attr_current_master.attr,
@@ -524,6 +693,7 @@ static struct attribute *i3c_masterdev_attrs[] = {
&dev_attr_pid.attr,
&dev_attr_dynamic_address.attr,
&dev_attr_hdrcap.attr,
+ &dev_attr_hotjoin.attr,
NULL,
};
ATTRIBUTE_GROUPS(i3c_masterdev);
@@ -556,12 +726,12 @@ static int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode,
switch (i3cbus->mode) {
case I3C_BUS_MODE_PURE:
if (!i3cbus->scl_rate.i3c)
- i3cbus->scl_rate.i3c = I3C_BUS_TYP_I3C_SCL_RATE;
+ i3cbus->scl_rate.i3c = I3C_BUS_I3C_SCL_TYP_RATE;
break;
case I3C_BUS_MODE_MIXED_FAST:
case I3C_BUS_MODE_MIXED_LIMITED:
if (!i3cbus->scl_rate.i3c)
- i3cbus->scl_rate.i3c = I3C_BUS_TYP_I3C_SCL_RATE;
+ i3cbus->scl_rate.i3c = I3C_BUS_I3C_SCL_TYP_RATE;
if (!i3cbus->scl_rate.i2c)
i3cbus->scl_rate.i2c = max_i2c_scl_rate;
break;
@@ -583,8 +753,8 @@ static int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode,
* I3C/I2C frequency may have been overridden, check that user-provided
* values are not exceeding max possible frequency.
*/
- if (i3cbus->scl_rate.i3c > I3C_BUS_MAX_I3C_SCL_RATE ||
- i3cbus->scl_rate.i2c > I3C_BUS_I2C_FM_PLUS_SCL_RATE)
+ if (i3cbus->scl_rate.i3c > I3C_BUS_I3C_SCL_MAX_RATE ||
+ i3cbus->scl_rate.i2c > I3C_BUS_I2C_FM_PLUS_SCL_MAX_RATE)
return -EINVAL;
return 0;
@@ -666,14 +836,14 @@ static int i3c_master_send_ccc_cmd_locked(struct i3c_master_controller *master,
return -EINVAL;
if (!master->ops->send_ccc_cmd)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if ((cmd->id & I3C_CCC_DIRECT) && (!cmd->dests || !cmd->ndests))
return -EINVAL;
if (master->ops->supports_ccc_cmd &&
!master->ops->supports_ccc_cmd(master, cmd))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
ret = master->ops->send_ccc_cmd(master, cmd);
if (ret) {
@@ -1087,8 +1257,16 @@ static int i3c_master_getmxds_locked(struct i3c_master_controller *master,
i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETMXDS, &dest, 1);
ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
- if (ret)
- goto out;
+ if (ret) {
+ /*
+ * Retry when the device does not support max read turnaround
+ * while expecting shorter length from this CCC command.
+ */
+ dest.payload.len -= 3;
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+ }
if (dest.payload.len != 2 && dest.payload.len != 5) {
ret = -EIO;
@@ -1260,7 +1438,7 @@ static int i3c_master_retrieve_dev_info(struct i3c_dev_desc *dev)
if (dev->info.bcr & I3C_BCR_HDR_CAP) {
ret = i3c_master_gethdrcap_locked(master, &dev->info);
- if (ret)
+ if (ret && ret != -EOPNOTSUPP)
return ret;
}
@@ -1281,7 +1459,7 @@ static void i3c_master_put_i3c_addrs(struct i3c_dev_desc *dev)
I3C_ADDR_SLOT_FREE);
if (dev->boardinfo && dev->boardinfo->init_dyn_addr)
- i3c_bus_set_addr_slot_status(&master->bus, dev->info.dyn_addr,
+ i3c_bus_set_addr_slot_status(&master->bus, dev->boardinfo->init_dyn_addr,
I3C_ADDR_SLOT_FREE);
}
@@ -1296,7 +1474,11 @@ static int i3c_master_get_i3c_addrs(struct i3c_dev_desc *dev)
if (dev->info.static_addr) {
status = i3c_bus_get_addr_slot_status(&master->bus,
dev->info.static_addr);
- if (status != I3C_ADDR_SLOT_FREE)
+ /* Since static address and assigned dynamic address can be
+ * equal, allow this case to pass.
+ */
+ if (status != I3C_ADDR_SLOT_FREE &&
+ dev->info.static_addr != dev->boardinfo->init_dyn_addr)
return -EBUSY;
i3c_bus_set_addr_slot_status(&master->bus,
@@ -1366,16 +1548,9 @@ static int i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
u8 old_dyn_addr)
{
struct i3c_master_controller *master = i3c_dev_get_master(dev);
- enum i3c_addr_slot_status status;
int ret;
- if (dev->info.dyn_addr != old_dyn_addr &&
- (!dev->boardinfo ||
- dev->info.dyn_addr != dev->boardinfo->init_dyn_addr)) {
- status = i3c_bus_get_addr_slot_status(&master->bus,
- dev->info.dyn_addr);
- if (status != I3C_ADDR_SLOT_FREE)
- return -EBUSY;
+ if (dev->info.dyn_addr != old_dyn_addr) {
i3c_bus_set_addr_slot_status(&master->bus,
dev->info.dyn_addr,
I3C_ADDR_SLOT_I3C_DEV);
@@ -1438,6 +1613,7 @@ static int i3c_master_early_i3c_dev_add(struct i3c_master_controller *master,
{
struct i3c_device_info info = {
.static_addr = boardinfo->static_addr,
+ .pid = boardinfo->pid,
};
struct i3c_dev_desc *i3cdev;
int ret;
@@ -1508,9 +1684,11 @@ i3c_master_register_new_i3c_devs(struct i3c_master_controller *master)
desc->dev->dev.of_node = desc->boardinfo->of_node;
ret = device_register(&desc->dev->dev);
- if (ret)
+ if (ret) {
dev_err(&master->dev,
"Failed to add I3C device (err = %d)\n", ret);
+ put_device(&desc->dev->dev);
+ }
}
}
@@ -1549,6 +1727,79 @@ int i3c_master_do_daa(struct i3c_master_controller *master)
EXPORT_SYMBOL_GPL(i3c_master_do_daa);
/**
+ * i3c_master_dma_map_single() - Map buffer for single DMA transfer
+ * @dev: device object of a device doing DMA
+ * @buf: destination/source buffer for DMA
+ * @len: length of transfer
+ * @force_bounce: if true, force the use of a bounce buffer; if false, the
+ * function automatically checks whether a bounce buffer is required
+ * @dir: DMA direction
+ *
+ * Map buffer for a DMA transfer and allocate a bounce buffer if required.
+ *
+ * Return: I3C DMA transfer descriptor or NULL in case of error.
+ */
+struct i3c_dma *i3c_master_dma_map_single(struct device *dev, void *buf,
+ size_t len, bool force_bounce, enum dma_data_direction dir)
+{
+ struct i3c_dma *dma_xfer __free(kfree) = NULL;
+ void *bounce __free(kfree) = NULL;
+ void *dma_buf = buf;
+
+ dma_xfer = kzalloc(sizeof(*dma_xfer), GFP_KERNEL);
+ if (!dma_xfer)
+ return NULL;
+
+ dma_xfer->dev = dev;
+ dma_xfer->buf = buf;
+ dma_xfer->dir = dir;
+ dma_xfer->len = len;
+ dma_xfer->map_len = len;
+
+ if (is_vmalloc_addr(buf))
+ force_bounce = true;
+
+ if (force_bounce) {
+ dma_xfer->map_len = ALIGN(len, cache_line_size());
+ if (dir == DMA_FROM_DEVICE)
+ bounce = kzalloc(dma_xfer->map_len, GFP_KERNEL);
+ else
+ bounce = kmemdup(buf, dma_xfer->map_len, GFP_KERNEL);
+ if (!bounce)
+ return NULL;
+ dma_buf = bounce;
+ }
+
+ dma_xfer->addr = dma_map_single(dev, dma_buf, dma_xfer->map_len, dir);
+ if (dma_mapping_error(dev, dma_xfer->addr))
+ return NULL;
+
+ dma_xfer->bounce_buf = no_free_ptr(bounce);
+ return no_free_ptr(dma_xfer);
+}
+EXPORT_SYMBOL_GPL(i3c_master_dma_map_single);
+
+/**
+ * i3c_master_dma_unmap_single() - Unmap buffer after DMA
+ * @dma_xfer: DMA transfer and mapping descriptor
+ *
+ * Unmap buffer and cleanup DMA transfer descriptor.
+ */
+void i3c_master_dma_unmap_single(struct i3c_dma *dma_xfer)
+{
+ dma_unmap_single(dma_xfer->dev, dma_xfer->addr,
+ dma_xfer->map_len, dma_xfer->dir);
+ if (dma_xfer->bounce_buf) {
+ if (dma_xfer->dir == DMA_FROM_DEVICE)
+ memcpy(dma_xfer->buf, dma_xfer->bounce_buf,
+ dma_xfer->len);
+ kfree(dma_xfer->bounce_buf);
+ }
+ kfree(dma_xfer);
+}
+EXPORT_SYMBOL_GPL(i3c_master_dma_unmap_single);
+
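A hedged sketch of how a controller driver is expected to use the pair above in its TX path. The EXAMPLE_DMA_* register offsets are hypothetical, and which struct device to pass for the mapping depends on the platform (the controller's parent device is used here).

static int example_dma_tx(struct i3c_master_controller *master,
			  void __iomem *regs, void *buf, size_t len)
{
	struct i3c_dma *dma;

	dma = i3c_master_dma_map_single(master->dev.parent, buf, len,
					false, DMA_TO_DEVICE);
	if (!dma)
		return -ENOMEM;

	/* Program the (hypothetical) DMA engine with the mapped address. */
	writel(lower_32_bits(dma->addr), regs + EXAMPLE_DMA_ADDR_REG);
	writel(dma->len, regs + EXAMPLE_DMA_LEN_REG);
	/* ... start the engine and wait for completion ... */

	i3c_master_dma_unmap_single(dma);
	return 0;
}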
+/**
* i3c_master_set_info() - set master device information
* @master: master used to send frames on the bus
* @info: I3C device information
@@ -1725,6 +1976,12 @@ static int i3c_master_bus_init(struct i3c_master_controller *master)
goto err_bus_cleanup;
}
+ if (master->ops->set_speed) {
+ ret = master->ops->set_speed(master, I3C_OPEN_DRAIN_SLOW_SPEED);
+ if (ret)
+ goto err_bus_cleanup;
+ }
+
/*
* Reset all dynamic address that may have been assigned before
* (assigned by the bootloader for example).
@@ -1733,6 +1990,12 @@ static int i3c_master_bus_init(struct i3c_master_controller *master)
if (ret && ret != I3C_ERROR_M2)
goto err_bus_cleanup;
+ if (master->ops->set_speed) {
+ ret = master->ops->set_speed(master, I3C_OPEN_DRAIN_NORMAL_SPEED);
+ if (ret)
+ goto err_bus_cleanup;
+ }
+
/* Disable all slave events before starting DAA. */
ret = i3c_master_disec_locked(master, I3C_BROADCAST_ADDR,
I3C_CCC_EVENT_SIR | I3C_CCC_EVENT_MR |
@@ -1763,9 +2026,11 @@ static int i3c_master_bus_init(struct i3c_master_controller *master)
goto err_rstdaa;
}
- i3c_bus_set_addr_slot_status(&master->bus,
- i3cboardinfo->init_dyn_addr,
- I3C_ADDR_SLOT_I3C_DEV);
+ /* Do not mark as occupied until a real device exists on the bus */
+ i3c_bus_set_addr_slot_status_mask(&master->bus,
+ i3cboardinfo->init_dyn_addr,
+ I3C_ADDR_SLOT_EXT_DESIRED,
+ I3C_ADDR_SLOT_EXT_STATUS_MASK);
/*
* Only try to create/attach devices that have a static
@@ -1896,11 +2161,16 @@ int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
ibireq.max_payload_len = olddev->ibi->max_payload_len;
ibireq.num_slots = olddev->ibi->num_slots;
- if (olddev->ibi->enabled) {
+ if (olddev->ibi->enabled)
enable_ibi = true;
- i3c_dev_disable_ibi_locked(olddev);
- }
-
+ /*
+ * The olddev must not receive any more commands on the
+ * I3C bus: it no longer responds at its old address since
+ * it has been assigned a new one, so any command would end
+ * in a NACK or a timeout. Set the olddev->ibi->enabled flag
+ * to false to avoid sending DISEC to the old address.
+ */
+ olddev->ibi->enabled = false;
i3c_dev_free_ibi_locked(olddev);
}
mutex_unlock(&olddev->ibi_lock);
@@ -1928,7 +2198,8 @@ int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
else
expected_dyn_addr = newdev->info.dyn_addr;
- if (newdev->info.dyn_addr != expected_dyn_addr) {
+ if (newdev->info.dyn_addr != expected_dyn_addr &&
+ i3c_bus_get_addr_slot_status(&master->bus, expected_dyn_addr) == I3C_ADDR_SLOT_FREE) {
/*
* Try to apply the expected dynamic address. If it fails, keep
* the address assigned by the master.
@@ -2011,7 +2282,7 @@ of_i3c_master_add_i2c_boardinfo(struct i3c_master_controller *master,
*/
if (boardinfo->base.flags & I2C_CLIENT_TEN) {
dev_err(dev, "I2C device with 10 bit address not supported.");
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
/* LVR is encoded in reg[2]. */
@@ -2077,7 +2348,7 @@ static int of_i3c_master_add_dev(struct i3c_master_controller *master,
u32 reg[3];
int ret;
- if (!master || !node)
+ if (!master)
return -EINVAL;
ret = of_property_read_u32_array(node, "reg", reg, ARRAY_SIZE(reg));
@@ -2141,13 +2412,13 @@ static int i3c_master_i2c_adapter_xfer(struct i2c_adapter *adap,
return -EINVAL;
if (!master->ops->i2c_xfers)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
/* Doing transfers to different devices is not supported. */
addr = xfers[0].addr;
for (i = 1; i < nxfers; i++) {
if (addr != xfers[i].addr)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
i3c_bus_normaluse_lock(&master->bus);
@@ -2170,14 +2441,10 @@ static u8 i3c_master_i2c_get_lvr(struct i2c_client *client)
{
/* Fall back to no spike filters and FM bus mode. */
u8 lvr = I3C_LVR_I2C_INDEX(2) | I3C_LVR_I2C_FM_MODE;
+ u32 reg[3];
- if (client->dev.of_node) {
- u32 reg[3];
-
- if (!of_property_read_u32_array(client->dev.of_node, "reg",
- reg, ARRAY_SIZE(reg)))
- lvr = reg[2];
- }
+ if (!of_property_read_u32_array(client->dev.of_node, "reg", reg, ARRAY_SIZE(reg)))
+ lvr = reg[2];
return lvr;
}
@@ -2272,6 +2539,8 @@ static int i3c_i2c_notifier_call(struct notifier_block *nb, unsigned long action
case BUS_NOTIFY_DEL_DEVICE:
ret = i3c_master_i2c_detach(adap, client);
break;
+ default:
+ ret = -EINVAL;
}
i3c_bus_maintenance_unlock(&master->bus);
@@ -2287,18 +2556,22 @@ static int i3c_master_i2c_adapter_init(struct i3c_master_controller *master)
struct i2c_adapter *adap = i3c_master_to_i2c_adapter(master);
struct i2c_dev_desc *i2cdev;
struct i2c_dev_boardinfo *i2cboardinfo;
- int ret;
+ int ret, id;
adap->dev.parent = master->dev.parent;
adap->owner = master->dev.parent->driver->owner;
adap->algo = &i3c_master_i2c_algo;
- strncpy(adap->name, dev_name(master->dev.parent), sizeof(adap->name));
-
- /* FIXME: Should we allow i3c masters to override these values? */
- adap->timeout = 1000;
+ strscpy(adap->name, dev_name(master->dev.parent), sizeof(adap->name));
+ adap->timeout = HZ;
adap->retries = 3;
- ret = i2c_add_adapter(adap);
+ id = of_alias_get_id(master->dev.of_node, "i2c");
+ if (id >= 0) {
+ adap->nr = id;
+ ret = i2c_add_numbered_adapter(adap);
+ } else {
+ ret = i2c_add_adapter(adap);
+ }
if (ret)
return ret;
@@ -2354,8 +2627,11 @@ static void i3c_master_unregister_i3c_devs(struct i3c_master_controller *master)
*/
void i3c_master_queue_ibi(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot)
{
+ if (!dev->ibi || !slot)
+ return;
+
atomic_inc(&dev->ibi->pending_ibis);
- queue_work(dev->common.master->wq, &slot->work);
+ queue_work(dev->ibi->wq, &slot->work);
}
EXPORT_SYMBOL_GPL(i3c_master_queue_ibi);
@@ -2543,10 +2819,14 @@ EXPORT_SYMBOL_GPL(i3c_generic_ibi_recycle_slot);
static int i3c_master_check_ops(const struct i3c_master_controller_ops *ops)
{
- if (!ops || !ops->bus_init || !ops->priv_xfers ||
+ if (!ops || !ops->bus_init ||
!ops->send_ccc_cmd || !ops->do_daa || !ops->i2c_xfers)
return -EINVAL;
+ /* Must provide one of priv_xfers (SDR only) or i3c_xfers (all modes) */
+ if (!ops->priv_xfers && !ops->i3c_xfers)
+ return -EINVAL;
+
if (ops->request_ibi &&
(!ops->enable_ibi || !ops->disable_ibi || !ops->free_ibi ||
!ops->recycle_ibi_slot))
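Given the relaxed check above, a new controller driver can register with only the multi-mode hook. A sketch of the resulting ops table; all example_* callbacks are hypothetical.

static const struct i3c_master_controller_ops example_ops = {
	.bus_init	= example_bus_init,
	.send_ccc_cmd	= example_send_ccc_cmd,
	.do_daa		= example_do_daa,
	.i2c_xfers	= example_i2c_xfers,
	.i3c_xfers	= example_i3c_xfers,	/* replaces .priv_xfers */
};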
@@ -2562,7 +2842,7 @@ static int i3c_master_check_ops(const struct i3c_master_controller_ops *ops)
* controller)
* @ops: the master controller operations
* @secondary: true if you are registering a secondary master. Will return
- * -ENOTSUPP if set to true since secondary masters are not yet
+ * -EOPNOTSUPP if set to true since secondary masters are not yet
* supported
*
* This function takes care of everything for you:
@@ -2581,7 +2861,7 @@ int i3c_master_register(struct i3c_master_controller *master,
const struct i3c_master_controller_ops *ops,
bool secondary)
{
- unsigned long i2c_scl_rate = I3C_BUS_I2C_FM_PLUS_SCL_RATE;
+ unsigned long i2c_scl_rate = I3C_BUS_I2C_FM_PLUS_SCL_MAX_RATE;
struct i3c_bus *i3cbus = i3c_master_get_bus(master);
enum i3c_bus_mode mode = I3C_BUS_MODE_PURE;
struct i2c_dev_boardinfo *i2cbi;
@@ -2589,7 +2869,7 @@ int i3c_master_register(struct i3c_master_controller *master,
/* We do not support secondary masters yet. */
if (secondary)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
ret = i3c_master_check_ops(ops);
if (ret)
@@ -2605,13 +2885,17 @@ int i3c_master_register(struct i3c_master_controller *master,
INIT_LIST_HEAD(&master->boardinfo.i2c);
INIT_LIST_HEAD(&master->boardinfo.i3c);
- ret = i3c_bus_init(i3cbus);
- if (ret)
- return ret;
-
device_initialize(&master->dev);
dev_set_name(&master->dev, "i3c-%d", i3cbus->id);
+ master->dev.dma_mask = parent->dma_mask;
+ master->dev.coherent_dma_mask = parent->coherent_dma_mask;
+ master->dev.dma_parms = parent->dma_parms;
+
+ ret = i3c_bus_init(i3cbus, master->dev.of_node);
+ if (ret)
+ goto err_put_dev;
+
ret = of_populate_i3c_bus(master);
if (ret)
goto err_put_dev;
@@ -2636,14 +2920,14 @@ int i3c_master_register(struct i3c_master_controller *master,
}
if (i2cbi->lvr & I3C_LVR_I2C_FM_MODE)
- i2c_scl_rate = I3C_BUS_I2C_FM_SCL_RATE;
+ i2c_scl_rate = I3C_BUS_I2C_FM_SCL_MAX_RATE;
}
ret = i3c_bus_set_mode(i3cbus, mode, i2c_scl_rate);
if (ret)
goto err_put_dev;
- master->wq = alloc_workqueue("%s", 0, 0, dev_name(parent));
+ master->wq = alloc_workqueue("%s", WQ_PERCPU, 0, dev_name(parent));
if (!master->wq) {
ret = -ENOMEM;
goto err_put_dev;
@@ -2665,6 +2949,12 @@ int i3c_master_register(struct i3c_master_controller *master,
if (ret)
goto err_del_dev;
+ i3c_bus_notify(i3cbus, I3C_NOTIFY_BUS_ADD);
+
+ pm_runtime_no_callbacks(&master->dev);
+ pm_suspend_ignore_children(&master->dev, true);
+ pm_runtime_enable(&master->dev);
+
/*
* We're done initializing the bus and the controller, we can now
* register I3C devices discovered during the initial DAA.
@@ -2694,17 +2984,16 @@ EXPORT_SYMBOL_GPL(i3c_master_register);
* @master: master used to send frames on the bus
*
* Basically undo everything done in i3c_master_register().
- *
- * Return: 0 in case of success, a negative error code otherwise.
*/
-int i3c_master_unregister(struct i3c_master_controller *master)
+void i3c_master_unregister(struct i3c_master_controller *master)
{
+ i3c_bus_notify(&master->bus, I3C_NOTIFY_BUS_REMOVE);
+
i3c_master_i2c_adapter_cleanup(master);
i3c_master_unregister_i3c_devs(master);
i3c_master_bus_cleanup(master);
+ pm_runtime_disable(&master->dev);
device_unregister(&master->dev);
-
- return 0;
}
EXPORT_SYMBOL_GPL(i3c_master_unregister);
@@ -2727,9 +3016,8 @@ int i3c_dev_setdasa_locked(struct i3c_dev_desc *dev)
dev->boardinfo->init_dyn_addr);
}
-int i3c_dev_do_priv_xfers_locked(struct i3c_dev_desc *dev,
- struct i3c_priv_xfer *xfers,
- int nxfers)
+int i3c_dev_do_xfers_locked(struct i3c_dev_desc *dev, struct i3c_xfer *xfers,
+ int nxfers, enum i3c_xfer_mode mode)
{
struct i3c_master_controller *master;
@@ -2740,8 +3028,14 @@ int i3c_dev_do_priv_xfers_locked(struct i3c_dev_desc *dev,
if (!master || !xfers)
return -EINVAL;
- if (!master->ops->priv_xfers)
- return -ENOTSUPP;
+ if (mode != I3C_SDR && !(master->this->info.hdr_cap & BIT(mode)))
+ return -EOPNOTSUPP;
+
+ if (master->ops->i3c_xfers)
+ return master->ops->i3c_xfers(dev, xfers, nxfers, mode);
+
+ if (mode != I3C_SDR)
+ return -EINVAL;
return master->ops->priv_xfers(dev, xfers, nxfers);
}
@@ -2791,7 +3085,7 @@ int i3c_dev_request_ibi_locked(struct i3c_dev_desc *dev,
int ret;
if (!master->ops->request_ibi)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (dev->ibi)
return -EBUSY;
@@ -2800,6 +3094,12 @@ int i3c_dev_request_ibi_locked(struct i3c_dev_desc *dev,
if (!ibi)
return -ENOMEM;
+ ibi->wq = alloc_ordered_workqueue(dev_name(i3cdev_to_dev(dev->dev)), WQ_MEM_RECLAIM);
+ if (!ibi->wq) {
+ kfree(ibi);
+ return -ENOMEM;
+ }
+
atomic_set(&ibi->pending_ibis, 0);
init_completion(&ibi->all_ibis_handled);
ibi->handler = req->handler;
@@ -2827,14 +3127,28 @@ void i3c_dev_free_ibi_locked(struct i3c_dev_desc *dev)
WARN_ON(i3c_dev_disable_ibi_locked(dev));
master->ops->free_ibi(dev);
+
+ if (dev->ibi->wq) {
+ destroy_workqueue(dev->ibi->wq);
+ dev->ibi->wq = NULL;
+ }
+
kfree(dev->ibi);
dev->ibi = NULL;
}
static int __init i3c_init(void)
{
- int res = bus_register_notifier(&i2c_bus_type, &i2cdev_notifier);
+ int res;
+
+ res = of_alias_get_highest_id("i3c");
+ if (res >= 0) {
+ mutex_lock(&i3c_core_lock);
+ __i3c_first_dynamic_bus_num = res + 1;
+ mutex_unlock(&i3c_core_lock);
+ }
+ res = bus_register_notifier(&i2c_bus_type, &i2cdev_notifier);
if (res)
return res;
diff --git a/drivers/i3c/master/Kconfig b/drivers/i3c/master/Kconfig
index 3b8f95916f46..82cf330778d5 100644
--- a/drivers/i3c/master/Kconfig
+++ b/drivers/i3c/master/Kconfig
@@ -1,7 +1,17 @@
# SPDX-License-Identifier: GPL-2.0-only
+config ADI_I3C_MASTER
+ tristate "Analog Devices I3C master driver"
+ depends on HAS_IOMEM
+ help
+ Support for Analog Devices I3C Controller IP, an AXI-interfaced IP
+ core that supports I3C and I2C devices, multiple speed-grades and I3C
+ IBIs.
+
+ This driver can also be built as a module. If so, the module will be
+ called adi-i3c-master.
+
config CDNS_I3C_MASTER
tristate "Cadence I3C master driver"
- depends on I3C
depends on HAS_IOMEM
depends on !(ALPHA || PARISC)
help
@@ -9,7 +19,6 @@ config CDNS_I3C_MASTER
config DW_I3C_MASTER
tristate "Synospsys DesignWare I3C master driver"
- depends on I3C
depends on HAS_IOMEM
depends on !(ALPHA || PARISC)
# ALPHA and PARISC needs {read,write}sl()
@@ -22,9 +31,22 @@ config DW_I3C_MASTER
This driver can also be built as a module. If so, the module
will be called dw-i3c-master.
+config AST2600_I3C_MASTER
+ tristate "ASPEED AST2600 I3C master driver"
+ depends on DW_I3C_MASTER
+ depends on ARCH_ASPEED || COMPILE_TEST
+ select MFD_SYSCON
+ help
+ Support for ASPEED AST2600 I3C Controller.
+
+ This hardware is an instance of the DW I3C controller; this
+ driver adds platform-specific support for AST2600 hardware.
+
+ This driver can also be built as a module. If so, the module
+ will be called ast2600-i3c-master.
+
config SVC_I3C_MASTER
tristate "Silvaco I3C Dual-Role Master driver"
- depends on I3C
depends on HAS_IOMEM
depends on !(ALPHA || PARISC)
help
@@ -32,7 +54,6 @@ config SVC_I3C_MASTER
config MIPI_I3C_HCI
tristate "MIPI I3C Host Controller Interface driver (EXPERIMENTAL)"
- depends on I3C
depends on HAS_IOMEM
help
Support for hardware following the MIPI Alliance's I3C Host Controller
@@ -43,3 +64,24 @@ config MIPI_I3C_HCI
This driver can also be built as a module. If so, the module will be
called mipi-i3c-hci.
+
+config MIPI_I3C_HCI_PCI
+ tristate "MIPI I3C Host Controller Interface PCI support"
+ depends on MIPI_I3C_HCI
+ depends on PCI
+ help
+ Support for MIPI I3C Host Controller Interface compatible hardware
+ on the PCI bus.
+
+ This driver can also be built as a module. If so, the module will be
+ called mipi-i3c-hci-pci.
+
+config RENESAS_I3C
+ tristate "Renesas I3C controller driver"
+ depends on HAS_IOMEM
+ depends on ARCH_RENESAS || COMPILE_TEST
+ help
+ Support the Renesas I3C controller as found in some RZ variants.
+
+ This driver can also be built as a module. If so, the module will be
+ called renesas-i3c.
diff --git a/drivers/i3c/master/Makefile b/drivers/i3c/master/Makefile
index b3fee0f690b2..816a227b6f7a 100644
--- a/drivers/i3c/master/Makefile
+++ b/drivers/i3c/master/Makefile
@@ -1,5 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_ADI_I3C_MASTER) += adi-i3c-master.o
obj-$(CONFIG_CDNS_I3C_MASTER) += i3c-master-cdns.o
obj-$(CONFIG_DW_I3C_MASTER) += dw-i3c-master.o
+obj-$(CONFIG_AST2600_I3C_MASTER) += ast2600-i3c-master.o
obj-$(CONFIG_SVC_I3C_MASTER) += svc-i3c-master.o
obj-$(CONFIG_MIPI_I3C_HCI) += mipi-i3c-hci/
+obj-$(CONFIG_RENESAS_I3C) += renesas-i3c.o
diff --git a/drivers/i3c/master/adi-i3c-master.c b/drivers/i3c/master/adi-i3c-master.c
new file mode 100644
index 000000000000..82ac0b3d057a
--- /dev/null
+++ b/drivers/i3c/master/adi-i3c-master.c
@@ -0,0 +1,1019 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * I3C Controller driver
+ * Copyright 2025 Analog Devices Inc.
+ * Author: Jorge Marques <jorge.marques@analog.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/adi-axi-common.h>
+#include <linux/i3c/master.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "../internals.h"
+
+#define ADI_MAX_DEVS 16
+#define ADI_HAS_MDB_FROM_BCR(x) (FIELD_GET(BIT(2), (x)))
+
+#define REG_ENABLE 0x040
+
+#define REG_PID_L 0x054
+#define REG_PID_H 0x058
+#define REG_DCR_BCR_DA 0x05c
+#define REG_DCR_BCR_DA_GET_DA(x) FIELD_GET(GENMASK(22, 16), (x))
+#define REG_DCR_BCR_DA_GET_BCR(x) FIELD_GET(GENMASK(15, 8), (x))
+#define REG_DCR_BCR_DA_GET_DCR(x) FIELD_GET(GENMASK(7, 0), (x))
+
+#define REG_IRQ_MASK 0x080
+#define REG_IRQ_PENDING 0x084
+#define REG_IRQ_PENDING_DAA BIT(7)
+#define REG_IRQ_PENDING_IBI BIT(6)
+#define REG_IRQ_PENDING_CMDR BIT(5)
+
+#define REG_CMD_FIFO 0x0d4
+#define REG_CMD_FIFO_0_IS_CCC BIT(22)
+#define REG_CMD_FIFO_0_BCAST BIT(21)
+#define REG_CMD_FIFO_0_SR BIT(20)
+#define REG_CMD_FIFO_0_LEN(l) FIELD_PREP(GENMASK(19, 8), (l))
+#define REG_CMD_FIFO_0_DEV_ADDR(a) FIELD_PREP(GENMASK(7, 1), (a))
+#define REG_CMD_FIFO_0_RNW BIT(0)
+#define REG_CMD_FIFO_1_CCC(id) FIELD_PREP(GENMASK(7, 0), (id))
+
+#define REG_CMD_FIFO_ROOM 0x0c0
+#define REG_CMDR_FIFO 0x0d8
+#define REG_CMDR_FIFO_UDA_ERROR 8
+#define REG_CMDR_FIFO_NACK_RESP 6
+#define REG_CMDR_FIFO_CE2_ERROR 4
+#define REG_CMDR_FIFO_CE0_ERROR 1
+#define REG_CMDR_FIFO_NO_ERROR 0
+#define REG_CMDR_FIFO_ERROR(x) FIELD_GET(GENMASK(23, 20), (x))
+#define REG_CMDR_FIFO_XFER_BYTES(x) FIELD_GET(GENMASK(19, 8), (x))
+
+#define REG_SDO_FIFO 0x0dc
+#define REG_SDO_FIFO_ROOM 0x0c8
+#define REG_SDI_FIFO 0x0e0
+#define REG_IBI_FIFO 0x0e4
+#define REG_FIFO_STATUS 0x0e8
+#define REG_FIFO_STATUS_CMDR_EMPTY BIT(0)
+#define REG_FIFO_STATUS_IBI_EMPTY BIT(1)
+
+#define REG_OPS 0x100
+#define REG_OPS_PP_SG_MASK GENMASK(6, 5)
+#define REG_OPS_SET_SG(x) FIELD_PREP(REG_OPS_PP_SG_MASK, (x))
+
+#define REG_IBI_CONFIG 0x140
+#define REG_IBI_CONFIG_ENABLE BIT(0)
+#define REG_IBI_CONFIG_LISTEN BIT(1)
+
+#define REG_DEV_CHAR 0x180
+#define REG_DEV_CHAR_IS_I2C BIT(0)
+#define REG_DEV_CHAR_IS_ATTACHED BIT(1)
+#define REG_DEV_CHAR_BCR_IBI(x) FIELD_PREP(GENMASK(3, 2), (x))
+#define REG_DEV_CHAR_WEN BIT(8)
+#define REG_DEV_CHAR_ADDR(x) FIELD_PREP(GENMASK(15, 9), (x))
+
+enum speed_grade {PP_SG_UNSET, PP_SG_1MHZ, PP_SG_3MHZ, PP_SG_6MHZ, PP_SG_12MHZ};
+
+struct adi_i3c_cmd {
+ u32 cmd0;
+ u32 cmd1;
+ u32 tx_len;
+ const void *tx_buf;
+ u32 rx_len;
+ void *rx_buf;
+ u32 error;
+};
+
+struct adi_i3c_xfer {
+ struct list_head node;
+ struct completion comp;
+ int ret;
+ unsigned int ncmds;
+ unsigned int ncmds_comp;
+ struct adi_i3c_cmd cmds[] __counted_by(ncmds);
+};
+
+struct adi_i3c_master {
+ struct i3c_master_controller base;
+ u32 free_rr_slots;
+ struct {
+ unsigned int num_slots;
+ struct i3c_dev_desc **slots;
+ spinlock_t lock; /* Protect IBI slot access */
+ } ibi;
+ struct {
+ struct list_head list;
+ struct adi_i3c_xfer *cur;
+ spinlock_t lock; /* Protect transfer */
+ } xferqueue;
+ void __iomem *regs;
+ struct clk *clk;
+ unsigned long i3c_scl_lim;
+ struct {
+ u8 addrs[ADI_MAX_DEVS];
+ u8 index;
+ } daa;
+};
+
+static inline struct adi_i3c_master *to_adi_i3c_master(struct i3c_master_controller *master)
+{
+ return container_of(master, struct adi_i3c_master, base);
+}
+
+static void adi_i3c_master_wr_to_tx_fifo(struct adi_i3c_master *master,
+ const u8 *buf, unsigned int nbytes)
+{
+ unsigned int n, m;
+
+ n = readl(master->regs + REG_SDO_FIFO_ROOM);
+ m = min(n, nbytes);
+ i3c_writel_fifo(master->regs + REG_SDO_FIFO, buf, m);
+}
+
+static void adi_i3c_master_rd_from_rx_fifo(struct adi_i3c_master *master,
+ u8 *buf, unsigned int nbytes)
+{
+ i3c_readl_fifo(master->regs + REG_SDI_FIFO, buf, nbytes);
+}
+
+static bool adi_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
+ const struct i3c_ccc_cmd *cmd)
+{
+ if (cmd->ndests > 1)
+ return false;
+
+ switch (cmd->id) {
+ case I3C_CCC_ENEC(true):
+ case I3C_CCC_ENEC(false):
+ case I3C_CCC_DISEC(true):
+ case I3C_CCC_DISEC(false):
+ case I3C_CCC_RSTDAA(true):
+ case I3C_CCC_RSTDAA(false):
+ case I3C_CCC_ENTDAA:
+ case I3C_CCC_SETDASA:
+ case I3C_CCC_SETNEWDA:
+ case I3C_CCC_GETMWL:
+ case I3C_CCC_GETMRL:
+ case I3C_CCC_GETPID:
+ case I3C_CCC_GETBCR:
+ case I3C_CCC_GETDCR:
+ case I3C_CCC_GETSTATUS:
+ case I3C_CCC_GETHDRCAP:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static int adi_i3c_master_disable(struct adi_i3c_master *master)
+{
+ writel(0, master->regs + REG_IBI_CONFIG);
+
+ return 0;
+}
+
+static struct adi_i3c_xfer *adi_i3c_master_alloc_xfer(struct adi_i3c_master *master,
+ unsigned int ncmds)
+{
+ struct adi_i3c_xfer *xfer;
+
+ xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
+ if (!xfer)
+ return NULL;
+
+ INIT_LIST_HEAD(&xfer->node);
+ xfer->ncmds = ncmds;
+ xfer->ret = -ETIMEDOUT;
+
+ return xfer;
+}
+
+static void adi_i3c_master_start_xfer_locked(struct adi_i3c_master *master)
+{
+ struct adi_i3c_xfer *xfer = master->xferqueue.cur;
+ unsigned int i, n, m;
+
+ if (!xfer)
+ return;
+
+ for (i = 0; i < xfer->ncmds; i++) {
+ struct adi_i3c_cmd *cmd = &xfer->cmds[i];
+
+ if (!(cmd->cmd0 & REG_CMD_FIFO_0_RNW))
+ adi_i3c_master_wr_to_tx_fifo(master, cmd->tx_buf, cmd->tx_len);
+ }
+
+ n = readl(master->regs + REG_CMD_FIFO_ROOM);
+ for (i = 0; i < xfer->ncmds; i++) {
+ struct adi_i3c_cmd *cmd = &xfer->cmds[i];
+
+ m = cmd->cmd0 & REG_CMD_FIFO_0_IS_CCC ? 2 : 1;
+ if (m > n)
+ break;
+ writel(cmd->cmd0, master->regs + REG_CMD_FIFO);
+ if (cmd->cmd0 & REG_CMD_FIFO_0_IS_CCC)
+ writel(cmd->cmd1, master->regs + REG_CMD_FIFO);
+ n -= m;
+ }
+}
+
+static void adi_i3c_master_end_xfer_locked(struct adi_i3c_master *master,
+ u32 pending)
+{
+ struct adi_i3c_xfer *xfer = master->xferqueue.cur;
+ int i, ret = 0;
+
+ if (!xfer)
+ return;
+
+ while (!(readl(master->regs + REG_FIFO_STATUS) & REG_FIFO_STATUS_CMDR_EMPTY)) {
+ struct adi_i3c_cmd *cmd;
+ u32 cmdr, rx_len;
+
+ cmdr = readl(master->regs + REG_CMDR_FIFO);
+
+ cmd = &xfer->cmds[xfer->ncmds_comp++];
+ if (cmd->cmd0 & REG_CMD_FIFO_0_RNW) {
+ rx_len = min_t(u32, REG_CMDR_FIFO_XFER_BYTES(cmdr), cmd->rx_len);
+ adi_i3c_master_rd_from_rx_fifo(master, cmd->rx_buf, rx_len);
+ }
+ cmd->error = REG_CMDR_FIFO_ERROR(cmdr);
+ }
+
+ for (i = 0; i < xfer->ncmds_comp; i++) {
+ switch (xfer->cmds[i].error) {
+ case REG_CMDR_FIFO_NO_ERROR:
+ break;
+
+ case REG_CMDR_FIFO_CE0_ERROR:
+ case REG_CMDR_FIFO_CE2_ERROR:
+ case REG_CMDR_FIFO_NACK_RESP:
+ case REG_CMDR_FIFO_UDA_ERROR:
+ ret = -EIO;
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ xfer->ret = ret;
+
+ if (xfer->ncmds_comp != xfer->ncmds)
+ return;
+
+ complete(&xfer->comp);
+
+ xfer = list_first_entry_or_null(&master->xferqueue.list,
+ struct adi_i3c_xfer, node);
+ if (xfer)
+ list_del_init(&xfer->node);
+
+ master->xferqueue.cur = xfer;
+ adi_i3c_master_start_xfer_locked(master);
+}
+
+static void adi_i3c_master_queue_xfer(struct adi_i3c_master *master,
+ struct adi_i3c_xfer *xfer)
+{
+ init_completion(&xfer->comp);
+ guard(spinlock_irqsave)(&master->xferqueue.lock);
+ if (master->xferqueue.cur) {
+ list_add_tail(&xfer->node, &master->xferqueue.list);
+ } else {
+ master->xferqueue.cur = xfer;
+ adi_i3c_master_start_xfer_locked(master);
+ }
+}
+
+static void adi_i3c_master_unqueue_xfer(struct adi_i3c_master *master,
+ struct adi_i3c_xfer *xfer)
+{
+ guard(spinlock_irqsave)(&master->xferqueue.lock);
+ if (master->xferqueue.cur == xfer)
+ master->xferqueue.cur = NULL;
+ else
+ list_del_init(&xfer->node);
+
+ writel(0x01, master->regs + REG_ENABLE);
+ writel(0x00, master->regs + REG_ENABLE);
+ writel(REG_IRQ_PENDING_CMDR, master->regs + REG_IRQ_MASK);
+}
+
+static enum i3c_error_code adi_i3c_cmd_get_err(struct adi_i3c_cmd *cmd)
+{
+ switch (cmd->error) {
+ case REG_CMDR_FIFO_CE0_ERROR:
+ return I3C_ERROR_M0;
+
+ case REG_CMDR_FIFO_CE2_ERROR:
+ case REG_CMDR_FIFO_NACK_RESP:
+ return I3C_ERROR_M2;
+
+ default:
+ break;
+ }
+
+ return I3C_ERROR_UNKNOWN;
+}
+
+static int adi_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
+ struct i3c_ccc_cmd *cmd)
+{
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct adi_i3c_xfer *xfer __free(kfree) = NULL;
+ struct adi_i3c_cmd *ccmd;
+
+ xfer = adi_i3c_master_alloc_xfer(master, 1);
+ if (!xfer)
+ return -ENOMEM;
+
+ ccmd = xfer->cmds;
+ ccmd->cmd1 = REG_CMD_FIFO_1_CCC(cmd->id);
+ ccmd->cmd0 = REG_CMD_FIFO_0_IS_CCC |
+ REG_CMD_FIFO_0_LEN(cmd->dests[0].payload.len);
+
+ if (cmd->id & I3C_CCC_DIRECT)
+ ccmd->cmd0 |= REG_CMD_FIFO_0_DEV_ADDR(cmd->dests[0].addr);
+
+ if (cmd->rnw) {
+ ccmd->cmd0 |= REG_CMD_FIFO_0_RNW;
+ ccmd->rx_buf = cmd->dests[0].payload.data;
+ ccmd->rx_len = cmd->dests[0].payload.len;
+ } else {
+ ccmd->tx_buf = cmd->dests[0].payload.data;
+ ccmd->tx_len = cmd->dests[0].payload.len;
+ }
+
+ adi_i3c_master_queue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ adi_i3c_master_unqueue_xfer(master, xfer);
+
+ cmd->err = adi_i3c_cmd_get_err(&xfer->cmds[0]);
+
+ return 0;
+}
+
+static int adi_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct adi_i3c_xfer *xfer __free(kfree) = NULL;
+ int i, ret;
+
+ if (!nxfers)
+ return 0;
+
+ xfer = adi_i3c_master_alloc_xfer(master, nxfers);
+ if (!xfer)
+ return -ENOMEM;
+
+ for (i = 0; i < nxfers; i++) {
+ struct adi_i3c_cmd *ccmd = &xfer->cmds[i];
+
+ ccmd->cmd0 = REG_CMD_FIFO_0_DEV_ADDR(dev->info.dyn_addr);
+
+ if (xfers[i].rnw) {
+ ccmd->cmd0 |= REG_CMD_FIFO_0_RNW;
+ ccmd->rx_buf = xfers[i].data.in;
+ ccmd->rx_len = xfers[i].len;
+ } else {
+ ccmd->tx_buf = xfers[i].data.out;
+ ccmd->tx_len = xfers[i].len;
+ }
+
+ ccmd->cmd0 |= REG_CMD_FIFO_0_LEN(xfers[i].len);
+
+ if (i < nxfers - 1)
+ ccmd->cmd0 |= REG_CMD_FIFO_0_SR;
+
+ if (!i)
+ ccmd->cmd0 |= REG_CMD_FIFO_0_BCAST;
+ }
+
+ adi_i3c_master_queue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp,
+ msecs_to_jiffies(1000)))
+ adi_i3c_master_unqueue_xfer(master, xfer);
+
+ ret = xfer->ret;
+
+ for (i = 0; i < nxfers; i++)
+ xfers[i].err = adi_i3c_cmd_get_err(&xfer->cmds[i]);
+
+ return ret;
+}
+
+struct adi_i3c_i2c_dev_data {
+ struct i3c_generic_ibi_pool *ibi_pool;
+ u16 id;
+ s16 ibi;
+};
+
+static int adi_i3c_master_get_rr_slot(struct adi_i3c_master *master,
+ u8 dyn_addr)
+{
+ if (!master->free_rr_slots)
+ return -ENOSPC;
+
+ return ffs(master->free_rr_slots) - 1;
+}
+
+static int adi_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev, u8 dyn_addr)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ u8 addr;
+
+ addr = dev->info.dyn_addr ? dev->info.dyn_addr : dev->info.static_addr;
+
+ writel(REG_DEV_CHAR_ADDR(dyn_addr), master->regs + REG_DEV_CHAR);
+ writel((readl(master->regs + REG_DEV_CHAR) &
+ ~REG_DEV_CHAR_IS_ATTACHED) | REG_DEV_CHAR_WEN,
+ master->regs + REG_DEV_CHAR);
+
+ writel(REG_DEV_CHAR_ADDR(addr), master->regs + REG_DEV_CHAR);
+ writel(readl(master->regs + REG_DEV_CHAR) |
+ REG_DEV_CHAR_IS_ATTACHED | REG_DEV_CHAR_WEN,
+ master->regs + REG_DEV_CHAR);
+
+ return 0;
+}
+
+static int adi_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct adi_i3c_i2c_dev_data *data;
+ int slot;
+ u8 addr;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ slot = adi_i3c_master_get_rr_slot(master, dev->info.dyn_addr);
+ if (slot < 0) {
+ kfree(data);
+ return slot;
+ }
+
+ data->id = slot;
+ i3c_dev_set_master_data(dev, data);
+ master->free_rr_slots &= ~BIT(slot);
+
+ addr = dev->info.dyn_addr ? dev->info.dyn_addr : dev->info.static_addr;
+
+ writel(REG_DEV_CHAR_ADDR(addr), master->regs + REG_DEV_CHAR);
+ writel(readl(master->regs + REG_DEV_CHAR) |
+ REG_DEV_CHAR_IS_ATTACHED | REG_DEV_CHAR_WEN,
+ master->regs + REG_DEV_CHAR);
+
+ return 0;
+}
+
+static void adi_i3c_master_sync_dev_char(struct i3c_master_controller *m)
+{
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct i3c_dev_desc *i3cdev;
+ u32 bcr_ibi;
+ u8 addr;
+
+ i3c_bus_for_each_i3cdev(&m->bus, i3cdev) {
+ addr = i3cdev->info.dyn_addr ?
+ i3cdev->info.dyn_addr : i3cdev->info.static_addr;
+ writel(REG_DEV_CHAR_ADDR(addr), master->regs + REG_DEV_CHAR);
+ bcr_ibi = FIELD_GET(I3C_BCR_IBI_PAYLOAD | I3C_BCR_IBI_REQ_CAP, (i3cdev->info.bcr));
+ writel(readl(master->regs + REG_DEV_CHAR) |
+ REG_DEV_CHAR_BCR_IBI(bcr_ibi) | REG_DEV_CHAR_WEN,
+ master->regs + REG_DEV_CHAR);
+ }
+}
+
+static void adi_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct adi_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ u8 addr;
+
+ addr = dev->info.dyn_addr ? dev->info.dyn_addr : dev->info.static_addr;
+
+ writel(REG_DEV_CHAR_ADDR(addr), master->regs + REG_DEV_CHAR);
+ writel((readl(master->regs + REG_DEV_CHAR) &
+ ~REG_DEV_CHAR_IS_ATTACHED) | REG_DEV_CHAR_WEN,
+ master->regs + REG_DEV_CHAR);
+
+ i3c_dev_set_master_data(dev, NULL);
+ master->free_rr_slots |= BIT(data->id);
+ kfree(data);
+}
+
+static int adi_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct adi_i3c_i2c_dev_data *data;
+ int slot;
+
+ slot = adi_i3c_master_get_rr_slot(master, 0);
+ if (slot < 0)
+ return slot;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->id = slot;
+ master->free_rr_slots &= ~BIT(slot);
+ i2c_dev_set_master_data(dev, data);
+
+ writel(REG_DEV_CHAR_ADDR(dev->addr) |
+ REG_DEV_CHAR_IS_I2C | REG_DEV_CHAR_IS_ATTACHED | REG_DEV_CHAR_WEN,
+ master->regs + REG_DEV_CHAR);
+
+ return 0;
+}
+
+static void adi_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct adi_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
+
+ writel(REG_DEV_CHAR_ADDR(dev->addr) |
+ REG_DEV_CHAR_IS_I2C | REG_DEV_CHAR_WEN,
+ master->regs + REG_DEV_CHAR);
+
+ i2c_dev_set_master_data(dev, NULL);
+ master->free_rr_slots |= BIT(data->id);
+ kfree(data);
+}
+
+static void adi_i3c_master_bus_cleanup(struct i3c_master_controller *m)
+{
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+
+ adi_i3c_master_disable(master);
+}
+
+static void adi_i3c_master_upd_i3c_scl_lim(struct adi_i3c_master *master)
+{
+ struct i3c_master_controller *m = &master->base;
+ struct i3c_bus *bus = i3c_master_get_bus(m);
+ u8 i3c_scl_lim = 0;
+ struct i3c_dev_desc *dev;
+ u8 pp_sg;
+
+ i3c_bus_for_each_i3cdev(bus, dev) {
+ u8 max_fscl;
+
+ max_fscl = max(I3C_CCC_MAX_SDR_FSCL(dev->info.max_read_ds),
+ I3C_CCC_MAX_SDR_FSCL(dev->info.max_write_ds));
+
+ switch (max_fscl) {
+ case I3C_SDR1_FSCL_8MHZ:
+ max_fscl = PP_SG_6MHZ;
+ break;
+ case I3C_SDR2_FSCL_6MHZ:
+ max_fscl = PP_SG_3MHZ;
+ break;
+ case I3C_SDR3_FSCL_4MHZ:
+ max_fscl = PP_SG_3MHZ;
+ break;
+ case I3C_SDR4_FSCL_2MHZ:
+ max_fscl = PP_SG_1MHZ;
+ break;
+ case I3C_SDR0_FSCL_MAX:
+ default:
+ max_fscl = PP_SG_12MHZ;
+ break;
+ }
+
+ if (max_fscl &&
+ (i3c_scl_lim > max_fscl || !i3c_scl_lim))
+ i3c_scl_lim = max_fscl;
+ }
+
+ if (!i3c_scl_lim)
+ return;
+
+ master->i3c_scl_lim = i3c_scl_lim - 1;
+
+ pp_sg = readl(master->regs + REG_OPS) & ~REG_OPS_PP_SG_MASK;
+ pp_sg |= REG_OPS_SET_SG(master->i3c_scl_lim);
+
+ writel(pp_sg, master->regs + REG_OPS);
+}
+
+static void adi_i3c_master_get_features(struct adi_i3c_master *master,
+ unsigned int slot,
+ struct i3c_device_info *info)
+{
+ u32 buf;
+
+ /* Dynamic address and PID are for identification only */
+ memset(info, 0, sizeof(*info));
+ buf = readl(master->regs + REG_DCR_BCR_DA);
+ info->dyn_addr = REG_DCR_BCR_DA_GET_DA(buf);
+ info->dcr = REG_DCR_BCR_DA_GET_DCR(buf);
+ info->bcr = REG_DCR_BCR_DA_GET_BCR(buf);
+ info->pid = readl(master->regs + REG_PID_L);
+ info->pid |= (u64)readl(master->regs + REG_PID_H) << 32;
+}
+
+static int adi_i3c_master_do_daa(struct i3c_master_controller *m)
+{
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ int ret, addr = 0;
+ u32 irq_mask;
+
+ for (u8 i = 0; i < ADI_MAX_DEVS; i++) {
+ addr = i3c_master_get_free_addr(m, addr);
+ if (addr < 0)
+ return addr;
+ master->daa.addrs[i] = addr;
+ }
+
+ irq_mask = readl(master->regs + REG_IRQ_MASK);
+ writel(irq_mask | REG_IRQ_PENDING_DAA,
+ master->regs + REG_IRQ_MASK);
+
+ master->daa.index = 0;
+ ret = i3c_master_entdaa_locked(&master->base);
+
+ writel(irq_mask, master->regs + REG_IRQ_MASK);
+
+ /* DAA always finishes with CE2_ERROR or NACK_RESP */
+ if (ret && ret != I3C_ERROR_M2)
+ return ret;
+
+ /* Add I3C devices discovered */
+ for (u8 i = 0; i < master->daa.index; i++)
+ i3c_master_add_i3c_dev_locked(m, master->daa.addrs[i]);
+ /* Sync retrieved devs info with the IP */
+ adi_i3c_master_sync_dev_char(m);
+
+ i3c_master_defslvs_locked(&master->base);
+
+ adi_i3c_master_upd_i3c_scl_lim(master);
+
+ return 0;
+}
+
+static int adi_i3c_master_bus_init(struct i3c_master_controller *m)
+{
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct i3c_device_info info = { };
+ int ret;
+
+ ret = i3c_master_get_free_addr(m, 0);
+ if (ret < 0)
+ return ret;
+
+ adi_i3c_master_get_features(master, 0, &info);
+ ret = i3c_master_set_info(&master->base, &info);
+ if (ret)
+ return ret;
+
+ writel(REG_IBI_CONFIG_LISTEN,
+ master->regs + REG_IBI_CONFIG);
+
+ return 0;
+}
+
+static void adi_i3c_master_handle_ibi(struct adi_i3c_master *master,
+ u32 raw)
+{
+ struct adi_i3c_i2c_dev_data *data;
+ struct i3c_ibi_slot *slot;
+ struct i3c_dev_desc *dev;
+ u8 da, id, mdb, len;
+ u8 *buf;
+
+ da = FIELD_GET(GENMASK(23, 17), raw);
+ mdb = FIELD_GET(GENMASK(15, 8), raw);
+ for (id = 0; id < master->ibi.num_slots; id++) {
+ if (master->ibi.slots[id] &&
+ master->ibi.slots[id]->info.dyn_addr == da)
+ break;
+ }
+
+ if (id == master->ibi.num_slots)
+ return;
+
+ dev = master->ibi.slots[id];
+ len = ADI_HAS_MDB_FROM_BCR(dev->info.bcr);
+ data = i3c_dev_get_master_data(dev);
+
+ guard(spinlock)(&master->ibi.lock);
+ slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
+ if (!slot)
+ return;
+
+ slot->len = len;
+ buf = slot->data;
+ buf[0] = mdb;
+ i3c_master_queue_ibi(dev, slot);
+}
+
+static void adi_i3c_master_demux_ibis(struct adi_i3c_master *master)
+{
+ while (!(readl(master->regs + REG_FIFO_STATUS) & REG_FIFO_STATUS_IBI_EMPTY)) {
+ u32 raw = readl(master->regs + REG_IBI_FIFO);
+
+ adi_i3c_master_handle_ibi(master, raw);
+ }
+}
+
+static void adi_i3c_master_handle_da_req(struct adi_i3c_master *master)
+{
+ u8 payload0[8];
+ u32 addr;
+
+ adi_i3c_master_rd_from_rx_fifo(master, payload0, 6);
+ addr = master->daa.addrs[master->daa.index++];
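+ /*
+ * Bits 7:1 carry the assigned address, bit 0 is the T bit, chosen so
+ * the whole byte has odd parity as required during ENTDAA.
+ */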
+ addr = (addr << 1) | (parity8(addr) ? 0 : 1);
+
+ writel(addr, master->regs + REG_SDO_FIFO);
+}
+
+static irqreturn_t adi_i3c_master_irq(int irq, void *data)
+{
+ struct adi_i3c_master *master = data;
+ u32 pending;
+
+ pending = readl(master->regs + REG_IRQ_PENDING);
+ writel(pending, master->regs + REG_IRQ_PENDING);
+ if (pending & REG_IRQ_PENDING_CMDR) {
+ scoped_guard(spinlock_irqsave, &master->xferqueue.lock) {
+ adi_i3c_master_end_xfer_locked(master, pending);
+ }
+ }
+ if (pending & REG_IRQ_PENDING_IBI)
+ adi_i3c_master_demux_ibis(master);
+ if (pending & REG_IRQ_PENDING_DAA)
+ adi_i3c_master_handle_da_req(master);
+
+ return IRQ_HANDLED;
+}
+
+static int adi_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
+ struct i2c_msg *xfers,
+ int nxfers)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct adi_i3c_xfer *xfer __free(kfree) = NULL;
+ int i;
+
+ if (!nxfers)
+ return 0;
+ for (i = 0; i < nxfers; i++) {
+ if (xfers[i].flags & I2C_M_TEN)
+ return -EOPNOTSUPP;
+ }
+ xfer = adi_i3c_master_alloc_xfer(master, nxfers);
+ if (!xfer)
+ return -ENOMEM;
+
+ for (i = 0; i < nxfers; i++) {
+ struct adi_i3c_cmd *ccmd = &xfer->cmds[i];
+
+ ccmd->cmd0 = REG_CMD_FIFO_0_DEV_ADDR(xfers[i].addr);
+
+ if (xfers[i].flags & I2C_M_RD) {
+ ccmd->cmd0 |= REG_CMD_FIFO_0_RNW;
+ ccmd->rx_buf = xfers[i].buf;
+ ccmd->rx_len = xfers[i].len;
+ } else {
+ ccmd->tx_buf = xfers[i].buf;
+ ccmd->tx_len = xfers[i].len;
+ }
+
+ ccmd->cmd0 |= REG_CMD_FIFO_0_LEN(xfers[i].len);
+ }
+
+ adi_i3c_master_queue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp,
+ m->i2c.timeout))
+ adi_i3c_master_unqueue_xfer(master, xfer);
+
+ return xfer->ret;
+}
+
+static int adi_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct i3c_dev_desc *i3cdev;
+ u32 enabled = 0;
+ int ret;
+
+ ret = i3c_master_disec_locked(m, dev->info.dyn_addr,
+ I3C_CCC_EVENT_SIR);
+
+ i3c_bus_for_each_i3cdev(&m->bus, i3cdev) {
+ if (dev != i3cdev && i3cdev->ibi)
+ enabled |= i3cdev->ibi->enabled;
+ }
+ if (!enabled) {
+ writel(REG_IBI_CONFIG_LISTEN,
+ master->regs + REG_IBI_CONFIG);
+ writel(readl(master->regs + REG_IRQ_MASK) & ~REG_IRQ_PENDING_IBI,
+ master->regs + REG_IRQ_MASK);
+ }
+
+ return ret;
+}
+
+static int adi_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+
+ writel(REG_IBI_CONFIG_LISTEN | REG_IBI_CONFIG_ENABLE,
+ master->regs + REG_IBI_CONFIG);
+
+ writel(readl(master->regs + REG_IRQ_MASK) | REG_IRQ_PENDING_IBI,
+ master->regs + REG_IRQ_MASK);
+
+ return i3c_master_enec_locked(m, dev->info.dyn_addr,
+ I3C_CCC_EVENT_SIR);
+}
+
+static int adi_i3c_master_request_ibi(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct adi_i3c_i2c_dev_data *data;
+ unsigned int i;
+
+ data = i3c_dev_get_master_data(dev);
+ data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
+ if (IS_ERR(data->ibi_pool))
+ return PTR_ERR(data->ibi_pool);
+
+ scoped_guard(spinlock_irqsave, &master->ibi.lock) {
+ for (i = 0; i < master->ibi.num_slots; i++) {
+ if (!master->ibi.slots[i]) {
+ data->ibi = i;
+ master->ibi.slots[i] = dev;
+ break;
+ }
+ }
+ }
+
+ if (i < master->ibi.num_slots)
+ return 0;
+
+ i3c_generic_ibi_free_pool(data->ibi_pool);
+ data->ibi_pool = NULL;
+
+ return -ENOSPC;
+}
+
+static void adi_i3c_master_free_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct adi_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+
+ scoped_guard(spinlock_irqsave, &master->ibi.lock) {
+ master->ibi.slots[data->ibi] = NULL;
+ }
+
+ i3c_generic_ibi_free_pool(data->ibi_pool);
+}
+
+static void adi_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot)
+{
+ struct adi_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+
+ i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
+}
+
+static const struct i3c_master_controller_ops adi_i3c_master_ops = {
+ .bus_init = adi_i3c_master_bus_init,
+ .bus_cleanup = adi_i3c_master_bus_cleanup,
+ .attach_i3c_dev = adi_i3c_master_attach_i3c_dev,
+ .reattach_i3c_dev = adi_i3c_master_reattach_i3c_dev,
+ .detach_i3c_dev = adi_i3c_master_detach_i3c_dev,
+ .attach_i2c_dev = adi_i3c_master_attach_i2c_dev,
+ .detach_i2c_dev = adi_i3c_master_detach_i2c_dev,
+ .do_daa = adi_i3c_master_do_daa,
+ .supports_ccc_cmd = adi_i3c_master_supports_ccc_cmd,
+ .send_ccc_cmd = adi_i3c_master_send_ccc_cmd,
+ .priv_xfers = adi_i3c_master_priv_xfers,
+ .i2c_xfers = adi_i3c_master_i2c_xfers,
+ .request_ibi = adi_i3c_master_request_ibi,
+ .enable_ibi = adi_i3c_master_enable_ibi,
+ .disable_ibi = adi_i3c_master_disable_ibi,
+ .free_ibi = adi_i3c_master_free_ibi,
+ .recycle_ibi_slot = adi_i3c_master_recycle_ibi_slot,
+};
+
+static const struct of_device_id adi_i3c_master_of_match[] = {
+ { .compatible = "adi,i3c-master-v1" },
+ {}
+};
+
+static int adi_i3c_master_probe(struct platform_device *pdev)
+{
+ struct adi_i3c_master *master;
+ struct clk_bulk_data *clk;
+ unsigned int version;
+ int ret, irq;
+
+ master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return -ENOMEM;
+
+ master->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(master->regs))
+ return PTR_ERR(master->regs);
+
+ ret = devm_clk_bulk_get_all_enabled(&pdev->dev, &clk);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to get clocks\n");
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ version = readl(master->regs + ADI_AXI_REG_VERSION);
+ if (ADI_AXI_PCORE_VER_MAJOR(version) != 1)
+ return dev_err_probe(&pdev->dev, -ENODEV, "Unsupported peripheral version %u.%u.%u\n",
+ ADI_AXI_PCORE_VER_MAJOR(version),
+ ADI_AXI_PCORE_VER_MINOR(version),
+ ADI_AXI_PCORE_VER_PATCH(version));
+
+ writel(0x00, master->regs + REG_ENABLE);
+ writel(0x00, master->regs + REG_IRQ_MASK);
+
+ ret = devm_request_irq(&pdev->dev, irq, adi_i3c_master_irq, 0,
+ dev_name(&pdev->dev), master);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, master);
+
+ master->free_rr_slots = GENMASK(ADI_MAX_DEVS, 1);
+
+ writel(REG_IRQ_PENDING_CMDR, master->regs + REG_IRQ_MASK);
+
+ spin_lock_init(&master->ibi.lock);
+ master->ibi.num_slots = 15;
+ master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
+ sizeof(*master->ibi.slots),
+ GFP_KERNEL);
+ if (!master->ibi.slots)
+ return -ENOMEM;
+
+ spin_lock_init(&master->xferqueue.lock);
+ INIT_LIST_HEAD(&master->xferqueue.list);
+
+ return i3c_master_register(&master->base, &pdev->dev,
+ &adi_i3c_master_ops, false);
+}
+
+static void adi_i3c_master_remove(struct platform_device *pdev)
+{
+ struct adi_i3c_master *master = platform_get_drvdata(pdev);
+
+ writel(0xff, master->regs + REG_IRQ_PENDING);
+ writel(0x00, master->regs + REG_IRQ_MASK);
+ writel(0x01, master->regs + REG_ENABLE);
+
+ i3c_master_unregister(&master->base);
+}
+
+static struct platform_driver adi_i3c_master = {
+ .probe = adi_i3c_master_probe,
+ .remove = adi_i3c_master_remove,
+ .driver = {
+ .name = "adi-i3c-master",
+ .of_match_table = adi_i3c_master_of_match,
+ },
+};
+module_platform_driver(adi_i3c_master);
+
+MODULE_AUTHOR("Jorge Marques <jorge.marques@analog.com>");
+MODULE_DESCRIPTION("Analog Devices I3C master driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i3c/master/ast2600-i3c-master.c b/drivers/i3c/master/ast2600-i3c-master.c
new file mode 100644
index 000000000000..e05e83812c71
--- /dev/null
+++ b/drivers/i3c/master/ast2600-i3c-master.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023 Code Construct
+ *
+ * Author: Jeremy Kerr <jk@codeconstruct.com.au>
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "dw-i3c-master.h"
+
+/* AST2600-specific global register set */
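+/* Each controller instance has a four-register (16-byte) stride starting at 0x10 */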
+#define AST2600_I3CG_REG0(idx) (((idx) * 4 * 4) + 0x10)
+#define AST2600_I3CG_REG1(idx) (((idx) * 4 * 4) + 0x14)
+
+#define AST2600_I3CG_REG0_SDA_PULLUP_EN_MASK GENMASK(29, 28)
+#define AST2600_I3CG_REG0_SDA_PULLUP_EN_2K (0x0 << 28)
+#define AST2600_I3CG_REG0_SDA_PULLUP_EN_750 (0x2 << 28)
+#define AST2600_I3CG_REG0_SDA_PULLUP_EN_545 (0x3 << 28)
+
+#define AST2600_I3CG_REG1_I2C_MODE BIT(0)
+#define AST2600_I3CG_REG1_TEST_MODE BIT(1)
+#define AST2600_I3CG_REG1_ACT_MODE_MASK GENMASK(3, 2)
+#define AST2600_I3CG_REG1_ACT_MODE(x) (((x) << 2) & AST2600_I3CG_REG1_ACT_MODE_MASK)
+#define AST2600_I3CG_REG1_PENDING_INT_MASK GENMASK(7, 4)
+#define AST2600_I3CG_REG1_PENDING_INT(x) (((x) << 4) & AST2600_I3CG_REG1_PENDING_INT_MASK)
+#define AST2600_I3CG_REG1_SA_MASK GENMASK(14, 8)
+#define AST2600_I3CG_REG1_SA(x) (((x) << 8) & AST2600_I3CG_REG1_SA_MASK)
+#define AST2600_I3CG_REG1_SA_EN BIT(15)
+#define AST2600_I3CG_REG1_INST_ID_MASK GENMASK(19, 16)
+#define AST2600_I3CG_REG1_INST_ID(x) (((x) << 16) & AST2600_I3CG_REG1_INST_ID_MASK)
+
+#define AST2600_DEFAULT_SDA_PULLUP_OHMS 2000
+
+#define DEV_ADDR_TABLE_IBI_PEC BIT(11)
+
+struct ast2600_i3c {
+ struct dw_i3c_master dw;
+ struct regmap *global_regs;
+ unsigned int global_idx;
+ unsigned int sda_pullup;
+};
+
+static struct ast2600_i3c *to_ast2600_i3c(struct dw_i3c_master *dw)
+{
+ return container_of(dw, struct ast2600_i3c, dw);
+}
+
+static int ast2600_i3c_pullup_to_reg(unsigned int ohms, u32 *regp)
+{
+ u32 reg;
+
+ switch (ohms) {
+ case 2000:
+ reg = AST2600_I3CG_REG0_SDA_PULLUP_EN_2K;
+ break;
+ case 750:
+ reg = AST2600_I3CG_REG0_SDA_PULLUP_EN_750;
+ break;
+ case 545:
+ reg = AST2600_I3CG_REG0_SDA_PULLUP_EN_545;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (regp)
+ *regp = reg;
+
+ return 0;
+}
+
+static int ast2600_i3c_init(struct dw_i3c_master *dw)
+{
+ struct ast2600_i3c *i3c = to_ast2600_i3c(dw);
+ u32 reg = 0;
+ int rc;
+
+ /* reg0: set SDA pullup values */
+ rc = ast2600_i3c_pullup_to_reg(i3c->sda_pullup, &reg);
+ if (rc)
+ return rc;
+
+ rc = regmap_write(i3c->global_regs,
+ AST2600_I3CG_REG0(i3c->global_idx), reg);
+ if (rc)
+ return rc;
+
+ /* reg1: set up the instance id, but leave everything else disabled,
+ * as it's all for client mode
+ */
+ reg = AST2600_I3CG_REG1_INST_ID(i3c->global_idx);
+ rc = regmap_write(i3c->global_regs,
+ AST2600_I3CG_REG1(i3c->global_idx), reg);
+
+ return rc;
+}
+
+static void ast2600_i3c_set_dat_ibi(struct dw_i3c_master *i3c,
+ struct i3c_dev_desc *dev,
+ bool enable, u32 *dat)
+{
+ /*
+ * The ast2600 i3c controller will lock up on receiving 4n+1-byte IBIs
+ * if the PEC is disabled. We have no way to restrict the length of
+ * IBIs sent to the controller, so we need to unconditionally enable
+ * PEC checking, which means we drop a byte of payload data
+ */
+ if (enable && dev->info.bcr & I3C_BCR_IBI_PAYLOAD) {
+ dev_warn_once(&i3c->base.dev,
+ "Enabling PEC workaround. IBI payloads will be truncated\n");
+ *dat |= DEV_ADDR_TABLE_IBI_PEC;
+ }
+}
+
+static const struct dw_i3c_platform_ops ast2600_i3c_ops = {
+ .init = ast2600_i3c_init,
+ .set_dat_ibi = ast2600_i3c_set_dat_ibi,
+};
+
+static int ast2600_i3c_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct of_phandle_args gspec;
+ struct ast2600_i3c *i3c;
+ int rc;
+
+ i3c = devm_kzalloc(&pdev->dev, sizeof(*i3c), GFP_KERNEL);
+ if (!i3c)
+ return -ENOMEM;
+
+ rc = of_parse_phandle_with_fixed_args(np, "aspeed,global-regs", 1, 0,
+ &gspec);
+ if (rc)
+ return -ENODEV;
+
+ i3c->global_regs = syscon_node_to_regmap(gspec.np);
+ of_node_put(gspec.np);
+
+ if (IS_ERR(i3c->global_regs))
+ return PTR_ERR(i3c->global_regs);
+
+ i3c->global_idx = gspec.args[0];
+
+ rc = of_property_read_u32(np, "sda-pullup-ohms", &i3c->sda_pullup);
+ if (rc)
+ i3c->sda_pullup = AST2600_DEFAULT_SDA_PULLUP_OHMS;
+
+ rc = ast2600_i3c_pullup_to_reg(i3c->sda_pullup, NULL);
+ if (rc)
+ dev_err(&pdev->dev, "invalid sda-pullup value %d\n",
+ i3c->sda_pullup);
+
+ i3c->dw.platform_ops = &ast2600_i3c_ops;
+ return dw_i3c_common_probe(&i3c->dw, pdev);
+}
+
+static void ast2600_i3c_remove(struct platform_device *pdev)
+{
+ struct dw_i3c_master *dw_i3c = platform_get_drvdata(pdev);
+
+ dw_i3c_common_remove(dw_i3c);
+}
+
+static const struct of_device_id ast2600_i3c_master_of_match[] = {
+ { .compatible = "aspeed,ast2600-i3c", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ast2600_i3c_master_of_match);
+
+static struct platform_driver ast2600_i3c_driver = {
+ .probe = ast2600_i3c_probe,
+ .remove = ast2600_i3c_remove,
+ .driver = {
+ .name = "ast2600-i3c-master",
+ .of_match_table = ast2600_i3c_master_of_match,
+ },
+};
+module_platform_driver(ast2600_i3c_driver);
+
+MODULE_AUTHOR("Jeremy Kerr <jk@codeconstruct.com.au>");
+MODULE_DESCRIPTION("ASPEED AST2600 I3C driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index 51a8608203de..276592a8222e 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -17,10 +17,15 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
+#include "../internals.h"
+#include "dw-i3c-master.h"
+
#define DEVICE_CTRL 0x0
#define DEV_CTRL_ENABLE BIT(31)
#define DEV_CTRL_RESUME BIT(30)
@@ -74,7 +79,22 @@
#define RX_TX_DATA_PORT 0x14
#define IBI_QUEUE_STATUS 0x18
+#define IBI_QUEUE_STATUS_IBI_ID(x) (((x) & GENMASK(15, 8)) >> 8)
+#define IBI_QUEUE_STATUS_DATA_LEN(x) ((x) & GENMASK(7, 0))
+#define IBI_QUEUE_IBI_ADDR(x) (IBI_QUEUE_STATUS_IBI_ID(x) >> 1)
+#define IBI_QUEUE_IBI_RNW(x) (IBI_QUEUE_STATUS_IBI_ID(x) & BIT(0))
+#define IBI_TYPE_MR(x) \
+ ((IBI_QUEUE_IBI_ADDR(x) != I3C_HOT_JOIN_ADDR) && !IBI_QUEUE_IBI_RNW(x))
+#define IBI_TYPE_HJ(x) \
+ ((IBI_QUEUE_IBI_ADDR(x) == I3C_HOT_JOIN_ADDR) && !IBI_QUEUE_IBI_RNW(x))
+#define IBI_TYPE_SIRQ(x) \
+ ((IBI_QUEUE_IBI_ADDR(x) != I3C_HOT_JOIN_ADDR) && IBI_QUEUE_IBI_RNW(x))
+
#define QUEUE_THLD_CTRL 0x1c
+#define QUEUE_THLD_CTRL_IBI_STAT_MASK GENMASK(31, 24)
+#define QUEUE_THLD_CTRL_IBI_STAT(x) (((x) - 1) << 24)
+#define QUEUE_THLD_CTRL_IBI_DATA_MASK GENMASK(20, 16)
+#define QUEUE_THLD_CTRL_IBI_DATA(x) ((x) << 16)
#define QUEUE_THLD_CTRL_RESP_BUF_MASK GENMASK(15, 8)
#define QUEUE_THLD_CTRL_RESP_BUF(x) (((x) - 1) << 8)
@@ -184,13 +204,13 @@
#define EXTENDED_CAPABILITY 0xe8
#define SLAVE_CONFIG 0xec
+#define DEV_ADDR_TABLE_IBI_MDB BIT(12)
+#define DEV_ADDR_TABLE_SIR_REJECT BIT(13)
#define DEV_ADDR_TABLE_LEGACY_I2C_DEV BIT(31)
#define DEV_ADDR_TABLE_DYNAMIC_ADDR(x) (((x) << 16) & GENMASK(23, 16))
#define DEV_ADDR_TABLE_STATIC_ADDR(x) ((x) & GENMASK(6, 0))
#define DEV_ADDR_TABLE_LOC(start, idx) ((start) + ((idx) << 2))
-#define MAX_DEVS 32
-
#define I3C_BUS_SDR1_SCL_RATE 8000000
#define I3C_BUS_SDR2_SCL_RATE 6000000
#define I3C_BUS_SDR3_SCL_RATE 4000000
@@ -200,11 +220,15 @@
#define I3C_BUS_THIGH_MAX_NS 41
#define XFER_TIMEOUT (msecs_to_jiffies(1000))
+#define RPM_AUTOSUSPEND_TIMEOUT 1000 /* ms */
-struct dw_i3c_master_caps {
- u8 cmdfifodepth;
- u8 datafifodepth;
-};
+/* Timing values to configure 12.5MHz frequency */
+#define AMD_I3C_OD_TIMING 0x4C007C
+#define AMD_I3C_PP_TIMING 0x8001A
+
+/* List of quirks */
+#define AMD_I3C_OD_PP_TIMING BIT(1)
+#define DW_I3C_DISABLE_RUNTIME_PM_QUIRK BIT(2)
struct dw_i3c_cmd {
u32 cmd_lo;
@@ -221,39 +245,17 @@ struct dw_i3c_xfer {
struct completion comp;
int ret;
unsigned int ncmds;
- struct dw_i3c_cmd cmds[];
-};
-
-struct dw_i3c_master {
- struct i3c_master_controller base;
- u16 maxdevs;
- u16 datstartaddr;
- u32 free_pos;
- struct {
- struct list_head list;
- struct dw_i3c_xfer *cur;
- spinlock_t lock;
- } xferqueue;
- struct dw_i3c_master_caps caps;
- void __iomem *regs;
- struct reset_control *core_rst;
- struct clk *core_clk;
- char version[5];
- char type[5];
- u8 addrs[MAX_DEVS];
+ struct dw_i3c_cmd cmds[] __counted_by(ncmds);
};
struct dw_i3c_i2c_dev_data {
u8 index;
+ struct i3c_generic_ibi_pool *ibi_pool;
};
-static u8 even_parity(u8 p)
-{
- p ^= p >> 4;
- p &= 0xf;
-
- return (0x9669 >> p) & 1;
-}
+struct dw_i3c_drvdata {
+ u32 flags;
+};
static bool dw_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
const struct i3c_ccc_cmd *cmd)
@@ -306,7 +308,14 @@ static void dw_i3c_master_disable(struct dw_i3c_master *master)
static void dw_i3c_master_enable(struct dw_i3c_master *master)
{
- writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_ENABLE,
+ u32 dev_ctrl;
+
+ dev_ctrl = readl(master->regs + DEVICE_CTRL);
+ /* For now don't support Hot-Join */
+ dev_ctrl |= DEV_CTRL_HOT_JOIN_NACK;
+ if (master->i2c_slv_prsnt)
+ dev_ctrl |= DEV_CTRL_I2C_SLAVE_PRESENT;
+ writel(dev_ctrl | DEV_CTRL_ENABLE,
master->regs + DEVICE_CTRL);
}
@@ -315,7 +324,7 @@ static int dw_i3c_master_get_addr_pos(struct dw_i3c_master *master, u8 addr)
int pos;
for (pos = 0; pos < master->maxdevs; pos++) {
- if (addr == master->addrs[pos])
+ if (addr == master->devs[pos].addr)
return pos;
}
@@ -333,25 +342,19 @@ static int dw_i3c_master_get_free_pos(struct dw_i3c_master *master)
static void dw_i3c_master_wr_tx_fifo(struct dw_i3c_master *master,
const u8 *bytes, int nbytes)
{
- writesl(master->regs + RX_TX_DATA_PORT, bytes, nbytes / 4);
- if (nbytes & 3) {
- u32 tmp = 0;
-
- memcpy(&tmp, bytes + (nbytes & ~3), nbytes & 3);
- writesl(master->regs + RX_TX_DATA_PORT, &tmp, 1);
- }
+ i3c_writel_fifo(master->regs + RX_TX_DATA_PORT, bytes, nbytes);
}
static void dw_i3c_master_read_rx_fifo(struct dw_i3c_master *master,
u8 *bytes, int nbytes)
{
- readsl(master->regs + RX_TX_DATA_PORT, bytes, nbytes / 4);
- if (nbytes & 3) {
- u32 tmp;
+ i3c_readl_fifo(master->regs + RX_TX_DATA_PORT, bytes, nbytes);
+}
- readsl(master->regs + RX_TX_DATA_PORT, &tmp, 1);
- memcpy(bytes + (nbytes & ~3), &tmp, nbytes & 3);
- }
+static void dw_i3c_master_read_ibi_fifo(struct dw_i3c_master *master,
+ u8 *bytes, int nbytes)
+{
+ i3c_readl_fifo(master->regs + IBI_QUEUE_STATUS, bytes, nbytes);
}
static struct dw_i3c_xfer *
@@ -515,6 +518,32 @@ static void dw_i3c_master_end_xfer_locked(struct dw_i3c_master *master, u32 isr)
dw_i3c_master_start_xfer_locked(master);
}
+static void dw_i3c_master_set_intr_regs(struct dw_i3c_master *master)
+{
+ u32 thld_ctrl;
+
+ thld_ctrl = readl(master->regs + QUEUE_THLD_CTRL);
+ thld_ctrl &= ~(QUEUE_THLD_CTRL_RESP_BUF_MASK |
+ QUEUE_THLD_CTRL_IBI_STAT_MASK |
+ QUEUE_THLD_CTRL_IBI_DATA_MASK);
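+ /* Interrupt as soon as a single IBI status entry is queued */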
+ thld_ctrl |= QUEUE_THLD_CTRL_IBI_STAT(1) |
+ QUEUE_THLD_CTRL_IBI_DATA(31);
+ writel(thld_ctrl, master->regs + QUEUE_THLD_CTRL);
+
+ thld_ctrl = readl(master->regs + DATA_BUFFER_THLD_CTRL);
+ thld_ctrl &= ~DATA_BUFFER_THLD_CTRL_RX_BUF;
+ writel(thld_ctrl, master->regs + DATA_BUFFER_THLD_CTRL);
+
+ writel(INTR_ALL, master->regs + INTR_STATUS);
+ writel(INTR_MASTER_MASK, master->regs + INTR_STATUS_EN);
+ writel(INTR_MASTER_MASK, master->regs + INTR_SIGNAL_EN);
+
+ master->sir_rej_mask = IBI_REQ_REJECT_ALL;
+ writel(master->sir_rej_mask, master->regs + IBI_SIR_REQ_REJECT);
+
+ writel(IBI_REQ_REJECT_ALL, master->regs + IBI_MR_REQ_REJECT);
+}
+
static int dw_i3c_clk_cfg(struct dw_i3c_master *master)
{
unsigned long core_rate, core_period;
@@ -531,19 +560,28 @@ static int dw_i3c_clk_cfg(struct dw_i3c_master *master)
if (hcnt < SCL_I3C_TIMING_CNT_MIN)
hcnt = SCL_I3C_TIMING_CNT_MIN;
- lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_TYP_I3C_SCL_RATE) - hcnt;
+ lcnt = DIV_ROUND_UP(core_rate, master->base.bus.scl_rate.i3c) - hcnt;
if (lcnt < SCL_I3C_TIMING_CNT_MIN)
lcnt = SCL_I3C_TIMING_CNT_MIN;
scl_timing = SCL_I3C_TIMING_HCNT(hcnt) | SCL_I3C_TIMING_LCNT(lcnt);
writel(scl_timing, master->regs + SCL_I3C_PP_TIMING);
+ master->i3c_pp_timing = scl_timing;
- if (!(readl(master->regs + DEVICE_CTRL) & DEV_CTRL_I2C_SLAVE_PRESENT))
+ /*
+ * In pure i3c mode, MST_FREE represents tCAS. In shared mode, this
+ * will be set up by dw_i2c_clk_cfg as tLOW.
+ */
+ if (master->base.bus.mode == I3C_BUS_MODE_PURE) {
writel(BUS_I3C_MST_FREE(lcnt), master->regs + BUS_FREE_TIMING);
+ master->bus_free_timing = BUS_I3C_MST_FREE(lcnt);
+ }
- lcnt = DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, core_period);
+ lcnt = max_t(u8,
+ DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, core_period), lcnt);
scl_timing = SCL_I3C_TIMING_HCNT(hcnt) | SCL_I3C_TIMING_LCNT(lcnt);
writel(scl_timing, master->regs + SCL_I3C_OD_TIMING);
+ master->i3c_od_timing = scl_timing;
lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR1_SCL_RATE) - hcnt;
scl_timing = SCL_EXT_LCNT_1(lcnt);
@@ -554,6 +592,7 @@ static int dw_i3c_clk_cfg(struct dw_i3c_master *master)
lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR4_SCL_RATE) - hcnt;
scl_timing |= SCL_EXT_LCNT_4(lcnt);
writel(scl_timing, master->regs + SCL_EXT_LCNT_TIMING);
+ master->ext_lcnt_timing = scl_timing;
return 0;
}
@@ -571,20 +610,25 @@ static int dw_i2c_clk_cfg(struct dw_i3c_master *master)
core_period = DIV_ROUND_UP(1000000000, core_rate);
lcnt = DIV_ROUND_UP(I3C_BUS_I2C_FMP_TLOW_MIN_NS, core_period);
- hcnt = DIV_ROUND_UP(core_rate, I3C_BUS_I2C_FM_PLUS_SCL_RATE) - lcnt;
+ hcnt = DIV_ROUND_UP(core_rate, I3C_BUS_I2C_FM_PLUS_SCL_MAX_RATE) - lcnt;
scl_timing = SCL_I2C_FMP_TIMING_HCNT(hcnt) |
SCL_I2C_FMP_TIMING_LCNT(lcnt);
writel(scl_timing, master->regs + SCL_I2C_FMP_TIMING);
+ master->i2c_fmp_timing = scl_timing;
lcnt = DIV_ROUND_UP(I3C_BUS_I2C_FM_TLOW_MIN_NS, core_period);
- hcnt = DIV_ROUND_UP(core_rate, I3C_BUS_I2C_FM_SCL_RATE) - lcnt;
+ hcnt = DIV_ROUND_UP(core_rate, I3C_BUS_I2C_FM_SCL_MAX_RATE) - lcnt;
scl_timing = SCL_I2C_FM_TIMING_HCNT(hcnt) |
SCL_I2C_FM_TIMING_LCNT(lcnt);
writel(scl_timing, master->regs + SCL_I2C_FM_TIMING);
+ master->i2c_fm_timing = scl_timing;
writel(BUS_I3C_MST_FREE(lcnt), master->regs + BUS_FREE_TIMING);
+ master->bus_free_timing = BUS_I3C_MST_FREE(lcnt);
+
writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_I2C_SLAVE_PRESENT,
master->regs + DEVICE_CTRL);
+ master->i2c_slv_prsnt = true;
return 0;
}
@@ -594,61 +638,57 @@ static int dw_i3c_master_bus_init(struct i3c_master_controller *m)
struct dw_i3c_master *master = to_dw_i3c_master(m);
struct i3c_bus *bus = i3c_master_get_bus(m);
struct i3c_device_info info = { };
- u32 thld_ctrl;
int ret;
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev,
+ "<%s> cannot resume i3c bus master, err: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = master->platform_ops->init(master);
+ if (ret)
+ goto rpm_out;
+
switch (bus->mode) {
case I3C_BUS_MODE_MIXED_FAST:
case I3C_BUS_MODE_MIXED_LIMITED:
ret = dw_i2c_clk_cfg(master);
if (ret)
- return ret;
+ goto rpm_out;
fallthrough;
case I3C_BUS_MODE_PURE:
ret = dw_i3c_clk_cfg(master);
if (ret)
- return ret;
+ goto rpm_out;
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ goto rpm_out;
}
- thld_ctrl = readl(master->regs + QUEUE_THLD_CTRL);
- thld_ctrl &= ~QUEUE_THLD_CTRL_RESP_BUF_MASK;
- writel(thld_ctrl, master->regs + QUEUE_THLD_CTRL);
-
- thld_ctrl = readl(master->regs + DATA_BUFFER_THLD_CTRL);
- thld_ctrl &= ~DATA_BUFFER_THLD_CTRL_RX_BUF;
- writel(thld_ctrl, master->regs + DATA_BUFFER_THLD_CTRL);
-
- writel(INTR_ALL, master->regs + INTR_STATUS);
- writel(INTR_MASTER_MASK, master->regs + INTR_STATUS_EN);
- writel(INTR_MASTER_MASK, master->regs + INTR_SIGNAL_EN);
-
ret = i3c_master_get_free_addr(m, 0);
if (ret < 0)
- return ret;
+ goto rpm_out;
writel(DEV_ADDR_DYNAMIC_ADDR_VALID | DEV_ADDR_DYNAMIC(ret),
master->regs + DEVICE_ADDR);
-
+ master->dev_addr = ret;
memset(&info, 0, sizeof(info));
info.dyn_addr = ret;
ret = i3c_master_set_info(&master->base, &info);
if (ret)
- return ret;
-
- writel(IBI_REQ_REJECT_ALL, master->regs + IBI_SIR_REQ_REJECT);
- writel(IBI_REQ_REJECT_ALL, master->regs + IBI_MR_REQ_REJECT);
-
- /* For now don't support Hot-Join */
- writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_HOT_JOIN_NACK,
- master->regs + DEVICE_CTRL);
+ goto rpm_out;
+ dw_i3c_master_set_intr_regs(master);
dw_i3c_master_enable(master);
- return 0;
+rpm_out:
+ pm_runtime_put_autosuspend(master->dev);
+ return ret;
}
static void dw_i3c_master_bus_cleanup(struct i3c_master_controller *m)
@@ -741,6 +781,12 @@ static int dw_i3c_ccc_get(struct dw_i3c_master *master, struct i3c_ccc_cmd *ccc)
return ret;
}
+static void amd_configure_od_pp_quirk(struct dw_i3c_master *master)
+{
+ master->i3c_od_timing = AMD_I3C_OD_TIMING;
+ master->i3c_pp_timing = AMD_I3C_PP_TIMING;
+}
+
static int dw_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
struct i3c_ccc_cmd *ccc)
{
@@ -750,11 +796,27 @@ static int dw_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
if (ccc->id == I3C_CCC_ENTDAA)
return -EINVAL;
+ /* AMD platform specific OD and PP timings */
+ if (master->quirks & AMD_I3C_OD_PP_TIMING) {
+ amd_configure_od_pp_quirk(master);
+ writel(master->i3c_pp_timing, master->regs + SCL_I3C_PP_TIMING);
+ writel(master->i3c_od_timing, master->regs + SCL_I3C_OD_TIMING);
+ }
+
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev,
+ "<%s> cannot resume i3c bus master, err: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
if (ccc->rnw)
ret = dw_i3c_ccc_get(master, ccc);
else
ret = dw_i3c_ccc_set(master, ccc);
+ pm_runtime_put_autosuspend(master->dev);
return ret;
}
@@ -764,9 +826,17 @@ static int dw_i3c_master_daa(struct i3c_master_controller *m)
struct dw_i3c_xfer *xfer;
struct dw_i3c_cmd *cmd;
u32 olddevs, newdevs;
- u8 p, last_addr = 0;
+ u8 last_addr = 0;
int ret, pos;
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev,
+ "<%s> cannot resume i3c bus master, err: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
olddevs = ~(master->free_pos);
/* Prepare DAT before launching DAA. */
@@ -775,27 +845,34 @@ static int dw_i3c_master_daa(struct i3c_master_controller *m)
continue;
ret = i3c_master_get_free_addr(m, last_addr + 1);
- if (ret < 0)
- return -ENOSPC;
+ if (ret < 0) {
+ ret = -ENOSPC;
+ goto rpm_out;
+ }
- master->addrs[pos] = ret;
- p = even_parity(ret);
+ master->devs[pos].addr = ret;
last_addr = ret;
- ret |= (p << 7);
+
+ ret |= parity8(ret) ? 0 : BIT(7);
writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(ret),
master->regs +
DEV_ADDR_TABLE_LOC(master->datstartaddr, pos));
+
+ ret = 0;
}
xfer = dw_i3c_master_alloc_xfer(master, 1);
- if (!xfer)
- return -ENOMEM;
+ if (!xfer) {
+ ret = -ENOMEM;
+ goto rpm_out;
+ }
pos = dw_i3c_master_get_free_pos(master);
if (pos < 0) {
dw_i3c_master_free_xfer(xfer);
- return pos;
+ ret = pos;
+ goto rpm_out;
}
cmd = &xfer->cmds[0];
cmd->cmd_hi = 0x1;
@@ -815,12 +892,14 @@ static int dw_i3c_master_daa(struct i3c_master_controller *m)
for (pos = 0; pos < master->maxdevs; pos++) {
if (newdevs & BIT(pos))
- i3c_master_add_i3c_dev_locked(m, master->addrs[pos]);
+ i3c_master_add_i3c_dev_locked(m, master->devs[pos].addr);
}
dw_i3c_master_free_xfer(xfer);
- return 0;
+rpm_out:
+ pm_runtime_put_autosuspend(master->dev);
+ return ret;
}
static int dw_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
@@ -838,7 +917,7 @@ static int dw_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
return 0;
if (i3c_nxfers > master->caps.cmdfifodepth)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
for (i = 0; i < i3c_nxfers; i++) {
if (i3c_xfers[i].rnw)
@@ -849,12 +928,20 @@ static int dw_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
if (ntxwords > master->caps.datafifodepth ||
nrxwords > master->caps.datafifodepth)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
xfer = dw_i3c_master_alloc_xfer(master, i3c_nxfers);
if (!xfer)
return -ENOMEM;
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev,
+ "<%s> cannot resume i3c bus master, err: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
for (i = 0; i < i3c_nxfers; i++) {
struct dw_i3c_cmd *cmd = &xfer->cmds[i];
@@ -886,9 +973,17 @@ static int dw_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
dw_i3c_master_dequeue_xfer(master, xfer);
+ for (i = 0; i < i3c_nxfers; i++) {
+ struct dw_i3c_cmd *cmd = &xfer->cmds[i];
+
+ if (i3c_xfers[i].rnw)
+ i3c_xfers[i].len = cmd->rx_len;
+ }
+
ret = xfer->ret;
dw_i3c_master_free_xfer(xfer);
+ pm_runtime_put_autosuspend(master->dev);
return ret;
}
@@ -907,11 +1002,11 @@ static int dw_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
master->regs +
DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
- master->addrs[data->index] = 0;
+ master->devs[data->index].addr = 0;
master->free_pos |= BIT(data->index);
data->index = pos;
- master->addrs[pos] = dev->info.dyn_addr;
+ master->devs[pos].addr = dev->info.dyn_addr;
master->free_pos &= ~BIT(pos);
}
@@ -919,7 +1014,7 @@ static int dw_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
master->regs +
DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
- master->addrs[data->index] = dev->info.dyn_addr;
+ master->devs[data->index].addr = dev->info.dyn_addr;
return 0;
}
@@ -940,11 +1035,11 @@ static int dw_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
return -ENOMEM;
data->index = pos;
- master->addrs[pos] = dev->info.dyn_addr ? : dev->info.static_addr;
+ master->devs[pos].addr = dev->info.dyn_addr ? : dev->info.static_addr;
master->free_pos &= ~BIT(pos);
i3c_dev_set_master_data(dev, data);
- writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(master->addrs[pos]),
+ writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(master->devs[pos].addr),
master->regs +
DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
@@ -962,13 +1057,13 @@ static void dw_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
i3c_dev_set_master_data(dev, NULL);
- master->addrs[data->index] = 0;
+ master->devs[data->index].addr = 0;
master->free_pos |= BIT(data->index);
kfree(data);
}
static int dw_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
- const struct i2c_msg *i2c_xfers,
+ struct i2c_msg *i2c_xfers,
int i2c_nxfers)
{
struct dw_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
@@ -982,7 +1077,7 @@ static int dw_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
return 0;
if (i2c_nxfers > master->caps.cmdfifodepth)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
for (i = 0; i < i2c_nxfers; i++) {
if (i2c_xfers[i].flags & I2C_M_RD)
@@ -993,12 +1088,20 @@ static int dw_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
if (ntxwords > master->caps.datafifodepth ||
nrxwords > master->caps.datafifodepth)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
xfer = dw_i3c_master_alloc_xfer(master, i2c_nxfers);
if (!xfer)
return -ENOMEM;
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev,
+ "<%s> cannot resume i3c bus master, err: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
for (i = 0; i < i2c_nxfers; i++) {
struct dw_i3c_cmd *cmd = &xfer->cmds[i];
@@ -1023,12 +1126,13 @@ static int dw_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
}
dw_i3c_master_enqueue_xfer(master, xfer);
- if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
+ if (!wait_for_completion_timeout(&xfer->comp, m->i2c.timeout))
dw_i3c_master_dequeue_xfer(master, xfer);
ret = xfer->ret;
dw_i3c_master_free_xfer(xfer);
+ pm_runtime_put_autosuspend(master->dev);
return ret;
}
@@ -1048,7 +1152,8 @@ static int dw_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
return -ENOMEM;
data->index = pos;
- master->addrs[pos] = dev->addr;
+ master->devs[pos].addr = dev->addr;
+ master->devs[pos].is_i2c_addr = true;
master->free_pos &= ~BIT(pos);
i2c_dev_set_master_data(dev, data);
@@ -1071,11 +1176,295 @@ static void dw_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
i2c_dev_set_master_data(dev, NULL);
- master->addrs[data->index] = 0;
+ master->devs[data->index].addr = 0;
master->free_pos |= BIT(data->index);
kfree(data);
}
+static int dw_i3c_master_request_ibi(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req)
+{
+ struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ unsigned long flags;
+
+ data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
+ if (IS_ERR(data->ibi_pool))
+ return PTR_ERR(data->ibi_pool);
+
+ spin_lock_irqsave(&master->devs_lock, flags);
+ master->devs[data->index].ibi_dev = dev;
+ spin_unlock_irqrestore(&master->devs_lock, flags);
+
+ return 0;
+}
+
+static void dw_i3c_master_free_ibi(struct i3c_dev_desc *dev)
+{
+ struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ unsigned long flags;
+
+ spin_lock_irqsave(&master->devs_lock, flags);
+ master->devs[data->index].ibi_dev = NULL;
+ spin_unlock_irqrestore(&master->devs_lock, flags);
+
+ i3c_generic_ibi_free_pool(data->ibi_pool);
+ data->ibi_pool = NULL;
+}
+
+static void dw_i3c_master_enable_sir_signal(struct dw_i3c_master *master, bool enable)
+{
+ u32 reg;
+
+ reg = readl(master->regs + INTR_STATUS_EN);
+ reg &= ~INTR_IBI_THLD_STAT;
+ if (enable)
+ reg |= INTR_IBI_THLD_STAT;
+ writel(reg, master->regs + INTR_STATUS_EN);
+
+ reg = readl(master->regs + INTR_SIGNAL_EN);
+ reg &= ~INTR_IBI_THLD_STAT;
+ if (enable)
+ reg |= INTR_IBI_THLD_STAT;
+ writel(reg, master->regs + INTR_SIGNAL_EN);
+}
+
+static void dw_i3c_master_set_sir_enabled(struct dw_i3c_master *master,
+ struct i3c_dev_desc *dev,
+ u8 idx, bool enable)
+{
+ unsigned long flags;
+ u32 dat_entry, reg;
+ bool global;
+
+ dat_entry = DEV_ADDR_TABLE_LOC(master->datstartaddr, idx);
+
+ spin_lock_irqsave(&master->devs_lock, flags);
+ reg = readl(master->regs + dat_entry);
+ if (enable) {
+ reg &= ~DEV_ADDR_TABLE_SIR_REJECT;
+ if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD)
+ reg |= DEV_ADDR_TABLE_IBI_MDB;
+ } else {
+ reg |= DEV_ADDR_TABLE_SIR_REJECT;
+ }
+ master->platform_ops->set_dat_ibi(master, dev, enable, &reg);
+ writel(reg, master->regs + dat_entry);
+
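+ /*
+ * Toggle the controller-level SIR interrupt only when the first source
+ * is accepted, or when every source is rejected again and Hot-Join is
+ * NACKed.
+ */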
+ if (enable) {
+ global = (master->sir_rej_mask == IBI_REQ_REJECT_ALL);
+ master->sir_rej_mask &= ~BIT(idx);
+ } else {
+ bool hj_rejected = !!(readl(master->regs + DEVICE_CTRL) & DEV_CTRL_HOT_JOIN_NACK);
+
+ master->sir_rej_mask |= BIT(idx);
+ global = (master->sir_rej_mask == IBI_REQ_REJECT_ALL) && hj_rejected;
+ }
+ writel(master->sir_rej_mask, master->regs + IBI_SIR_REQ_REJECT);
+
+ if (global)
+ dw_i3c_master_enable_sir_signal(master, enable);
+
+ spin_unlock_irqrestore(&master->devs_lock, flags);
+}
+
+static int dw_i3c_master_enable_hotjoin(struct i3c_master_controller *m)
+{
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev,
+ "<%s> cannot resume i3c bus master, err: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ dw_i3c_master_enable_sir_signal(master, true);
+ writel(readl(master->regs + DEVICE_CTRL) & ~DEV_CTRL_HOT_JOIN_NACK,
+ master->regs + DEVICE_CTRL);
+
+ return 0;
+}
+
+static int dw_i3c_master_disable_hotjoin(struct i3c_master_controller *m)
+{
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+
+ writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_HOT_JOIN_NACK,
+ master->regs + DEVICE_CTRL);
+
+ pm_runtime_put_autosuspend(master->dev);
+ return 0;
+}
+
+static int dw_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
+{
+ struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ int rc;
+
+ rc = pm_runtime_resume_and_get(master->dev);
+ if (rc < 0) {
+ dev_err(master->dev,
+ "<%s> cannot resume i3c bus master, err: %d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ dw_i3c_master_set_sir_enabled(master, dev, data->index, true);
+
+ rc = i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
+
+ if (rc) {
+ dw_i3c_master_set_sir_enabled(master, dev, data->index, false);
+ pm_runtime_put_autosuspend(master->dev);
+ }
+
+ return rc;
+}
+
+static int dw_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
+{
+ struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ int rc;
+
+ rc = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
+ if (rc)
+ return rc;
+
+ dw_i3c_master_set_sir_enabled(master, dev, data->index, false);
+
+ pm_runtime_put_autosuspend(master->dev);
+ return 0;
+}
+
+static void dw_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot)
+{
+ struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+
+ i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
+}
+
+static void dw_i3c_master_drain_ibi_queue(struct dw_i3c_master *master,
+ int len)
+{
+ int i;
+
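+ /*
+ * IBI data is packed four bytes per queue word; read and discard
+ * enough words to cover @len bytes.
+ */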
+ for (i = 0; i < DIV_ROUND_UP(len, 4); i++)
+ readl(master->regs + IBI_QUEUE_STATUS);
+}
+
+static void dw_i3c_master_handle_ibi_sir(struct dw_i3c_master *master,
+ u32 status)
+{
+ struct dw_i3c_i2c_dev_data *data;
+ struct i3c_ibi_slot *slot;
+ struct i3c_dev_desc *dev;
+ unsigned long flags;
+ u8 addr, len;
+ int idx;
+
+ addr = IBI_QUEUE_IBI_ADDR(status);
+ len = IBI_QUEUE_STATUS_DATA_LEN(status);
+
+ /*
+ * We could be tempted to check the error status in bit 30; however, due
+ * to the PEC errata workaround on some platform implementations (see
+ * ast2600_i3c_set_dat_ibi()), those will almost always have a PEC
+ * error on IBI payload data, as well as losing the last byte of
+ * payload.
+ *
+ * If we implement error status checking on that bit, we may need
+ * a new platform op to validate it.
+ */
+
+ spin_lock_irqsave(&master->devs_lock, flags);
+ idx = dw_i3c_master_get_addr_pos(master, addr);
+ if (idx < 0) {
+ dev_dbg_ratelimited(&master->base.dev,
+ "IBI from unknown addr 0x%x\n", addr);
+ goto err_drain;
+ }
+
+ dev = master->devs[idx].ibi_dev;
+ if (!dev || !dev->ibi) {
+ dev_dbg_ratelimited(&master->base.dev,
+ "IBI from non-requested dev idx %d\n", idx);
+ goto err_drain;
+ }
+
+ data = i3c_dev_get_master_data(dev);
+ slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
+ if (!slot) {
+ dev_dbg_ratelimited(&master->base.dev,
+ "No IBI slots available\n");
+ goto err_drain;
+ }
+
+ if (dev->ibi->max_payload_len < len) {
+ dev_dbg_ratelimited(&master->base.dev,
+ "IBI payload len %d greater than max %d\n",
+ len, dev->ibi->max_payload_len);
+ goto err_drain;
+ }
+
+ if (len) {
+ dw_i3c_master_read_ibi_fifo(master, slot->data, len);
+ slot->len = len;
+ }
+ i3c_master_queue_ibi(dev, slot);
+
+ spin_unlock_irqrestore(&master->devs_lock, flags);
+
+ return;
+
+err_drain:
+ dw_i3c_master_drain_ibi_queue(master, len);
+
+ spin_unlock_irqrestore(&master->devs_lock, flags);
+}
+
+/* "ibis": referring to In-Band Interrupts, and not
+ * https://en.wikipedia.org/wiki/Australian_white_ibis. The latter should
+ * not be handled.
+ */
+static void dw_i3c_master_irq_handle_ibis(struct dw_i3c_master *master)
+{
+ unsigned int i, len, n_ibis;
+ u32 reg;
+
+ reg = readl(master->regs + QUEUE_STATUS_LEVEL);
+ n_ibis = QUEUE_STATUS_IBI_STATUS_CNT(reg);
+ if (!n_ibis)
+ return;
+
+ for (i = 0; i < n_ibis; i++) {
+ reg = readl(master->regs + IBI_QUEUE_STATUS);
+
+ if (IBI_TYPE_SIRQ(reg)) {
+ dw_i3c_master_handle_ibi_sir(master, reg);
+ } else if (IBI_TYPE_HJ(reg)) {
+ queue_work(master->base.wq, &master->hj_work);
+ } else {
+ len = IBI_QUEUE_STATUS_DATA_LEN(reg);
+ dev_info(&master->base.dev,
+ "unsupported IBI type 0x%lx len %d\n",
+ IBI_QUEUE_STATUS_IBI_ID(reg), len);
+ dw_i3c_master_drain_ibi_queue(master, len);
+ }
+ }
+}
+
static irqreturn_t dw_i3c_master_irq_handler(int irq, void *dev_id)
{
struct dw_i3c_master *master = dev_id;
@@ -1094,6 +1483,9 @@ static irqreturn_t dw_i3c_master_irq_handler(int irq, void *dev_id)
writel(INTR_TRANSFER_ERR_STAT, master->regs + INTR_STATUS);
spin_unlock(&master->xferqueue.lock);
+ if (status & INTR_IBI_THLD_STAT)
+ dw_i3c_master_irq_handle_ibis(master);
+
return IRQ_HANDLED;
}
@@ -1110,34 +1502,69 @@ static const struct i3c_master_controller_ops dw_mipi_i3c_ops = {
.attach_i2c_dev = dw_i3c_master_attach_i2c_dev,
.detach_i2c_dev = dw_i3c_master_detach_i2c_dev,
.i2c_xfers = dw_i3c_master_i2c_xfers,
+ .request_ibi = dw_i3c_master_request_ibi,
+ .free_ibi = dw_i3c_master_free_ibi,
+ .enable_ibi = dw_i3c_master_enable_ibi,
+ .disable_ibi = dw_i3c_master_disable_ibi,
+ .recycle_ibi_slot = dw_i3c_master_recycle_ibi_slot,
+ .enable_hotjoin = dw_i3c_master_enable_hotjoin,
+ .disable_hotjoin = dw_i3c_master_disable_hotjoin,
};
-static int dw_i3c_probe(struct platform_device *pdev)
+/* default platform ops implementations */
+static int dw_i3c_platform_init_nop(struct dw_i3c_master *i3c)
+{
+ return 0;
+}
+
+static void dw_i3c_platform_set_dat_ibi_nop(struct dw_i3c_master *i3c,
+ struct i3c_dev_desc *dev,
+ bool enable, u32 *dat)
+{
+}
+
+static const struct dw_i3c_platform_ops dw_i3c_platform_ops_default = {
+ .init = dw_i3c_platform_init_nop,
+ .set_dat_ibi = dw_i3c_platform_set_dat_ibi_nop,
+};
+
+static void dw_i3c_hj_work(struct work_struct *work)
+{
+ struct dw_i3c_master *master =
+ container_of(work, typeof(*master), hj_work);
+
+ i3c_master_do_daa(&master->base);
+}
+
+int dw_i3c_common_probe(struct dw_i3c_master *master,
+ struct platform_device *pdev)
{
- struct dw_i3c_master *master;
int ret, irq;
+ const struct dw_i3c_drvdata *drvdata;
+ unsigned long quirks = 0;
- master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
- if (!master)
- return -ENOMEM;
+ if (!master->platform_ops)
+ master->platform_ops = &dw_i3c_platform_ops_default;
+
+ master->dev = &pdev->dev;
master->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(master->regs))
return PTR_ERR(master->regs);
- master->core_clk = devm_clk_get(&pdev->dev, NULL);
+ master->core_clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(master->core_clk))
return PTR_ERR(master->core_clk);
+ master->pclk = devm_clk_get_optional_enabled(&pdev->dev, "pclk");
+ if (IS_ERR(master->pclk))
+ return PTR_ERR(master->pclk);
+
master->core_rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
"core_rst");
if (IS_ERR(master->core_rst))
return PTR_ERR(master->core_rst);
- ret = clk_prepare_enable(master->core_clk);
- if (ret)
- goto err_disable_core_clk;
-
reset_control_deassert(master->core_rst);
spin_lock_init(&master->xferqueue.lock);
@@ -1153,6 +1580,11 @@ static int dw_i3c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, RPM_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
/* Information regarding the FIFOs/QUEUEs depth */
ret = readl(master->regs + QUEUE_STATUS_LEVEL);
master->caps.cmdfifodepth = QUEUE_STATUS_LEVEL_CMD(ret);
@@ -1165,50 +1597,218 @@ static int dw_i3c_probe(struct platform_device *pdev)
master->maxdevs = ret >> 16;
master->free_pos = GENMASK(master->maxdevs - 1, 0);
+ if (has_acpi_companion(&pdev->dev)) {
+ quirks = (unsigned long)device_get_match_data(&pdev->dev);
+ } else if (pdev->dev.of_node) {
+ drvdata = device_get_match_data(&pdev->dev);
+ if (drvdata)
+ quirks = drvdata->flags;
+ }
+ master->quirks = quirks;
+
+ /* Keep controller enabled by preventing runtime suspend */
+ if (master->quirks & DW_I3C_DISABLE_RUNTIME_PM_QUIRK)
+ pm_runtime_get_noresume(&pdev->dev);
+
+ INIT_WORK(&master->hj_work, dw_i3c_hj_work);
ret = i3c_master_register(&master->base, &pdev->dev,
&dw_mipi_i3c_ops, false);
if (ret)
- goto err_assert_rst;
+ goto err_disable_pm;
return 0;
+err_disable_pm:
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+
err_assert_rst:
reset_control_assert(master->core_rst);
-err_disable_core_clk:
- clk_disable_unprepare(master->core_clk);
-
return ret;
}
+EXPORT_SYMBOL_GPL(dw_i3c_common_probe);
+
+void dw_i3c_common_remove(struct dw_i3c_master *master)
+{
+ cancel_work_sync(&master->hj_work);
+ i3c_master_unregister(&master->base);
-static int dw_i3c_remove(struct platform_device *pdev)
+ /* Balance pm_runtime_get_noresume() from probe() */
+ if (master->quirks & DW_I3C_DISABLE_RUNTIME_PM_QUIRK)
+ pm_runtime_put_noidle(master->dev);
+
+ pm_runtime_disable(master->dev);
+ pm_runtime_set_suspended(master->dev);
+ pm_runtime_dont_use_autosuspend(master->dev);
+}
+EXPORT_SYMBOL_GPL(dw_i3c_common_remove);
+
+/* base platform implementation */
+
+static int dw_i3c_probe(struct platform_device *pdev)
+{
+ struct dw_i3c_master *master;
+
+ master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return -ENOMEM;
+
+ return dw_i3c_common_probe(master, pdev);
+}
+
+static void dw_i3c_remove(struct platform_device *pdev)
{
struct dw_i3c_master *master = platform_get_drvdata(pdev);
- int ret;
- ret = i3c_master_unregister(&master->base);
+ dw_i3c_common_remove(master);
+}
+
+static void dw_i3c_master_restore_addrs(struct dw_i3c_master *master)
+{
+ u32 pos, reg_val;
+
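+ /*
+ * Re-program the controller's own dynamic address and every in-use
+ * DAT entry after the controller registers were reset.
+ */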
+ writel(DEV_ADDR_DYNAMIC_ADDR_VALID | DEV_ADDR_DYNAMIC(master->dev_addr),
+ master->regs + DEVICE_ADDR);
+
+ for (pos = 0; pos < master->maxdevs; pos++) {
+ if (master->free_pos & BIT(pos))
+ continue;
+
+ if (master->devs[pos].is_i2c_addr)
+ reg_val = DEV_ADDR_TABLE_LEGACY_I2C_DEV |
+ DEV_ADDR_TABLE_STATIC_ADDR(master->devs[pos].addr);
+ else
+ reg_val = DEV_ADDR_TABLE_DYNAMIC_ADDR(master->devs[pos].addr);
+
+ writel(reg_val, master->regs + DEV_ADDR_TABLE_LOC(master->datstartaddr, pos));
+ }
+}
+
+static void dw_i3c_master_restore_timing_regs(struct dw_i3c_master *master)
+{
+ /* AMD platform specific OD and PP timings */
+ if (master->quirks & AMD_I3C_OD_PP_TIMING)
+ amd_configure_od_pp_quirk(master);
+
+ writel(master->i3c_pp_timing, master->regs + SCL_I3C_PP_TIMING);
+ writel(master->bus_free_timing, master->regs + BUS_FREE_TIMING);
+ writel(master->i3c_od_timing, master->regs + SCL_I3C_OD_TIMING);
+ writel(master->ext_lcnt_timing, master->regs + SCL_EXT_LCNT_TIMING);
+
+ if (master->i2c_slv_prsnt) {
+ writel(master->i2c_fmp_timing, master->regs + SCL_I2C_FMP_TIMING);
+ writel(master->i2c_fm_timing, master->regs + SCL_I2C_FM_TIMING);
+ }
+}
+
+static int dw_i3c_master_enable_clks(struct dw_i3c_master *master)
+{
+ int ret = 0;
+
+ ret = clk_prepare_enable(master->core_clk);
if (ret)
return ret;
- reset_control_assert(master->core_rst);
+ ret = clk_prepare_enable(master->pclk);
+ if (ret) {
+ clk_disable_unprepare(master->core_clk);
+ return ret;
+ }
+ return 0;
+}
+
+static inline void dw_i3c_master_disable_clks(struct dw_i3c_master *master)
+{
+ clk_disable_unprepare(master->pclk);
clk_disable_unprepare(master->core_clk);
+}
+
+static int __maybe_unused dw_i3c_master_runtime_suspend(struct device *dev)
+{
+ struct dw_i3c_master *master = dev_get_drvdata(dev);
+
+ dw_i3c_master_disable(master);
+
+ reset_control_assert(master->core_rst);
+ dw_i3c_master_disable_clks(master);
+ pinctrl_pm_select_sleep_state(dev);
+ return 0;
+}
+
+static int __maybe_unused dw_i3c_master_runtime_resume(struct device *dev)
+{
+ struct dw_i3c_master *master = dev_get_drvdata(dev);
+
+ pinctrl_pm_select_default_state(dev);
+ dw_i3c_master_enable_clks(master);
+ reset_control_deassert(master->core_rst);
+ dw_i3c_master_set_intr_regs(master);
+ dw_i3c_master_restore_timing_regs(master);
+ dw_i3c_master_restore_addrs(master);
+
+ dw_i3c_master_enable(master);
return 0;
}
+static const struct dev_pm_ops dw_i3c_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(dw_i3c_master_runtime_suspend, dw_i3c_master_runtime_resume, NULL)
+};
+
+static void dw_i3c_shutdown(struct platform_device *pdev)
+{
+ struct dw_i3c_master *master = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev,
+ "<%s> cannot resume i3c bus master, err: %d\n",
+ __func__, ret);
+ return;
+ }
+
+ cancel_work_sync(&master->hj_work);
+
+ /* Disable interrupts */
+ writel((u32)~INTR_ALL, master->regs + INTR_STATUS_EN);
+ writel((u32)~INTR_ALL, master->regs + INTR_SIGNAL_EN);
+
+ pm_runtime_put_autosuspend(master->dev);
+}
+
+static const struct dw_i3c_drvdata altr_agilex5_drvdata = {
+ .flags = DW_I3C_DISABLE_RUNTIME_PM_QUIRK,
+};
+
static const struct of_device_id dw_i3c_master_of_match[] = {
{ .compatible = "snps,dw-i3c-master-1.00a", },
+ { .compatible = "altr,agilex5-dw-i3c-master",
+ .data = &altr_agilex5_drvdata,
+ },
{},
};
MODULE_DEVICE_TABLE(of, dw_i3c_master_of_match);
+static const struct acpi_device_id amd_i3c_device_match[] = {
+ { "AMDI0015", AMD_I3C_OD_PP_TIMING },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, amd_i3c_device_match);
+
static struct platform_driver dw_i3c_driver = {
.probe = dw_i3c_probe,
.remove = dw_i3c_remove,
+ .shutdown = dw_i3c_shutdown,
.driver = {
.name = "dw-i3c-master",
- .of_match_table = of_match_ptr(dw_i3c_master_of_match),
+ .of_match_table = dw_i3c_master_of_match,
+ .acpi_match_table = amd_i3c_device_match,
+ .pm = &dw_i3c_pm_ops,
},
};
module_platform_driver(dw_i3c_driver);
diff --git a/drivers/i3c/master/dw-i3c-master.h b/drivers/i3c/master/dw-i3c-master.h
new file mode 100644
index 000000000000..c5cb695c16ab
--- /dev/null
+++ b/drivers/i3c/master/dw-i3c-master.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 Code Construct
+ *
+ * Author: Jeremy Kerr <jk@codeconstruct.com.au>
+ */
+
+#include <linux/clk.h>
+#include <linux/i3c/master.h>
+#include <linux/reset.h>
+#include <linux/types.h>
+
+#define DW_I3C_MAX_DEVS 32
+
+struct dw_i3c_master_caps {
+ u8 cmdfifodepth;
+ u8 datafifodepth;
+};
+
+struct dw_i3c_dat_entry {
+ u8 addr;
+ bool is_i2c_addr;
+ struct i3c_dev_desc *ibi_dev;
+};
+
+struct dw_i3c_master {
+ struct i3c_master_controller base;
+ struct device *dev;
+ u16 maxdevs;
+ u16 datstartaddr;
+ u32 free_pos;
+ struct {
+ struct list_head list;
+ struct dw_i3c_xfer *cur;
+ spinlock_t lock;
+ } xferqueue;
+ struct dw_i3c_master_caps caps;
+ void __iomem *regs;
+ struct reset_control *core_rst;
+ struct clk *core_clk;
+ struct clk *pclk;
+ char version[5];
+ char type[5];
+ u32 sir_rej_mask;
+ bool i2c_slv_prsnt;
+ u32 dev_addr;
+ u32 i3c_pp_timing;
+ u32 i3c_od_timing;
+ u32 ext_lcnt_timing;
+ u32 bus_free_timing;
+ u32 i2c_fm_timing;
+ u32 i2c_fmp_timing;
+ u32 quirks;
+ /*
+ * Per-device hardware data, used to manage the device address table
+ * (DAT)
+ *
+ * Locking: the devs array may be referenced in IRQ context while
+ * processing an IBI. However, IBIs (for a specific device, which
+ * implies a specific DAT entry) can only happen while interrupts are
+ * requested for that device, which is serialised against other
+ * insertions/removals from the array by the global i3c infrastructure.
+ * So, devs_lock protects against concurrent updates to devs->ibi_dev
+ * between request_ibi/free_ibi and the IBI irq event.
+ */
+ struct dw_i3c_dat_entry devs[DW_I3C_MAX_DEVS];
+ spinlock_t devs_lock;
+
+ /* platform-specific data */
+ const struct dw_i3c_platform_ops *platform_ops;
+
+ struct work_struct hj_work;
+};
+
+struct dw_i3c_platform_ops {
+ /*
+ * Called on early bus init: the i3c controller has been set up, but
+ * before any transactions have taken place. Platform implementations
+ * may use this hook to perform device enabling once the i3c core is ready.
+ */
+ int (*init)(struct dw_i3c_master *i3c);
+
+ /*
+ * Initialise a DAT entry to enable/disable IBIs. Allows the platform
+ * to perform any device workarounds on the DAT entry before
+ * inserting into the hardware table.
+ *
+ * Called with the DAT lock held; must not sleep.
+ */
+ void (*set_dat_ibi)(struct dw_i3c_master *i3c,
+ struct i3c_dev_desc *dev, bool enable, u32 *reg);
+};
+
+extern int dw_i3c_common_probe(struct dw_i3c_master *master,
+ struct platform_device *pdev);
+extern void dw_i3c_common_remove(struct dw_i3c_master *master);
+
diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
index 5b37ffe5ad5b..97b151564d3d 100644
--- a/drivers/i3c/master/i3c-master-cdns.c
+++ b/drivers/i3c/master/i3c-master-cdns.c
@@ -22,7 +22,8 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
-#include <linux/of_device.h>
+
+#include "../internals.h"
#define DEV_ID 0x0
#define DEV_ID_I3C_MASTER 0x5034
@@ -77,7 +78,8 @@
#define PRESCL_CTRL0 0x14
#define PRESCL_CTRL0_I2C(x) ((x) << 16)
#define PRESCL_CTRL0_I3C(x) (x)
-#define PRESCL_CTRL0_MAX GENMASK(9, 0)
+#define PRESCL_CTRL0_I3C_MAX GENMASK(9, 0)
+#define PRESCL_CTRL0_I2C_MAX GENMASK(15, 0)
#define PRESCL_CTRL1 0x18
#define PRESCL_CTRL1_PP_LOW_MASK GENMASK(15, 8)
@@ -192,7 +194,7 @@
#define SLV_STATUS1_HJ_DIS BIT(18)
#define SLV_STATUS1_MR_DIS BIT(17)
#define SLV_STATUS1_PROT_ERR BIT(16)
-#define SLV_STATUS1_DA(x) (((s) & GENMASK(15, 9)) >> 9)
+#define SLV_STATUS1_DA(s) (((s) & GENMASK(15, 9)) >> 9)
#define SLV_STATUS1_HAS_DA BIT(8)
#define SLV_STATUS1_DDR_RX_FULL BIT(7)
#define SLV_STATUS1_DDR_TX_FULL BIT(6)
@@ -388,7 +390,7 @@ struct cdns_i3c_xfer {
struct completion comp;
int ret;
unsigned int ncmds;
- struct cdns_i3c_cmd cmds[];
+ struct cdns_i3c_cmd cmds[] __counted_by(ncmds);
};
struct cdns_i3c_data {
@@ -412,7 +414,6 @@ struct cdns_i3c_master {
} xferqueue;
void __iomem *regs;
struct clk *sysclk;
- struct clk *pclk;
struct cdns_i3c_master_caps caps;
unsigned long i3c_scl_lim;
const struct cdns_i3c_data *devdata;
@@ -427,25 +428,13 @@ to_cdns_i3c_master(struct i3c_master_controller *master)
static void cdns_i3c_master_wr_to_tx_fifo(struct cdns_i3c_master *master,
const u8 *bytes, int nbytes)
{
- writesl(master->regs + TX_FIFO, bytes, nbytes / 4);
- if (nbytes & 3) {
- u32 tmp = 0;
-
- memcpy(&tmp, bytes + (nbytes & ~3), nbytes & 3);
- writesl(master->regs + TX_FIFO, &tmp, 1);
- }
+ i3c_writel_fifo(master->regs + TX_FIFO, bytes, nbytes);
}
static void cdns_i3c_master_rd_from_rx_fifo(struct cdns_i3c_master *master,
u8 *bytes, int nbytes)
{
- readsl(master->regs + RX_FIFO, bytes, nbytes / 4);
- if (nbytes & 3) {
- u32 tmp;
-
- readsl(master->regs + RX_FIFO, &tmp, 1);
- memcpy(bytes + (nbytes & ~3), &tmp, nbytes & 3);
- }
+ i3c_readl_fifo(master->regs + RX_FIFO, bytes, nbytes);
}
static bool cdns_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
@@ -742,7 +731,7 @@ static int cdns_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
for (i = 0; i < nxfers; i++) {
if (xfers[i].len > CMD0_FIFO_PL_LEN_MAX)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
if (!nxfers)
@@ -750,7 +739,7 @@ static int cdns_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
if (nxfers > master->caps.cmdfifodepth ||
nxfers > master->caps.cmdrfifodepth)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
/*
* First make sure that all transactions (block of transfers separated
@@ -765,7 +754,7 @@ static int cdns_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
if (rxslots > master->caps.rxfifodepth ||
txslots > master->caps.txfifodepth)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
cdns_xfer = cdns_i3c_master_alloc_xfer(master, nxfers);
if (!cdns_xfer)
@@ -813,7 +802,7 @@ static int cdns_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
}
static int cdns_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
- const struct i2c_msg *xfers, int nxfers)
+ struct i2c_msg *xfers, int nxfers)
{
struct i3c_master_controller *m = i2c_dev_get_master(dev);
struct cdns_i3c_master *master = to_cdns_i3c_master(m);
@@ -822,11 +811,11 @@ static int cdns_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
int i, ret = 0;
if (nxfers > master->caps.cmdfifodepth)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
for (i = 0; i < nxfers; i++) {
if (xfers[i].len > CMD0_FIFO_PL_LEN_MAX)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (xfers[i].flags & I2C_M_RD)
nrxwords += DIV_ROUND_UP(xfers[i].len, 4);
@@ -836,7 +825,7 @@ static int cdns_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
if (ntxwords > master->caps.txfifodepth ||
nrxwords > master->caps.rxfifodepth)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
xfer = cdns_i3c_master_alloc_xfer(master, nxfers);
if (!xfer)
@@ -863,7 +852,7 @@ static int cdns_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
}
cdns_i3c_master_queue_xfer(master, xfer);
- if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ if (!wait_for_completion_timeout(&xfer->comp, m->i2c.timeout))
cdns_i3c_master_unqueue_xfer(master, xfer);
ret = xfer->ret;
@@ -889,8 +878,7 @@ static u32 prepare_rr0_dev_address(u32 addr)
ret |= (addr & GENMASK(9, 7)) << 6;
/* RR0[0] = ~XOR(addr[6:0]) */
- if (!(hweight8(addr & 0x7f) & 1))
- ret |= 1;
+ ret |= parity8(addr & 0x7f) ? 0 : BIT(0);
return ret;
}
@@ -1234,7 +1222,7 @@ static int cdns_i3c_master_bus_init(struct i3c_master_controller *m)
return -EINVAL;
pres = DIV_ROUND_UP(sysclk_rate, (bus->scl_rate.i3c * 4)) - 1;
- if (pres > PRESCL_CTRL0_MAX)
+ if (pres > PRESCL_CTRL0_I3C_MAX)
return -ERANGE;
bus->scl_rate.i3c = sysclk_rate / ((pres + 1) * 4);
@@ -1247,7 +1235,7 @@ static int cdns_i3c_master_bus_init(struct i3c_master_controller *m)
max_i2cfreq = bus->scl_rate.i2c;
pres = (sysclk_rate / (max_i2cfreq * 5)) - 1;
- if (pres > PRESCL_CTRL0_MAX)
+ if (pres > PRESCL_CTRL0_I2C_MAX)
return -ERANGE;
bus->scl_rate.i2c = sysclk_rate / ((pres + 1) * 5);
@@ -1331,12 +1319,7 @@ static void cdns_i3c_master_handle_ibi(struct cdns_i3c_master *master,
buf = slot->data;
nbytes = IBIR_XFER_BYTES(ibir);
- readsl(master->regs + IBI_DATA_FIFO, buf, nbytes / 4);
- if (nbytes % 3) {
- u32 tmp = __raw_readl(master->regs + IBI_DATA_FIFO);
-
- memcpy(buf + (nbytes & ~3), &tmp, nbytes & 3);
- }
+ i3c_readl_fifo(master->regs + IBI_DATA_FIFO, buf, nbytes);
slot->len = min_t(unsigned int, IBIR_XFER_BYTES(ibir),
dev->ibi->max_payload_len);
@@ -1562,10 +1545,12 @@ static const struct of_device_id cdns_i3c_master_of_ids[] = {
{ .compatible = "cdns,i3c-master", .data = &cdns_i3c_devdata },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, cdns_i3c_master_of_ids);
static int cdns_i3c_master_probe(struct platform_device *pdev)
{
struct cdns_i3c_master *master;
+ struct clk *pclk;
int ret, irq;
u32 val;
@@ -1581,11 +1566,11 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
if (IS_ERR(master->regs))
return PTR_ERR(master->regs);
- master->pclk = devm_clk_get(&pdev->dev, "pclk");
- if (IS_ERR(master->pclk))
- return PTR_ERR(master->pclk);
+ pclk = devm_clk_get_enabled(&pdev->dev, "pclk");
+ if (IS_ERR(pclk))
+ return PTR_ERR(pclk);
- master->sysclk = devm_clk_get(&pdev->dev, "sysclk");
+ master->sysclk = devm_clk_get_enabled(&pdev->dev, "sysclk");
if (IS_ERR(master->sysclk))
return PTR_ERR(master->sysclk);
@@ -1593,18 +1578,8 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- ret = clk_prepare_enable(master->pclk);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(master->sysclk);
- if (ret)
- goto err_disable_pclk;
-
- if (readl(master->regs + DEV_ID) != DEV_ID_I3C_MASTER) {
- ret = -EINVAL;
- goto err_disable_sysclk;
- }
+ if (readl(master->regs + DEV_ID) != DEV_ID_I3C_MASTER)
+ return -EINVAL;
spin_lock_init(&master->xferqueue.lock);
INIT_LIST_HEAD(&master->xferqueue.list);
@@ -1615,7 +1590,7 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
ret = devm_request_irq(&pdev->dev, irq, cdns_i3c_master_interrupt, 0,
dev_name(&pdev->dev), master);
if (ret)
- goto err_disable_sysclk;
+ return ret;
platform_set_drvdata(pdev, master);
@@ -1624,57 +1599,36 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
/* Device ID0 is reserved to describe this master. */
master->maxdevs = CONF_STATUS0_DEVS_NUM(val);
master->free_rr_slots = GENMASK(master->maxdevs, 1);
+ master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val);
+ master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val);
val = readl(master->regs + CONF_STATUS1);
master->caps.cmdfifodepth = CONF_STATUS1_CMD_DEPTH(val);
master->caps.rxfifodepth = CONF_STATUS1_RX_DEPTH(val);
master->caps.txfifodepth = CONF_STATUS1_TX_DEPTH(val);
- master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val);
- master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val);
spin_lock_init(&master->ibi.lock);
master->ibi.num_slots = CONF_STATUS1_IBI_HW_RES(val);
master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
sizeof(*master->ibi.slots),
GFP_KERNEL);
- if (!master->ibi.slots) {
- ret = -ENOMEM;
- goto err_disable_sysclk;
- }
+ if (!master->ibi.slots)
+ return -ENOMEM;
writel(IBIR_THR(1), master->regs + CMD_IBI_THR_CTRL);
writel(MST_INT_IBIR_THR, master->regs + MST_IER);
writel(DEVS_CTRL_DEV_CLR_ALL, master->regs + DEVS_CTRL);
- ret = i3c_master_register(&master->base, &pdev->dev,
- &cdns_i3c_master_ops, false);
- if (ret)
- goto err_disable_sysclk;
-
- return 0;
-
-err_disable_sysclk:
- clk_disable_unprepare(master->sysclk);
-
-err_disable_pclk:
- clk_disable_unprepare(master->pclk);
-
- return ret;
+ return i3c_master_register(&master->base, &pdev->dev,
+ &cdns_i3c_master_ops, false);
}
-static int cdns_i3c_master_remove(struct platform_device *pdev)
+static void cdns_i3c_master_remove(struct platform_device *pdev)
{
struct cdns_i3c_master *master = platform_get_drvdata(pdev);
- int ret;
- ret = i3c_master_unregister(&master->base);
- if (ret)
- return ret;
-
- clk_disable_unprepare(master->sysclk);
- clk_disable_unprepare(master->pclk);
-
- return 0;
+ cancel_work_sync(&master->hj_work);
+ i3c_master_unregister(&master->base);
}
static struct platform_driver cdns_i3c_master = {
diff --git a/drivers/i3c/master/mipi-i3c-hci/Makefile b/drivers/i3c/master/mipi-i3c-hci/Makefile
index a658e7b8262c..e3d3ef757035 100644
--- a/drivers/i3c/master/mipi-i3c-hci/Makefile
+++ b/drivers/i3c/master/mipi-i3c-hci/Makefile
@@ -3,4 +3,6 @@
obj-$(CONFIG_MIPI_I3C_HCI) += mipi-i3c-hci.o
mipi-i3c-hci-y := core.o ext_caps.o pio.o dma.o \
cmd_v1.o cmd_v2.o \
- dat_v1.o dct_v1.o
+ dat_v1.o dct_v1.o \
+ hci_quirks.o
+obj-$(CONFIG_MIPI_I3C_HCI_PCI) += mipi-i3c-hci-pci.o
diff --git a/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c b/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
index d97c3175e0e2..eb8a3ae2990d 100644
--- a/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
+++ b/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
@@ -123,17 +123,15 @@ static enum hci_cmd_mode get_i3c_mode(struct i3c_hci *hci)
{
struct i3c_bus *bus = i3c_master_get_bus(&hci->master);
- if (bus->scl_rate.i3c >= 12500000)
- return MODE_I3C_SDR0;
if (bus->scl_rate.i3c > 8000000)
- return MODE_I3C_SDR1;
+ return MODE_I3C_SDR0;
if (bus->scl_rate.i3c > 6000000)
- return MODE_I3C_SDR2;
+ return MODE_I3C_SDR1;
if (bus->scl_rate.i3c > 4000000)
- return MODE_I3C_SDR3;
+ return MODE_I3C_SDR2;
if (bus->scl_rate.i3c > 2000000)
- return MODE_I3C_SDR4;
- return MODE_I3C_Fm_FmP;
+ return MODE_I3C_SDR3;
+ return MODE_I3C_SDR4;
}
static enum hci_cmd_mode get_i2c_mode(struct i3c_hci *hci)
@@ -298,7 +296,7 @@ static int hci_cmd_v1_daa(struct i3c_hci *hci)
unsigned int dcr, bcr;
DECLARE_COMPLETION_ONSTACK(done);
- xfer = hci_alloc_xfer(2);
+ xfer = hci_alloc_xfer(1);
if (!xfer)
return -ENOMEM;
@@ -319,7 +317,9 @@ static int hci_cmd_v1_daa(struct i3c_hci *hci)
break;
next_addr = ret;
- DBG("next_addr = 0x%02x, DAA using DAT %d", next_addr, dat_idx);
+ dev_dbg(&hci->master.dev,
+ "next_addr = 0x%02x, DAA using DAT %d",
+ next_addr, dat_idx);
mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, dat_idx, next_addr);
mipi_i3c_hci_dct_index_reset(hci);
@@ -332,25 +332,28 @@ static int hci_cmd_v1_daa(struct i3c_hci *hci)
CMD_A0_DEV_COUNT(1) |
CMD_A0_ROC | CMD_A0_TOC;
xfer->cmd_desc[1] = 0;
+ xfer->completion = &done;
hci->io->queue_xfer(hci, xfer, 1);
if (!wait_for_completion_timeout(&done, HZ) &&
hci->io->dequeue_xfer(hci, xfer, 1)) {
ret = -ETIME;
break;
}
- if (RESP_STATUS(xfer[0].response) == RESP_ERR_NACK &&
- RESP_STATUS(xfer[0].response) == 1) {
+ if ((RESP_STATUS(xfer->response) == RESP_ERR_ADDR_HEADER ||
+ RESP_STATUS(xfer->response) == RESP_ERR_NACK) &&
+ RESP_DATA_LENGTH(xfer->response) == 1) {
ret = 0; /* no more devices to be assigned */
break;
}
- if (RESP_STATUS(xfer[0].response) != RESP_SUCCESS) {
+ if (RESP_STATUS(xfer->response) != RESP_SUCCESS) {
ret = -EIO;
break;
}
i3c_hci_dct_get_val(hci, 0, &pid, &dcr, &bcr);
- DBG("assigned address %#x to device PID=0x%llx DCR=%#x BCR=%#x",
- next_addr, pid, dcr, bcr);
+ dev_dbg(&hci->master.dev,
+ "assigned address %#x to device PID=0x%llx DCR=%#x BCR=%#x",
+ next_addr, pid, dcr, bcr);
mipi_i3c_hci_dat_v1.free_entry(hci, dat_idx);
dat_idx = -1;
diff --git a/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c b/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c
index 4493b2b067cb..efb4326a25b7 100644
--- a/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c
+++ b/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c
@@ -261,7 +261,7 @@ static int hci_cmd_v2_daa(struct i3c_hci *hci)
if (ret < 0)
break;
next_addr = ret;
- DBG("next_addr = 0x%02x", next_addr);
+ dev_dbg(&hci->master.dev, "next_addr = 0x%02x", next_addr);
xfer[0].cmd_tid = hci_get_tid();
xfer[0].cmd_desc[0] =
CMD_0_ATTR_A |
@@ -293,8 +293,9 @@ static int hci_cmd_v2_daa(struct i3c_hci *hci)
pid = (pid << 32) | device_id[0];
bcr = FIELD_GET(W1_MASK(55, 48), device_id[1]);
dcr = FIELD_GET(W1_MASK(63, 56), device_id[1]);
- DBG("assigned address %#x to device PID=0x%llx DCR=%#x BCR=%#x",
- next_addr, pid, dcr, bcr);
+ dev_dbg(&hci->master.dev,
+ "assigned address %#x to device PID=0x%llx DCR=%#x BCR=%#x",
+ next_addr, pid, dcr, bcr);
/*
* TODO: Extend the subsystem layer to allow for registering
* new device and provide BCR/DCR/PID at the same time.
diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c
index 6aef5ce43cc1..47e42cb4dbe7 100644
--- a/drivers/i3c/master/mipi-i3c-hci/core.c
+++ b/drivers/i3c/master/mipi-i3c-hci/core.c
@@ -12,7 +12,6 @@
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/interrupt.h>
-#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -27,11 +26,6 @@
* Host Controller Capabilities and Operation Registers
*/
-#define reg_read(r) readl(hci->base_regs + (r))
-#define reg_write(r, v) writel(v, hci->base_regs + (r))
-#define reg_set(r, v) reg_write(r, reg_read(r) | (v))
-#define reg_clear(r, v) reg_write(r, reg_read(r) & ~(v))
-
#define HCI_VERSION 0x00 /* HCI Version (in BCD) */
#define HC_CONTROL 0x04
@@ -84,10 +78,8 @@
#define INTR_SIGNAL_ENABLE 0x28
#define INTR_FORCE 0x2c
#define INTR_HC_CMD_SEQ_UFLOW_STAT BIT(12) /* Cmd Sequence Underflow */
-#define INTR_HC_RESET_CANCEL BIT(11) /* HC Cancelled Reset */
+#define INTR_HC_SEQ_CANCEL BIT(11) /* HC Cancelled Transaction Sequence */
#define INTR_HC_INTERNAL_ERR BIT(10) /* HC Internal Error */
-#define INTR_HC_PIO BIT(8) /* cascaded PIO interrupt */
-#define INTR_HC_RINGS GENMASK(7, 0)
#define DAT_SECTION 0x30 /* Device Address Table */
#define DAT_ENTRY_SIZE GENMASK(31, 28)
@@ -129,8 +121,6 @@ static int i3c_hci_bus_init(struct i3c_master_controller *m)
struct i3c_device_info info;
int ret;
- DBG("");
-
if (hci->cmd == &mipi_i3c_hci_cmd_v1) {
ret = mipi_i3c_hci_dat_v1.init(hci);
if (ret)
@@ -152,8 +142,12 @@ static int i3c_hci_bus_init(struct i3c_master_controller *m)
if (ret)
return ret;
+ /* Set RESP_BUF_THLD to 0(n) to get 1(n+1) response */
+ if (hci->quirks & HCI_QUIRK_RESP_BUF_THLD)
+ amd_set_resp_buf_thld(hci);
+
reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
- DBG("HC_CONTROL = %#x", reg_read(HC_CONTROL));
+ dev_dbg(&hci->master.dev, "HC_CONTROL = %#x", reg_read(HC_CONTROL));
return 0;
}
@@ -161,10 +155,10 @@ static int i3c_hci_bus_init(struct i3c_master_controller *m)
static void i3c_hci_bus_cleanup(struct i3c_master_controller *m)
{
struct i3c_hci *hci = to_i3c_hci(m);
-
- DBG("");
+ struct platform_device *pdev = to_platform_device(m->dev.parent);
reg_clear(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
+ synchronize_irq(platform_get_irq(pdev, 0));
hci->io->cleanup(hci);
if (hci->cmd == &mipi_i3c_hci_cmd_v1)
mipi_i3c_hci_dat_v1.cleanup(hci);
@@ -172,8 +166,7 @@ static void i3c_hci_bus_cleanup(struct i3c_master_controller *m)
void mipi_i3c_hci_resume(struct i3c_hci *hci)
{
- /* the HC_CONTROL_RESUME bit is R/W1C so just read and write back */
- reg_write(HC_CONTROL, reg_read(HC_CONTROL));
+ reg_set(HC_CONTROL, HC_CONTROL_RESUME);
}
/* located here rather than pio.c because needed bits are in core reg space */
@@ -199,8 +192,8 @@ static int i3c_hci_send_ccc_cmd(struct i3c_master_controller *m,
DECLARE_COMPLETION_ONSTACK(done);
int i, last, ret = 0;
- DBG("cmd=%#x rnw=%d ndests=%d data[0].len=%d",
- ccc->id, ccc->rnw, ccc->ndests, ccc->dests[0].payload.len);
+ dev_dbg(&hci->master.dev, "cmd=%#x rnw=%d ndests=%d data[0].len=%d",
+ ccc->id, ccc->rnw, ccc->ndests, ccc->dests[0].payload.len);
xfer = hci_alloc_xfer(nxfers);
if (!xfer)
@@ -244,15 +237,22 @@ static int i3c_hci_send_ccc_cmd(struct i3c_master_controller *m,
if (ccc->rnw)
ccc->dests[i - prefixed].payload.len =
RESP_DATA_LENGTH(xfer[i].response);
- if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
+ switch (RESP_STATUS(xfer[i].response)) {
+ case RESP_SUCCESS:
+ continue;
+ case RESP_ERR_ADDR_HEADER:
+ case RESP_ERR_NACK:
+ ccc->err = I3C_ERROR_M2;
+ fallthrough;
+ default:
ret = -EIO;
goto out;
}
}
if (ccc->rnw)
- DBG("got: %*ph",
- ccc->dests[0].payload.len, ccc->dests[0].payload.data);
+ dev_dbg(&hci->master.dev, "got: %*ph",
+ ccc->dests[0].payload.len, ccc->dests[0].payload.data);
out:
hci_free_xfer(xfer, nxfers);
@@ -263,8 +263,6 @@ static int i3c_hci_daa(struct i3c_master_controller *m)
{
struct i3c_hci *hci = to_i3c_hci(m);
- DBG("");
-
return hci->cmd->perform_daa(hci);
}
@@ -279,7 +277,7 @@ static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev,
unsigned int size_limit;
int i, last, ret = 0;
- DBG("nxfers = %d", nxfers);
+ dev_dbg(&hci->master.dev, "nxfers = %d", nxfers);
xfer = hci_alloc_xfer(nxfers);
if (!xfer)
@@ -329,7 +327,7 @@ out:
}
static int i3c_hci_i2c_xfers(struct i2c_dev_desc *dev,
- const struct i2c_msg *i2c_xfers, int nxfers)
+ struct i2c_msg *i2c_xfers, int nxfers)
{
struct i3c_master_controller *m = i2c_dev_get_master(dev);
struct i3c_hci *hci = to_i3c_hci(m);
@@ -337,7 +335,7 @@ static int i3c_hci_i2c_xfers(struct i2c_dev_desc *dev,
DECLARE_COMPLETION_ONSTACK(done);
int i, last, ret = 0;
- DBG("nxfers = %d", nxfers);
+ dev_dbg(&hci->master.dev, "nxfers = %d", nxfers);
xfer = hci_alloc_xfer(nxfers);
if (!xfer)
@@ -357,7 +355,7 @@ static int i3c_hci_i2c_xfers(struct i2c_dev_desc *dev,
ret = hci->io->queue_xfer(hci, xfer, nxfers);
if (ret)
goto out;
- if (!wait_for_completion_timeout(&done, HZ) &&
+ if (!wait_for_completion_timeout(&done, m->i2c.timeout) &&
hci->io->dequeue_xfer(hci, xfer, nxfers)) {
ret = -ETIME;
goto out;
@@ -381,8 +379,6 @@ static int i3c_hci_attach_i3c_dev(struct i3c_dev_desc *dev)
struct i3c_hci_dev_data *dev_data;
int ret;
- DBG("");
-
dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
if (!dev_data)
return -ENOMEM;
@@ -392,7 +388,8 @@ static int i3c_hci_attach_i3c_dev(struct i3c_dev_desc *dev)
kfree(dev_data);
return ret;
}
- mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, ret, dev->info.dyn_addr);
+ mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, ret,
+ dev->info.dyn_addr ?: dev->info.static_addr);
dev_data->dat_idx = ret;
}
i3c_dev_set_master_data(dev, dev_data);
@@ -405,8 +402,6 @@ static int i3c_hci_reattach_i3c_dev(struct i3c_dev_desc *dev, u8 old_dyn_addr)
struct i3c_hci *hci = to_i3c_hci(m);
struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
- DBG("");
-
if (hci->cmd == &mipi_i3c_hci_cmd_v1)
mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, dev_data->dat_idx,
dev->info.dyn_addr);
@@ -419,8 +414,6 @@ static void i3c_hci_detach_i3c_dev(struct i3c_dev_desc *dev)
struct i3c_hci *hci = to_i3c_hci(m);
struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
- DBG("");
-
i3c_dev_set_master_data(dev, NULL);
if (hci->cmd == &mipi_i3c_hci_cmd_v1)
mipi_i3c_hci_dat_v1.free_entry(hci, dev_data->dat_idx);
@@ -434,8 +427,6 @@ static int i3c_hci_attach_i2c_dev(struct i2c_dev_desc *dev)
struct i3c_hci_dev_data *dev_data;
int ret;
- DBG("");
-
if (hci->cmd != &mipi_i3c_hci_cmd_v1)
return 0;
dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
@@ -459,8 +450,6 @@ static void i3c_hci_detach_i2c_dev(struct i2c_dev_desc *dev)
struct i3c_hci *hci = to_i3c_hci(m);
struct i3c_hci_dev_data *dev_data = i2c_dev_get_master_data(dev);
- DBG("");
-
if (dev_data) {
i2c_dev_set_master_data(dev, NULL);
if (hci->cmd == &mipi_i3c_hci_cmd_v1)
@@ -547,34 +536,27 @@ static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id)
u32 val;
val = reg_read(INTR_STATUS);
- DBG("INTR_STATUS = %#x", val);
+ reg_write(INTR_STATUS, val);
+ dev_dbg(&hci->master.dev, "INTR_STATUS %#x", val);
- if (val) {
- reg_write(INTR_STATUS, val);
- } else {
- /* v1.0 does not have PIO cascaded notification bits */
- val |= INTR_HC_PIO;
- }
+ if (val)
+ result = IRQ_HANDLED;
- if (val & INTR_HC_RESET_CANCEL) {
- DBG("cancelled reset");
- val &= ~INTR_HC_RESET_CANCEL;
+ if (val & INTR_HC_SEQ_CANCEL) {
+ dev_dbg(&hci->master.dev,
+ "Host Controller Cancelled Transaction Sequence\n");
+ val &= ~INTR_HC_SEQ_CANCEL;
}
if (val & INTR_HC_INTERNAL_ERR) {
dev_err(&hci->master.dev, "Host Controller Internal Error\n");
val &= ~INTR_HC_INTERNAL_ERR;
}
- if (val & INTR_HC_PIO) {
- hci->io->irq_handler(hci, 0);
- val &= ~INTR_HC_PIO;
- }
- if (val & INTR_HC_RINGS) {
- hci->io->irq_handler(hci, val & INTR_HC_RINGS);
- val &= ~INTR_HC_RINGS;
- }
+
if (val)
- dev_err(&hci->master.dev, "unexpected INTR_STATUS %#x\n", val);
- else
+ dev_warn_once(&hci->master.dev,
+ "unexpected INTR_STATUS %#x\n", val);
+
+ if (hci->io->irq_handler(hci))
result = IRQ_HANDLED;
return result;
@@ -582,6 +564,7 @@ static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id)
static int i3c_hci_init(struct i3c_hci *hci)
{
+ bool size_in_dwords, mode_selector;
u32 regval, offset;
int ret;
@@ -604,23 +587,30 @@ static int i3c_hci_init(struct i3c_hci *hci)
}
hci->caps = reg_read(HC_CAPABILITIES);
- DBG("caps = %#x", hci->caps);
+ dev_dbg(&hci->master.dev, "caps = %#x", hci->caps);
+
+ size_in_dwords = hci->version_major < 1 ||
+ (hci->version_major == 1 && hci->version_minor < 1);
regval = reg_read(DAT_SECTION);
offset = FIELD_GET(DAT_TABLE_OFFSET, regval);
hci->DAT_regs = offset ? hci->base_regs + offset : NULL;
hci->DAT_entries = FIELD_GET(DAT_TABLE_SIZE, regval);
- hci->DAT_entry_size = FIELD_GET(DAT_ENTRY_SIZE, regval);
+ hci->DAT_entry_size = FIELD_GET(DAT_ENTRY_SIZE, regval) ? 0 : 8;
+ if (size_in_dwords)
+ hci->DAT_entries = 4 * hci->DAT_entries / hci->DAT_entry_size;
dev_info(&hci->master.dev, "DAT: %u %u-bytes entries at offset %#x\n",
- hci->DAT_entries, hci->DAT_entry_size * 4, offset);
+ hci->DAT_entries, hci->DAT_entry_size, offset);
regval = reg_read(DCT_SECTION);
offset = FIELD_GET(DCT_TABLE_OFFSET, regval);
hci->DCT_regs = offset ? hci->base_regs + offset : NULL;
hci->DCT_entries = FIELD_GET(DCT_TABLE_SIZE, regval);
- hci->DCT_entry_size = FIELD_GET(DCT_ENTRY_SIZE, regval);
+ hci->DCT_entry_size = FIELD_GET(DCT_ENTRY_SIZE, regval) ? 0 : 16;
+ if (size_in_dwords)
+ hci->DCT_entries = 4 * hci->DCT_entries / hci->DCT_entry_size;
dev_info(&hci->master.dev, "DCT: %u %u-bytes entries at offset %#x\n",
- hci->DCT_entries, hci->DCT_entry_size * 4, offset);
+ hci->DCT_entries, hci->DCT_entry_size, offset);
regval = reg_read(RING_HEADERS_SECTION);
offset = FIELD_GET(RING_HEADERS_OFFSET, regval);
@@ -656,9 +646,14 @@ static int i3c_hci_init(struct i3c_hci *hci)
if (ret)
return -ENXIO;
- /* Disable all interrupts and allow all signal updates */
+ /* Disable all interrupts */
reg_write(INTR_SIGNAL_ENABLE, 0x0);
- reg_write(INTR_STATUS_ENABLE, 0xffffffff);
+ /*
+ * Only allow signal updates for bits 31:10 because
+ * bits 0:9 are reserved in IP version >= 0.8 and
+ * bits 0:5 are defined in IP version < 0.8 but not handled by the PIO code
+ */
+ reg_write(INTR_STATUS_ENABLE, GENMASK(31, 10));
/* Make sure our data ordering fits the host's */
regval = reg_read(HC_CONTROL);
@@ -697,10 +692,17 @@ static int i3c_hci_init(struct i3c_hci *hci)
return -EINVAL;
}
+ mode_selector = hci->version_major > 1 ||
+ (hci->version_major == 1 && hci->version_minor > 0);
+
+ /* Force PIO mode on AMD platforms (HCI_QUIRK_PIO_MODE) */
+ if (hci->quirks & HCI_QUIRK_PIO_MODE)
+ hci->RHS_regs = NULL;
+
/* Try activating DMA operations first */
if (hci->RHS_regs) {
reg_clear(HC_CONTROL, HC_CONTROL_PIO_MODE);
- if (reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE) {
+ if (mode_selector && (reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE)) {
dev_err(&hci->master.dev, "PIO mode is stuck\n");
ret = -EIO;
} else {
@@ -712,7 +714,7 @@ static int i3c_hci_init(struct i3c_hci *hci)
/* If no DMA, try PIO */
if (!hci->io && hci->PIO_regs) {
reg_set(HC_CONTROL, HC_CONTROL_PIO_MODE);
- if (!(reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE)) {
+ if (mode_selector && !(reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE)) {
dev_err(&hci->master.dev, "DMA mode is stuck\n");
ret = -EIO;
} else {
@@ -728,6 +730,10 @@ static int i3c_hci_init(struct i3c_hci *hci)
return ret;
}
+ /* Configure OD and PP timings for AMD platforms */
+ if (hci->quirks & HCI_QUIRK_OD_PP_TIMING)
+ amd_set_od_pp_timing(hci);
+
return 0;
}
@@ -747,6 +753,8 @@ static int i3c_hci_probe(struct platform_device *pdev)
/* temporary for dev_printk's, to be replaced in i3c_master_register */
hci->master.dev.init_name = dev_name(&pdev->dev);
+ hci->quirks = (unsigned long)device_get_match_data(&pdev->dev);
+
ret = i3c_hci_init(hci);
if (ret)
return ret;
@@ -765,11 +773,11 @@ static int i3c_hci_probe(struct platform_device *pdev)
return 0;
}
-static int i3c_hci_remove(struct platform_device *pdev)
+static void i3c_hci_remove(struct platform_device *pdev)
{
struct i3c_hci *hci = platform_get_drvdata(pdev);
- return i3c_master_unregister(&hci->master);
+ i3c_master_unregister(&hci->master);
}
static const __maybe_unused struct of_device_id i3c_hci_of_match[] = {
@@ -778,15 +786,23 @@ static const __maybe_unused struct of_device_id i3c_hci_of_match[] = {
};
MODULE_DEVICE_TABLE(of, i3c_hci_of_match);
+static const struct acpi_device_id i3c_hci_acpi_match[] = {
+ { "AMDI5017", HCI_QUIRK_PIO_MODE | HCI_QUIRK_OD_PP_TIMING | HCI_QUIRK_RESP_BUF_THLD },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, i3c_hci_acpi_match);
+
static struct platform_driver i3c_hci_driver = {
.probe = i3c_hci_probe,
.remove = i3c_hci_remove,
.driver = {
.name = "mipi-i3c-hci",
.of_match_table = of_match_ptr(i3c_hci_of_match),
+ .acpi_match_table = i3c_hci_acpi_match,
},
};
module_platform_driver(i3c_hci_driver);
+MODULE_ALIAS("platform:mipi-i3c-hci");
MODULE_AUTHOR("Nicolas Pitre <npitre@baylibre.com>");
MODULE_DESCRIPTION("MIPI I3C HCI driver");
diff --git a/drivers/i3c/master/mipi-i3c-hci/dat_v1.c b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
index 97bb49ff5b53..85c4916972e4 100644
--- a/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
+++ b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
@@ -40,15 +40,6 @@
#define dat_w0_write(i, v) writel(v, hci->DAT_regs + (i) * 8)
#define dat_w1_write(i, v) writel(v, hci->DAT_regs + (i) * 8 + 4)
-static inline bool dynaddr_parity(unsigned int addr)
-{
- addr |= 1 << 7;
- addr += addr >> 4;
- addr += addr >> 2;
- addr += addr >> 1;
- return (addr & 1);
-}
-
static int hci_dat_v1_init(struct i3c_hci *hci)
{
unsigned int dat_idx;
@@ -64,15 +55,17 @@ static int hci_dat_v1_init(struct i3c_hci *hci)
return -EOPNOTSUPP;
}
- /* use a bitmap for faster free slot search */
- hci->DAT_data = bitmap_zalloc(hci->DAT_entries, GFP_KERNEL);
- if (!hci->DAT_data)
- return -ENOMEM;
-
- /* clear them */
- for (dat_idx = 0; dat_idx < hci->DAT_entries; dat_idx++) {
- dat_w0_write(dat_idx, 0);
- dat_w1_write(dat_idx, 0);
+ if (!hci->DAT_data) {
+ /* use a bitmap for faster free slot search */
+ hci->DAT_data = bitmap_zalloc(hci->DAT_entries, GFP_KERNEL);
+ if (!hci->DAT_data)
+ return -ENOMEM;
+
+ /* clear them */
+ for (dat_idx = 0; dat_idx < hci->DAT_entries; dat_idx++) {
+ dat_w0_write(dat_idx, 0);
+ dat_w1_write(dat_idx, 0);
+ }
}
return 0;
@@ -87,7 +80,13 @@ static void hci_dat_v1_cleanup(struct i3c_hci *hci)
static int hci_dat_v1_alloc_entry(struct i3c_hci *hci)
{
unsigned int dat_idx;
+ int ret;
+ if (!hci->DAT_data) {
+ ret = hci_dat_v1_init(hci);
+ if (ret)
+ return ret;
+ }
dat_idx = find_first_zero_bit(hci->DAT_data, hci->DAT_entries);
if (dat_idx >= hci->DAT_entries)
return -ENOENT;
@@ -103,7 +102,8 @@ static void hci_dat_v1_free_entry(struct i3c_hci *hci, unsigned int dat_idx)
{
dat_w0_write(dat_idx, 0);
dat_w1_write(dat_idx, 0);
- __clear_bit(dat_idx, hci->DAT_data);
+ if (hci->DAT_data)
+ __clear_bit(dat_idx, hci->DAT_data);
}
static void hci_dat_v1_set_dynamic_addr(struct i3c_hci *hci,
@@ -114,7 +114,7 @@ static void hci_dat_v1_set_dynamic_addr(struct i3c_hci *hci,
dat_w0 = dat_w0_read(dat_idx);
dat_w0 &= ~(DAT_0_DYNAMIC_ADDRESS | DAT_0_DYNADDR_PARITY);
dat_w0 |= FIELD_PREP(DAT_0_DYNAMIC_ADDRESS, address) |
- (dynaddr_parity(address) ? DAT_0_DYNADDR_PARITY : 0);
+ (parity8(address) ? 0 : DAT_0_DYNADDR_PARITY);
dat_w0_write(dat_idx, dat_w0);
}
diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c
index 2990ac9eaade..c401a9425cdc 100644
--- a/drivers/i3c/master/mipi-i3c-hci/dma.c
+++ b/drivers/i3c/master/mipi-i3c-hci/dma.c
@@ -14,6 +14,7 @@
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/io.h>
+#include <linux/pci.h>
#include "hci.h"
#include "cmd.h"
@@ -76,7 +77,6 @@
#define INTR_TRANSFER_COMPLETION BIT(11)
#define INTR_RING_OP BIT(10)
#define INTR_TRANSFER_ERR BIT(9)
-#define INTR_WARN_INS_STOP_MODE BIT(7)
#define INTR_IBI_RING_FULL BIT(6)
#define INTR_TRANSFER_ABORT BIT(5)
@@ -138,8 +138,9 @@ struct hci_rh_data {
};
struct hci_rings_data {
+ struct device *sysdev;
unsigned int total;
- struct hci_rh_data headers[];
+ struct hci_rh_data headers[] __counted_by(total);
};
struct hci_dma_dev_ibi_data {
@@ -147,21 +148,6 @@ struct hci_dma_dev_ibi_data {
unsigned int max_len;
};
-static inline u32 lo32(dma_addr_t physaddr)
-{
- return physaddr;
-}
-
-static inline u32 hi32(dma_addr_t physaddr)
-{
- /* trickery to avoid compiler warnings on 32-bit build targets */
- if (sizeof(dma_addr_t) > 4) {
- u64 hi = physaddr;
- return hi >> 32;
- }
- return 0;
-}
-
static void hci_dma_cleanup(struct i3c_hci *hci)
{
struct hci_rings_data *rings = hci->io_data;
@@ -174,26 +160,26 @@ static void hci_dma_cleanup(struct i3c_hci *hci)
for (i = 0; i < rings->total; i++) {
rh = &rings->headers[i];
+ rh_reg_write(INTR_SIGNAL_ENABLE, 0);
rh_reg_write(RING_CONTROL, 0);
rh_reg_write(CR_SETUP, 0);
rh_reg_write(IBI_SETUP, 0);
- rh_reg_write(INTR_SIGNAL_ENABLE, 0);
if (rh->xfer)
- dma_free_coherent(&hci->master.dev,
+ dma_free_coherent(rings->sysdev,
rh->xfer_struct_sz * rh->xfer_entries,
rh->xfer, rh->xfer_dma);
if (rh->resp)
- dma_free_coherent(&hci->master.dev,
+ dma_free_coherent(rings->sysdev,
rh->resp_struct_sz * rh->xfer_entries,
rh->resp, rh->resp_dma);
kfree(rh->src_xfers);
if (rh->ibi_status)
- dma_free_coherent(&hci->master.dev,
+ dma_free_coherent(rings->sysdev,
rh->ibi_status_sz * rh->ibi_status_entries,
rh->ibi_status, rh->ibi_status_dma);
if (rh->ibi_data_dma)
- dma_unmap_single(&hci->master.dev, rh->ibi_data_dma,
+ dma_unmap_single(rings->sysdev, rh->ibi_data_dma,
rh->ibi_chunk_sz * rh->ibi_chunks_total,
DMA_FROM_DEVICE);
kfree(rh->ibi_data);
@@ -209,11 +195,23 @@ static int hci_dma_init(struct i3c_hci *hci)
{
struct hci_rings_data *rings;
struct hci_rh_data *rh;
+ struct device *sysdev;
u32 regval;
unsigned int i, nr_rings, xfers_sz, resps_sz;
unsigned int ibi_status_ring_sz, ibi_data_ring_sz;
int ret;
+ /*
+ * Set the pointer to the physical device that performs the DMA and,
+ * when an IOMMU is enabled, has the IOMMU set up for it, and use that
+ * device with the DMA API. Such a device is either the parent of the
+ * "mipi-i3c-hci" platform device (OF/ACPI enumeration) or its
+ * grandparent (PCI enumeration).
+ */
+ sysdev = hci->master.dev.parent;
+ if (sysdev->parent && dev_is_pci(sysdev->parent))
+ sysdev = sysdev->parent;
+
regval = rhs_reg_read(CONTROL);
nr_rings = FIELD_GET(MAX_HEADER_COUNT_CAP, regval);
dev_info(&hci->master.dev, "%d DMA rings available\n", nr_rings);
@@ -228,6 +226,10 @@ static int hci_dma_init(struct i3c_hci *hci)
return -ENOMEM;
hci->io_data = rings;
rings->total = nr_rings;
+ rings->sysdev = sysdev;
+
+ regval = FIELD_PREP(MAX_HEADER_COUNT, rings->total);
+ rhs_reg_write(CONTROL, regval);
for (i = 0; i < rings->total; i++) {
u32 offset = rhs_reg_read(RHn_OFFSET(i));
@@ -246,14 +248,15 @@ static int hci_dma_init(struct i3c_hci *hci)
regval = rh_reg_read(CR_SETUP);
rh->xfer_struct_sz = FIELD_GET(CR_XFER_STRUCT_SIZE, regval);
rh->resp_struct_sz = FIELD_GET(CR_RESP_STRUCT_SIZE, regval);
- DBG("xfer_struct_sz = %d, resp_struct_sz = %d",
- rh->xfer_struct_sz, rh->resp_struct_sz);
+ dev_dbg(&hci->master.dev,
+ "xfer_struct_sz = %d, resp_struct_sz = %d",
+ rh->xfer_struct_sz, rh->resp_struct_sz);
xfers_sz = rh->xfer_struct_sz * rh->xfer_entries;
resps_sz = rh->resp_struct_sz * rh->xfer_entries;
- rh->xfer = dma_alloc_coherent(&hci->master.dev, xfers_sz,
+ rh->xfer = dma_alloc_coherent(rings->sysdev, xfers_sz,
&rh->xfer_dma, GFP_KERNEL);
- rh->resp = dma_alloc_coherent(&hci->master.dev, resps_sz,
+ rh->resp = dma_alloc_coherent(rings->sysdev, resps_sz,
&rh->resp_dma, GFP_KERNEL);
rh->src_xfers =
kmalloc_array(rh->xfer_entries, sizeof(*rh->src_xfers),
@@ -262,10 +265,10 @@ static int hci_dma_init(struct i3c_hci *hci)
if (!rh->xfer || !rh->resp || !rh->src_xfers)
goto err_out;
- rh_reg_write(CMD_RING_BASE_LO, lo32(rh->xfer_dma));
- rh_reg_write(CMD_RING_BASE_HI, hi32(rh->xfer_dma));
- rh_reg_write(RESP_RING_BASE_LO, lo32(rh->resp_dma));
- rh_reg_write(RESP_RING_BASE_HI, hi32(rh->resp_dma));
+ rh_reg_write(CMD_RING_BASE_LO, lower_32_bits(rh->xfer_dma));
+ rh_reg_write(CMD_RING_BASE_HI, upper_32_bits(rh->xfer_dma));
+ rh_reg_write(RESP_RING_BASE_LO, lower_32_bits(rh->resp_dma));
+ rh_reg_write(RESP_RING_BASE_HI, upper_32_bits(rh->resp_dma));
regval = FIELD_PREP(CR_RING_SIZE, rh->xfer_entries);
rh_reg_write(CR_SETUP, regval);
@@ -275,7 +278,6 @@ static int hci_dma_init(struct i3c_hci *hci)
INTR_TRANSFER_COMPLETION |
INTR_RING_OP |
INTR_TRANSFER_ERR |
- INTR_WARN_INS_STOP_MODE |
INTR_IBI_RING_FULL |
INTR_TRANSFER_ABORT);
@@ -291,27 +293,42 @@ static int hci_dma_init(struct i3c_hci *hci)
rh->ibi_chunk_sz = dma_get_cache_alignment();
rh->ibi_chunk_sz *= IBI_CHUNK_CACHELINES;
- BUG_ON(rh->ibi_chunk_sz > 256);
+ /*
+ * Round the IBI data chunk size up to a number of bytes supported by
+ * the HW. The chunk size can be 2^n DWORDs, which is the same as
+ * 2^(n+2) bytes, where n is 0..6.
+ */
+ rh->ibi_chunk_sz = umax(4, rh->ibi_chunk_sz);
+ rh->ibi_chunk_sz = roundup_pow_of_two(rh->ibi_chunk_sz);
+ if (rh->ibi_chunk_sz > 256) {
+ ret = -EINVAL;
+ goto err_out;
+ }
ibi_status_ring_sz = rh->ibi_status_sz * rh->ibi_status_entries;
ibi_data_ring_sz = rh->ibi_chunk_sz * rh->ibi_chunks_total;
rh->ibi_status =
- dma_alloc_coherent(&hci->master.dev, ibi_status_ring_sz,
+ dma_alloc_coherent(rings->sysdev, ibi_status_ring_sz,
&rh->ibi_status_dma, GFP_KERNEL);
rh->ibi_data = kmalloc(ibi_data_ring_sz, GFP_KERNEL);
ret = -ENOMEM;
if (!rh->ibi_status || !rh->ibi_data)
goto err_out;
rh->ibi_data_dma =
- dma_map_single(&hci->master.dev, rh->ibi_data,
+ dma_map_single(rings->sysdev, rh->ibi_data,
ibi_data_ring_sz, DMA_FROM_DEVICE);
- if (dma_mapping_error(&hci->master.dev, rh->ibi_data_dma)) {
+ if (dma_mapping_error(rings->sysdev, rh->ibi_data_dma)) {
rh->ibi_data_dma = 0;
ret = -ENOMEM;
goto err_out;
}
+ rh_reg_write(IBI_STATUS_RING_BASE_LO, lower_32_bits(rh->ibi_status_dma));
+ rh_reg_write(IBI_STATUS_RING_BASE_HI, upper_32_bits(rh->ibi_status_dma));
+ rh_reg_write(IBI_DATA_RING_BASE_LO, lower_32_bits(rh->ibi_data_dma));
+ rh_reg_write(IBI_DATA_RING_BASE_HI, upper_32_bits(rh->ibi_data_dma));
+
regval = FIELD_PREP(IBI_STATUS_RING_SIZE,
rh->ibi_status_entries) |
FIELD_PREP(IBI_DATA_CHUNK_SIZE,
@@ -325,11 +342,10 @@ static int hci_dma_init(struct i3c_hci *hci)
rh_reg_write(INTR_SIGNAL_ENABLE, regval);
ring_ready:
- rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
+ rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE |
+ RING_CTRL_RUN_STOP);
}
- regval = FIELD_PREP(MAX_HEADER_COUNT, rings->total);
- rhs_reg_write(CONTROL, regval);
return 0;
err_out:
@@ -345,9 +361,9 @@ static void hci_dma_unmap_xfer(struct i3c_hci *hci,
for (i = 0; i < n; i++) {
xfer = xfer_list + i;
- dma_unmap_single(&hci->master.dev,
- xfer->data_dma, xfer->data_len,
- xfer->rnw ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ if (!xfer->data)
+ continue;
+ i3c_master_dma_unmap_single(xfer->dma);
}
}
@@ -368,6 +384,9 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
for (i = 0; i < n; i++) {
struct hci_xfer *xfer = xfer_list + i;
u32 *ring_data = rh->xfer + rh->xfer_struct_sz * enqueue_ptr;
+ enum dma_data_direction dir = xfer->rnw ? DMA_FROM_DEVICE :
+ DMA_TO_DEVICE;
+ bool need_bounce;
/* store cmd descriptor */
*ring_data++ = xfer->cmd_desc[0];
@@ -386,20 +405,20 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
/* 2nd and 3rd words of Data Buffer Descriptor Structure */
if (xfer->data) {
- xfer->data_dma =
- dma_map_single(&hci->master.dev,
- xfer->data,
- xfer->data_len,
- xfer->rnw ?
- DMA_FROM_DEVICE :
- DMA_TO_DEVICE);
- if (dma_mapping_error(&hci->master.dev,
- xfer->data_dma)) {
+ need_bounce = device_iommu_mapped(rings->sysdev) &&
+ xfer->rnw &&
+ xfer->data_len != ALIGN(xfer->data_len, 4);
+ xfer->dma = i3c_master_dma_map_single(rings->sysdev,
+ xfer->data,
+ xfer->data_len,
+ need_bounce,
+ dir);
+ if (!xfer->dma) {
hci_dma_unmap_xfer(hci, xfer_list, i);
return -ENOMEM;
}
- *ring_data++ = lo32(xfer->data_dma);
- *ring_data++ = hi32(xfer->data_dma);
+ *ring_data++ = lower_32_bits(xfer->dma->addr);
+ *ring_data++ = upper_32_bits(xfer->dma->addr);
} else {
*ring_data++ = 0;
*ring_data++ = 0;
@@ -450,10 +469,9 @@ static bool hci_dma_dequeue_xfer(struct i3c_hci *hci,
/*
* We're deep in it if ever this condition is ever met.
* Hardware might still be writing to memory, etc.
- * Better suspend the world than risking silent corruption.
*/
dev_crit(&hci->master.dev, "unable to abort the ring\n");
- BUG();
+ WARN_ON(1);
}
for (i = 0; i < n; i++) {
@@ -506,11 +524,11 @@ static void hci_dma_xfer_done(struct i3c_hci *hci, struct hci_rh_data *rh)
ring_resp = rh->resp + rh->resp_struct_sz * done_ptr;
resp = *ring_resp;
tid = RESP_TID(resp);
- DBG("resp = 0x%08x", resp);
+ dev_dbg(&hci->master.dev, "resp = 0x%08x", resp);
xfer = rh->src_xfers[done_ptr];
if (!xfer) {
- DBG("orphaned ring entry");
+ dev_dbg(&hci->master.dev, "orphaned ring entry");
} else {
hci_dma_unmap_xfer(hci, xfer, 1);
xfer->ring_entry = -1;
@@ -581,6 +599,7 @@ static void hci_dma_recycle_ibi_slot(struct i3c_hci *hci,
static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
{
+ struct hci_rings_data *rings = hci->io_data;
struct i3c_dev_desc *dev;
struct i3c_hci_dev_data *dev_data;
struct hci_dma_dev_ibi_data *dev_ibi;
@@ -612,7 +631,7 @@ static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
ring_ibi_status = rh->ibi_status + rh->ibi_status_sz * ptr;
ibi_status = *ring_ibi_status;
- DBG("status = %#x", ibi_status);
+ dev_dbg(&hci->master.dev, "status = %#x", ibi_status);
if (ibi_status_error) {
/* we no longer care */
@@ -640,7 +659,9 @@ static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
if (last_ptr == -1) {
/* this IBI sequence is not yet complete */
- DBG("no LAST_STATUS available (e=%d d=%d)", enq_ptr, deq_ptr);
+ dev_dbg(&hci->master.dev,
+ "no LAST_STATUS available (e=%d d=%d)",
+ enq_ptr, deq_ptr);
return;
}
deq_ptr = last_ptr + 1;
@@ -691,7 +712,7 @@ static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
* rh->ibi_chunk_sz;
if (first_part > ibi_size)
first_part = ibi_size;
- dma_sync_single_for_cpu(&hci->master.dev, ring_ibi_data_dma,
+ dma_sync_single_for_cpu(rings->sysdev, ring_ibi_data_dma,
first_part, DMA_FROM_DEVICE);
memcpy(slot->data, ring_ibi_data, first_part);
@@ -700,7 +721,7 @@ static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
/* we wrap back to the start and copy remaining data */
ring_ibi_data = rh->ibi_data;
ring_ibi_data_dma = rh->ibi_data_dma;
- dma_sync_single_for_cpu(&hci->master.dev, ring_ibi_data_dma,
+ dma_sync_single_for_cpu(rings->sysdev, ring_ibi_data_dma,
ibi_size - first_part, DMA_FROM_DEVICE);
memcpy(slot->data + first_part, ring_ibi_data,
ibi_size - first_part);
@@ -728,23 +749,20 @@ done:
rh_reg_write(CHUNK_CONTROL, rh_reg_read(CHUNK_CONTROL) + ibi_chunks);
}
-static bool hci_dma_irq_handler(struct i3c_hci *hci, unsigned int mask)
+static bool hci_dma_irq_handler(struct i3c_hci *hci)
{
struct hci_rings_data *rings = hci->io_data;
unsigned int i;
bool handled = false;
- for (i = 0; mask && i < 8; i++) {
+ for (i = 0; i < rings->total; i++) {
struct hci_rh_data *rh;
u32 status;
- if (!(mask & BIT(i)))
- continue;
- mask &= ~BIT(i);
-
rh = &rings->headers[i];
status = rh_reg_read(INTR_STATUS);
- DBG("rh%d status: %#x", i, status);
+ dev_dbg(&hci->master.dev, "Ring %d: RH_INTR_STATUS %#x",
+ i, status);
if (!status)
continue;
rh_reg_write(INTR_STATUS, status);
@@ -756,15 +774,31 @@ static bool hci_dma_irq_handler(struct i3c_hci *hci, unsigned int mask)
if (status & INTR_RING_OP)
complete(&rh->op_done);
- if (status & INTR_TRANSFER_ABORT)
+ if (status & INTR_TRANSFER_ABORT) {
+ u32 ring_status;
+
dev_notice_ratelimited(&hci->master.dev,
- "ring %d: Transfer Aborted\n", i);
- if (status & INTR_WARN_INS_STOP_MODE)
- dev_warn_ratelimited(&hci->master.dev,
- "ring %d: Inserted Stop on Mode Change\n", i);
+ "Ring %d: Transfer Aborted\n", i);
+ mipi_i3c_hci_resume(hci);
+ ring_status = rh_reg_read(RING_STATUS);
+ if (!(ring_status & RING_STATUS_RUNNING) &&
+ status & INTR_TRANSFER_COMPLETION &&
+ status & INTR_TRANSFER_ERR) {
+ /*
+ * Ring stop followed by run is an
+ * Intel-specific quirk required after resuming
+ * the halted controller. Do it only when the
+ * ring is not in the running state after a
+ * transfer error.
+ */
+ rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
+ rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE |
+ RING_CTRL_RUN_STOP);
+ }
+ }
if (status & INTR_IBI_RING_FULL)
dev_err_ratelimited(&hci->master.dev,
- "ring %d: IBI Ring Full Condition\n", i);
+ "Ring %d: IBI Ring Full Condition\n", i);
handled = true;
}
diff --git a/drivers/i3c/master/mipi-i3c-hci/ext_caps.c b/drivers/i3c/master/mipi-i3c-hci/ext_caps.c
index 2e9b23efdc45..7714f00ea9cc 100644
--- a/drivers/i3c/master/mipi-i3c-hci/ext_caps.c
+++ b/drivers/i3c/master/mipi-i3c-hci/ext_caps.c
@@ -35,7 +35,7 @@ static int hci_extcap_hardware_id(struct i3c_hci *hci, void __iomem *base)
switch (hci->vendor_mipi_id) {
case MIPI_VENDOR_NXP:
hci->quirks |= HCI_QUIRK_RAW_CCC;
- DBG("raw CCC quirks set");
+ dev_dbg(&hci->master.dev, "raw CCC quirks set");
break;
}
@@ -77,7 +77,8 @@ static int hci_extcap_xfer_modes(struct i3c_hci *hci, void __iomem *base)
for (index = 0; index < entries; index++) {
u32 mode_entry = readl(base);
- DBG("mode %d: 0x%08x", index, mode_entry);
+ dev_dbg(&hci->master.dev, "mode %d: 0x%08x",
+ index, mode_entry);
/* TODO: will be needed when I3C core does more than SDR */
base += 4;
}
@@ -97,7 +98,8 @@ static int hci_extcap_xfer_rates(struct i3c_hci *hci, void __iomem *base)
dev_info(&hci->master.dev, "available data rates:\n");
for (index = 0; index < entries; index++) {
rate_entry = readl(base);
- DBG("entry %d: 0x%08x", index, rate_entry);
+ dev_dbg(&hci->master.dev, "entry %d: 0x%08x",
+ index, rate_entry);
rate = FIELD_GET(XFERRATE_ACTUAL_RATE_KHZ, rate_entry);
rate_id = FIELD_GET(XFERRATE_RATE_ID, rate_entry);
mode_id = FIELD_GET(XFERRATE_MODE_ID, rate_entry);
@@ -268,7 +270,8 @@ int i3c_hci_parse_ext_caps(struct i3c_hci *hci)
cap_header = readl(curr_cap);
cap_id = FIELD_GET(CAP_HEADER_ID, cap_header);
cap_length = FIELD_GET(CAP_HEADER_LENGTH, cap_header);
- DBG("id=0x%02x length=%d", cap_id, cap_length);
+ dev_dbg(&hci->master.dev, "id=0x%02x length=%d",
+ cap_id, cap_length);
if (!cap_length)
break;
if (curr_cap + cap_length * 4 >= end) {
diff --git a/drivers/i3c/master/mipi-i3c-hci/hci.h b/drivers/i3c/master/mipi-i3c-hci/hci.h
index f109923f6c3f..249ccb13c909 100644
--- a/drivers/i3c/master/mipi-i3c-hci/hci.h
+++ b/drivers/i3c/master/mipi-i3c-hci/hci.h
@@ -10,9 +10,7 @@
#ifndef HCI_H
#define HCI_H
-
-/* Handy logging macro to save on line length */
-#define DBG(x, ...) pr_devel("%s: " x "\n", __func__, ##__VA_ARGS__)
+#include <linux/io.h>
/* 32-bit word aware bit and mask macros */
#define W0_MASK(h, l) GENMASK((h) - 0, (l) - 0)
@@ -26,6 +24,10 @@
#define W2_BIT_(x) BIT((x) - 64)
#define W3_BIT_(x) BIT((x) - 96)
+#define reg_read(r) readl(hci->base_regs + (r))
+#define reg_write(r, v) writel(v, hci->base_regs + (r))
+#define reg_set(r, v) reg_write(r, reg_read(r) | (v))
+#define reg_clear(r, v) reg_write(r, reg_read(r) & ~(v))
struct hci_cmd_ops;
@@ -89,7 +91,7 @@ struct hci_xfer {
};
struct {
/* DMA specific */
- dma_addr_t data_dma;
+ struct i3c_dma *dma;
int ring_number;
int ring_entry;
};
@@ -109,7 +111,7 @@ static inline void hci_free_xfer(struct hci_xfer *xfer, unsigned int n)
/* This abstracts PIO vs DMA operations */
struct hci_io_ops {
- bool (*irq_handler)(struct i3c_hci *hci, unsigned int mask);
+ bool (*irq_handler)(struct i3c_hci *hci);
int (*queue_xfer)(struct i3c_hci *hci, struct hci_xfer *xfer, int n);
bool (*dequeue_xfer)(struct i3c_hci *hci, struct hci_xfer *xfer, int n);
int (*request_ibi)(struct i3c_hci *hci, struct i3c_dev_desc *dev,
@@ -134,11 +136,16 @@ struct i3c_hci_dev_data {
/* list of quirks */
#define HCI_QUIRK_RAW_CCC BIT(1) /* CCC framing must be explicit */
+#define HCI_QUIRK_PIO_MODE BIT(2) /* Set PIO mode for AMD platforms */
+#define HCI_QUIRK_OD_PP_TIMING BIT(3) /* Set OD and PP timings for AMD platforms */
+#define HCI_QUIRK_RESP_BUF_THLD BIT(4) /* Set resp buf thld to 0 for AMD platforms */
/* global functions */
void mipi_i3c_hci_resume(struct i3c_hci *hci);
void mipi_i3c_hci_pio_reset(struct i3c_hci *hci);
void mipi_i3c_hci_dct_index_reset(struct i3c_hci *hci);
+void amd_set_od_pp_timing(struct i3c_hci *hci);
+void amd_set_resp_buf_thld(struct i3c_hci *hci);
#endif
diff --git a/drivers/i3c/master/mipi-i3c-hci/hci_quirks.c b/drivers/i3c/master/mipi-i3c-hci/hci_quirks.c
new file mode 100644
index 000000000000..3b9c6e76c536
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/hci_quirks.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * I3C HCI Quirks
+ *
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Authors: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ * Guruvendra Punugupati <Guruvendra.Punugupati@amd.com>
+ */
+
+#include <linux/i3c/master.h>
+#include "hci.h"
+
+/* Timing registers */
+#define HCI_SCL_I3C_OD_TIMING 0x214
+#define HCI_SCL_I3C_PP_TIMING 0x218
+#define HCI_SDA_HOLD_SWITCH_DLY_TIMING 0x230
+
+/* Timing values to configure 9MHz frequency */
+#define AMD_SCL_I3C_OD_TIMING 0x00cf00cf
+#define AMD_SCL_I3C_PP_TIMING 0x00160016
+
+#define QUEUE_THLD_CTRL 0xD0
+
+void amd_set_od_pp_timing(struct i3c_hci *hci)
+{
+ u32 data;
+
+ reg_write(HCI_SCL_I3C_OD_TIMING, AMD_SCL_I3C_OD_TIMING);
+ reg_write(HCI_SCL_I3C_PP_TIMING, AMD_SCL_I3C_PP_TIMING);
+ data = reg_read(HCI_SDA_HOLD_SWITCH_DLY_TIMING);
+ /* Configure maximum TX hold time */
+ data |= W0_MASK(18, 16);
+ reg_write(HCI_SDA_HOLD_SWITCH_DLY_TIMING, data);
+}
+
+void amd_set_resp_buf_thld(struct i3c_hci *hci)
+{
+ u32 data;
+
+ data = reg_read(QUEUE_THLD_CTRL);
+ data = data & ~W0_MASK(15, 8);
+ reg_write(QUEUE_THLD_CTRL, data);
+}
diff --git a/drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c b/drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c
new file mode 100644
index 000000000000..dc8ede0f8ad8
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI glue code for MIPI I3C HCI driver
+ *
+ * Copyright (C) 2024 Intel Corporation
+ *
+ * Author: Jarkko Nikula <jarkko.nikula@linux.intel.com>
+ */
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/debugfs.h>
+#include <linux/idr.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm_qos.h>
+
+struct mipi_i3c_hci_pci {
+ struct pci_dev *pci;
+ struct platform_device *pdev;
+ const struct mipi_i3c_hci_pci_info *info;
+ void *private;
+};
+
+struct mipi_i3c_hci_pci_info {
+ int (*init)(struct mipi_i3c_hci_pci *hci);
+ void (*exit)(struct mipi_i3c_hci_pci *hci);
+};
+
+static DEFINE_IDA(mipi_i3c_hci_pci_ida);
+
+#define INTEL_PRIV_OFFSET 0x2b0
+#define INTEL_PRIV_SIZE 0x28
+#define INTEL_RESETS 0x04
+#define INTEL_RESETS_RESET BIT(0)
+#define INTEL_RESETS_RESET_DONE BIT(1)
+#define INTEL_RESETS_TIMEOUT_US (10 * USEC_PER_MSEC)
+
+#define INTEL_ACTIVELTR 0x0c
+#define INTEL_IDLELTR 0x10
+
+#define INTEL_LTR_REQ BIT(15)
+#define INTEL_LTR_SCALE_MASK GENMASK(11, 10)
+#define INTEL_LTR_SCALE_1US FIELD_PREP(INTEL_LTR_SCALE_MASK, 2)
+#define INTEL_LTR_SCALE_32US FIELD_PREP(INTEL_LTR_SCALE_MASK, 3)
+#define INTEL_LTR_VALUE_MASK GENMASK(9, 0)
+
+struct intel_host {
+ void __iomem *priv;
+ u32 active_ltr;
+ u32 idle_ltr;
+ struct dentry *debugfs_root;
+};
+
+static void intel_cache_ltr(struct intel_host *host)
+{
+ host->active_ltr = readl(host->priv + INTEL_ACTIVELTR);
+ host->idle_ltr = readl(host->priv + INTEL_IDLELTR);
+}
+
+static void intel_ltr_set(struct device *dev, s32 val)
+{
+ struct mipi_i3c_hci_pci *hci = dev_get_drvdata(dev);
+ struct intel_host *host = hci->private;
+ u32 ltr;
+
+ /*
+ * Program the latency tolerance (LTR) according to what has been
+ * asked by the PM QoS layer, or disable it in case we were passed a
+ * negative value or PM_QOS_LATENCY_ANY.
+ */
+ ltr = readl(host->priv + INTEL_ACTIVELTR);
+
+ if (val == PM_QOS_LATENCY_ANY || val < 0) {
+ ltr &= ~INTEL_LTR_REQ;
+ } else {
+ ltr |= INTEL_LTR_REQ;
+ ltr &= ~INTEL_LTR_SCALE_MASK;
+ ltr &= ~INTEL_LTR_VALUE_MASK;
+
+ if (val > INTEL_LTR_VALUE_MASK) {
+ val >>= 5;
+ if (val > INTEL_LTR_VALUE_MASK)
+ val = INTEL_LTR_VALUE_MASK;
+ ltr |= INTEL_LTR_SCALE_32US | val;
+ } else {
+ ltr |= INTEL_LTR_SCALE_1US | val;
+ }
+ }
+
+ if (ltr == host->active_ltr)
+ return;
+
+ writel(ltr, host->priv + INTEL_ACTIVELTR);
+ writel(ltr, host->priv + INTEL_IDLELTR);
+
+ /* Cache the values into intel_host structure */
+ intel_cache_ltr(host);
+}
+
+static void intel_ltr_expose(struct device *dev)
+{
+ dev->power.set_latency_tolerance = intel_ltr_set;
+ dev_pm_qos_expose_latency_tolerance(dev);
+}
+
+static void intel_ltr_hide(struct device *dev)
+{
+ dev_pm_qos_hide_latency_tolerance(dev);
+ dev->power.set_latency_tolerance = NULL;
+}
+
+static void intel_add_debugfs(struct mipi_i3c_hci_pci *hci)
+{
+ struct dentry *dir = debugfs_create_dir(dev_name(&hci->pci->dev), NULL);
+ struct intel_host *host = hci->private;
+
+ intel_cache_ltr(host);
+
+ host->debugfs_root = dir;
+ debugfs_create_x32("active_ltr", 0444, dir, &host->active_ltr);
+ debugfs_create_x32("idle_ltr", 0444, dir, &host->idle_ltr);
+}
+
+static void intel_remove_debugfs(struct mipi_i3c_hci_pci *hci)
+{
+ struct intel_host *host = hci->private;
+
+ debugfs_remove_recursive(host->debugfs_root);
+}
+
+static void intel_reset(void __iomem *priv)
+{
+ u32 reg;
+
+ /* Assert reset, wait for completion and release reset */
+ writel(0, priv + INTEL_RESETS);
+ readl_poll_timeout(priv + INTEL_RESETS, reg,
+ reg & INTEL_RESETS_RESET_DONE, 0,
+ INTEL_RESETS_TIMEOUT_US);
+ writel(INTEL_RESETS_RESET, priv + INTEL_RESETS);
+}
+
+static void __iomem *intel_priv(struct pci_dev *pci)
+{
+ resource_size_t base = pci_resource_start(pci, 0);
+
+ return devm_ioremap(&pci->dev, base + INTEL_PRIV_OFFSET, INTEL_PRIV_SIZE);
+}
+
+static int intel_i3c_init(struct mipi_i3c_hci_pci *hci)
+{
+ struct intel_host *host = devm_kzalloc(&hci->pci->dev, sizeof(*host), GFP_KERNEL);
+ void __iomem *priv = intel_priv(hci->pci);
+
+ if (!host || !priv)
+ return -ENOMEM;
+
+ dma_set_mask_and_coherent(&hci->pci->dev, DMA_BIT_MASK(64));
+
+ hci->pci->d3cold_delay = 0;
+
+ hci->private = host;
+ host->priv = priv;
+
+ intel_reset(priv);
+
+ intel_ltr_expose(&hci->pci->dev);
+ intel_add_debugfs(hci);
+
+ return 0;
+}
+
+static void intel_i3c_exit(struct mipi_i3c_hci_pci *hci)
+{
+ intel_remove_debugfs(hci);
+ intel_ltr_hide(&hci->pci->dev);
+}
+
+static const struct mipi_i3c_hci_pci_info intel_info = {
+ .init = intel_i3c_init,
+ .exit = intel_i3c_exit,
+};
+
+static int mipi_i3c_hci_pci_probe(struct pci_dev *pci,
+ const struct pci_device_id *id)
+{
+ struct mipi_i3c_hci_pci *hci;
+ struct resource res[2];
+ int dev_id, ret;
+
+ hci = devm_kzalloc(&pci->dev, sizeof(*hci), GFP_KERNEL);
+ if (!hci)
+ return -ENOMEM;
+
+ hci->pci = pci;
+
+ ret = pcim_enable_device(pci);
+ if (ret)
+ return ret;
+
+ pci_set_master(pci);
+
+ memset(&res, 0, sizeof(res));
+
+ res[0].flags = IORESOURCE_MEM;
+ res[0].start = pci_resource_start(pci, 0);
+ res[0].end = pci_resource_end(pci, 0);
+
+ res[1].flags = IORESOURCE_IRQ;
+ res[1].start = pci->irq;
+ res[1].end = pci->irq;
+
+ dev_id = ida_alloc(&mipi_i3c_hci_pci_ida, GFP_KERNEL);
+ if (dev_id < 0)
+ return dev_id;
+
+ hci->pdev = platform_device_alloc("mipi-i3c-hci", dev_id);
+ if (!hci->pdev)
+ return -ENOMEM;
+
+ hci->pdev->dev.parent = &pci->dev;
+ device_set_node(&hci->pdev->dev, dev_fwnode(&pci->dev));
+
+ ret = platform_device_add_resources(hci->pdev, res, ARRAY_SIZE(res));
+ if (ret)
+ goto err;
+
+ hci->info = (const struct mipi_i3c_hci_pci_info *)id->driver_data;
+ if (hci->info && hci->info->init) {
+ ret = hci->info->init(hci);
+ if (ret)
+ goto err;
+ }
+
+ ret = platform_device_add(hci->pdev);
+ if (ret)
+ goto err_exit;
+
+ pci_set_drvdata(pci, hci);
+
+ return 0;
+
+err_exit:
+ if (hci->info && hci->info->exit)
+ hci->info->exit(hci);
+err:
+ platform_device_put(hci->pdev);
+ ida_free(&mipi_i3c_hci_pci_ida, dev_id);
+ return ret;
+}
+
+static void mipi_i3c_hci_pci_remove(struct pci_dev *pci)
+{
+ struct mipi_i3c_hci_pci *hci = pci_get_drvdata(pci);
+ struct platform_device *pdev = hci->pdev;
+ int dev_id = pdev->id;
+
+ if (hci->info && hci->info->exit)
+ hci->info->exit(hci);
+
+ platform_device_unregister(pdev);
+ ida_free(&mipi_i3c_hci_pci_ida, dev_id);
+}
+
+static const struct pci_device_id mipi_i3c_hci_pci_devices[] = {
+ /* Wildcat Lake-U */
+ { PCI_VDEVICE(INTEL, 0x4d7c), (kernel_ulong_t)&intel_info},
+ { PCI_VDEVICE(INTEL, 0x4d6f), (kernel_ulong_t)&intel_info},
+ /* Panther Lake-H */
+ { PCI_VDEVICE(INTEL, 0xe37c), (kernel_ulong_t)&intel_info},
+ { PCI_VDEVICE(INTEL, 0xe36f), (kernel_ulong_t)&intel_info},
+ /* Panther Lake-P */
+ { PCI_VDEVICE(INTEL, 0xe47c), (kernel_ulong_t)&intel_info},
+ { PCI_VDEVICE(INTEL, 0xe46f), (kernel_ulong_t)&intel_info},
+ /* Nova Lake-S */
+ { PCI_VDEVICE(INTEL, 0x6e2c), (kernel_ulong_t)&intel_info},
+ { PCI_VDEVICE(INTEL, 0x6e2d), (kernel_ulong_t)&intel_info},
+ { },
+};
+MODULE_DEVICE_TABLE(pci, mipi_i3c_hci_pci_devices);
+
+static struct pci_driver mipi_i3c_hci_pci_driver = {
+ .name = "mipi_i3c_hci_pci",
+ .id_table = mipi_i3c_hci_pci_devices,
+ .probe = mipi_i3c_hci_pci_probe,
+ .remove = mipi_i3c_hci_pci_remove,
+};
+
+module_pci_driver(mipi_i3c_hci_pci_driver);
+
+MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MIPI I3C HCI driver on PCI bus");
diff --git a/drivers/i3c/master/mipi-i3c-hci/pio.c b/drivers/i3c/master/mipi-i3c-hci/pio.c
index d0272aa93599..710faa46a00f 100644
--- a/drivers/i3c/master/mipi-i3c-hci/pio.c
+++ b/drivers/i3c/master/mipi-i3c-hci/pio.c
@@ -213,8 +213,8 @@ static void hci_pio_cleanup(struct i3c_hci *hci)
pio_reg_write(INTR_SIGNAL_ENABLE, 0x0);
if (pio) {
- DBG("status = %#x/%#x",
- pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
+ dev_dbg(&hci->master.dev, "status = %#x/%#x",
+ pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
BUG_ON(pio->curr_xfer);
BUG_ON(pio->curr_rx);
BUG_ON(pio->curr_tx);
@@ -226,13 +226,17 @@ static void hci_pio_cleanup(struct i3c_hci *hci)
static void hci_pio_write_cmd(struct i3c_hci *hci, struct hci_xfer *xfer)
{
- DBG("cmd_desc[%d] = 0x%08x", 0, xfer->cmd_desc[0]);
- DBG("cmd_desc[%d] = 0x%08x", 1, xfer->cmd_desc[1]);
+ dev_dbg(&hci->master.dev, "cmd_desc[%d] = 0x%08x",
+ 0, xfer->cmd_desc[0]);
+ dev_dbg(&hci->master.dev, "cmd_desc[%d] = 0x%08x",
+ 1, xfer->cmd_desc[1]);
pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[0]);
pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[1]);
if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
- DBG("cmd_desc[%d] = 0x%08x", 2, xfer->cmd_desc[2]);
- DBG("cmd_desc[%d] = 0x%08x", 3, xfer->cmd_desc[3]);
+ dev_dbg(&hci->master.dev, "cmd_desc[%d] = 0x%08x",
+ 2, xfer->cmd_desc[2]);
+ dev_dbg(&hci->master.dev, "cmd_desc[%d] = 0x%08x",
+ 3, xfer->cmd_desc[3]);
pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[2]);
pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[3]);
}
@@ -254,7 +258,8 @@ static bool hci_pio_do_rx(struct i3c_hci *hci, struct hci_pio_data *pio)
nr_words = min(xfer->data_left / 4, pio->rx_thresh_size);
/* extract data from FIFO */
xfer->data_left -= nr_words * 4;
- DBG("now %d left %d", nr_words * 4, xfer->data_left);
+ dev_dbg(&hci->master.dev, "now %d left %d",
+ nr_words * 4, xfer->data_left);
while (nr_words--)
*p++ = pio_reg_read(XFER_DATA_PORT);
}
@@ -269,7 +274,7 @@ static void hci_pio_do_trailing_rx(struct i3c_hci *hci,
struct hci_xfer *xfer = pio->curr_rx;
u32 *p;
- DBG("%d remaining", count);
+ dev_dbg(&hci->master.dev, "%d remaining", count);
p = xfer->data;
p += (xfer->data_len - xfer->data_left) / 4;
@@ -278,7 +283,8 @@ static void hci_pio_do_trailing_rx(struct i3c_hci *hci,
unsigned int nr_words = count / 4;
/* extract data from FIFO */
xfer->data_left -= nr_words * 4;
- DBG("now %d left %d", nr_words * 4, xfer->data_left);
+ dev_dbg(&hci->master.dev, "now %d left %d",
+ nr_words * 4, xfer->data_left);
while (nr_words--)
*p++ = pio_reg_read(XFER_DATA_PORT);
}
@@ -321,7 +327,8 @@ static bool hci_pio_do_tx(struct i3c_hci *hci, struct hci_pio_data *pio)
nr_words = min(xfer->data_left / 4, pio->tx_thresh_size);
/* push data into the FIFO */
xfer->data_left -= nr_words * 4;
- DBG("now %d left %d", nr_words * 4, xfer->data_left);
+ dev_dbg(&hci->master.dev, "now %d left %d",
+ nr_words * 4, xfer->data_left);
while (nr_words--)
pio_reg_write(XFER_DATA_PORT, *p++);
}
@@ -336,7 +343,7 @@ static bool hci_pio_do_tx(struct i3c_hci *hci, struct hci_pio_data *pio)
*/
if (!(pio_reg_read(INTR_STATUS) & STAT_TX_THLD))
return false;
- DBG("trailing %d", xfer->data_left);
+ dev_dbg(&hci->master.dev, "trailing %d", xfer->data_left);
pio_reg_write(XFER_DATA_PORT, *p);
xfer->data_left = 0;
}
@@ -481,7 +488,7 @@ static bool hci_pio_process_resp(struct i3c_hci *hci, struct hci_pio_data *pio)
u32 resp = pio_reg_read(RESPONSE_QUEUE_PORT);
unsigned int tid = RESP_TID(resp);
- DBG("resp = 0x%08x", resp);
+ dev_dbg(&hci->master.dev, "resp = 0x%08x", resp);
if (tid != xfer->cmd_tid) {
dev_err(&hci->master.dev,
"response tid=%d when expecting %d\n",
@@ -522,14 +529,15 @@ static bool hci_pio_process_resp(struct i3c_hci *hci, struct hci_pio_data *pio)
* still exists.
*/
if (pio->curr_rx == xfer) {
- DBG("short RX ?");
+ dev_dbg(&hci->master.dev, "short RX ?");
pio->curr_rx = pio->curr_rx->next_data;
} else if (pio->curr_tx == xfer) {
- DBG("short TX ?");
+ dev_dbg(&hci->master.dev, "short TX ?");
pio->curr_tx = pio->curr_tx->next_data;
} else if (xfer->data_left) {
- DBG("PIO xfer count = %d after response",
- xfer->data_left);
+ dev_dbg(&hci->master.dev,
+ "PIO xfer count = %d after response",
+ xfer->data_left);
}
pio->curr_resp = xfer->next_resp;
@@ -591,7 +599,7 @@ static int hci_pio_queue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n)
struct hci_xfer *prev_queue_tail;
int i;
- DBG("n = %d", n);
+ dev_dbg(&hci->master.dev, "n = %d", n);
/* link xfer instances together and initialize data count */
for (i = 0; i < n; i++) {
@@ -611,8 +619,9 @@ static int hci_pio_queue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n)
if (!hci_pio_process_cmd(hci, pio))
pio->enabled_irqs |= STAT_CMD_QUEUE_READY;
pio_reg_write(INTR_SIGNAL_ENABLE, pio->enabled_irqs);
- DBG("status = %#x/%#x",
- pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
+ dev_dbg(&hci->master.dev, "status = %#x/%#x",
+ pio_reg_read(INTR_STATUS),
+ pio_reg_read(INTR_SIGNAL_ENABLE));
}
spin_unlock_irq(&pio->lock);
return 0;
@@ -686,10 +695,10 @@ static bool hci_pio_dequeue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int
int ret;
spin_lock_irq(&pio->lock);
- DBG("n=%d status=%#x/%#x", n,
- pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
- DBG("main_status = %#x/%#x",
- readl(hci->base_regs + 0x20), readl(hci->base_regs + 0x28));
+ dev_dbg(&hci->master.dev, "n=%d status=%#x/%#x", n,
+ pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
+ dev_dbg(&hci->master.dev, "main_status = %#x/%#x",
+ readl(hci->base_regs + 0x20), readl(hci->base_regs + 0x28));
ret = hci_pio_dequeue_xfer_common(hci, pio, xfer, n);
spin_unlock_irq(&pio->lock);
@@ -733,8 +742,8 @@ static void hci_pio_err(struct i3c_hci *hci, struct hci_pio_data *pio,
mipi_i3c_hci_pio_reset(hci);
mipi_i3c_hci_resume(hci);
- DBG("status=%#x/%#x",
- pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
+ dev_dbg(&hci->master.dev, "status=%#x/%#x",
+ pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
}
static void hci_pio_set_ibi_thresh(struct i3c_hci *hci,
@@ -749,7 +758,7 @@ static void hci_pio_set_ibi_thresh(struct i3c_hci *hci,
if (regval != pio->reg_queue_thresh) {
pio_reg_write(QUEUE_THLD_CTRL, regval);
pio->reg_queue_thresh = regval;
- DBG("%d", thresh_val);
+ dev_dbg(&hci->master.dev, "%d", thresh_val);
}
}
@@ -773,7 +782,8 @@ static bool hci_pio_get_ibi_segment(struct i3c_hci *hci,
/* extract the data from the IBI port */
nr_words = thresh_val;
ibi->seg_cnt -= nr_words * 4;
- DBG("now %d left %d", nr_words * 4, ibi->seg_cnt);
+ dev_dbg(&hci->master.dev, "now %d left %d",
+ nr_words * 4, ibi->seg_cnt);
while (nr_words--)
*p++ = pio_reg_read(IBI_PORT);
}
@@ -791,7 +801,7 @@ static bool hci_pio_get_ibi_segment(struct i3c_hci *hci,
hci_pio_set_ibi_thresh(hci, pio, 1);
if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD))
return false;
- DBG("trailing %d", ibi->seg_cnt);
+ dev_dbg(&hci->master.dev, "trailing %d", ibi->seg_cnt);
data = pio_reg_read(IBI_PORT);
data = (__force u32) cpu_to_le32(data);
while (ibi->seg_cnt--) {
@@ -820,7 +830,7 @@ static bool hci_pio_prep_new_ibi(struct i3c_hci *hci, struct hci_pio_data *pio)
*/
ibi_status = pio_reg_read(IBI_PORT);
- DBG("status = %#x", ibi_status);
+ dev_dbg(&hci->master.dev, "status = %#x", ibi_status);
ibi->addr = FIELD_GET(IBI_TARGET_ADDR, ibi_status);
if (ibi_status & IBI_ERROR) {
dev_err(&hci->master.dev, "IBI error from %#x\n", ibi->addr);
@@ -979,14 +989,15 @@ static void hci_pio_recycle_ibi_slot(struct i3c_hci *hci,
i3c_generic_ibi_recycle_slot(dev_ibi->pool, slot);
}
-static bool hci_pio_irq_handler(struct i3c_hci *hci, unsigned int unused)
+static bool hci_pio_irq_handler(struct i3c_hci *hci)
{
struct hci_pio_data *pio = hci->io_data;
u32 status;
spin_lock(&pio->lock);
status = pio_reg_read(INTR_STATUS);
- DBG("(in) status: %#x/%#x", status, pio->enabled_irqs);
+ dev_dbg(&hci->master.dev, "PIO_INTR_STATUS %#x/%#x",
+ status, pio->enabled_irqs);
status &= pio->enabled_irqs | STAT_LATENCY_WARNINGS;
if (!status) {
spin_unlock(&pio->lock);
@@ -1023,8 +1034,8 @@ static bool hci_pio_irq_handler(struct i3c_hci *hci, unsigned int unused)
pio->enabled_irqs &= ~STAT_CMD_QUEUE_READY;
pio_reg_write(INTR_SIGNAL_ENABLE, pio->enabled_irqs);
- DBG("(out) status: %#x/%#x",
- pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
+ dev_dbg(&hci->master.dev, "PIO_INTR_STATUS %#x/%#x",
+ pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
spin_unlock(&pio->lock);
return true;
}
diff --git a/drivers/i3c/master/renesas-i3c.c b/drivers/i3c/master/renesas-i3c.c
new file mode 100644
index 000000000000..275f7b924288
--- /dev/null
+++ b/drivers/i3c/master/renesas-i3c.c
@@ -0,0 +1,1404 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas I3C Controller driver
+ * Copyright (C) 2023-25 Renesas Electronics Corp.
+ *
+ * TODO: IBI support, HotJoin support, Target support
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/i3c/master.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/iopoll.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include "../internals.h"
+
+#define PRTS 0x00
+#define PRTS_PRTMD BIT(0)
+
+#define BCTL 0x14
+#define BCTL_INCBA BIT(0)
+#define BCTL_HJACKCTL BIT(8)
+#define BCTL_ABT BIT(29)
+#define BCTL_BUSE BIT(31)
+
+#define MSDVAD 0x18
+#define MSDVAD_MDYAD(x) FIELD_PREP(GENMASK(21, 16), x)
+#define MSDVAD_MDYADV BIT(31)
+
+#define RSTCTL 0x20
+#define RSTCTL_RI3CRST BIT(0)
+#define RSTCTL_INTLRST BIT(16)
+
+#define INST 0x30
+
+#define IBINCTL 0x58
+#define IBINCTL_NRHJCTL BIT(0)
+#define IBINCTL_NRMRCTL BIT(1)
+#define IBINCTL_NRSIRCTL BIT(3)
+
+#define SVCTL 0x64
+
+#define REFCKCTL 0x70
+#define REFCKCTL_IREFCKS(x) FIELD_PREP(GENMASK(2, 0), x)
+
+#define STDBR 0x74
+#define STDBR_SBRLO(cond, x) FIELD_PREP(GENMASK(7, 0), (x) >> (cond))
+#define STDBR_SBRHO(cond, x) FIELD_PREP(GENMASK(15, 8), (x) >> (cond))
+#define STDBR_SBRLP(x) FIELD_PREP(GENMASK(21, 16), x)
+#define STDBR_SBRHP(x) FIELD_PREP(GENMASK(29, 24), x)
+#define STDBR_DSBRPO BIT(31)
+
+#define EXTBR 0x78
+#define EXTBR_EBRLO(x) FIELD_PREP(GENMASK(7, 0), x)
+#define EXTBR_EBRHO(x) FIELD_PREP(GENMASK(15, 8), x)
+#define EXTBR_EBRLP(x) FIELD_PREP(GENMASK(21, 16), x)
+#define EXTBR_EBRHP(x) FIELD_PREP(GENMASK(29, 24), x)
+
+#define BFRECDT 0x7c
+#define BFRECDT_FRECYC(x) FIELD_PREP(GENMASK(8, 0), x)
+
+#define BAVLCDT 0x80
+#define BAVLCDT_AVLCYC(x) FIELD_PREP(GENMASK(8, 0), x)
+
+#define BIDLCDT 0x84
+#define BIDLCDT_IDLCYC(x) FIELD_PREP(GENMASK(17, 0), x)
+
+#define ACKCTL 0xa0
+#define ACKCTL_ACKT BIT(1)
+#define ACKCTL_ACKTWP BIT(2)
+
+#define SCSTRCTL 0xa4
+#define SCSTRCTL_ACKTWE BIT(0)
+#define SCSTRCTL_RWE BIT(1)
+
+#define SCSTLCTL 0xb0
+
+#define CNDCTL 0x140
+#define CNDCTL_STCND BIT(0)
+#define CNDCTL_SRCND BIT(1)
+#define CNDCTL_SPCND BIT(2)
+
+#define NCMDQP 0x150 /* Normal Command Queue */
+#define NCMDQP_CMD_ATTR(x) FIELD_PREP(GENMASK(2, 0), x)
+#define NCMDQP_IMMED_XFER 0x01
+#define NCMDQP_ADDR_ASSGN 0x02
+#define NCMDQP_TID(x) FIELD_PREP(GENMASK(6, 3), x)
+#define NCMDQP_CMD(x) FIELD_PREP(GENMASK(14, 7), x)
+#define NCMDQP_CP BIT(15)
+#define NCMDQP_DEV_INDEX(x) FIELD_PREP(GENMASK(20, 16), x)
+#define NCMDQP_BYTE_CNT(x) FIELD_PREP(GENMASK(25, 23), x)
+#define NCMDQP_DEV_COUNT(x) FIELD_PREP(GENMASK(29, 26), x)
+#define NCMDQP_MODE(x) FIELD_PREP(GENMASK(28, 26), x)
+#define NCMDQP_RNW(x) FIELD_PREP(GENMASK(29, 29), x)
+#define NCMDQP_ROC BIT(30)
+#define NCMDQP_TOC BIT(31)
+#define NCMDQP_DATA_LENGTH(x) FIELD_PREP(GENMASK(31, 16), x)
+
+#define NRSPQP 0x154 /* Normal Response Queue */
+#define NRSPQP_NO_ERROR 0
+#define NRSPQP_ERROR_CRC 1
+#define NRSPQP_ERROR_PARITY 2
+#define NRSPQP_ERROR_FRAME 3
+#define NRSPQP_ERROR_IBA_NACK 4
+#define NRSPQP_ERROR_ADDRESS_NACK 5
+#define NRSPQP_ERROR_OVER_UNDER_FLOW 6
+#define NRSPQP_ERROR_TRANSF_ABORT 8
+#define NRSPQP_ERROR_I2C_W_NACK_ERR 9
+#define NRSPQP_ERROR_UNSUPPORTED 10
+#define NRSPQP_DATA_LEN(x) FIELD_GET(GENMASK(15, 0), x)
+#define NRSPQP_ERR_STATUS(x) FIELD_GET(GENMASK(31, 28), x)
+
+#define NTDTBP0 0x158 /* Normal Transfer Data Buffer */
+#define NTDTBP0_DEPTH 16
+
+#define NQTHCTL 0x190
+#define NQTHCTL_CMDQTH(x) FIELD_PREP(GENMASK(1, 0), x)
+#define NQTHCTL_IBIDSSZ(x) FIELD_PREP(GENMASK(23, 16), x)
+
+#define NTBTHCTL0 0x194
+
+#define NRQTHCTL 0x1c0
+
+#define BST 0x1d0
+#define BST_STCNDDF BIT(0)
+#define BST_SPCNDDF BIT(1)
+#define BST_NACKDF BIT(4)
+#define BST_TENDF BIT(8)
+
+#define BSTE 0x1d4
+#define BSTE_STCNDDE BIT(0)
+#define BSTE_SPCNDDE BIT(1)
+#define BSTE_NACKDE BIT(4)
+#define BSTE_TENDE BIT(8)
+#define BSTE_ALE BIT(16)
+#define BSTE_TODE BIT(20)
+#define BSTE_WUCNDDE BIT(24)
+#define BSTE_ALL_FLAG (BSTE_STCNDDE | BSTE_SPCNDDE |\
+ BSTE_NACKDE | BSTE_TENDE |\
+ BSTE_ALE | BSTE_TODE | BSTE_WUCNDDE)
+
+#define BIE 0x1d8
+#define BIE_STCNDDIE BIT(0)
+#define BIE_SPCNDDIE BIT(1)
+#define BIE_NACKDIE BIT(4)
+#define BIE_TENDIE BIT(8)
+
+#define NTST 0x1e0
+#define NTST_TDBEF0 BIT(0)
+#define NTST_RDBFF0 BIT(1)
+#define NTST_CMDQEF BIT(3)
+#define NTST_RSPQFF BIT(4)
+#define NTST_TABTF BIT(5)
+#define NTST_TEF BIT(9)
+
+#define NTSTE 0x1e4
+#define NTSTE_TDBEE0 BIT(0)
+#define NTSTE_RDBFE0 BIT(1)
+#define NTSTE_IBIQEFE BIT(2)
+#define NTSTE_CMDQEE BIT(3)
+#define NTSTE_RSPQFE BIT(4)
+#define NTSTE_TABTE BIT(5)
+#define NTSTE_TEE BIT(9)
+#define NTSTE_RSQFE BIT(20)
+#define NTSTE_ALL_FLAG (NTSTE_TDBEE0 | NTSTE_RDBFE0 |\
+ NTSTE_IBIQEFE | NTSTE_CMDQEE |\
+ NTSTE_RSPQFE | NTSTE_TABTE |\
+ NTSTE_TEE | NTSTE_RSQFE)
+
+#define NTIE 0x1e8
+#define NTIE_TDBEIE0 BIT(0)
+#define NTIE_RDBFIE0 BIT(1)
+#define NTIE_IBIQEFIE BIT(2)
+#define NTIE_RSPQFIE BIT(4)
+#define NTIE_RSQFIE BIT(20)
+
+#define BCST 0x210
+#define BCST_BFREF BIT(0)
+
+#define DATBAS(x) (0x224 + 0x8 * (x))
+#define DATBAS_DVSTAD(x) FIELD_PREP(GENMASK(6, 0), x)
+#define DATBAS_DVDYAD(x) FIELD_PREP(GENMASK(23, 16), x)
+
+#define NDBSTLV0 0x398
+#define NDBSTLV0_RDBLV(x) FIELD_GET(GENMASK(15, 8), x)
+
+#define RENESAS_I3C_MAX_DEVS 8
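+/*
+ * I2C_INIT_MSG is a sentinel value for cmd->i2c_bytes_left: it marks an I2C
+ * message whose address byte has not been sent yet. The start/rx ISRs below
+ * replace it with the real message length once the address phase is done.
+ */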
+#define I2C_INIT_MSG -1
+
+enum i3c_internal_state {
+ I3C_INTERNAL_STATE_DISABLED,
+ I3C_INTERNAL_STATE_CONTROLLER_IDLE,
+ I3C_INTERNAL_STATE_CONTROLLER_ENTDAA,
+ I3C_INTERNAL_STATE_CONTROLLER_SETDASA,
+ I3C_INTERNAL_STATE_CONTROLLER_WRITE,
+ I3C_INTERNAL_STATE_CONTROLLER_READ,
+ I3C_INTERNAL_STATE_CONTROLLER_COMMAND_WRITE,
+ I3C_INTERNAL_STATE_CONTROLLER_COMMAND_READ,
+};
+
+enum renesas_i3c_event {
+ I3C_COMMAND_ADDRESS_ASSIGNMENT,
+ I3C_WRITE,
+ I3C_READ,
+ I3C_COMMAND_WRITE,
+ I3C_COMMAND_READ,
+};
+
+struct renesas_i3c_cmd {
+ u32 cmd0;
+ u32 len;
+ const void *tx_buf;
+ u32 tx_count;
+ void *rx_buf;
+ u32 rx_count;
+ u32 err;
+ u8 rnw;
+ /* i2c xfer */
+ int i2c_bytes_left;
+ int i2c_is_last;
+ u8 *i2c_buf;
+ const struct i2c_msg *msg;
+};
+
+struct renesas_i3c_xfer {
+ struct list_head node;
+ struct completion comp;
+ int ret;
+ bool is_i2c_xfer;
+ unsigned int ncmds;
+ struct renesas_i3c_cmd cmds[] __counted_by(ncmds);
+};
+
+struct renesas_i3c_xferqueue {
+ struct list_head list;
+ struct renesas_i3c_xfer *cur;
+ /* Lock for accessing the xfer queue */
+ spinlock_t lock;
+};
+
+struct renesas_i3c {
+ struct i3c_master_controller base;
+ enum i3c_internal_state internal_state;
+ u16 maxdevs;
+ u32 free_pos;
+ u32 i2c_STDBR;
+ u32 i3c_STDBR;
+ u8 addrs[RENESAS_I3C_MAX_DEVS];
+ struct renesas_i3c_xferqueue xferqueue;
+ void __iomem *regs;
+ struct clk *tclk;
+};
+
+struct renesas_i3c_i2c_dev_data {
+ u8 index;
+};
+
+struct renesas_i3c_irq_desc {
+ const char *name;
+ irq_handler_t isr;
+ const char *desc;
+};
+
+struct renesas_i3c_config {
+ unsigned int has_pclkrw:1;
+};
+
+static inline void renesas_i3c_reg_update(void __iomem *reg, u32 mask, u32 val)
+{
+ u32 data = readl(reg);
+
+ data &= ~mask;
+ data |= (val & mask);
+ writel(data, reg);
+}
+
+static inline u32 renesas_readl(void __iomem *base, u32 reg)
+{
+ return readl(base + reg);
+}
+
+static inline void renesas_writel(void __iomem *base, u32 reg, u32 val)
+{
+ writel(val, base + reg);
+}
+
+static void renesas_set_bit(void __iomem *base, u32 reg, u32 val)
+{
+ renesas_i3c_reg_update(base + reg, val, val);
+}
+
+static void renesas_clear_bit(void __iomem *base, u32 reg, u32 val)
+{
+ renesas_i3c_reg_update(base + reg, val, 0);
+}
+
+static inline struct renesas_i3c *to_renesas_i3c(struct i3c_master_controller *m)
+{
+ return container_of(m, struct renesas_i3c, base);
+}
+
+static inline u32 datbas_dvdyad_with_parity(u8 addr)
+{
+ return DATBAS_DVDYAD(addr | (parity8(addr) ? 0 : BIT(7)));
+}
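+
+/*
+ * Worked example (illustrative only): for dynamic address 0x50 (0b1010000,
+ * an even number of set bits) parity8() returns 0, so BIT(7) is set and the
+ * stored DVDYAD byte is 0xd0, giving the odd overall parity the field
+ * appears to expect.
+ */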
+
+static int renesas_i3c_get_free_pos(struct renesas_i3c *i3c)
+{
+ if (!(i3c->free_pos & GENMASK(i3c->maxdevs - 1, 0)))
+ return -ENOSPC;
+
+ return ffs(i3c->free_pos) - 1;
+}
+
+static int renesas_i3c_get_addr_pos(struct renesas_i3c *i3c, u8 addr)
+{
+ int pos;
+
+ for (pos = 0; pos < i3c->maxdevs; pos++) {
+ if (addr == i3c->addrs[pos])
+ return pos;
+ }
+
+ return -EINVAL;
+}
+
+static struct renesas_i3c_xfer *renesas_i3c_alloc_xfer(struct renesas_i3c *i3c,
+ unsigned int ncmds)
+{
+ struct renesas_i3c_xfer *xfer;
+
+ xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
+ if (!xfer)
+ return NULL;
+
+ INIT_LIST_HEAD(&xfer->node);
+ xfer->ncmds = ncmds;
+ xfer->ret = -ETIMEDOUT;
+
+ return xfer;
+}
+
+static void renesas_i3c_start_xfer_locked(struct renesas_i3c *i3c)
+{
+ struct renesas_i3c_xfer *xfer = i3c->xferqueue.cur;
+ struct renesas_i3c_cmd *cmd;
+ u32 cmd1;
+
+ if (!xfer)
+ return;
+
+ cmd = xfer->cmds;
+
+ switch (i3c->internal_state) {
+ case I3C_INTERNAL_STATE_CONTROLLER_ENTDAA:
+ case I3C_INTERNAL_STATE_CONTROLLER_SETDASA:
+ renesas_set_bit(i3c->regs, NTIE, NTIE_RSPQFIE);
+ renesas_writel(i3c->regs, NCMDQP, cmd->cmd0);
+ renesas_writel(i3c->regs, NCMDQP, 0);
+ break;
+ case I3C_INTERNAL_STATE_CONTROLLER_WRITE:
+ case I3C_INTERNAL_STATE_CONTROLLER_COMMAND_WRITE:
+ renesas_set_bit(i3c->regs, NTIE, NTIE_RSPQFIE);
+ if (cmd->len <= 4) {
+ cmd->cmd0 |= NCMDQP_CMD_ATTR(NCMDQP_IMMED_XFER);
+ cmd->cmd0 |= NCMDQP_BYTE_CNT(cmd->len);
+ cmd->tx_count = cmd->len;
+ cmd1 = cmd->len == 0 ? 0 : *(u32 *)cmd->tx_buf;
+ } else {
+ cmd1 = NCMDQP_DATA_LENGTH(cmd->len);
+ }
+ renesas_writel(i3c->regs, NCMDQP, cmd->cmd0);
+ renesas_writel(i3c->regs, NCMDQP, cmd1);
+ break;
+ case I3C_INTERNAL_STATE_CONTROLLER_READ:
+ case I3C_INTERNAL_STATE_CONTROLLER_COMMAND_READ:
+ renesas_set_bit(i3c->regs, NTIE, NTIE_RDBFIE0);
+ cmd1 = NCMDQP_DATA_LENGTH(cmd->len);
+ renesas_writel(i3c->regs, NCMDQP, cmd->cmd0);
+ renesas_writel(i3c->regs, NCMDQP, cmd1);
+ break;
+ default:
+ break;
+ }
+
+ /* Clear the command queue empty flag */
+ renesas_clear_bit(i3c->regs, NTST, NTST_CMDQEF);
+}
+
+static void renesas_i3c_dequeue_xfer_locked(struct renesas_i3c *i3c,
+ struct renesas_i3c_xfer *xfer)
+{
+ if (i3c->xferqueue.cur == xfer)
+ i3c->xferqueue.cur = NULL;
+ else
+ list_del_init(&xfer->node);
+}
+
+static void renesas_i3c_dequeue_xfer(struct renesas_i3c *i3c, struct renesas_i3c_xfer *xfer)
+{
+ scoped_guard(spinlock_irqsave, &i3c->xferqueue.lock)
+ renesas_i3c_dequeue_xfer_locked(i3c, xfer);
+}
+
+static void renesas_i3c_enqueue_xfer(struct renesas_i3c *i3c, struct renesas_i3c_xfer *xfer)
+{
+ reinit_completion(&xfer->comp);
+ scoped_guard(spinlock_irqsave, &i3c->xferqueue.lock) {
+ if (i3c->xferqueue.cur) {
+ list_add_tail(&xfer->node, &i3c->xferqueue.list);
+ } else {
+ i3c->xferqueue.cur = xfer;
+ if (!xfer->is_i2c_xfer)
+ renesas_i3c_start_xfer_locked(i3c);
+ }
+ }
+}
+
+static void renesas_i3c_wait_xfer(struct renesas_i3c *i3c, struct renesas_i3c_xfer *xfer)
+{
+ unsigned long time_left;
+
+ renesas_i3c_enqueue_xfer(i3c, xfer);
+
+ time_left = wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000));
+ if (!time_left)
+ renesas_i3c_dequeue_xfer(i3c, xfer);
+}
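+
+/*
+ * Transfer lifecycle (summary of the helpers above): a transfer is allocated
+ * with ret preset to -ETIMEDOUT, queued with renesas_i3c_enqueue_xfer() and,
+ * unless it is an I2C transfer, started immediately when the queue is empty.
+ * The response ISR sets xfer->ret, completes xfer->comp and promotes the next
+ * queued transfer; on a 1s timeout the caller dequeues the transfer itself.
+ */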
+
+static void renesas_i3c_set_prts(struct renesas_i3c *i3c, u32 val)
+{
+ /* Required sequence according to tnrza0140ae */
+ renesas_set_bit(i3c->regs, RSTCTL, RSTCTL_INTLRST);
+ renesas_writel(i3c->regs, PRTS, val);
+ renesas_clear_bit(i3c->regs, RSTCTL, RSTCTL_INTLRST);
+}
+
+static void renesas_i3c_bus_enable(struct i3c_master_controller *m, bool i3c_mode)
+{
+ struct renesas_i3c *i3c = to_renesas_i3c(m);
+
+ /* Setup either I3C or I2C protocol */
+ if (i3c_mode) {
+ renesas_i3c_set_prts(i3c, 0);
+ /* Revisit: INCBA handling, especially after I2C transfers */
+ renesas_set_bit(i3c->regs, BCTL, BCTL_HJACKCTL | BCTL_INCBA);
+ renesas_set_bit(i3c->regs, MSDVAD, MSDVAD_MDYADV);
+ renesas_writel(i3c->regs, STDBR, i3c->i3c_STDBR);
+ } else {
+ renesas_i3c_set_prts(i3c, PRTS_PRTMD);
+ renesas_writel(i3c->regs, STDBR, i3c->i2c_STDBR);
+ }
+
+ /* Enable I3C bus */
+ renesas_set_bit(i3c->regs, BCTL, BCTL_BUSE);
+}
+
+static int renesas_i3c_reset(struct renesas_i3c *i3c)
+{
+ u32 val;
+
+ renesas_writel(i3c->regs, BCTL, 0);
+ renesas_set_bit(i3c->regs, RSTCTL, RSTCTL_RI3CRST);
+
+ return read_poll_timeout(renesas_readl, val, !(val & RSTCTL_RI3CRST),
+ 0, 1000, false, i3c->regs, RSTCTL);
+}
+
+static int renesas_i3c_bus_init(struct i3c_master_controller *m)
+{
+ struct renesas_i3c *i3c = to_renesas_i3c(m);
+ struct i3c_bus *bus = i3c_master_get_bus(m);
+ struct i3c_device_info info = {};
+ struct i2c_timings t;
+ unsigned long rate;
+ u32 double_SBR, val;
+ int cks, pp_high_ticks, pp_low_ticks, i3c_total_ticks;
+ int od_high_ticks, od_low_ticks, i2c_total_ticks;
+ int ret;
+
+ rate = clk_get_rate(i3c->tclk);
+ if (!rate)
+ return -EINVAL;
+
+ ret = renesas_i3c_reset(i3c);
+ if (ret)
+ return ret;
+
+ i2c_total_ticks = DIV_ROUND_UP(rate, bus->scl_rate.i2c);
+ i3c_total_ticks = DIV_ROUND_UP(rate, bus->scl_rate.i3c);
+
+ i2c_parse_fw_timings(&m->dev, &t, true);
+
+ for (cks = 0; cks < 7; cks++) {
+ /* SCL low-period calculation in Open-drain mode */
+ od_low_ticks = ((i2c_total_ticks * 6) / 10);
+
+ /* SCL clock calculation in Push-Pull mode */
+ if (bus->mode == I3C_BUS_MODE_PURE)
+ pp_high_ticks = ((i3c_total_ticks * 5) / 10);
+ else
+ pp_high_ticks = DIV_ROUND_UP(I3C_BUS_THIGH_MIXED_MAX_NS,
+ NSEC_PER_SEC / rate);
+ pp_low_ticks = i3c_total_ticks - pp_high_ticks;
+
+ if ((od_low_ticks / 2) <= 0xFF && pp_low_ticks < 0x3F)
+ break;
+
+ i2c_total_ticks /= 2;
+ i3c_total_ticks /= 2;
+ rate /= 2;
+ }
+
+ /* Make sure the resulting dividers fit into the register fields */
+ if ((od_low_ticks / 2) > 0xFF || pp_low_ticks > 0x3F) {
+ dev_err(&m->dev, "invalid speed (i2c-scl = %lu Hz, i3c-scl = %lu Hz). Too slow.\n",
+ (unsigned long)bus->scl_rate.i2c, (unsigned long)bus->scl_rate.i3c);
+ return -EINVAL;
+ }
+
+ /* SCL high-period calculation in Open-drain mode */
+ od_high_ticks = i2c_total_ticks - od_low_ticks;
+
+ /* Standard Bit Rate setting */
+ double_SBR = od_low_ticks > 0xFF ? 1 : 0;
+ i3c->i3c_STDBR = (double_SBR ? STDBR_DSBRPO : 0) |
+ STDBR_SBRLO(double_SBR, od_low_ticks) |
+ STDBR_SBRHO(double_SBR, od_high_ticks) |
+ STDBR_SBRLP(pp_low_ticks) |
+ STDBR_SBRHP(pp_high_ticks);
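+ /*
+ * Worked example (illustrative values): with tclk = 100 MHz, i2c-scl =
+ * 400 kHz and i3c-scl = 12.5 MHz on a pure bus, the loop above breaks at
+ * cks = 0 with od_low_ticks = 150, od_high_ticks = 100 and
+ * pp_high_ticks = pp_low_ticks = 4, so DSBRPO stays clear and
+ * SBRLO/SBRHO/SBRLP/SBRHP become 150/100/4/4.
+ */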
+
+ od_low_ticks -= t.scl_fall_ns / (NSEC_PER_SEC / rate) + 1;
+ od_high_ticks -= t.scl_rise_ns / (NSEC_PER_SEC / rate) + 1;
+ i3c->i2c_STDBR = (double_SBR ? STDBR_DSBRPO : 0) |
+ STDBR_SBRLO(double_SBR, od_low_ticks) |
+ STDBR_SBRHO(double_SBR, od_high_ticks) |
+ STDBR_SBRLP(pp_low_ticks) |
+ STDBR_SBRHP(pp_high_ticks);
+ renesas_writel(i3c->regs, STDBR, i3c->i3c_STDBR);
+
+ /* Extended Bit Rate setting */
+ renesas_writel(i3c->regs, EXTBR, EXTBR_EBRLO(od_low_ticks) |
+ EXTBR_EBRHO(od_high_ticks) |
+ EXTBR_EBRLP(pp_low_ticks) |
+ EXTBR_EBRHP(pp_high_ticks));
+
+ renesas_writel(i3c->regs, REFCKCTL, REFCKCTL_IREFCKS(cks));
+
+ /* Disable Slave Mode */
+ renesas_writel(i3c->regs, SVCTL, 0);
+
+ /* Initialize Queue/Buffer threshold */
+ renesas_writel(i3c->regs, NQTHCTL, NQTHCTL_IBIDSSZ(6) |
+ NQTHCTL_CMDQTH(1));
+
+ /* The only supported configuration is two entries */
+ renesas_writel(i3c->regs, NTBTHCTL0, 0);
+ /* Interrupt when there is one entry in the queue */
+ renesas_writel(i3c->regs, NRQTHCTL, 0);
+
+ /* Enable all Bus/Transfer Status Flags */
+ renesas_writel(i3c->regs, BSTE, BSTE_ALL_FLAG);
+ renesas_writel(i3c->regs, NTSTE, NTSTE_ALL_FLAG);
+
+ /* Interrupt enable settings */
+ renesas_writel(i3c->regs, BIE, BIE_NACKDIE | BIE_TENDIE);
+ renesas_writel(i3c->regs, NTIE, 0);
+
+ /* Clear Status register */
+ renesas_writel(i3c->regs, NTST, 0);
+ renesas_writel(i3c->regs, INST, 0);
+ renesas_writel(i3c->regs, BST, 0);
+
+ /* Hot-Join Acknowledge setting. */
+ renesas_set_bit(i3c->regs, BCTL, BCTL_HJACKCTL);
+
+ renesas_writel(i3c->regs, IBINCTL, IBINCTL_NRHJCTL | IBINCTL_NRMRCTL |
+ IBINCTL_NRSIRCTL);
+
+ renesas_writel(i3c->regs, SCSTLCTL, 0);
+ renesas_set_bit(i3c->regs, SCSTRCTL, SCSTRCTL_ACKTWE);
+
+ /* Bus condition timing */
+ val = DIV_ROUND_UP(I3C_BUS_TBUF_MIXED_FM_MIN_NS, NSEC_PER_SEC / rate);
+ renesas_writel(i3c->regs, BFRECDT, BFRECDT_FRECYC(val));
+
+ val = DIV_ROUND_UP(I3C_BUS_TAVAL_MIN_NS, NSEC_PER_SEC / rate);
+ renesas_writel(i3c->regs, BAVLCDT, BAVLCDT_AVLCYC(val));
+
+ val = DIV_ROUND_UP(I3C_BUS_TIDLE_MIN_NS, NSEC_PER_SEC / rate);
+ renesas_writel(i3c->regs, BIDLCDT, BIDLCDT_IDLCYC(val));
+
+ ret = i3c_master_get_free_addr(m, 0);
+ if (ret < 0)
+ return ret;
+
+ renesas_writel(i3c->regs, MSDVAD, MSDVAD_MDYAD(ret) | MSDVAD_MDYADV);
+
+ memset(&info, 0, sizeof(info));
+ info.dyn_addr = ret;
+ return i3c_master_set_info(&i3c->base, &info);
+}
+
+static void renesas_i3c_bus_cleanup(struct i3c_master_controller *m)
+{
+ struct renesas_i3c *i3c = to_renesas_i3c(m);
+
+ renesas_i3c_reset(i3c);
+}
+
+static int renesas_i3c_daa(struct i3c_master_controller *m)
+{
+ struct renesas_i3c *i3c = to_renesas_i3c(m);
+ struct renesas_i3c_cmd *cmd;
+ u32 olddevs, newdevs;
+ u8 last_addr = 0, pos;
+ int ret;
+
+ struct renesas_i3c_xfer *xfer __free(kfree) = renesas_i3c_alloc_xfer(i3c, 1);
+ if (!xfer)
+ return -ENOMEM;
+
+ /* Enable I3C bus. */
+ renesas_i3c_bus_enable(m, true);
+
+ olddevs = ~(i3c->free_pos);
+ i3c->internal_state = I3C_INTERNAL_STATE_CONTROLLER_ENTDAA;
+
+ /* Setting DATBASn registers for target devices. */
+ for (pos = 0; pos < i3c->maxdevs; pos++) {
+ if (olddevs & BIT(pos))
+ continue;
+
+ ret = i3c_master_get_free_addr(m, last_addr + 1);
+ if (ret < 0)
+ return -ENOSPC;
+
+ i3c->addrs[pos] = ret;
+ last_addr = ret;
+
+ renesas_writel(i3c->regs, DATBAS(pos), datbas_dvdyad_with_parity(ret));
+ }
+
+ init_completion(&xfer->comp);
+ cmd = xfer->cmds;
+ cmd->rx_count = 0;
+
+ ret = renesas_i3c_get_free_pos(i3c);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Set up the command descriptor to start the ENTDAA command,
+ * starting at the selected device index.
+ */
+ cmd->cmd0 = NCMDQP_CMD_ATTR(NCMDQP_ADDR_ASSGN) | NCMDQP_ROC |
+ NCMDQP_TID(I3C_COMMAND_ADDRESS_ASSIGNMENT) |
+ NCMDQP_CMD(I3C_CCC_ENTDAA) | NCMDQP_DEV_INDEX(ret) |
+ NCMDQP_DEV_COUNT(i3c->maxdevs - ret) | NCMDQP_TOC;
+
+ renesas_i3c_wait_xfer(i3c, xfer);
+
+ newdevs = GENMASK(i3c->maxdevs - cmd->rx_count - 1, 0);
+ newdevs &= ~olddevs;
+
+ for (pos = 0; pos < i3c->maxdevs; pos++) {
+ if (newdevs & BIT(pos))
+ i3c_master_add_i3c_dev_locked(m, i3c->addrs[pos]);
+ }
+
+ return 0;
+}
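+
+/*
+ * Example of the bookkeeping above (assuming the ENTDAA response data length
+ * reports the number of unassigned DAT entries, as the response ISR implies):
+ * with all eight slots free (free_pos = 0xff) and two targets answering
+ * ENTDAA, cmd->rx_count ends up as 6, so newdevs = GENMASK(1, 0) and the
+ * devices registered at positions 0 and 1 are added to the bus.
+ */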
+
+static bool renesas_i3c_supports_ccc_cmd(struct i3c_master_controller *m,
+ const struct i3c_ccc_cmd *cmd)
+{
+ if (cmd->ndests > 1)
+ return false;
+
+ switch (cmd->id) {
+ case I3C_CCC_ENEC(true):
+ case I3C_CCC_ENEC(false):
+ case I3C_CCC_DISEC(true):
+ case I3C_CCC_DISEC(false):
+ case I3C_CCC_ENTAS(0, true):
+ case I3C_CCC_ENTAS(1, true):
+ case I3C_CCC_ENTAS(2, true):
+ case I3C_CCC_ENTAS(3, true):
+ case I3C_CCC_ENTAS(0, false):
+ case I3C_CCC_ENTAS(1, false):
+ case I3C_CCC_ENTAS(2, false):
+ case I3C_CCC_ENTAS(3, false):
+ case I3C_CCC_RSTDAA(true):
+ case I3C_CCC_RSTDAA(false):
+ case I3C_CCC_ENTDAA:
+ case I3C_CCC_DEFSLVS:
+ case I3C_CCC_SETMWL(true):
+ case I3C_CCC_SETMWL(false):
+ case I3C_CCC_SETMRL(true):
+ case I3C_CCC_SETMRL(false):
+ case I3C_CCC_ENTTM:
+ case I3C_CCC_SETDASA:
+ case I3C_CCC_SETNEWDA:
+ case I3C_CCC_GETMWL:
+ case I3C_CCC_GETMRL:
+ case I3C_CCC_GETPID:
+ case I3C_CCC_GETBCR:
+ case I3C_CCC_GETDCR:
+ case I3C_CCC_GETSTATUS:
+ case I3C_CCC_GETACCMST:
+ case I3C_CCC_GETMXDS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int renesas_i3c_send_ccc_cmd(struct i3c_master_controller *m,
+ struct i3c_ccc_cmd *ccc)
+{
+ struct renesas_i3c *i3c = to_renesas_i3c(m);
+ struct renesas_i3c_xfer *xfer;
+ struct renesas_i3c_cmd *cmd;
+ int ret, pos = 0;
+
+ if (ccc->id & I3C_CCC_DIRECT) {
+ pos = renesas_i3c_get_addr_pos(i3c, ccc->dests[0].addr);
+ if (pos < 0)
+ return pos;
+ }
+
+ xfer = renesas_i3c_alloc_xfer(i3c, 1);
+ if (!xfer)
+ return -ENOMEM;
+
+ renesas_i3c_bus_enable(m, true);
+
+ init_completion(&xfer->comp);
+ cmd = xfer->cmds;
+ cmd->rnw = ccc->rnw;
+ cmd->cmd0 = 0;
+
+ /* Calculate the command descriptor. */
+ switch (ccc->id) {
+ case I3C_CCC_SETDASA:
+ renesas_writel(i3c->regs, DATBAS(pos),
+ DATBAS_DVSTAD(ccc->dests[0].addr) |
+ DATBAS_DVDYAD(*(u8 *)ccc->dests[0].payload.data >> 1));
+ cmd->cmd0 = NCMDQP_CMD_ATTR(NCMDQP_ADDR_ASSGN) | NCMDQP_ROC |
+ NCMDQP_TID(I3C_COMMAND_ADDRESS_ASSIGNMENT) |
+ NCMDQP_CMD(I3C_CCC_SETDASA) | NCMDQP_DEV_INDEX(pos) |
+ NCMDQP_DEV_COUNT(0) | NCMDQP_TOC;
+ i3c->internal_state = I3C_INTERNAL_STATE_CONTROLLER_SETDASA;
+ break;
+ default:
+ /* Calculate the command descriptor. */
+ cmd->cmd0 = NCMDQP_TID(I3C_COMMAND_WRITE) | NCMDQP_MODE(0) |
+ NCMDQP_RNW(ccc->rnw) | NCMDQP_CMD(ccc->id) |
+ NCMDQP_ROC | NCMDQP_TOC | NCMDQP_CP |
+ NCMDQP_DEV_INDEX(pos);
+
+ if (ccc->rnw) {
+ cmd->rx_buf = ccc->dests[0].payload.data;
+ cmd->len = ccc->dests[0].payload.len;
+ cmd->rx_count = 0;
+ i3c->internal_state = I3C_INTERNAL_STATE_CONTROLLER_COMMAND_READ;
+ } else {
+ cmd->tx_buf = ccc->dests[0].payload.data;
+ cmd->len = ccc->dests[0].payload.len;
+ cmd->tx_count = 0;
+ i3c->internal_state = I3C_INTERNAL_STATE_CONTROLLER_COMMAND_WRITE;
+ }
+ }
+
+ renesas_i3c_wait_xfer(i3c, xfer);
+
+ ret = xfer->ret;
+ if (ret)
+ ccc->err = I3C_ERROR_M2;
+
+ kfree(xfer);
+
+ return ret;
+}
+
+static int renesas_i3c_priv_xfers(struct i3c_dev_desc *dev, struct i3c_priv_xfer *i3c_xfers,
+ int i3c_nxfers)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct renesas_i3c *i3c = to_renesas_i3c(m);
+ struct renesas_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ struct renesas_i3c_xfer *xfer;
+ int i;
+
+ /* Enable I3C bus. */
+ renesas_i3c_bus_enable(m, true);
+
+ xfer = renesas_i3c_alloc_xfer(i3c, 1);
+ if (!xfer)
+ return -ENOMEM;
+
+ init_completion(&xfer->comp);
+
+ for (i = 0; i < i3c_nxfers; i++) {
+ struct renesas_i3c_cmd *cmd = xfer->cmds;
+
+ /* Calculate the Transfer Command Descriptor */
+ cmd->rnw = i3c_xfers[i].rnw;
+ cmd->cmd0 = NCMDQP_DEV_INDEX(data->index) | NCMDQP_MODE(0) |
+ NCMDQP_RNW(cmd->rnw) | NCMDQP_ROC | NCMDQP_TOC;
+
+ if (i3c_xfers[i].rnw) {
+ cmd->rx_count = 0;
+ cmd->cmd0 |= NCMDQP_TID(I3C_READ);
+ cmd->rx_buf = i3c_xfers[i].data.in;
+ cmd->len = i3c_xfers[i].len;
+ i3c->internal_state = I3C_INTERNAL_STATE_CONTROLLER_READ;
+ } else {
+ cmd->tx_count = 0;
+ cmd->cmd0 |= NCMDQP_TID(I3C_WRITE);
+ cmd->tx_buf = i3c_xfers[i].data.out;
+ cmd->len = i3c_xfers[i].len;
+ i3c->internal_state = I3C_INTERNAL_STATE_CONTROLLER_WRITE;
+ }
+
+ if (!i3c_xfers[i].rnw && i3c_xfers[i].len > 4) {
+ i3c_writel_fifo(i3c->regs + NTDTBP0, cmd->tx_buf, cmd->len);
+ if (cmd->len > NTDTBP0_DEPTH * sizeof(u32))
+ renesas_set_bit(i3c->regs, NTIE, NTIE_TDBEIE0);
+ }
+
+ renesas_i3c_wait_xfer(i3c, xfer);
+ }
+
+ return 0;
+}
+
+static int renesas_i3c_attach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct renesas_i3c *i3c = to_renesas_i3c(m);
+ struct renesas_i3c_i2c_dev_data *data;
+ int pos;
+
+ pos = renesas_i3c_get_free_pos(i3c);
+ if (pos < 0)
+ return pos;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->index = pos;
+ i3c->addrs[pos] = dev->info.dyn_addr ? : dev->info.static_addr;
+ i3c->free_pos &= ~BIT(pos);
+
+ renesas_writel(i3c->regs, DATBAS(pos), DATBAS_DVSTAD(dev->info.static_addr) |
+ datbas_dvdyad_with_parity(i3c->addrs[pos]));
+ i3c_dev_set_master_data(dev, data);
+
+ return 0;
+}
+
+static int renesas_i3c_reattach_i3c_dev(struct i3c_dev_desc *dev,
+ u8 old_dyn_addr)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct renesas_i3c *i3c = to_renesas_i3c(m);
+ struct renesas_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+
+ i3c->addrs[data->index] = dev->info.dyn_addr ? dev->info.dyn_addr :
+ dev->info.static_addr;
+
+ return 0;
+}
+
+static void renesas_i3c_detach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct renesas_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct renesas_i3c *i3c = to_renesas_i3c(m);
+
+ i3c_dev_set_master_data(dev, NULL);
+ i3c->addrs[data->index] = 0;
+ i3c->free_pos |= BIT(data->index);
+ kfree(data);
+}
+
+static int renesas_i3c_i2c_xfers(struct i2c_dev_desc *dev,
+ struct i2c_msg *i2c_xfers,
+ int i2c_nxfers)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct renesas_i3c *i3c = to_renesas_i3c(m);
+ struct renesas_i3c_cmd *cmd;
+ u8 start_bit = CNDCTL_STCND;
+ int i;
+
+ struct renesas_i3c_xfer *xfer __free(kfree) = renesas_i3c_alloc_xfer(i3c, 1);
+ if (!xfer)
+ return -ENOMEM;
+
+ if (!i2c_nxfers)
+ return 0;
+
+ renesas_i3c_bus_enable(m, false);
+
+ init_completion(&xfer->comp);
+ xfer->is_i2c_xfer = true;
+ cmd = xfer->cmds;
+
+ if (!(renesas_readl(i3c->regs, BCST) & BCST_BFREF)) {
+ cmd->err = -EBUSY;
+ return cmd->err;
+ }
+
+ renesas_writel(i3c->regs, BST, 0);
+
+ renesas_i3c_enqueue_xfer(i3c, xfer);
+
+ for (i = 0; i < i2c_nxfers; i++) {
+ cmd->i2c_bytes_left = I2C_INIT_MSG;
+ cmd->i2c_buf = i2c_xfers[i].buf;
+ cmd->msg = &i2c_xfers[i];
+ cmd->i2c_is_last = (i == i2c_nxfers - 1);
+
+ renesas_set_bit(i3c->regs, BIE, BIE_NACKDIE);
+ renesas_set_bit(i3c->regs, NTIE, NTIE_TDBEIE0);
+ renesas_set_bit(i3c->regs, BIE, BIE_STCNDDIE);
+
+ /* Issue Start condition */
+ renesas_set_bit(i3c->regs, CNDCTL, start_bit);
+
+ renesas_set_bit(i3c->regs, NTSTE, NTSTE_TDBEE0);
+
+ wait_for_completion_timeout(&xfer->comp, m->i2c.timeout);
+
+ if (cmd->err)
+ break;
+
+ start_bit = CNDCTL_SRCND;
+ }
+
+ renesas_i3c_dequeue_xfer(i3c, xfer);
+ return cmd->err;
+}
+
+static int renesas_i3c_attach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct renesas_i3c *i3c = to_renesas_i3c(m);
+ struct renesas_i3c_i2c_dev_data *data;
+ int pos;
+
+ pos = renesas_i3c_get_free_pos(i3c);
+ if (pos < 0)
+ return pos;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->index = pos;
+ i3c->addrs[pos] = dev->addr;
+ i3c->free_pos &= ~BIT(pos);
+ i2c_dev_set_master_data(dev, data);
+
+ return 0;
+}
+
+static void renesas_i3c_detach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct renesas_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct renesas_i3c *i3c = to_renesas_i3c(m);
+
+ i2c_dev_set_master_data(dev, NULL);
+ i3c->addrs[data->index] = 0;
+ i3c->free_pos |= BIT(data->index);
+ kfree(data);
+}
+
+static irqreturn_t renesas_i3c_tx_isr(int irq, void *data)
+{
+ struct renesas_i3c *i3c = data;
+ struct renesas_i3c_xfer *xfer;
+ struct renesas_i3c_cmd *cmd;
+ u8 val;
+
+ scoped_guard(spinlock, &i3c->xferqueue.lock) {
+ xfer = i3c->xferqueue.cur;
+ cmd = xfer->cmds;
+
+ if (xfer->is_i2c_xfer) {
+ if (!cmd->i2c_bytes_left)
+ return IRQ_NONE;
+
+ if (cmd->i2c_bytes_left != I2C_INIT_MSG) {
+ val = *cmd->i2c_buf;
+ cmd->i2c_buf++;
+ cmd->i2c_bytes_left--;
+ renesas_writel(i3c->regs, NTDTBP0, val);
+ }
+
+ if (cmd->i2c_bytes_left == 0) {
+ renesas_clear_bit(i3c->regs, NTIE, NTIE_TDBEIE0);
+ renesas_set_bit(i3c->regs, BIE, BIE_TENDIE);
+ }
+
+ /* Clear the Transmit Buffer Empty status flag. */
+ renesas_clear_bit(i3c->regs, NTST, NTST_TDBEF0);
+ } else {
+ i3c_writel_fifo(i3c->regs + NTDTBP0, cmd->tx_buf, cmd->len);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t renesas_i3c_resp_isr(int irq, void *data)
+{
+ struct renesas_i3c *i3c = data;
+ struct renesas_i3c_xfer *xfer;
+ struct renesas_i3c_cmd *cmd;
+ u32 resp_descriptor = renesas_readl(i3c->regs, NRSPQP);
+ u32 bytes_remaining = 0;
+ u32 ntst, data_len;
+ int ret = 0;
+
+ scoped_guard(spinlock, &i3c->xferqueue.lock) {
+ xfer = i3c->xferqueue.cur;
+ cmd = xfer->cmds;
+
+ /* Clear the Response Queue Full status flag. */
+ renesas_clear_bit(i3c->regs, NTST, NTST_RSPQFF);
+
+ data_len = NRSPQP_DATA_LEN(resp_descriptor);
+
+ switch (i3c->internal_state) {
+ case I3C_INTERNAL_STATE_CONTROLLER_ENTDAA:
+ cmd->rx_count = data_len;
+ break;
+ case I3C_INTERNAL_STATE_CONTROLLER_WRITE:
+ case I3C_INTERNAL_STATE_CONTROLLER_COMMAND_WRITE:
+ /* Disable the transmit IRQ if it hasn't been disabled already. */
+ renesas_clear_bit(i3c->regs, NTIE, NTIE_TDBEIE0);
+ break;
+ case I3C_INTERNAL_STATE_CONTROLLER_READ:
+ case I3C_INTERNAL_STATE_CONTROLLER_COMMAND_READ:
+ if (NDBSTLV0_RDBLV(renesas_readl(i3c->regs, NDBSTLV0)) && !cmd->err)
+ bytes_remaining = data_len - cmd->rx_count;
+
+ i3c_readl_fifo(i3c->regs + NTDTBP0, cmd->rx_buf, bytes_remaining);
+ renesas_clear_bit(i3c->regs, NTIE, NTIE_RDBFIE0);
+ break;
+ default:
+ break;
+ }
+
+ switch (NRSPQP_ERR_STATUS(resp_descriptor)) {
+ case NRSPQP_NO_ERROR:
+ break;
+ case NRSPQP_ERROR_PARITY:
+ case NRSPQP_ERROR_IBA_NACK:
+ case NRSPQP_ERROR_TRANSF_ABORT:
+ case NRSPQP_ERROR_CRC:
+ case NRSPQP_ERROR_FRAME:
+ ret = -EIO;
+ break;
+ case NRSPQP_ERROR_OVER_UNDER_FLOW:
+ ret = -ENOSPC;
+ break;
+ case NRSPQP_ERROR_UNSUPPORTED:
+ ret = -EOPNOTSUPP;
+ break;
+ case NRSPQP_ERROR_I2C_W_NACK_ERR:
+ case NRSPQP_ERROR_ADDRESS_NACK:
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ /*
+ * If the transfer was aborted, then the abort flag must be cleared
+ * before notifying the application that a transfer has completed.
+ */
+ ntst = renesas_readl(i3c->regs, NTST);
+ if (ntst & NTST_TABTF)
+ renesas_clear_bit(i3c->regs, BCTL, BCTL_ABT);
+
+ /* Clear error status flags. */
+ renesas_clear_bit(i3c->regs, NTST, NTST_TEF | NTST_TABTF);
+
+ xfer->ret = ret;
+ complete(&xfer->comp);
+
+ xfer = list_first_entry_or_null(&i3c->xferqueue.list,
+ struct renesas_i3c_xfer, node);
+ if (xfer)
+ list_del_init(&xfer->node);
+
+ i3c->xferqueue.cur = xfer;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t renesas_i3c_tend_isr(int irq, void *data)
+{
+ struct renesas_i3c *i3c = data;
+ struct renesas_i3c_xfer *xfer;
+ struct renesas_i3c_cmd *cmd;
+
+ scoped_guard(spinlock, &i3c->xferqueue.lock) {
+ xfer = i3c->xferqueue.cur;
+ cmd = xfer->cmds;
+
+ if (xfer->is_i2c_xfer) {
+ if (renesas_readl(i3c->regs, BST) & BST_NACKDF) {
+ /* We got a NACK (NACKDF is set) */
+ renesas_readl(i3c->regs, NTDTBP0); /* dummy read */
+ renesas_clear_bit(i3c->regs, BST, BST_NACKDF);
+ cmd->err = -ENXIO;
+ } else if (cmd->i2c_bytes_left) {
+ renesas_set_bit(i3c->regs, NTIE, NTIE_TDBEIE0);
+ return IRQ_NONE;
+ }
+
+ if (cmd->i2c_is_last || cmd->err) {
+ renesas_clear_bit(i3c->regs, BIE, BIE_TENDIE);
+ renesas_set_bit(i3c->regs, BIE, BIE_SPCNDDIE);
+ renesas_set_bit(i3c->regs, CNDCTL, CNDCTL_SPCND);
+ } else {
+ /* Transfer is complete, but do not send STOP */
+ renesas_clear_bit(i3c->regs, NTSTE, NTSTE_TDBEE0);
+ renesas_clear_bit(i3c->regs, BIE, BIE_TENDIE);
+ xfer->ret = 0;
+ complete(&xfer->comp);
+ }
+ }
+
+ /* Clear the Transmit Buffer Empty status flag. */
+ renesas_clear_bit(i3c->regs, BST, BST_TENDF);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t renesas_i3c_rx_isr(int irq, void *data)
+{
+ struct renesas_i3c *i3c = data;
+ struct renesas_i3c_xfer *xfer;
+ struct renesas_i3c_cmd *cmd;
+ int read_bytes;
+
+ /* If resp_isr already read the data and updated 'xfer', we can just leave */
+ if (!(renesas_readl(i3c->regs, NTIE) & NTIE_RDBFIE0))
+ return IRQ_NONE;
+
+ scoped_guard(spinlock, &i3c->xferqueue.lock) {
+ xfer = i3c->xferqueue.cur;
+ cmd = xfer->cmds;
+
+ if (xfer->is_i2c_xfer) {
+ if (!cmd->i2c_bytes_left)
+ return IRQ_NONE;
+
+ if (cmd->i2c_bytes_left == I2C_INIT_MSG) {
+ cmd->i2c_bytes_left = cmd->msg->len;
+ renesas_set_bit(i3c->regs, SCSTRCTL, SCSTRCTL_RWE);
+ renesas_readl(i3c->regs, NTDTBP0); /* dummy read */
+ if (cmd->i2c_bytes_left == 1)
+ renesas_writel(i3c->regs, ACKCTL, ACKCTL_ACKT | ACKCTL_ACKTWP);
+ return IRQ_HANDLED;
+ }
+
+ if (cmd->i2c_bytes_left == 1) {
+ /* STOP must come before we set ACKCTL! */
+ if (cmd->i2c_is_last) {
+ renesas_set_bit(i3c->regs, BIE, BIE_SPCNDDIE);
+ renesas_clear_bit(i3c->regs, BST, BST_SPCNDDF);
+ renesas_set_bit(i3c->regs, CNDCTL, CNDCTL_SPCND);
+ }
+ renesas_writel(i3c->regs, ACKCTL, ACKCTL_ACKT | ACKCTL_ACKTWP);
+ } else {
+ renesas_writel(i3c->regs, ACKCTL, ACKCTL_ACKTWP);
+ }
+
+ /* Reading acks the receive-data interrupt */
+ *cmd->i2c_buf = renesas_readl(i3c->regs, NTDTBP0);
+ cmd->i2c_buf++;
+ cmd->i2c_bytes_left--;
+ } else {
+ read_bytes = NDBSTLV0_RDBLV(renesas_readl(i3c->regs, NDBSTLV0)) * sizeof(u32);
+ i3c_readl_fifo(i3c->regs + NTDTBP0, cmd->rx_buf, read_bytes);
+ cmd->rx_count = read_bytes;
+ }
+
+ /* Clear the Read Buffer Full status flag. */
+ renesas_clear_bit(i3c->regs, NTST, NTST_RDBFF0);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t renesas_i3c_stop_isr(int irq, void *data)
+{
+ struct renesas_i3c *i3c = data;
+ struct renesas_i3c_xfer *xfer;
+
+ scoped_guard(spinlock, &i3c->xferqueue.lock) {
+ xfer = i3c->xferqueue.cur;
+
+ /* read back registers to confirm writes have fully propagated */
+ renesas_writel(i3c->regs, BST, 0);
+ renesas_readl(i3c->regs, BST);
+ renesas_writel(i3c->regs, BIE, 0);
+ renesas_clear_bit(i3c->regs, NTST, NTST_TDBEF0 | NTST_RDBFF0);
+ renesas_clear_bit(i3c->regs, SCSTRCTL, SCSTRCTL_RWE);
+
+ xfer->ret = 0;
+ complete(&xfer->comp);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t renesas_i3c_start_isr(int irq, void *data)
+{
+ struct renesas_i3c *i3c = data;
+ struct renesas_i3c_xfer *xfer;
+ struct renesas_i3c_cmd *cmd;
+ u8 val;
+
+ scoped_guard(spinlock, &i3c->xferqueue.lock) {
+ xfer = i3c->xferqueue.cur;
+ cmd = xfer->cmds;
+
+ if (xfer->is_i2c_xfer) {
+ if (!cmd->i2c_bytes_left)
+ return IRQ_NONE;
+
+ if (cmd->i2c_bytes_left == I2C_INIT_MSG) {
+ if (cmd->msg->flags & I2C_M_RD) {
+ /* On read, switch over to receive interrupt */
+ renesas_clear_bit(i3c->regs, NTIE, NTIE_TDBEIE0);
+ renesas_set_bit(i3c->regs, NTIE, NTIE_RDBFIE0);
+ } else {
+ /* On write, initialize length */
+ cmd->i2c_bytes_left = cmd->msg->len;
+ }
+
+ val = i2c_8bit_addr_from_msg(cmd->msg);
+ renesas_writel(i3c->regs, NTDTBP0, val);
+ }
+ }
+
+ renesas_clear_bit(i3c->regs, BIE, BIE_STCNDDIE);
+ renesas_clear_bit(i3c->regs, BST, BST_STCNDDF);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static const struct i3c_master_controller_ops renesas_i3c_ops = {
+ .bus_init = renesas_i3c_bus_init,
+ .bus_cleanup = renesas_i3c_bus_cleanup,
+ .attach_i3c_dev = renesas_i3c_attach_i3c_dev,
+ .reattach_i3c_dev = renesas_i3c_reattach_i3c_dev,
+ .detach_i3c_dev = renesas_i3c_detach_i3c_dev,
+ .do_daa = renesas_i3c_daa,
+ .supports_ccc_cmd = renesas_i3c_supports_ccc_cmd,
+ .send_ccc_cmd = renesas_i3c_send_ccc_cmd,
+ .priv_xfers = renesas_i3c_priv_xfers,
+ .attach_i2c_dev = renesas_i3c_attach_i2c_dev,
+ .detach_i2c_dev = renesas_i3c_detach_i2c_dev,
+ .i2c_xfers = renesas_i3c_i2c_xfers,
+};
+
+static const struct renesas_i3c_irq_desc renesas_i3c_irqs[] = {
+ { .name = "resp", .isr = renesas_i3c_resp_isr, .desc = "i3c-resp" },
+ { .name = "rx", .isr = renesas_i3c_rx_isr, .desc = "i3c-rx" },
+ { .name = "tx", .isr = renesas_i3c_tx_isr, .desc = "i3c-tx" },
+ { .name = "st", .isr = renesas_i3c_start_isr, .desc = "i3c-start" },
+ { .name = "sp", .isr = renesas_i3c_stop_isr, .desc = "i3c-stop" },
+ { .name = "tend", .isr = renesas_i3c_tend_isr, .desc = "i3c-tend" },
+ { .name = "nack", .isr = renesas_i3c_tend_isr, .desc = "i3c-nack" },
+};
+
+static int renesas_i3c_probe(struct platform_device *pdev)
+{
+ struct renesas_i3c *i3c;
+ struct reset_control *reset;
+ struct clk *clk;
+ const struct renesas_i3c_config *config = of_device_get_match_data(&pdev->dev);
+ int ret, i;
+
+ if (!config)
+ return -ENODATA;
+
+ i3c = devm_kzalloc(&pdev->dev, sizeof(*i3c), GFP_KERNEL);
+ if (!i3c)
+ return -ENOMEM;
+
+ i3c->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(i3c->regs))
+ return PTR_ERR(i3c->regs);
+
+ clk = devm_clk_get_enabled(&pdev->dev, "pclk");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ if (config->has_pclkrw) {
+ clk = devm_clk_get_enabled(&pdev->dev, "pclkrw");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ }
+
+ i3c->tclk = devm_clk_get_enabled(&pdev->dev, "tclk");
+ if (IS_ERR(i3c->tclk))
+ return PTR_ERR(i3c->tclk);
+
+ reset = devm_reset_control_get_optional_exclusive_deasserted(&pdev->dev, "tresetn");
+ if (IS_ERR(reset))
+ return dev_err_probe(&pdev->dev, PTR_ERR(reset),
+ "Error: missing tresetn ctrl\n");
+
+ reset = devm_reset_control_get_optional_exclusive_deasserted(&pdev->dev, "presetn");
+ if (IS_ERR(reset))
+ return dev_err_probe(&pdev->dev, PTR_ERR(reset),
+ "Error: missing presetn ctrl\n");
+
+ spin_lock_init(&i3c->xferqueue.lock);
+ INIT_LIST_HEAD(&i3c->xferqueue.list);
+
+ ret = renesas_i3c_reset(i3c);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(renesas_i3c_irqs); i++) {
+ ret = platform_get_irq_byname(pdev, renesas_i3c_irqs[i].name);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_request_irq(&pdev->dev, ret, renesas_i3c_irqs[i].isr,
+ 0, renesas_i3c_irqs[i].desc, i3c);
+ if (ret)
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, i3c);
+
+ i3c->maxdevs = RENESAS_I3C_MAX_DEVS;
+ i3c->free_pos = GENMASK(i3c->maxdevs - 1, 0);
+
+ return i3c_master_register(&i3c->base, &pdev->dev, &renesas_i3c_ops, false);
+}
+
+static void renesas_i3c_remove(struct platform_device *pdev)
+{
+ struct renesas_i3c *i3c = platform_get_drvdata(pdev);
+
+ i3c_master_unregister(&i3c->base);
+}
+
+static const struct renesas_i3c_config empty_i3c_config = {
+};
+
+static const struct renesas_i3c_config r9a09g047_i3c_config = {
+ .has_pclkrw = 1,
+};
+
+static const struct of_device_id renesas_i3c_of_ids[] = {
+ { .compatible = "renesas,r9a08g045-i3c", .data = &empty_i3c_config },
+ { .compatible = "renesas,r9a09g047-i3c", .data = &r9a09g047_i3c_config },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, renesas_i3c_of_ids);
+
+static struct platform_driver renesas_i3c = {
+ .probe = renesas_i3c_probe,
+ .remove = renesas_i3c_remove,
+ .driver = {
+ .name = "renesas-i3c",
+ .of_match_table = renesas_i3c_of_ids,
+ },
+};
+module_platform_driver(renesas_i3c);
+
+MODULE_AUTHOR("Wolfram Sang <wsa+renesas@sang-engineering.com>");
+MODULE_AUTHOR("Renesas BSP teams");
+MODULE_DESCRIPTION("Renesas I3C controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
index d6e9ed74cdcf..a62f22ff8b57 100644
--- a/drivers/i3c/master/svc-i3c-master.c
+++ b/drivers/i3c/master/svc-i3c-master.c
@@ -32,6 +32,7 @@
#define SVC_I3C_MCONFIG_ODBAUD(x) FIELD_PREP(GENMASK(23, 16), (x))
#define SVC_I3C_MCONFIG_ODHPP(x) FIELD_PREP(BIT(24), (x))
#define SVC_I3C_MCONFIG_SKEW(x) FIELD_PREP(GENMASK(27, 25), (x))
+#define SVC_I3C_MCONFIG_SKEW_MASK GENMASK(27, 25)
#define SVC_I3C_MCONFIG_I2CBAUD(x) FIELD_PREP(GENMASK(31, 28), (x))
#define SVC_I3C_MCTRL 0x084
@@ -39,11 +40,13 @@
#define SVC_I3C_MCTRL_REQUEST_NONE 0
#define SVC_I3C_MCTRL_REQUEST_START_ADDR 1
#define SVC_I3C_MCTRL_REQUEST_STOP 2
+#define SVC_I3C_MCTRL_REQUEST_FORCE_EXIT 6
#define SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK 3
#define SVC_I3C_MCTRL_REQUEST_PROC_DAA 4
#define SVC_I3C_MCTRL_REQUEST_AUTO_IBI 7
#define SVC_I3C_MCTRL_TYPE_I3C 0
#define SVC_I3C_MCTRL_TYPE_I2C BIT(4)
+#define SVC_I3C_MCTRL_TYPE_DDR BIT(5)
#define SVC_I3C_MCTRL_IBIRESP_AUTO 0
#define SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE 0
#define SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE BIT(7)
@@ -58,6 +61,7 @@
#define SVC_I3C_MSTATUS 0x088
#define SVC_I3C_MSTATUS_STATE(x) FIELD_GET(GENMASK(2, 0), (x))
#define SVC_I3C_MSTATUS_STATE_DAA(x) (SVC_I3C_MSTATUS_STATE(x) == 5)
+#define SVC_I3C_MSTATUS_STATE_SLVREQ(x) (SVC_I3C_MSTATUS_STATE(x) == 1)
#define SVC_I3C_MSTATUS_STATE_IDLE(x) (SVC_I3C_MSTATUS_STATE(x) == 0)
#define SVC_I3C_MSTATUS_BETWEEN(x) FIELD_GET(BIT(4), (x))
#define SVC_I3C_MSTATUS_NACKED(x) FIELD_GET(BIT(5), (x))
@@ -92,6 +96,9 @@
#define SVC_I3C_MINTCLR 0x094
#define SVC_I3C_MINTMASKED 0x098
#define SVC_I3C_MERRWARN 0x09C
+#define SVC_I3C_MERRWARN_NACK BIT(2)
+#define SVC_I3C_MERRWARN_CRC BIT(10)
+#define SVC_I3C_MERRWARN_TIMEOUT BIT(20)
#define SVC_I3C_MDMACTRL 0x0A0
#define SVC_I3C_MDATACTRL 0x0AC
#define SVC_I3C_MDATACTRL_FLUSHTB BIT(0)
@@ -100,6 +107,7 @@
#define SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL GENMASK(5, 4)
#define SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY 0
#define SVC_I3C_MDATACTRL_RXCOUNT(x) FIELD_GET(GENMASK(28, 24), (x))
+#define SVC_I3C_MDATACTRL_TXCOUNT(x) FIELD_GET(GENMASK(20, 16), (x))
#define SVC_I3C_MDATACTRL_TXFULL BIT(30)
#define SVC_I3C_MDATACTRL_RXEMPTY BIT(31)
@@ -111,6 +119,7 @@
#define SVC_I3C_MWDATAHE 0x0BC
#define SVC_I3C_MRDATAB 0x0C0
#define SVC_I3C_MRDATAH 0x0C8
+#define SVC_I3C_MWDATAB1 0x0CC
#define SVC_I3C_MWMSG_SDR 0x0D0
#define SVC_I3C_MRMSG_SDR 0x0D4
#define SVC_I3C_MWMSG_DDR 0x0D8
@@ -125,14 +134,50 @@
/* This parameter depends on the implementation and may be tuned */
#define SVC_I3C_FIFO_SIZE 16
+#define SVC_I3C_PPBAUD_MAX 15
+#define SVC_I3C_QUICK_I2C_CLK 4170000
+
+#define SVC_I3C_EVENT_IBI GENMASK(7, 0)
+#define SVC_I3C_EVENT_HOTJOIN BIT(31)
+
+/*
+ * SVC_I3C_QUIRK_FIFO_EMPTY:
+ * I3C HW stalls the write transfer if the transmit FIFO becomes empty.
+ * When new data is written to the FIFO, the I3C HW resumes the transfer,
+ * but the first transmitted data bit may have the wrong value.
+ * Workaround:
+ * Fill the FIFO in advance to prevent it from becoming empty.
+ */
+#define SVC_I3C_QUIRK_FIFO_EMPTY BIT(0)
+/*
+ * SVC_I3C_QUIRK_FALSE_SLVSTART:
+ * I3C HW may generate an invalid SlvStart event when emitting a STOP.
+ * If it is a true SlvStart, the MSTATUS state is SLVREQ.
+ */
+#define SVC_I3C_QUIRK_FALSE_SLVSTART BIT(1)
+/*
+ * SVC_I3C_QUIRK_DAA_CORRUPT:
+ * When MCONFIG.SKEW=0 and MCONFIG.ODHPP=0, the ENTDAA transaction gets
+ * corrupted and ends without a repeated-start condition at the end of
+ * address assignment.
+ * Workaround:
+ * Set MCONFIG.SKEW to 1 before initiating the DAA process. After the DAA
+ * process is completed, return MCONFIG.SKEW to its previous value.
+ */
+#define SVC_I3C_QUIRK_DAA_CORRUPT BIT(2)
struct svc_i3c_cmd {
u8 addr;
- bool rnw;
+ union {
+ bool rnw;
+ u8 cmd;
+ u32 rnw_cmd;
+ };
u8 *in;
const void *out;
unsigned int len;
- unsigned int read_len;
+ unsigned int actual_len;
+ struct i3c_xfer *xfer;
bool continued;
};
@@ -142,7 +187,16 @@ struct svc_i3c_xfer {
int ret;
unsigned int type;
unsigned int ncmds;
- struct svc_i3c_cmd cmds[];
+ struct svc_i3c_cmd cmds[] __counted_by(ncmds);
+};
+
+struct svc_i3c_regs_save {
+ u32 mconfig;
+ u32 mdynaddr;
+};
+
+struct svc_i3c_drvdata {
+ u32 quirks;
};
/**
@@ -150,15 +204,15 @@ struct svc_i3c_xfer {
* @base: I3C master controller
* @dev: Corresponding device
* @regs: Memory mapping
+ * @saved_regs: Volatile values for PM operations
* @free_slots: Bit array of available slots
* @addrs: Array containing the dynamic addresses of each attached device
* @descs: Array of descriptors, one per attached device
* @hj_work: Hot-join work
- * @ibi_work: IBI work
* @irq: Main interrupt
- * @pclk: System clock
+ * @num_clks: I3C clock number
* @fclk: Fast clock (bus)
- * @sclk: Slow clock (other events)
+ * @clks: I3C clock array
* @xferqueue: Transfer queue structure
* @xferqueue.list: List member
* @xferqueue.cur: Current ongoing transfer
@@ -168,20 +222,24 @@ struct svc_i3c_xfer {
* @ibi.slots: Available IBI slots
* @ibi.tbq_slot: To be queued IBI slot
* @ibi.lock: IBI lock
+ * @lock: Transfer lock, protects against races between the IBI handler and callbacks from the master
+ * @drvdata: Driver data
+ * @enabled_events: Bit mask of enabled events (IBI, HotJoin).
+ * @mctrl_config: Saved SVC_I3C_MCONFIG value used to restore the speed.
*/
struct svc_i3c_master {
struct i3c_master_controller base;
struct device *dev;
void __iomem *regs;
+ struct svc_i3c_regs_save saved_regs;
u32 free_slots;
u8 addrs[SVC_I3C_MAX_DEVS];
struct i3c_dev_desc *descs[SVC_I3C_MAX_DEVS];
struct work_struct hj_work;
- struct work_struct ibi_work;
int irq;
- struct clk *pclk;
+ int num_clks;
struct clk *fclk;
- struct clk *sclk;
+ struct clk_bulk_data *clks;
struct {
struct list_head list;
struct svc_i3c_xfer *cur;
@@ -195,6 +253,10 @@ struct svc_i3c_master {
/* Prevent races within IBI handlers */
spinlock_t lock;
} ibi;
+ struct mutex lock;
+ const struct svc_i3c_drvdata *drvdata;
+ u32 enabled_events;
+ u32 mctrl_config;
};
/**
@@ -209,6 +271,23 @@ struct svc_i3c_i2c_dev_data {
struct i3c_generic_ibi_pool *ibi_pool;
};
+static inline bool svc_has_quirk(struct svc_i3c_master *master, u32 quirk)
+{
+ return (master->drvdata->quirks & quirk);
+}
+
+static inline bool svc_has_daa_corrupt(struct svc_i3c_master *master)
+{
+ return ((master->drvdata->quirks & SVC_I3C_QUIRK_DAA_CORRUPT) &&
+ !(master->mctrl_config &
+ (SVC_I3C_MCONFIG_SKEW_MASK | SVC_I3C_MCONFIG_ODHPP(1))));
+}
+
+static inline bool is_events_enabled(struct svc_i3c_master *master, u32 mask)
+{
+ return !!(master->enabled_events & mask);
+}
+
static bool svc_i3c_master_error(struct svc_i3c_master *master)
{
u32 mstatus, merrwarn;
@@ -217,6 +296,14 @@ static bool svc_i3c_master_error(struct svc_i3c_master *master)
if (SVC_I3C_MSTATUS_ERRWARN(mstatus)) {
merrwarn = readl(master->regs + SVC_I3C_MERRWARN);
writel(merrwarn, master->regs + SVC_I3C_MERRWARN);
+
+ /* Ignore timeout error */
+ if (merrwarn & SVC_I3C_MERRWARN_TIMEOUT) {
+ dev_dbg(master->dev, "Warning condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
+ mstatus, merrwarn);
+ return false;
+ }
+
dev_err(master->dev,
"Error condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
mstatus, merrwarn);
@@ -303,6 +390,36 @@ svc_i3c_master_dev_from_addr(struct svc_i3c_master *master,
return master->descs[i];
}
+static bool svc_cmd_is_read(u32 rnw_cmd, u32 type)
+{
+ return (type == SVC_I3C_MCTRL_TYPE_DDR) ? (rnw_cmd & 0x80) : rnw_cmd;
+}
+
+static void svc_i3c_master_emit_force_exit(struct svc_i3c_master *master)
+{
+ u32 reg;
+
+ writel(SVC_I3C_MCTRL_REQUEST_FORCE_EXIT, master->regs + SVC_I3C_MCTRL);
+
+ /*
+ * No need to check for an error here because the hardware never reports
+ * one: the IP just waits a few fclk cycles to complete the DDR exit
+ * pattern. Even if fclk has stopped and a timeout occurs here, the data
+ * transfer has actually already finished; only the next command will time
+ * out because of the wrong hardware state.
+ */
+ readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS, reg,
+ SVC_I3C_MSTATUS_MCTRLDONE(reg), 0, 1000);
+
+ /*
+ * This delay is necessary after the emission of a stop, otherwise eg.
+ * repeating IBIs do not get detected. There is a note in the manual
+ * about it, stating that the stop condition might not be settled
+ * correctly if a start condition follows too rapidly.
+ */
+ udelay(1);
+}
+
static void svc_i3c_master_emit_stop(struct svc_i3c_master *master)
{
writel(SVC_I3C_MCTRL_REQUEST_STOP, master->regs + SVC_I3C_MCTRL);
@@ -323,11 +440,26 @@ static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
struct i3c_ibi_slot *slot;
unsigned int count;
u32 mdatactrl;
+ int ret, val;
u8 *buf;
+ /*
+ * Wait for transfer to complete before returning. Otherwise, the EmitStop
+ * request might be sent when the transfer is not complete.
+ */
+ ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
+ SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000);
+ if (ret) {
+ dev_err(master->dev, "Timeout when polling for COMPLETE\n");
+ return ret;
+ }
+
slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
- if (!slot)
+ if (!slot) {
+ dev_dbg(master->dev, "No free ibi slot, drop the data\n");
+ writel(SVC_I3C_MDATACTRL_FLUSHRB, master->regs + SVC_I3C_MDATACTRL);
return -ENOSPC;
+ }
slot->len = 0;
buf = slot->data;
@@ -336,7 +468,7 @@ static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
slot->len < SVC_I3C_FIFO_SIZE) {
mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL);
count = SVC_I3C_MDATACTRL_RXCOUNT(mdatactrl);
- readsl(master->regs + SVC_I3C_MRDATAB, buf, count);
+ readsb(master->regs + SVC_I3C_MRDATAB, buf, count);
slot->len += count;
buf += count;
}
@@ -346,10 +478,11 @@ static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
return 0;
}
-static void svc_i3c_master_ack_ibi(struct svc_i3c_master *master,
+static int svc_i3c_master_ack_ibi(struct svc_i3c_master *master,
bool mandatory_byte)
{
unsigned int ibi_ack_nack;
+ u32 reg;
ibi_ack_nack = SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK;
if (mandatory_byte)
@@ -358,40 +491,105 @@ static void svc_i3c_master_ack_ibi(struct svc_i3c_master *master,
ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE;
writel(ibi_ack_nack, master->regs + SVC_I3C_MCTRL);
+
+ return readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS, reg,
+ SVC_I3C_MSTATUS_MCTRLDONE(reg), 1, 1000);
+
}
-static void svc_i3c_master_nack_ibi(struct svc_i3c_master *master)
+static int svc_i3c_master_nack_ibi(struct svc_i3c_master *master)
{
+ int ret;
+ u32 reg;
+
writel(SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK |
SVC_I3C_MCTRL_IBIRESP_NACK,
master->regs + SVC_I3C_MCTRL);
+
+ ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS, reg,
+ SVC_I3C_MSTATUS_MCTRLDONE(reg), 1, 1000);
+ return ret;
+}
+
+static int svc_i3c_master_handle_ibi_won(struct svc_i3c_master *master, u32 mstatus)
+{
+ u32 ibitype;
+ int ret = 0;
+
+ ibitype = SVC_I3C_MSTATUS_IBITYPE(mstatus);
+
+ writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
+
+ /* Hardware can't auto emit NACK for hot join and master request */
+ switch (ibitype) {
+ case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
+ case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
+ ret = svc_i3c_master_nack_ibi(master);
+ }
+
+ return ret;
}
-static void svc_i3c_master_ibi_work(struct work_struct *work)
+static void svc_i3c_master_ibi_isr(struct svc_i3c_master *master)
{
- struct svc_i3c_master *master = container_of(work, struct svc_i3c_master, ibi_work);
struct svc_i3c_i2c_dev_data *data;
unsigned int ibitype, ibiaddr;
struct i3c_dev_desc *dev;
u32 status, val;
int ret;
- /* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
- writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
- SVC_I3C_MCTRL_IBIRESP_AUTO,
+ /*
+ * According to I3C spec ver 1.1, 09-Jun-2021, section 5.1.2.5:
+ *
+ * The I3C Controller shall hold SCL low while the Bus is in ACK/NACK Phase of I3C/I2C
+ * transfer, but the maximum stall time is 100us. IRQs have to be disabled to
+ * prevent scheduling during the whole I3C transaction, otherwise a bus timeout
+ * may occur if any interrupt or reschedule happens during the transaction.
+ */
+ guard(spinlock)(&master->xferqueue.lock);
+
+ /*
+ * IBIWON may be set before SVC_I3C_MCTRL_REQUEST_AUTO_IBI, causing
+ * readl_relaxed_poll_timeout() to return immediately. Consequently,
+ * ibitype will be 0 since it was last updated only after the 8th SCL
+ * cycle, leading to missed client IBI handlers.
+ *
+ * A typical scenario is when IBIWON occurs and bus arbitration is lost
+ * at svc_i3c_master_i3c_xfers().
+ *
+ * Clear SVC_I3C_MINT_IBIWON before sending SVC_I3C_MCTRL_REQUEST_AUTO_IBI.
+ */
+ writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
+
+ /*
+ * Write REQUEST_START_ADDR request to emit broadcast address for arbitration,
+ * instead of using AUTO_IBI.
+ *
+ * Using an AutoIBI request may cause the controller to remain in the AutoIBI
+ * state when there is a glitch on the SDA line (high->low->high).
+ * 1. SDA high->low, raising an interrupt to execute IBI isr.
+ * 2. SDA low->high.
+ * 3. IBI isr writes an AutoIBI request.
+ * 4. The controller will not start AutoIBI process because SDA is not low.
+ * 5. IBIWON polling times out.
+ * 6. Controller remains in the AutoIBI state and doesn't accept the EmitStop request.
+ */
+ writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
+ SVC_I3C_MCTRL_TYPE_I3C |
+ SVC_I3C_MCTRL_IBIRESP_MANUAL |
+ SVC_I3C_MCTRL_DIR(SVC_I3C_MCTRL_DIR_WRITE) |
+ SVC_I3C_MCTRL_ADDR(I3C_BROADCAST_ADDR),
master->regs + SVC_I3C_MCTRL);
/* Wait for IBIWON, should take approximately 100us */
- ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
- SVC_I3C_MSTATUS_IBIWON(val), 0, 1000);
+ ret = readl_relaxed_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS, val,
+ SVC_I3C_MSTATUS_IBIWON(val), 0, 100);
if (ret) {
dev_err(master->dev, "Timeout when polling for IBIWON\n");
- goto reenable_ibis;
+ svc_i3c_master_emit_stop(master);
+ return;
}
- /* Clear the interrupt status */
- writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
-
status = readl(master->regs + SVC_I3C_MSTATUS);
ibitype = SVC_I3C_MSTATUS_IBITYPE(status);
ibiaddr = SVC_I3C_MSTATUS_IBIADDR(status);
@@ -400,13 +598,21 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
switch (ibitype) {
case SVC_I3C_MSTATUS_IBITYPE_IBI:
dev = svc_i3c_master_dev_from_addr(master, ibiaddr);
- if (!dev)
+ if (!dev || !is_events_enabled(master, SVC_I3C_EVENT_IBI)) {
svc_i3c_master_nack_ibi(master);
- else
+ } else {
+ if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD)
+ svc_i3c_master_ack_ibi(master, true);
+ else
+ svc_i3c_master_ack_ibi(master, false);
svc_i3c_master_handle_ibi(master, dev);
+ }
break;
case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
- svc_i3c_master_ack_ibi(master, false);
+ if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
+ svc_i3c_master_ack_ibi(master, false);
+ else
+ svc_i3c_master_nack_ibi(master);
break;
case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
svc_i3c_master_nack_ibi(master);
@@ -430,34 +636,35 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
svc_i3c_master_emit_stop(master);
- goto reenable_ibis;
+ return;
}
/* Handle the non critical tasks */
switch (ibitype) {
case SVC_I3C_MSTATUS_IBITYPE_IBI:
+ svc_i3c_master_emit_stop(master);
if (dev) {
i3c_master_queue_ibi(dev, master->ibi.tbq_slot);
master->ibi.tbq_slot = NULL;
}
- svc_i3c_master_emit_stop(master);
break;
case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
- queue_work(master->base.wq, &master->hj_work);
+ svc_i3c_master_emit_stop(master);
+ if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
+ queue_work(master->base.wq, &master->hj_work);
break;
case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
+ svc_i3c_master_emit_stop(master);
+ break;
default:
break;
}
-
-reenable_ibis:
- svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
}
static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
{
struct svc_i3c_master *master = (struct svc_i3c_master *)dev_id;
- u32 active = readl(master->regs + SVC_I3C_MINTMASKED);
+ u32 active = readl(master->regs + SVC_I3C_MSTATUS);
if (!SVC_I3C_MSTATUS_SLVSTART(active))
return IRQ_NONE;
@@ -465,20 +672,75 @@ static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
/* Clear the interrupt status */
writel(SVC_I3C_MINT_SLVSTART, master->regs + SVC_I3C_MSTATUS);
- svc_i3c_master_disable_interrupts(master);
+	/* Ignore a spurious SLVSTART event */
+ if (svc_has_quirk(master, SVC_I3C_QUIRK_FALSE_SLVSTART) &&
+ !SVC_I3C_MSTATUS_STATE_SLVREQ(active))
+ return IRQ_HANDLED;
- /* Handle the interrupt in a non atomic context */
- queue_work(master->base.wq, &master->ibi_work);
+ /*
+ * The SDA line remains low until the request is processed.
+ * Receive the request in the interrupt context to respond promptly
+ * and restore the bus to idle state.
+ */
+ svc_i3c_master_ibi_isr(master);
return IRQ_HANDLED;
}
+static int svc_i3c_master_set_speed(struct i3c_master_controller *m,
+ enum i3c_open_drain_speed speed)
+{
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ struct i3c_bus *bus = i3c_master_get_bus(&master->base);
+ u32 ppbaud, odbaud, odhpp, mconfig;
+ unsigned long fclk_rate;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
+ return ret;
+ }
+
+ switch (speed) {
+ case I3C_OPEN_DRAIN_SLOW_SPEED:
+ fclk_rate = clk_get_rate(master->fclk);
+ if (!fclk_rate) {
+ ret = -EINVAL;
+ goto rpm_out;
+ }
+ /*
+		 * Run the I3C open-drain phase at a 50% duty-cycle I2C speed, so the first
+		 * broadcast address is visible to all I2C/I3C devices on the I3C bus.
+		 * An I3C device working as an I2C device will then turn off its 50ns spike
+		 * filter and switch to I3C mode.
+ */
+ mconfig = master->mctrl_config;
+ ppbaud = FIELD_GET(GENMASK(11, 8), mconfig);
+ odhpp = 0;
+ odbaud = DIV_ROUND_UP(fclk_rate, bus->scl_rate.i2c * (2 + 2 * ppbaud)) - 1;
+ mconfig &= ~GENMASK(24, 16);
+ mconfig |= SVC_I3C_MCONFIG_ODBAUD(odbaud) | SVC_I3C_MCONFIG_ODHPP(odhpp);
+ writel(mconfig, master->regs + SVC_I3C_MCONFIG);
+ break;
+ case I3C_OPEN_DRAIN_NORMAL_SPEED:
+ writel(master->mctrl_config, master->regs + SVC_I3C_MCONFIG);
+ break;
+ }
+
+rpm_out:
+ pm_runtime_put_autosuspend(master->dev);
+
+ return ret;
+}
+
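
For reference, the I3C_OPEN_DRAIN_SLOW_SPEED arithmetic above can be checked with a small standalone sketch. The clock values are purely illustrative assumptions (fclk = 24 MHz, PPBAUD = 0 as read back from MCONFIG, bus->scl_rate.i2c = 1 MHz); only the formula itself comes from the code above.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long fclk_rate = 24000000UL;	/* assumed FCLK rate */
	unsigned long i2c_rate = 1000000UL;	/* assumed bus->scl_rate.i2c */
	unsigned long ppbaud = 0;		/* assumed PPBAUD from MCONFIG[11:8] */

	/*
	 * 50% duty open-drain (odhpp = 0): one half period is
	 * (odbaud + 1) * (ppbaud + 1) fclk cycles, so solve for odbaud.
	 */
	unsigned long odbaud = DIV_ROUND_UP(fclk_rate, i2c_rate * (2 + 2 * ppbaud)) - 1;
	unsigned long od_scl_hz = fclk_rate / (2 * (odbaud + 1) * (ppbaud + 1));

	printf("odbaud = %lu -> open-drain SCL ~ %lu Hz\n", odbaud, od_scl_hz);
	return 0;
}

With these assumptions it prints odbaud = 11 and an open-drain SCL of about 1 MHz, i.e. the first broadcast is emitted at the I2C rate.
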
static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
{
struct svc_i3c_master *master = to_svc_i3c_master(m);
struct i3c_bus *bus = i3c_master_get_bus(m);
struct i3c_device_info info = {};
unsigned long fclk_rate, fclk_period_ns;
+ unsigned long i2c_period_ns, i2c_scl_rate, i3c_scl_rate;
unsigned int high_period_ns, od_low_period_ns;
u32 ppbaud, pplow, odhpp, odbaud, odstop, i2cbaud, reg;
int ret;
@@ -499,12 +761,15 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
}
fclk_period_ns = DIV_ROUND_UP(1000000000, fclk_rate);
+ i2c_period_ns = DIV_ROUND_UP(1000000000, bus->scl_rate.i2c);
+ i2c_scl_rate = bus->scl_rate.i2c;
+ i3c_scl_rate = bus->scl_rate.i3c;
/*
* Using I3C Push-Pull mode, target is 12.5MHz/80ns period.
* Simplest configuration is using a 50% duty-cycle of 40ns.
*/
- ppbaud = DIV_ROUND_UP(40, fclk_period_ns) - 1;
+ ppbaud = DIV_ROUND_UP(fclk_rate / 2, i3c_scl_rate) - 1;
pplow = 0;
/*
@@ -514,7 +779,7 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
*/
odhpp = 1;
high_period_ns = (ppbaud + 1) * fclk_period_ns;
- odbaud = DIV_ROUND_UP(240 - high_period_ns, high_period_ns) - 1;
+ odbaud = DIV_ROUND_UP(fclk_rate, SVC_I3C_QUICK_I2C_CLK * (1 + ppbaud)) - 2;
od_low_period_ns = (odbaud + 1) * high_period_ns;
switch (bus->mode) {
@@ -523,20 +788,27 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
odstop = 0;
break;
case I3C_BUS_MODE_MIXED_FAST:
- case I3C_BUS_MODE_MIXED_LIMITED:
/*
* Using I2C Fm+ mode, target is 1MHz/1000ns, the difference
* between the high and low period does not really matter.
*/
- i2cbaud = DIV_ROUND_UP(1000, od_low_period_ns) - 2;
+ i2cbaud = DIV_ROUND_UP(i2c_period_ns, od_low_period_ns) - 2;
odstop = 1;
break;
+ case I3C_BUS_MODE_MIXED_LIMITED:
case I3C_BUS_MODE_MIXED_SLOW:
- /*
- * Using I2C Fm mode, target is 0.4MHz/2500ns, with the same
- * constraints as the FM+ mode.
- */
- i2cbaud = DIV_ROUND_UP(2500, od_low_period_ns) - 2;
+ /* I3C PP + I3C OP + I2C OP both use i2c clk rate */
+ if (ppbaud > SVC_I3C_PPBAUD_MAX) {
+ ppbaud = SVC_I3C_PPBAUD_MAX;
+ pplow = DIV_ROUND_UP(fclk_rate, i3c_scl_rate) - (2 + 2 * ppbaud);
+ }
+
+ high_period_ns = (ppbaud + 1) * fclk_period_ns;
+ odhpp = 0;
+ odbaud = DIV_ROUND_UP(fclk_rate, i2c_scl_rate * (2 + 2 * ppbaud)) - 1;
+
+ od_low_period_ns = (odbaud + 1) * high_period_ns;
+ i2cbaud = DIV_ROUND_UP(i2c_period_ns, od_low_period_ns) - 2;
odstop = 1;
break;
default:
@@ -555,6 +827,7 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
SVC_I3C_MCONFIG_I2CBAUD(i2cbaud);
writel(reg, master->regs + SVC_I3C_MCONFIG);
+ master->mctrl_config = reg;
/* Master core's registration */
ret = i3c_master_get_free_addr(m, 0);
if (ret < 0)
@@ -562,6 +835,8 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
info.dyn_addr = ret;
+ info.hdr_cap = I3C_CCC_HDR_MODE(I3C_HDR_DDR);
+
writel(SVC_MDYNADDR_VALID | SVC_MDYNADDR_ADDR(info.dyn_addr),
master->regs + SVC_I3C_MDYNADDR);
@@ -570,7 +845,6 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
goto rpm_out;
rpm_out:
- pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
return ret;
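
To make the bus_init timing math concrete, here is a standalone sketch of the MIXED_SLOW/MIXED_LIMITED path. All clock rates are illustrative assumptions (fclk = 24 MHz, i3c = 12.5 MHz, i2c = 400 kHz) and the SVC_I3C_PPBAUD_MAX clamp is omitted because the resulting ppbaud is small; the formulas mirror the hunk above.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long fclk_rate = 24000000UL;		/* assumed FCLK */
	unsigned long i3c_scl_rate = 12500000UL;	/* assumed bus->scl_rate.i3c */
	unsigned long i2c_scl_rate = 400000UL;		/* assumed bus->scl_rate.i2c */

	unsigned long fclk_period_ns = DIV_ROUND_UP(1000000000UL, fclk_rate);
	unsigned long i2c_period_ns = DIV_ROUND_UP(1000000000UL, i2c_scl_rate);

	/* Push-pull: one SCL period is 2 * (ppbaud + 1) fclk cycles (pplow = 0) */
	unsigned long ppbaud = DIV_ROUND_UP(fclk_rate / 2, i3c_scl_rate) - 1;
	unsigned long high_period_ns = (ppbaud + 1) * fclk_period_ns;

	/* Open-drain at the I2C rate with a 50% duty cycle (odhpp = 0) */
	unsigned long odbaud = DIV_ROUND_UP(fclk_rate, i2c_scl_rate * (2 + 2 * ppbaud)) - 1;
	unsigned long od_low_period_ns = (odbaud + 1) * high_period_ns;

	/* i2cbaud follows the driver's formula for legacy I2C messages */
	unsigned long i2cbaud = DIV_ROUND_UP(i2c_period_ns, od_low_period_ns) - 2;

	printf("ppbaud=%lu odbaud=%lu i2cbaud=%lu\n", ppbaud, odbaud, i2cbaud);
	printf("push-pull period ~%lu ns, open-drain period ~%lu ns\n",
	       2 * high_period_ns, 2 * od_low_period_ns);
	return 0;
}

With these inputs it prints ppbaud=0, odbaud=29, i2cbaud=0, a push-pull period of about 84 ns (~12 MHz) and an open-drain period of about 2520 ns (~397 kHz).
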
@@ -592,7 +866,6 @@ static void svc_i3c_master_bus_cleanup(struct i3c_master_controller *m)
/* Disable master */
writel(0, master->regs + SVC_I3C_MCONFIG);
- pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
}
@@ -729,12 +1002,30 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
u8 *addrs, unsigned int *count)
{
u64 prov_id[SVC_I3C_MAX_DEVS] = {}, nacking_prov_id = 0;
- unsigned int dev_nb = 0, last_addr = 0;
+ unsigned int dev_nb = 0, last_addr = 0, dyn_addr = 0;
u32 reg;
int ret, i;
+ svc_i3c_master_flush_fifo(master);
+
while (true) {
- /* Enter/proceed with DAA */
+		/* Clear the SVC_I3C_MINT_IBIWON W1C bit */
+ writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
+
+		/* SVC_I3C_MCTRL_REQUEST_PROC_DAA has two modes, ENTER DAA and PROCESS DAA.
+		 *
+		 * ENTER DAA:
+		 *   1. Issues START, 7E, ENTDAA, and then emits 7E/R to process the first target.
+		 *   2. Stops just before the new Dynamic Address (DA) is to be emitted.
+		 *
+		 * PROCESS DAA:
+		 *   1. The DA is written using MWDATAB or ADDR bits 6:0.
+		 *   2. ProcessDAA is requested again to write the new address, and then starts the
+		 *      next (START, 7E, ENTDAA) unless marked to STOP. An MSTATUS indicating NACK
+		 *      means the DA was not accepted (e.g. parity error). If PROCESSDAA is NACKed
+		 *      on the 7E/R, meaning no more targets need a DA, a COMPLETE is signaled
+		 *      (along with DONE) and a STOP is issued automatically.
+		 */
writel(SVC_I3C_MCTRL_REQUEST_PROC_DAA |
SVC_I3C_MCTRL_TYPE_I3C |
SVC_I3C_MCTRL_IBIRESP_NACK |
@@ -751,19 +1042,39 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
SVC_I3C_MSTATUS_MCTRLDONE(reg),
1, 1000);
if (ret)
- return ret;
+ break;
if (SVC_I3C_MSTATUS_RXPEND(reg)) {
u8 data[6];
/*
- * We only care about the 48-bit provisional ID yet to
+			 * A target sends its ID to request address assignment; prefilling
+			 * the dynamic address reduces SCL clock stalls and also works
+			 * around the SVC_I3C_QUIRK_FIFO_EMPTY quirk.
+			 *
+			 * Ideally the address would be prefilled before the processDAA
+			 * command, but that requires an extra check to write dyn_addr at
+			 * the right time, because the driver issues the processDAA command
+			 * twice per assignment.
+			 * Prefilling here is safe and efficient because the FIFO starts
+			 * filling within a few hundred nanoseconds, which is significantly
+			 * faster than the 64 SCL clock cycles.
+ */
+ ret = i3c_master_get_free_addr(&master->base, last_addr + 1);
+ if (ret < 0)
+ break;
+
+ dyn_addr = ret;
+ writel(dyn_addr, master->regs + SVC_I3C_MWDATAB);
+
+ /*
+			 * For now we only care about the 48-bit provisioned ID, just to
* be sure a device does not nack an address twice.
* Otherwise, we would just need to flush the RX FIFO.
*/
ret = svc_i3c_master_readb(master, data, 6);
if (ret)
- return ret;
+ break;
for (i = 0; i < 6; i++)
prov_id[dev_nb] |= (u64)(data[i]) << (8 * (5 - i));
@@ -771,7 +1082,12 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
/* We do not care about the BCR and DCR yet */
ret = svc_i3c_master_readb(master, data, 2);
if (ret)
- return ret;
+ break;
+ } else if (SVC_I3C_MSTATUS_IBIWON(reg)) {
+ ret = svc_i3c_master_handle_ibi_won(master, reg);
+ if (ret)
+ break;
+ continue;
} else if (SVC_I3C_MSTATUS_MCTRLDONE(reg)) {
if (SVC_I3C_MSTATUS_STATE_IDLE(reg) &&
SVC_I3C_MSTATUS_COMPLETE(reg)) {
@@ -779,9 +1095,24 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
* All devices received and acked they dynamic
* address, this is the natural end of the DAA
* procedure.
+ *
+				 * The hardware automatically emits a STOP in this case.
*/
- break;
+ *count = dev_nb;
+ return 0;
+
} else if (SVC_I3C_MSTATUS_NACKED(reg)) {
+ /* No I3C devices attached */
+ if (dev_nb == 0) {
+ /*
+					 * The hardware cannot treat the first NACK of ENTDAA as a
+					 * normal COMPLETE, so a STOP must be emitted manually.
+ */
+ ret = 0;
+ *count = 0;
+ break;
+ }
+
/*
* A slave device nacked the address, this is
* allowed only once, DAA will be stopped and
@@ -789,8 +1120,10 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
* answer again immediately and shall ack the
* address this time.
*/
- if (prov_id[dev_nb] == nacking_prov_id)
- return -EIO;
+ if (prov_id[dev_nb] == nacking_prov_id) {
+ ret = -EIO;
+ break;
+ }
dev_nb--;
nacking_prov_id = prov_id[dev_nb];
@@ -798,7 +1131,7 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
continue;
} else {
- return -EIO;
+ break;
}
}
@@ -810,24 +1143,19 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
SVC_I3C_MSTATUS_BETWEEN(reg),
0, 1000);
if (ret)
- return ret;
-
- /* Give the slave device a suitable dynamic address */
- ret = i3c_master_get_free_addr(&master->base, last_addr + 1);
- if (ret < 0)
- return ret;
+ break;
- addrs[dev_nb] = ret;
+ addrs[dev_nb] = dyn_addr;
dev_dbg(master->dev, "DAA: device %d assigned to 0x%02x\n",
dev_nb, addrs[dev_nb]);
-
- writel(addrs[dev_nb], master->regs + SVC_I3C_MWDATAB);
last_addr = addrs[dev_nb++];
}
- *count = dev_nb;
+	/* A STOP must be issued manually except in the COMPLETE case */
+ svc_i3c_master_emit_stop(master);
+ svc_i3c_master_flush_fifo(master);
- return 0;
+ return ret;
}
static int svc_i3c_update_ibirules(struct svc_i3c_master *master)
@@ -840,7 +1168,7 @@ static int svc_i3c_update_ibirules(struct svc_i3c_master *master)
/* Create the IBIRULES register for both cases */
i3c_bus_for_each_i3cdev(&master->base.bus, dev) {
- if (I3C_BCR_DEVICE_ROLE(dev->info.bcr) == I3C_BCR_I3C_MASTER)
+ if (!(dev->info.bcr & I3C_BCR_IBI_REQ_CAP))
continue;
if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD) {
@@ -899,20 +1227,43 @@ static int svc_i3c_master_do_daa(struct i3c_master_controller *m)
}
spin_lock_irqsave(&master->xferqueue.lock, flags);
+
+ if (svc_has_daa_corrupt(master))
+ writel(master->mctrl_config | SVC_I3C_MCONFIG_SKEW(1),
+ master->regs + SVC_I3C_MCONFIG);
+
ret = svc_i3c_master_do_daa_locked(master, addrs, &dev_nb);
+
+ if (svc_has_daa_corrupt(master))
+ writel(master->mctrl_config, master->regs + SVC_I3C_MCONFIG);
+
spin_unlock_irqrestore(&master->xferqueue.lock, flags);
- if (ret) {
- svc_i3c_master_emit_stop(master);
- svc_i3c_master_clear_merrwarn(master);
+
+ svc_i3c_master_clear_merrwarn(master);
+ if (ret)
goto rpm_out;
- }
- /* Register all devices who participated to the core */
- for (i = 0; i < dev_nb; i++) {
- ret = i3c_master_add_i3c_dev_locked(m, addrs[i]);
- if (ret)
- goto rpm_out;
- }
+ /*
+	 * Register with the core all devices that participated in DAA
+ *
+ * If two devices (A and B) are detected in DAA and address 0xa is assigned to
+ * device A and 0xb to device B, a failure in i3c_master_add_i3c_dev_locked()
+ * for device A (addr: 0xa) could prevent device B (addr: 0xb) from being
+ * registered on the bus. The I3C stack might still consider 0xb a free
+ * address. If a subsequent Hotjoin occurs, 0xb might be assigned to Device A,
+ * causing both devices A and B to use the same address 0xb, violating the I3C
+ * specification.
+ *
+ * The return value for i3c_master_add_i3c_dev_locked() should not be checked
+ * because subsequent steps will scan the entire I3C bus, independent of
+ * whether i3c_master_add_i3c_dev_locked() returns success.
+ *
+ * If device A registration fails, there is still a chance to register device
+ * B. i3c_master_add_i3c_dev_locked() can reset DAA if a failure occurs while
+ * retrieving device information.
+ */
+ for (i = 0; i < dev_nb; i++)
+ i3c_master_add_i3c_dev_locked(m, addrs[i]);
/* Configure IBI auto-rules */
ret = svc_i3c_update_ibirules(master);
@@ -920,7 +1271,6 @@ static int svc_i3c_master_do_daa(struct i3c_master_controller *m)
dev_err(master->dev, "Cannot handle such a list of devices");
rpm_out:
- pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
return ret;
@@ -988,45 +1338,147 @@ static int svc_i3c_master_write(struct svc_i3c_master *master,
}
static int svc_i3c_master_xfer(struct svc_i3c_master *master,
- bool rnw, unsigned int xfer_type, u8 addr,
+ u32 rnw_cmd, unsigned int xfer_type, u8 addr,
u8 *in, const u8 *out, unsigned int xfer_len,
- unsigned int *read_len, bool continued)
+ unsigned int *actual_len, bool continued, bool repeat_start)
{
+ bool rnw = svc_cmd_is_read(rnw_cmd, xfer_type);
+ int retry = repeat_start ? 1 : 2;
u32 reg;
int ret;
- writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
- xfer_type |
- SVC_I3C_MCTRL_IBIRESP_NACK |
- SVC_I3C_MCTRL_DIR(rnw) |
- SVC_I3C_MCTRL_ADDR(addr) |
- SVC_I3C_MCTRL_RDTERM(*read_len),
- master->regs + SVC_I3C_MCTRL);
+	/* Clear the SVC_I3C_MINT_IBIWON W1C bit */
+ writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
- ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
+ if (xfer_type == SVC_I3C_MCTRL_TYPE_DDR) {
+		/* The DDR command must be prefilled into the FIFO */
+ writel(rnw_cmd, master->regs + SVC_I3C_MWDATAB);
+ if (!rnw) {
+			/* Write data must also be prefilled into the FIFO */
+ ret = svc_i3c_master_write(master, out, xfer_len);
+ if (ret)
+ goto emit_stop;
+ }
+ }
+
+ while (retry--) {
+ writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
+ xfer_type |
+ SVC_I3C_MCTRL_IBIRESP_NACK |
+ SVC_I3C_MCTRL_DIR(rnw) |
+ SVC_I3C_MCTRL_ADDR(addr) |
+ SVC_I3C_MCTRL_RDTERM(*actual_len),
+ master->regs + SVC_I3C_MCTRL);
+
+ /*
+ * The entire transaction can consist of multiple write transfers.
+ * Prefilling before EmitStartAddr causes the data to be emitted
+ * immediately, becoming part of the previous transfer.
+ * The only way to work around this hardware issue is to let the
+ * FIFO start filling as soon as possible after EmitStartAddr.
+ */
+ if (svc_has_quirk(master, SVC_I3C_QUIRK_FIFO_EMPTY) && !rnw && xfer_len) {
+ u32 space, end, len;
+
+ reg = readl(master->regs + SVC_I3C_MDATACTRL);
+ space = SVC_I3C_FIFO_SIZE - SVC_I3C_MDATACTRL_TXCOUNT(reg);
+ if (space) {
+ end = xfer_len > space ? 0 : SVC_I3C_MWDATAB_END;
+ len = min_t(u32, xfer_len, space);
+ writesb(master->regs + SVC_I3C_MWDATAB1, out, len - 1);
+ /* Mark END bit if this is the last byte */
+ writel(out[len - 1] | end, master->regs + SVC_I3C_MWDATAB);
+ xfer_len -= len;
+ out += len;
+ }
+ }
+
+ ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
SVC_I3C_MSTATUS_MCTRLDONE(reg), 0, 1000);
- if (ret)
- goto emit_stop;
+ if (ret)
+ goto emit_stop;
+
+ /*
+ * According to I3C spec ver 1.1.1, 5.1.2.2.3 Consequence of Controller Starting a
+ * Frame with I3C Target Address.
+ *
+ * The I3C Controller normally should start a Frame, the Address may be arbitrated,
+ * and so the Controller shall monitor to see whether an In-Band Interrupt request,
+ * a Controller Role Request (i.e., Secondary Controller requests to become the
+ * Active Controller), or a Hot-Join Request has been made.
+ *
+		 * If the IBIWON check is missed, wrong data will be returned. When IBIWON
+		 * happens, issue a repeated START. Address arbitration only happens at START,
+		 * never at repeated START.
+ */
+ if (SVC_I3C_MSTATUS_IBIWON(reg)) {
+ ret = svc_i3c_master_handle_ibi_won(master, reg);
+ if (ret)
+ goto emit_stop;
+ continue;
+ }
+
+ if (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_NACK) {
+ /*
+ * According to I3C Spec 1.1.1, 11-Jun-2021, section: 5.1.2.2.3.
+ * If the Controller chooses to start an I3C Message with an I3C Dynamic
+ * Address, then special provisions shall be made because that same I3C
+ * Target may be initiating an IBI or a Controller Role Request. So, one of
+ * three things may happen: (skip 1, 2)
+ *
+ * 3. The Addresses match and the RnW bits also match, and so neither
+ * Controller nor Target will ACK since both are expecting the other side to
+ * provide ACK. As a result, each side might think it had "won" arbitration,
+ * but neither side would continue, as each would subsequently see that the
+ * other did not provide ACK.
+ * ...
+ * For either value of RnW: Due to the NACK, the Controller shall defer the
+ * Private Write or Private Read, and should typically transmit the Target
+ * Address again after a Repeated START (i.e., the next one or any one prior
+ * to a STOP in the Frame). Since the Address Header following a Repeated
+ * START is not arbitrated, the Controller will always win (see Section
+ * 5.1.2.2.4).
+ */
+ if (retry && addr != 0x7e) {
+ writel(SVC_I3C_MERRWARN_NACK, master->regs + SVC_I3C_MERRWARN);
+ } else {
+ ret = -ENXIO;
+ *actual_len = 0;
+ goto emit_stop;
+ }
+ } else {
+ break;
+ }
+ }
if (rnw)
ret = svc_i3c_master_read(master, in, xfer_len);
- else
+ else if (xfer_type != SVC_I3C_MCTRL_TYPE_DDR)
ret = svc_i3c_master_write(master, out, xfer_len);
if (ret < 0)
goto emit_stop;
if (rnw)
- *read_len = ret;
+ *actual_len = ret;
ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
SVC_I3C_MSTATUS_COMPLETE(reg), 0, 1000);
if (ret)
goto emit_stop;
+ if (xfer_type == SVC_I3C_MCTRL_TYPE_DDR &&
+ (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_CRC)) {
+ ret = -ENXIO;
+ goto emit_stop;
+ }
+
writel(SVC_I3C_MINT_COMPLETE, master->regs + SVC_I3C_MSTATUS);
if (!continued) {
- svc_i3c_master_emit_stop(master);
+ if (xfer_type != SVC_I3C_MCTRL_TYPE_DDR)
+ svc_i3c_master_emit_stop(master);
+ else
+ svc_i3c_master_emit_force_exit(master);
/* Wait idle if stop is sent. */
readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
@@ -1036,8 +1488,13 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
return 0;
emit_stop:
- svc_i3c_master_emit_stop(master);
+ if (xfer_type != SVC_I3C_MCTRL_TYPE_DDR)
+ svc_i3c_master_emit_stop(master);
+ else
+ svc_i3c_master_emit_force_exit(master);
+
svc_i3c_master_clear_merrwarn(master);
+ svc_i3c_master_flush_fifo(master);
return ret;
}
@@ -1082,6 +1539,11 @@ static void svc_i3c_master_dequeue_xfer(struct svc_i3c_master *master,
spin_unlock_irqrestore(&master->xferqueue.lock, flags);
}
+static int i3c_mode_to_svc_type(enum i3c_xfer_mode mode)
+{
+ return (mode == I3C_SDR) ? SVC_I3C_MCTRL_TYPE_I3C : SVC_I3C_MCTRL_TYPE_DDR;
+}
+
static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
{
struct svc_i3c_xfer *xfer = master->xferqueue.cur;
@@ -1090,29 +1552,24 @@ static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
if (!xfer)
return;
- ret = pm_runtime_resume_and_get(master->dev);
- if (ret < 0) {
- dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
- return;
- }
-
svc_i3c_master_clear_merrwarn(master);
svc_i3c_master_flush_fifo(master);
for (i = 0; i < xfer->ncmds; i++) {
struct svc_i3c_cmd *cmd = &xfer->cmds[i];
- ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type,
+ ret = svc_i3c_master_xfer(master, cmd->rnw_cmd, xfer->type,
cmd->addr, cmd->in, cmd->out,
- cmd->len, &cmd->read_len,
- cmd->continued);
+ cmd->len, &cmd->actual_len,
+ cmd->continued, i > 0);
+		/* cmd->xfer is NULL for I2C and CCC transfers */
+ if (cmd->xfer)
+ cmd->xfer->actual_len = cmd->actual_len;
+
if (ret)
break;
}
- pm_runtime_mark_last_busy(master->dev);
- pm_runtime_put_autosuspend(master->dev);
-
xfer->ret = ret;
complete(&xfer->comp);
@@ -1133,6 +1590,13 @@ static void svc_i3c_master_enqueue_xfer(struct svc_i3c_master *master,
struct svc_i3c_xfer *xfer)
{
unsigned long flags;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
+ return;
+ }
init_completion(&xfer->comp);
spin_lock_irqsave(&master->xferqueue.lock, flags);
@@ -1143,6 +1607,8 @@ static void svc_i3c_master_enqueue_xfer(struct svc_i3c_master *master,
svc_i3c_master_start_xfer_locked(master);
}
spin_unlock_irqrestore(&master->xferqueue.lock, flags);
+
+ pm_runtime_put_autosuspend(master->dev);
}
static bool
@@ -1183,12 +1649,14 @@ static int svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master *master,
cmd->in = NULL;
cmd->out = buf;
cmd->len = xfer_len;
- cmd->read_len = 0;
+ cmd->actual_len = 0;
cmd->continued = false;
+ mutex_lock(&master->lock);
svc_i3c_master_enqueue_xfer(master, xfer);
if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
svc_i3c_master_dequeue_xfer(master, xfer);
+ mutex_unlock(&master->lock);
ret = xfer->ret;
kfree(buf);
@@ -1201,7 +1669,7 @@ static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
struct i3c_ccc_cmd *ccc)
{
unsigned int xfer_len = ccc->dests[0].payload.len;
- unsigned int read_len = ccc->rnw ? xfer_len : 0;
+ unsigned int actual_len = ccc->rnw ? xfer_len : 0;
struct svc_i3c_xfer *xfer;
struct svc_i3c_cmd *cmd;
int ret;
@@ -1219,7 +1687,7 @@ static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
cmd->in = NULL;
cmd->out = &ccc->id;
cmd->len = 1;
- cmd->read_len = 0;
+ cmd->actual_len = 0;
cmd->continued = true;
/* Directed message */
@@ -1227,17 +1695,19 @@ static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
cmd->addr = ccc->dests[0].addr;
cmd->rnw = ccc->rnw;
cmd->in = ccc->rnw ? ccc->dests[0].payload.data : NULL;
- cmd->out = ccc->rnw ? NULL : ccc->dests[0].payload.data,
+ cmd->out = ccc->rnw ? NULL : ccc->dests[0].payload.data;
cmd->len = xfer_len;
- cmd->read_len = read_len;
+ cmd->actual_len = actual_len;
cmd->continued = false;
+ mutex_lock(&master->lock);
svc_i3c_master_enqueue_xfer(master, xfer);
if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
svc_i3c_master_dequeue_xfer(master, xfer);
+ mutex_unlock(&master->lock);
- if (cmd->read_len != xfer_len)
- ccc->dests[0].payload.len = cmd->read_len;
+ if (cmd->actual_len != xfer_len)
+ ccc->dests[0].payload.len = cmd->actual_len;
ret = xfer->ret;
svc_i3c_master_free_xfer(xfer);
@@ -1250,16 +1720,21 @@ static int svc_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
{
struct svc_i3c_master *master = to_svc_i3c_master(m);
bool broadcast = cmd->id < 0x80;
+ int ret;
if (broadcast)
- return svc_i3c_master_send_bdcast_ccc_cmd(master, cmd);
+ ret = svc_i3c_master_send_bdcast_ccc_cmd(master, cmd);
else
- return svc_i3c_master_send_direct_ccc_cmd(master, cmd);
+ ret = svc_i3c_master_send_direct_ccc_cmd(master, cmd);
+
+ if (ret)
+ cmd->err = I3C_ERROR_M2;
+
+ return ret;
}
-static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
- struct i3c_priv_xfer *xfers,
- int nxfers)
+static int svc_i3c_master_i3c_xfers(struct i3c_dev_desc *dev, struct i3c_xfer *xfers,
+ int nxfers, enum i3c_xfer_mode mode)
{
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct svc_i3c_master *master = to_svc_i3c_master(m);
@@ -1267,27 +1742,44 @@ static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
struct svc_i3c_xfer *xfer;
int ret, i;
+ if (mode != I3C_SDR) {
+ /*
+		 * Only data sizes smaller than the FIFO size are supported in DDR
+		 * mode. The first FIFO entry holds the command, so the FIFO space
+		 * available for data is SVC_I3C_FIFO_SIZE - 2, since DDR only
+		 * supports even lengths.
+ */
+ for (i = 0; i < nxfers; i++)
+ if (xfers[i].len > SVC_I3C_FIFO_SIZE - 2)
+ return -EINVAL;
+ }
+
xfer = svc_i3c_master_alloc_xfer(master, nxfers);
if (!xfer)
return -ENOMEM;
- xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
+ xfer->type = i3c_mode_to_svc_type(mode);
for (i = 0; i < nxfers; i++) {
+ u32 rnw_cmd = (mode == I3C_SDR) ? xfers[i].rnw : xfers[i].cmd;
+ bool rnw = svc_cmd_is_read(rnw_cmd, xfer->type);
struct svc_i3c_cmd *cmd = &xfer->cmds[i];
+ cmd->xfer = &xfers[i];
cmd->addr = master->addrs[data->index];
- cmd->rnw = xfers[i].rnw;
- cmd->in = xfers[i].rnw ? xfers[i].data.in : NULL;
- cmd->out = xfers[i].rnw ? NULL : xfers[i].data.out;
+ cmd->rnw_cmd = rnw_cmd;
+ cmd->in = rnw ? xfers[i].data.in : NULL;
+ cmd->out = rnw ? NULL : xfers[i].data.out;
cmd->len = xfers[i].len;
- cmd->read_len = xfers[i].rnw ? xfers[i].len : 0;
+ cmd->actual_len = rnw ? xfers[i].len : 0;
cmd->continued = (i + 1) < nxfers;
}
+ mutex_lock(&master->lock);
svc_i3c_master_enqueue_xfer(master, xfer);
if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
svc_i3c_master_dequeue_xfer(master, xfer);
+ mutex_unlock(&master->lock);
ret = xfer->ret;
svc_i3c_master_free_xfer(xfer);
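
As a usage note for the renamed hook, the sketch below shows how a client driver might submit SDR transfers through the reworked API. It is a hypothetical example, not part of this patch: the function name, device handle and register layout are made up, while the struct i3c_xfer fields (rnw, len, data.in/data.out) and the I3C_SDR mode match what the driver consumes above.

#include <linux/i3c/device.h>

/* Hypothetical client-side helper; 'i3cdev' would come from the client's probe(). */
static int demo_read_reg(struct i3c_device *i3cdev, u8 reg, u8 *val)
{
	struct i3c_xfer xfers[2] = {
		{ .rnw = false, .len = 1, .data.out = &reg },
		{ .rnw = true,  .len = 1, .data.in  = val },
	};

	/*
	 * Plain SDR private transfers; an HDR-DDR caller would pass a non-SDR
	 * mode and, on this controller, keep each transfer within
	 * SVC_I3C_FIFO_SIZE - 2 bytes.
	 */
	return i3c_device_do_xfers(i3cdev, xfers, 2, I3C_SDR);
}
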
@@ -1296,7 +1788,7 @@ static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
}
static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
- const struct i2c_msg *xfers,
+ struct i2c_msg *xfers,
int nxfers)
{
struct i3c_master_controller *m = i2c_dev_get_master(dev);
@@ -1319,13 +1811,15 @@ static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
cmd->in = cmd->rnw ? xfers[i].buf : NULL;
cmd->out = cmd->rnw ? NULL : xfers[i].buf;
cmd->len = xfers[i].len;
- cmd->read_len = cmd->rnw ? xfers[i].len : 0;
+ cmd->actual_len = cmd->rnw ? xfers[i].len : 0;
cmd->continued = (i + 1 < nxfers);
}
+ mutex_lock(&master->lock);
svc_i3c_master_enqueue_xfer(master, xfer);
- if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ if (!wait_for_completion_timeout(&xfer->comp, m->i2c.timeout))
svc_i3c_master_dequeue_xfer(master, xfer);
+ mutex_unlock(&master->lock);
ret = xfer->ret;
svc_i3c_master_free_xfer(xfer);
@@ -1398,6 +1892,7 @@ static int svc_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
return ret;
}
+ master->enabled_events++;
svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
@@ -1409,16 +1904,49 @@ static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
struct svc_i3c_master *master = to_svc_i3c_master(m);
int ret;
- svc_i3c_master_disable_interrupts(master);
+ master->enabled_events--;
+ if (!master->enabled_events)
+ svc_i3c_master_disable_interrupts(master);
ret = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
- pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
return ret;
}
+static int svc_i3c_master_enable_hotjoin(struct i3c_master_controller *m)
+{
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
+ return ret;
+ }
+
+ master->enabled_events |= SVC_I3C_EVENT_HOTJOIN;
+
+ svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
+
+ return 0;
+}
+
+static int svc_i3c_master_disable_hotjoin(struct i3c_master_controller *m)
+{
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+
+ master->enabled_events &= ~SVC_I3C_EVENT_HOTJOIN;
+
+ if (!master->enabled_events)
+ svc_i3c_master_disable_interrupts(master);
+
+ pm_runtime_put_autosuspend(master->dev);
+
+ return 0;
+}
+
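
is_events_enabled() is used by the IBI and Hot-Join paths above but is defined outside these hunks. A plausible minimal shape, assuming enabled_events keeps an IBI user count in its low byte plus a dedicated Hot-Join flag bit (the exact bit layout and the stub struct are assumptions), is:

#include <linux/bits.h>
#include <linux/types.h>

/* Assumed event layout: low byte counts IBI users, bit 31 flags Hot-Join */
#define SVC_I3C_EVENT_IBI	GENMASK(7, 0)
#define SVC_I3C_EVENT_HOTJOIN	BIT(31)

/* Stub carrying only the field this helper needs */
struct svc_i3c_master_sketch {
	u32 enabled_events;
};

static inline bool is_events_enabled(struct svc_i3c_master_sketch *master, u32 mask)
{
	return !!(master->enabled_events & mask);
}
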
static void svc_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
struct i3c_ibi_slot *slot)
{
@@ -1438,84 +1966,65 @@ static const struct i3c_master_controller_ops svc_i3c_master_ops = {
.do_daa = svc_i3c_master_do_daa,
.supports_ccc_cmd = svc_i3c_master_supports_ccc_cmd,
.send_ccc_cmd = svc_i3c_master_send_ccc_cmd,
- .priv_xfers = svc_i3c_master_priv_xfers,
+ .i3c_xfers = svc_i3c_master_i3c_xfers,
.i2c_xfers = svc_i3c_master_i2c_xfers,
.request_ibi = svc_i3c_master_request_ibi,
.free_ibi = svc_i3c_master_free_ibi,
.recycle_ibi_slot = svc_i3c_master_recycle_ibi_slot,
.enable_ibi = svc_i3c_master_enable_ibi,
.disable_ibi = svc_i3c_master_disable_ibi,
+ .enable_hotjoin = svc_i3c_master_enable_hotjoin,
+ .disable_hotjoin = svc_i3c_master_disable_hotjoin,
+ .set_speed = svc_i3c_master_set_speed,
};
-static int svc_i3c_master_prepare_clks(struct svc_i3c_master *master)
-{
- int ret = 0;
-
- ret = clk_prepare_enable(master->pclk);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(master->fclk);
- if (ret) {
- clk_disable_unprepare(master->pclk);
- return ret;
- }
-
- ret = clk_prepare_enable(master->sclk);
- if (ret) {
- clk_disable_unprepare(master->pclk);
- clk_disable_unprepare(master->fclk);
- return ret;
- }
-
- return 0;
-}
-
-static void svc_i3c_master_unprepare_clks(struct svc_i3c_master *master)
-{
- clk_disable_unprepare(master->pclk);
- clk_disable_unprepare(master->fclk);
- clk_disable_unprepare(master->sclk);
-}
-
static int svc_i3c_master_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct svc_i3c_master *master;
- int ret;
+ int ret, i;
master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
if (!master)
return -ENOMEM;
+ master->drvdata = of_device_get_match_data(dev);
+ if (!master->drvdata)
+ return -EINVAL;
+
master->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(master->regs))
return PTR_ERR(master->regs);
- master->pclk = devm_clk_get(dev, "pclk");
- if (IS_ERR(master->pclk))
- return PTR_ERR(master->pclk);
+ master->num_clks = devm_clk_bulk_get_all(dev, &master->clks);
+ if (master->num_clks < 0)
+ return dev_err_probe(dev, -EINVAL, "can't get I3C clocks\n");
+
+ for (i = 0; i < master->num_clks; i++) {
+ if (!strcmp(master->clks[i].id, "fast_clk"))
+ break;
+ }
+
+ if (i == master->num_clks)
+ return dev_err_probe(dev, -EINVAL,
+ "can't get I3C peripheral clock\n");
- master->fclk = devm_clk_get(dev, "fast_clk");
+ master->fclk = master->clks[i].clk;
if (IS_ERR(master->fclk))
return PTR_ERR(master->fclk);
- master->sclk = devm_clk_get(dev, "slow_clk");
- if (IS_ERR(master->sclk))
- return PTR_ERR(master->sclk);
-
master->irq = platform_get_irq(pdev, 0);
- if (master->irq <= 0)
- return -ENOENT;
+ if (master->irq < 0)
+ return master->irq;
master->dev = dev;
-
- ret = svc_i3c_master_prepare_clks(master);
+ ret = clk_bulk_prepare_enable(master->num_clks, master->clks);
if (ret)
- return ret;
+ return dev_err_probe(dev, ret, "can't enable I3C clocks\n");
INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
- INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work);
+ mutex_init(&master->lock);
+
ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
IRQF_NO_SUSPEND, "svc-i3c-irq", master);
if (ret)
@@ -1552,7 +2061,6 @@ static int svc_i3c_master_probe(struct platform_device *pdev)
if (ret)
goto rpm_disable;
- pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
@@ -1560,35 +2068,49 @@ static int svc_i3c_master_probe(struct platform_device *pdev)
rpm_disable:
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
- pm_runtime_set_suspended(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
err_disable_clks:
- svc_i3c_master_unprepare_clks(master);
+ clk_bulk_disable_unprepare(master->num_clks, master->clks);
return ret;
}
-static int svc_i3c_master_remove(struct platform_device *pdev)
+static void svc_i3c_master_remove(struct platform_device *pdev)
{
struct svc_i3c_master *master = platform_get_drvdata(pdev);
- int ret;
- ret = i3c_master_unregister(&master->base);
- if (ret)
- return ret;
+ cancel_work_sync(&master->hj_work);
+ i3c_master_unregister(&master->base);
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+}
- return 0;
+static void svc_i3c_save_regs(struct svc_i3c_master *master)
+{
+ master->saved_regs.mconfig = readl(master->regs + SVC_I3C_MCONFIG);
+ master->saved_regs.mdynaddr = readl(master->regs + SVC_I3C_MDYNADDR);
+}
+
+static void svc_i3c_restore_regs(struct svc_i3c_master *master)
+{
+ if (readl(master->regs + SVC_I3C_MDYNADDR) !=
+ master->saved_regs.mdynaddr) {
+ writel(master->saved_regs.mconfig,
+ master->regs + SVC_I3C_MCONFIG);
+ writel(master->saved_regs.mdynaddr,
+ master->regs + SVC_I3C_MDYNADDR);
+ }
}
static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev)
{
struct svc_i3c_master *master = dev_get_drvdata(dev);
- svc_i3c_master_unprepare_clks(master);
+ svc_i3c_save_regs(master);
+ clk_bulk_disable_unprepare(master->num_clks, master->clks);
pinctrl_pm_select_sleep_state(dev);
return 0;
@@ -1597,9 +2119,14 @@ static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev)
static int __maybe_unused svc_i3c_runtime_resume(struct device *dev)
{
struct svc_i3c_master *master = dev_get_drvdata(dev);
+ int ret;
pinctrl_pm_select_default_state(dev);
- svc_i3c_master_prepare_clks(master);
+ ret = clk_bulk_prepare_enable(master->num_clks, master->clks);
+ if (ret)
+ return ret;
+
+ svc_i3c_restore_regs(master);
return 0;
}
@@ -1611,8 +2138,17 @@ static const struct dev_pm_ops svc_i3c_pm_ops = {
svc_i3c_runtime_resume, NULL)
};
+static const struct svc_i3c_drvdata npcm845_drvdata = {
+ .quirks = SVC_I3C_QUIRK_FIFO_EMPTY |
+ SVC_I3C_QUIRK_FALSE_SLVSTART |
+ SVC_I3C_QUIRK_DAA_CORRUPT,
+};
+
+static const struct svc_i3c_drvdata svc_default_drvdata = {};
+
static const struct of_device_id svc_i3c_master_of_match_tbl[] = {
- { .compatible = "silvaco,i3c-master" },
+ { .compatible = "nuvoton,npcm845-i3c", .data = &npcm845_drvdata },
+ { .compatible = "silvaco,i3c-master-v1", .data = &svc_default_drvdata },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, svc_i3c_master_of_match_tbl);
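
svc_has_quirk() and svc_has_daa_corrupt() are likewise used above without their definitions appearing in this diff. A minimal sketch consistent with the per-compatible drvdata added here (the quirk bit values and the stub struct are assumptions; only the names come from the patch) could look like:

#include <linux/bits.h>
#include <linux/types.h>

/* Assumed quirk bit values; only the macro names appear in the patch above */
#define SVC_I3C_QUIRK_FIFO_EMPTY	BIT(0)
#define SVC_I3C_QUIRK_FALSE_SLVSTART	BIT(1)
#define SVC_I3C_QUIRK_DAA_CORRUPT	BIT(2)

struct svc_i3c_drvdata {
	u32 quirks;
};

/* Stub carrying only the field these helpers need */
struct svc_i3c_master_sketch {
	const struct svc_i3c_drvdata *drvdata;
};

static inline bool svc_has_quirk(const struct svc_i3c_master_sketch *master, u32 quirk)
{
	return !!(master->drvdata->quirks & quirk);
}

static inline bool svc_has_daa_corrupt(const struct svc_i3c_master_sketch *master)
{
	return svc_has_quirk(master, SVC_I3C_QUIRK_DAA_CORRUPT);
}
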