Diffstat (limited to 'drivers/spi/spi.c')
-rw-r--r--  drivers/spi/spi.c  3414
1 file changed, 2369 insertions(+), 1045 deletions(-)
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 9a7def7c3237..e25df9990f82 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -4,52 +4,54 @@
// Copyright (C) 2005 David Brownell
// Copyright (C) 2008 Secret Lab Technologies Ltd.
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/init.h>
+#include <linux/acpi.h>
#include <linux/cache.h>
-#include <linux/dma-mapping.h>
+#include <linux/clk/clk-conf.h>
+#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/export.h>
+#include <linux/gpio/consumer.h>
+#include <linux/highmem.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
-#include <linux/clk/clk-conf.h>
-#include <linux/slab.h>
-#include <linux/mod_devicetable.h>
-#include <linux/spi/spi.h>
-#include <linux/spi/spi-mem.h>
-#include <linux/of_gpio.h>
-#include <linux/pm_runtime.h>
+#include <linux/percpu.h>
+#include <linux/platform_data/x86/apple.h>
#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
#include <linux/property.h>
-#include <linux/export.h>
+#include <linux/ptp_clock_kernel.h>
#include <linux/sched/rt.h>
+#include <linux/slab.h>
+#include <linux/spi/offload/types.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
#include <uapi/linux/sched/types.h>
-#include <linux/delay.h>
-#include <linux/kthread.h>
-#include <linux/ioport.h>
-#include <linux/acpi.h>
-#include <linux/highmem.h>
-#include <linux/idr.h>
-#include <linux/platform_data/x86/apple.h>
#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
+EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
+EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);
#include "internals.h"
-static DEFINE_IDR(spi_master_idr);
+static DEFINE_IDR(spi_controller_idr);
static void spidev_release(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
- /* spi controllers may cleanup for released devices */
- if (spi->controller->cleanup)
- spi->controller->cleanup(spi);
-
spi_controller_put(spi->controller);
kfree(spi->driver_override);
+ free_percpu(spi->pcpu_statistics);
kfree(spi);
}
@@ -63,7 +65,7 @@ modalias_show(struct device *dev, struct device_attribute *a, char *buf)
if (len != -ENODEV)
return len;
- return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
+ return sysfs_emit(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);
@@ -72,29 +74,11 @@ static ssize_t driver_override_store(struct device *dev,
const char *buf, size_t count)
{
struct spi_device *spi = to_spi_device(dev);
- const char *end = memchr(buf, '\n', count);
- const size_t len = end ? end - buf : count;
- const char *driver_override, *old;
-
- /* We need to keep extra room for a newline when displaying value */
- if (len >= (PAGE_SIZE - 1))
- return -EINVAL;
-
- driver_override = kstrndup(buf, len, GFP_KERNEL);
- if (!driver_override)
- return -ENOMEM;
+ int ret;
- device_lock(dev);
- old = spi->driver_override;
- if (len) {
- spi->driver_override = driver_override;
- } else {
- /* Emptry string, disable driver override */
- spi->driver_override = NULL;
- kfree(driver_override);
- }
- device_unlock(dev);
- kfree(old);
+ ret = driver_set_override(dev, &spi->driver_override, buf, count);
+ if (ret)
+ return ret;
return count;
}
@@ -106,12 +90,57 @@ static ssize_t driver_override_show(struct device *dev,
ssize_t len;
device_lock(dev);
- len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
+ len = sysfs_emit(buf, "%s\n", spi->driver_override ? : "");
device_unlock(dev);
return len;
}
static DEVICE_ATTR_RW(driver_override);
+static struct spi_statistics __percpu *spi_alloc_pcpu_stats(struct device *dev)
+{
+ struct spi_statistics __percpu *pcpu_stats;
+
+ if (dev)
+ pcpu_stats = devm_alloc_percpu(dev, struct spi_statistics);
+ else
+ pcpu_stats = alloc_percpu_gfp(struct spi_statistics, GFP_KERNEL);
+
+ if (pcpu_stats) {
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct spi_statistics *stat;
+
+ stat = per_cpu_ptr(pcpu_stats, cpu);
+ u64_stats_init(&stat->syncp);
+ }
+ }
+ return pcpu_stats;
+}
+
+static ssize_t spi_emit_pcpu_stats(struct spi_statistics __percpu *stat,
+ char *buf, size_t offset)
+{
+ u64 val = 0;
+ int i;
+
+ for_each_possible_cpu(i) {
+ const struct spi_statistics *pcpu_stats;
+ u64_stats_t *field;
+ unsigned int start;
+ u64 inc;
+
+ pcpu_stats = per_cpu_ptr(stat, i);
+ field = (void *)pcpu_stats + offset;
+ do {
+ start = u64_stats_fetch_begin(&pcpu_stats->syncp);
+ inc = u64_stats_read(field);
+ } while (u64_stats_fetch_retry(&pcpu_stats->syncp, start));
+ val += inc;
+ }
+ return sysfs_emit(buf, "%llu\n", val);
+}
+
#define SPI_STATISTICS_ATTRS(field, file) \
static ssize_t spi_controller_##field##_show(struct device *dev, \
struct device_attribute *attr, \
@@ -119,7 +148,7 @@ static ssize_t spi_controller_##field##_show(struct device *dev, \
{ \
struct spi_controller *ctlr = container_of(dev, \
struct spi_controller, dev); \
- return spi_statistics_##field##_show(&ctlr->statistics, buf); \
+ return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \
} \
static struct device_attribute dev_attr_spi_controller_##field = { \
.attr = { .name = file, .mode = 0444 }, \
@@ -130,47 +159,43 @@ static ssize_t spi_device_##field##_show(struct device *dev, \
char *buf) \
{ \
struct spi_device *spi = to_spi_device(dev); \
- return spi_statistics_##field##_show(&spi->statistics, buf); \
+ return spi_statistics_##field##_show(spi->pcpu_statistics, buf); \
} \
static struct device_attribute dev_attr_spi_device_##field = { \
.attr = { .name = file, .mode = 0444 }, \
.show = spi_device_##field##_show, \
}
-#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string) \
-static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
+#define SPI_STATISTICS_SHOW_NAME(name, file, field) \
+static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \
char *buf) \
{ \
- unsigned long flags; \
- ssize_t len; \
- spin_lock_irqsave(&stat->lock, flags); \
- len = sprintf(buf, format_string, stat->field); \
- spin_unlock_irqrestore(&stat->lock, flags); \
- return len; \
+ return spi_emit_pcpu_stats(stat, buf, \
+ offsetof(struct spi_statistics, field)); \
} \
SPI_STATISTICS_ATTRS(name, file)
-#define SPI_STATISTICS_SHOW(field, format_string) \
+#define SPI_STATISTICS_SHOW(field) \
SPI_STATISTICS_SHOW_NAME(field, __stringify(field), \
- field, format_string)
+ field)
-SPI_STATISTICS_SHOW(messages, "%lu");
-SPI_STATISTICS_SHOW(transfers, "%lu");
-SPI_STATISTICS_SHOW(errors, "%lu");
-SPI_STATISTICS_SHOW(timedout, "%lu");
+SPI_STATISTICS_SHOW(messages);
+SPI_STATISTICS_SHOW(transfers);
+SPI_STATISTICS_SHOW(errors);
+SPI_STATISTICS_SHOW(timedout);
-SPI_STATISTICS_SHOW(spi_sync, "%lu");
-SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
-SPI_STATISTICS_SHOW(spi_async, "%lu");
+SPI_STATISTICS_SHOW(spi_sync);
+SPI_STATISTICS_SHOW(spi_sync_immediate);
+SPI_STATISTICS_SHOW(spi_async);
-SPI_STATISTICS_SHOW(bytes, "%llu");
-SPI_STATISTICS_SHOW(bytes_rx, "%llu");
-SPI_STATISTICS_SHOW(bytes_tx, "%llu");
+SPI_STATISTICS_SHOW(bytes);
+SPI_STATISTICS_SHOW(bytes_rx);
+SPI_STATISTICS_SHOW(bytes_tx);
#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number) \
SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index, \
"transfer_bytes_histo_" number, \
- transfer_bytes_histo[index], "%lu")
+ transfer_bytes_histo[index])
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
@@ -189,7 +214,7 @@ SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");
-SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");
+SPI_STATISTICS_SHOW(transfers_split_maxsize);
static struct attribute *spi_dev_attrs[] = {
&dev_attr_modalias.attr,
@@ -281,47 +306,46 @@ static const struct attribute_group spi_controller_statistics_group = {
.attrs = spi_controller_statistics_attrs,
};
-static const struct attribute_group *spi_master_groups[] = {
+static const struct attribute_group *spi_controller_groups[] = {
&spi_controller_statistics_group,
NULL,
};
-void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
- struct spi_transfer *xfer,
- struct spi_controller *ctlr)
+static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
+ struct spi_transfer *xfer,
+ struct spi_message *msg)
{
- unsigned long flags;
int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
+ struct spi_statistics *stats;
if (l2len < 0)
l2len = 0;
- spin_lock_irqsave(&stats->lock, flags);
+ get_cpu();
+ stats = this_cpu_ptr(pcpu_stats);
+ u64_stats_update_begin(&stats->syncp);
- stats->transfers++;
- stats->transfer_bytes_histo[l2len]++;
+ u64_stats_inc(&stats->transfers);
+ u64_stats_inc(&stats->transfer_bytes_histo[l2len]);
- stats->bytes += xfer->len;
- if ((xfer->tx_buf) &&
- (xfer->tx_buf != ctlr->dummy_tx))
- stats->bytes_tx += xfer->len;
- if ((xfer->rx_buf) &&
- (xfer->rx_buf != ctlr->dummy_rx))
- stats->bytes_rx += xfer->len;
+ u64_stats_add(&stats->bytes, xfer->len);
+ if (spi_valid_txbuf(msg, xfer))
+ u64_stats_add(&stats->bytes_tx, xfer->len);
+ if (spi_valid_rxbuf(msg, xfer))
+ u64_stats_add(&stats->bytes_rx, xfer->len);
- spin_unlock_irqrestore(&stats->lock, flags);
+ u64_stats_update_end(&stats->syncp);
+ put_cpu();
}
-EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
-/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
+/*
+ * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
* and the sysfs version makes coldplug work too.
*/
-
-static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
- const struct spi_device *sdev)
+static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name)
{
while (id->name[0]) {
- if (!strcmp(sdev->modalias, id->name))
+ if (!strcmp(name, id->name))
return id;
id++;
}
@@ -332,11 +356,23 @@ const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);
- return spi_match_id(sdrv->id_table, sdev);
+ return spi_match_id(sdrv->id_table, sdev->modalias);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);
-static int spi_match_device(struct device *dev, struct device_driver *drv)
+const void *spi_get_device_match_data(const struct spi_device *sdev)
+{
+ const void *match;
+
+ match = device_get_match_data(&sdev->dev);
+ if (match)
+ return match;
+
+ return (const void *)spi_get_device_id(sdev)->driver_data;
+}
+EXPORT_SYMBOL_GPL(spi_get_device_match_data);
+
+static int spi_match_device(struct device *dev, const struct device_driver *drv)
{
const struct spi_device *spi = to_spi_device(dev);
const struct spi_driver *sdrv = to_spi_driver(drv);
@@ -354,12 +390,12 @@ static int spi_match_device(struct device *dev, struct device_driver *drv)
return 1;
if (sdrv->id_table)
- return !!spi_match_id(sdrv->id_table, spi);
+ return !!spi_match_id(sdrv->id_table, spi->modalias);
return strcmp(spi->modalias, drv->name) == 0;
}
-static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int spi_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct spi_device *spi = to_spi_device(dev);
int rc;
@@ -371,62 +407,66 @@ static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}
-struct bus_type spi_bus_type = {
- .name = "spi",
- .dev_groups = spi_dev_groups,
- .match = spi_match_device,
- .uevent = spi_uevent,
-};
-EXPORT_SYMBOL_GPL(spi_bus_type);
-
-
-static int spi_drv_probe(struct device *dev)
+static int spi_probe(struct device *dev)
{
const struct spi_driver *sdrv = to_spi_driver(dev->driver);
struct spi_device *spi = to_spi_device(dev);
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
int ret;
ret = of_clk_set_defaults(dev->of_node, false);
if (ret)
return ret;
- if (dev->of_node) {
+ if (is_of_node(fwnode))
spi->irq = of_irq_get(dev->of_node, 0);
- if (spi->irq == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- if (spi->irq < 0)
- spi->irq = 0;
- }
+ else if (is_acpi_device_node(fwnode) && spi->irq < 0)
+ spi->irq = acpi_dev_gpio_irq_get(to_acpi_device_node(fwnode), 0);
+ if (spi->irq == -EPROBE_DEFER)
+ return dev_err_probe(dev, spi->irq, "Failed to get irq\n");
+ if (spi->irq < 0)
+ spi->irq = 0;
- ret = dev_pm_domain_attach(dev, true);
+ ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON |
+ PD_FLAG_DETACH_POWER_OFF);
if (ret)
return ret;
- ret = sdrv->probe(spi);
- if (ret)
- dev_pm_domain_detach(dev, true);
+ if (sdrv->probe)
+ ret = sdrv->probe(spi);
return ret;
}
-static int spi_drv_remove(struct device *dev)
+static void spi_remove(struct device *dev)
{
const struct spi_driver *sdrv = to_spi_driver(dev->driver);
- int ret;
-
- ret = sdrv->remove(to_spi_device(dev));
- dev_pm_domain_detach(dev, true);
- return ret;
+ if (sdrv->remove)
+ sdrv->remove(to_spi_device(dev));
}
-static void spi_drv_shutdown(struct device *dev)
+static void spi_shutdown(struct device *dev)
{
- const struct spi_driver *sdrv = to_spi_driver(dev->driver);
+ if (dev->driver) {
+ const struct spi_driver *sdrv = to_spi_driver(dev->driver);
- sdrv->shutdown(to_spi_device(dev));
+ if (sdrv->shutdown)
+ sdrv->shutdown(to_spi_device(dev));
+ }
}
+const struct bus_type spi_bus_type = {
+ .name = "spi",
+ .dev_groups = spi_dev_groups,
+ .match = spi_match_device,
+ .uevent = spi_uevent,
+ .probe = spi_probe,
+ .remove = spi_remove,
+ .shutdown = spi_shutdown,
+};
+EXPORT_SYMBOL_GPL(spi_bus_type);
+
/**
* __spi_register_driver - register a SPI driver
* @owner: owner module of the driver to register
@@ -439,19 +479,51 @@ int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
sdrv->driver.owner = owner;
sdrv->driver.bus = &spi_bus_type;
- if (sdrv->probe)
- sdrv->driver.probe = spi_drv_probe;
- if (sdrv->remove)
- sdrv->driver.remove = spi_drv_remove;
- if (sdrv->shutdown)
- sdrv->driver.shutdown = spi_drv_shutdown;
+
+ /*
+ * For Really Good Reasons we use spi: modaliases not of:
+ * modaliases for DT so module autoloading won't work if we
+ * don't have a spi_device_id as well as a compatible string.
+ */
+ if (sdrv->driver.of_match_table) {
+ const struct of_device_id *of_id;
+
+ for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
+ of_id++) {
+ const char *of_name;
+
+ /* Strip off any vendor prefix */
+ of_name = strnchr(of_id->compatible,
+ sizeof(of_id->compatible), ',');
+ if (of_name)
+ of_name++;
+ else
+ of_name = of_id->compatible;
+
+ if (sdrv->id_table) {
+ const struct spi_device_id *spi_id;
+
+ spi_id = spi_match_id(sdrv->id_table, of_name);
+ if (spi_id)
+ continue;
+ } else {
+ if (strcmp(sdrv->driver.name, of_name) == 0)
+ continue;
+ }
+
+ pr_warn("SPI driver %s has no spi_device_id for %s\n",
+ sdrv->driver.name, of_id->compatible);
+ }
+ }
+
return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
/*-------------------------------------------------------------------------*/
-/* SPI devices should normally not be created by SPI device drivers; that
+/*
+ * SPI devices should normally not be created by SPI device drivers; that
* would make them board-specific. Similarly with SPI controller drivers.
* Device registration normally goes into like arch/.../mach.../board-YYY.c
* with other readonly (flashable) information about mainboard devices.
@@ -466,9 +538,9 @@ static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);
/*
- * Used to protect add/del opertion for board_info list and
- * spi_controller list, and their matching process
- * also used to protect object of type struct idr
+ * Used to protect add/del operation for board_info list and
+ * spi_controller list, and their matching process also used
+ * to protect object of type struct idr.
*/
static DEFINE_MUTEX(board_lock);
@@ -502,13 +574,19 @@ struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
return NULL;
}
- spi->master = spi->controller = ctlr;
+ spi->pcpu_statistics = spi_alloc_pcpu_stats(NULL);
+ if (!spi->pcpu_statistics) {
+ kfree(spi);
+ spi_controller_put(ctlr);
+ return NULL;
+ }
+
+ spi->controller = ctlr;
spi->dev.parent = &ctlr->dev;
spi->dev.bus = &spi_bus_type;
spi->dev.release = spidev_release;
- spi->cs_gpio = -ENOENT;
-
- spin_lock_init(&spi->statistics.lock);
+ spi->mode = ctlr->buswidth_override_bits;
+ spi->num_chipselect = 1;
device_initialize(&spi->dev);
return spi;
@@ -517,71 +595,142 @@ EXPORT_SYMBOL_GPL(spi_alloc_device);
static void spi_dev_set_name(struct spi_device *spi)
{
- struct acpi_device *adev = ACPI_COMPANION(&spi->dev);
+ struct device *dev = &spi->dev;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
- if (adev) {
- dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
+ if (is_acpi_device_node(fwnode)) {
+ dev_set_name(dev, "spi-%s", acpi_dev_name(to_acpi_device_node(fwnode)));
+ return;
+ }
+
+ if (is_software_node(fwnode)) {
+ dev_set_name(dev, "spi-%pfwP", fwnode);
return;
}
dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
- spi->chip_select);
+ spi_get_chipselect(spi, 0));
+}
+
+/*
+ * Zero(0) is a valid physical CS value and can be located at any
+ * logical CS in the spi->chip_select[]. If all the physical CS
+ * are initialized to 0 then It would be difficult to differentiate
+ * between a valid physical CS 0 & an unused logical CS whose physical
+ * CS can be 0. As a solution to this issue initialize all the CS to -1.
+ * Now all the unused logical CS will have -1 physical CS value & can be
+ * ignored while performing physical CS validity checks.
+ */
+#define SPI_INVALID_CS ((s8)-1)
+
+static inline int spi_dev_check_cs(struct device *dev,
+ struct spi_device *spi, u8 idx,
+ struct spi_device *new_spi, u8 new_idx)
+{
+ u8 cs, cs_new;
+ u8 idx_new;
+
+ cs = spi_get_chipselect(spi, idx);
+ for (idx_new = new_idx; idx_new < new_spi->num_chipselect; idx_new++) {
+ cs_new = spi_get_chipselect(new_spi, idx_new);
+ if (cs == cs_new) {
+ dev_err(dev, "chipselect %u already in use\n", cs_new);
+ return -EBUSY;
+ }
+ }
+ return 0;
}
static int spi_dev_check(struct device *dev, void *data)
{
struct spi_device *spi = to_spi_device(dev);
struct spi_device *new_spi = data;
+ int status, idx;
- if (spi->controller == new_spi->controller &&
- spi->chip_select == new_spi->chip_select)
- return -EBUSY;
+ if (spi->controller == new_spi->controller) {
+ for (idx = 0; idx < spi->num_chipselect; idx++) {
+ status = spi_dev_check_cs(dev, spi, idx, new_spi, 0);
+ if (status)
+ return status;
+ }
+ }
return 0;
}
-/**
- * spi_add_device - Add spi_device allocated with spi_alloc_device
- * @spi: spi_device to register
- *
- * Companion function to spi_alloc_device. Devices allocated with
- * spi_alloc_device can be added onto the spi bus with this function.
- *
- * Return: 0 on success; negative errno on failure
- */
-int spi_add_device(struct spi_device *spi)
+static void spi_cleanup(struct spi_device *spi)
+{
+ if (spi->controller->cleanup)
+ spi->controller->cleanup(spi);
+}
+
+static int __spi_add_device(struct spi_device *spi)
{
- static DEFINE_MUTEX(spi_add_lock);
struct spi_controller *ctlr = spi->controller;
struct device *dev = ctlr->dev.parent;
- int status;
+ int status, idx;
+ u8 cs;
- /* Chipselects are numbered 0..max; validate. */
- if (spi->chip_select >= ctlr->num_chipselect) {
- dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
- ctlr->num_chipselect);
- return -EINVAL;
+ if (spi->num_chipselect > SPI_DEVICE_CS_CNT_MAX) {
+ dev_err(dev, "num_cs %d > max %d\n", spi->num_chipselect,
+ SPI_DEVICE_CS_CNT_MAX);
+ return -EOVERFLOW;
}
+ for (idx = 0; idx < spi->num_chipselect; idx++) {
+ /* Chipselects are numbered 0..max; validate. */
+ cs = spi_get_chipselect(spi, idx);
+ if (cs >= ctlr->num_chipselect) {
+ dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, idx),
+ ctlr->num_chipselect);
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * Make sure that multiple logical CS doesn't map to the same physical CS.
+ * For example, spi->chip_select[0] != spi->chip_select[1] and so on.
+ */
+ if (!spi_controller_is_target(ctlr)) {
+ for (idx = 0; idx < spi->num_chipselect; idx++) {
+ status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1);
+ if (status)
+ return status;
+ }
+ }
+
+ /* Initialize unused logical CS as invalid */
+ for (idx = spi->num_chipselect; idx < SPI_DEVICE_CS_CNT_MAX; idx++)
+ spi_set_chipselect(spi, idx, SPI_INVALID_CS);
+
/* Set the bus ID string */
spi_dev_set_name(spi);
- /* We need to make sure there's no other device with this
+ /*
+ * We need to make sure there's no other device with this
* chipselect **BEFORE** we call setup(), else we'll trash
- * its configuration. Lock against concurrent add() calls.
+ * its configuration.
*/
- mutex_lock(&spi_add_lock);
-
status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
- if (status) {
- dev_err(dev, "chipselect %d already in use\n",
- spi->chip_select);
- goto done;
+ if (status)
+ return status;
+
+ /* Controller may unregister concurrently */
+ if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
+ !device_is_registered(&ctlr->dev)) {
+ return -ENODEV;
}
- if (ctlr->cs_gpios)
- spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];
+ if (ctlr->cs_gpiods) {
+ u8 cs;
- /* Drivers may modify this initial i/o setup, but will
+ for (idx = 0; idx < spi->num_chipselect; idx++) {
+ cs = spi_get_chipselect(spi, idx);
+ spi_set_csgpiod(spi, idx, ctlr->cs_gpiods[cs]);
+ }
+ }
+
+ /*
+ * Drivers may modify this initial i/o setup, but will
* normally rely on the device being setup. Devices
* using SPI_CS_HIGH can't coexist well otherwise...
*/
@@ -589,19 +738,42 @@ int spi_add_device(struct spi_device *spi)
if (status < 0) {
dev_err(dev, "can't setup %s, status %d\n",
dev_name(&spi->dev), status);
- goto done;
+ return status;
}
/* Device may be bound to an active driver when this returns */
status = device_add(&spi->dev);
- if (status < 0)
+ if (status < 0) {
dev_err(dev, "can't add %s, status %d\n",
dev_name(&spi->dev), status);
- else
+ spi_cleanup(spi);
+ } else {
dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
+ }
-done:
- mutex_unlock(&spi_add_lock);
+ return status;
+}
+
+/**
+ * spi_add_device - Add spi_device allocated with spi_alloc_device
+ * @spi: spi_device to register
+ *
+ * Companion function to spi_alloc_device. Devices allocated with
+ * spi_alloc_device can be added onto the SPI bus with this function.
+ *
+ * Return: 0 on success; negative errno on failure
+ */
+int spi_add_device(struct spi_device *spi)
+{
+ struct spi_controller *ctlr = spi->controller;
+ int status;
+
+ /* Set the bus ID string */
+ spi_dev_set_name(spi);
+
+ mutex_lock(&ctlr->add_lock);
+ status = __spi_add_device(spi);
+ mutex_unlock(&ctlr->add_lock);
return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
@@ -626,7 +798,8 @@ struct spi_device *spi_new_device(struct spi_controller *ctlr,
struct spi_device *proxy;
int status;
- /* NOTE: caller did any chip->bus_num checks necessary.
+ /*
+ * NOTE: caller did any chip->bus_num checks necessary.
*
* Also, unless we change the return value convention to use
* error-or-pointer (not NULL-or-pointer), troubleshootability
@@ -639,20 +812,26 @@ struct spi_device *spi_new_device(struct spi_controller *ctlr,
WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
- proxy->chip_select = chip->chip_select;
+ /* Use provided chip-select for proxy device */
+ spi_set_chipselect(proxy, 0, chip->chip_select);
+
proxy->max_speed_hz = chip->max_speed_hz;
proxy->mode = chip->mode;
proxy->irq = chip->irq;
- strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
+ strscpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
proxy->dev.platform_data = (void *) chip->platform_data;
proxy->controller_data = chip->controller_data;
proxy->controller_state = NULL;
+ /*
+ * By default spi->chip_select[0] will hold the physical CS number,
+ * so set bit 0 in spi->cs_index_mask.
+ */
+ proxy->cs_index_mask = BIT(0);
- if (chip->properties) {
- status = device_add_properties(&proxy->dev, chip->properties);
+ if (chip->swnode) {
+ status = device_add_software_node(&proxy->dev, chip->swnode);
if (status) {
- dev_err(&ctlr->dev,
- "failed to add properties to '%s': %d\n",
+ dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
chip->modalias, status);
goto err_dev_put;
}
@@ -660,14 +839,12 @@ struct spi_device *spi_new_device(struct spi_controller *ctlr,
status = spi_add_device(proxy);
if (status < 0)
- goto err_remove_props;
+ goto err_dev_put;
return proxy;
-err_remove_props:
- if (chip->properties)
- device_remove_properties(&proxy->dev);
err_dev_put:
+ device_remove_software_node(&proxy->dev);
spi_dev_put(proxy);
return NULL;
}
@@ -682,16 +859,22 @@ EXPORT_SYMBOL_GPL(spi_new_device);
*/
void spi_unregister_device(struct spi_device *spi)
{
+ struct fwnode_handle *fwnode;
+
if (!spi)
return;
- if (spi->dev.of_node) {
- of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
- of_node_put(spi->dev.of_node);
+ fwnode = dev_fwnode(&spi->dev);
+ if (is_of_node(fwnode)) {
+ of_node_clear_flag(to_of_node(fwnode), OF_POPULATED);
+ of_node_put(to_of_node(fwnode));
+ } else if (is_acpi_device_node(fwnode)) {
+ acpi_device_clear_enumerated(to_acpi_device_node(fwnode));
}
- if (ACPI_COMPANION(&spi->dev))
- acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
- device_unregister(&spi->dev);
+ device_remove_software_node(&spi->dev);
+ device_del(&spi->dev);
+ spi_cleanup(spi);
+ put_device(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);
@@ -727,7 +910,6 @@ static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
*
* The board info passed can safely be __initdata ... but be careful of
* any embedded pointers (platform_data, etc), they're copied as-is.
- * Device properties are deep-copied though.
*
* Return: zero on success, else a negative error code.
*/
@@ -747,12 +929,6 @@ int spi_register_board_info(struct spi_board_info const *info, unsigned n)
struct spi_controller *ctlr;
memcpy(&bi->board_info, info, sizeof(*info));
- if (info->properties) {
- bi->board_info.properties =
- property_entries_dup(info->properties);
- if (IS_ERR(bi->board_info.properties))
- return PTR_ERR(bi->board_info.properties);
- }
mutex_lock(&board_lock);
list_add_tail(&bi->list, &board_list);
@@ -767,28 +943,184 @@ int spi_register_board_info(struct spi_board_info const *info, unsigned n)
/*-------------------------------------------------------------------------*/
-static void spi_set_cs(struct spi_device *spi, bool enable)
+/* Core methods for SPI resource management */
+
+/**
+ * spi_res_alloc - allocate a spi resource that is life-cycle managed
+ * during the processing of a spi_message while using
+ * spi_transfer_one
+ * @spi: the SPI device for which we allocate memory
+ * @release: the release code to execute for this resource
+ * @size: size to alloc and return
+ * @gfp: GFP allocation flags
+ *
+ * Return: the pointer to the allocated data
+ *
+ * This may get enhanced in the future to allocate from a memory pool
+ * of the @spi_device or @spi_controller to avoid repeated allocations.
+ */
+static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
+ size_t size, gfp_t gfp)
+{
+ struct spi_res *sres;
+
+ sres = kzalloc(sizeof(*sres) + size, gfp);
+ if (!sres)
+ return NULL;
+
+ INIT_LIST_HEAD(&sres->entry);
+ sres->release = release;
+
+ return sres->data;
+}
+
+/**
+ * spi_res_free - free an SPI resource
+ * @res: pointer to the custom data of a resource
+ */
+static void spi_res_free(void *res)
{
- if (spi->mode & SPI_CS_HIGH)
+ struct spi_res *sres = container_of(res, struct spi_res, data);
+
+ WARN_ON(!list_empty(&sres->entry));
+ kfree(sres);
+}
+
+/**
+ * spi_res_add - add a spi_res to the spi_message
+ * @message: the SPI message
+ * @res: the spi_resource
+ */
+static void spi_res_add(struct spi_message *message, void *res)
+{
+ struct spi_res *sres = container_of(res, struct spi_res, data);
+
+ WARN_ON(!list_empty(&sres->entry));
+ list_add_tail(&sres->entry, &message->resources);
+}
+
+/**
+ * spi_res_release - release all SPI resources for this message
+ * @ctlr: the @spi_controller
+ * @message: the @spi_message
+ */
+static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
+{
+ struct spi_res *res, *tmp;
+
+ list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
+ if (res->release)
+ res->release(ctlr, message, res->data);
+
+ list_del(&res->entry);
+
+ kfree(res);
+ }
+}
+
+/*-------------------------------------------------------------------------*/
+#define spi_for_each_valid_cs(spi, idx) \
+ for (idx = 0; idx < spi->num_chipselect; idx++) \
+ if (!(spi->cs_index_mask & BIT(idx))) {} else
+
+static inline bool spi_is_last_cs(struct spi_device *spi)
+{
+ u8 idx;
+ bool last = false;
+
+ spi_for_each_valid_cs(spi, idx) {
+ if (spi->controller->last_cs[idx] == spi_get_chipselect(spi, idx))
+ last = true;
+ }
+ return last;
+}
+
+static void spi_toggle_csgpiod(struct spi_device *spi, u8 idx, bool enable, bool activate)
+{
+ /*
+ * Historically ACPI has no means of the GPIO polarity and
+ * thus the SPISerialBus() resource defines it on the per-chip
+ * basis. In order to avoid a chain of negations, the GPIO
+ * polarity is considered being Active High. Even for the cases
+ * when _DSD() is involved (in the updated versions of ACPI)
+ * the GPIO CS polarity must be defined Active High to avoid
+ * ambiguity. That's why we use enable, that takes SPI_CS_HIGH
+ * into account.
+ */
+ if (is_acpi_device_node(dev_fwnode(&spi->dev)))
+ gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), !enable);
+ else
+ /* Polarity handled by GPIO library */
+ gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), activate);
+
+ if (activate)
+ spi_delay_exec(&spi->cs_setup, NULL);
+ else
+ spi_delay_exec(&spi->cs_inactive, NULL);
+}
+
+static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
+{
+ bool activate = enable;
+ u8 idx;
+
+ /*
+ * Avoid calling into the driver (or doing delays) if the chip select
+ * isn't actually changing from the last time this was called.
+ */
+ if (!force && (enable == spi_is_last_cs(spi)) &&
+ (spi->controller->last_cs_index_mask == spi->cs_index_mask) &&
+ (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
+ return;
+
+ trace_spi_set_cs(spi, activate);
+
+ spi->controller->last_cs_index_mask = spi->cs_index_mask;
+ for (idx = 0; idx < SPI_DEVICE_CS_CNT_MAX; idx++) {
+ if (enable && idx < spi->num_chipselect)
+ spi->controller->last_cs[idx] = spi_get_chipselect(spi, 0);
+ else
+ spi->controller->last_cs[idx] = SPI_INVALID_CS;
+ }
+
+ spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
+ if (spi->controller->last_cs_mode_high)
enable = !enable;
- if (gpio_is_valid(spi->cs_gpio)) {
- /* Honour the SPI_NO_CS flag */
- if (!(spi->mode & SPI_NO_CS))
- gpio_set_value(spi->cs_gpio, !enable);
- /* Some SPI masters need both GPIO CS & slave_select */
- if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
+ /*
+ * Handle chip select delays for GPIO based CS or controllers without
+ * programmable chip select timing.
+ */
+ if ((spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) && !activate)
+ spi_delay_exec(&spi->cs_hold, NULL);
+
+ if (spi_is_csgpiod(spi)) {
+ if (!(spi->mode & SPI_NO_CS)) {
+ spi_for_each_valid_cs(spi, idx) {
+ if (spi_get_csgpiod(spi, idx))
+ spi_toggle_csgpiod(spi, idx, enable, activate);
+ }
+ }
+ /* Some SPI controllers need both GPIO CS & ->set_cs() */
+ if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) &&
spi->controller->set_cs)
spi->controller->set_cs(spi, !enable);
} else if (spi->controller->set_cs) {
spi->controller->set_cs(spi, !enable);
}
+
+ if (spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) {
+ if (activate)
+ spi_delay_exec(&spi->cs_setup, NULL);
+ else
+ spi_delay_exec(&spi->cs_inactive, NULL);
+ }
}
#ifdef CONFIG_HAS_DMA
-int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
- struct sg_table *sgt, void *buf, size_t len,
- enum dma_data_direction dir)
+static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev,
+ struct sg_table *sgt, void *buf, size_t len,
+ enum dma_data_direction dir, unsigned long attrs)
{
const bool vmalloced_buf = is_vmalloc_addr(buf);
unsigned int max_seg_size = dma_get_max_seg_size(dev);
@@ -808,10 +1140,10 @@ int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
int i, ret;
if (vmalloced_buf || kmap_buf) {
- desc_len = min_t(int, max_seg_size, PAGE_SIZE);
+ desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
} else if (virt_addr_valid(buf)) {
- desc_len = min_t(int, max_seg_size, ctlr->max_dma_len);
+ desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
sgs = DIV_ROUND_UP(len, desc_len);
} else {
return -EINVAL;
@@ -854,26 +1186,37 @@ int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
sg = sg_next(sg);
}
- ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
- if (!ret)
- ret = -ENOMEM;
+ ret = dma_map_sgtable(dev, sgt, dir, attrs);
if (ret < 0) {
sg_free_table(sgt);
return ret;
}
- sgt->nents = ret;
-
return 0;
}
+int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
+ struct sg_table *sgt, void *buf, size_t len,
+ enum dma_data_direction dir)
+{
+ return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0);
+}
+
+static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
+ struct device *dev, struct sg_table *sgt,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ dma_unmap_sgtable(dev, sgt, dir, attrs);
+ sg_free_table(sgt);
+ sgt->orig_nents = 0;
+ sgt->nents = 0;
+}
+
void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
struct sg_table *sgt, enum dma_data_direction dir)
{
- if (sgt->orig_nents) {
- dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
- sg_free_table(sgt);
- }
+ spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
}
static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
@@ -887,70 +1230,108 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
if (ctlr->dma_tx)
tx_dev = ctlr->dma_tx->device->dev;
+ else if (ctlr->dma_map_dev)
+ tx_dev = ctlr->dma_map_dev;
else
tx_dev = ctlr->dev.parent;
if (ctlr->dma_rx)
rx_dev = ctlr->dma_rx->device->dev;
+ else if (ctlr->dma_map_dev)
+ rx_dev = ctlr->dma_map_dev;
else
rx_dev = ctlr->dev.parent;
+ ret = -ENOMSG;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ /* The sync is done before each transfer. */
+ unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
+
if (!ctlr->can_dma(ctlr, msg->spi, xfer))
continue;
if (xfer->tx_buf != NULL) {
- ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
- (void *)xfer->tx_buf, xfer->len,
- DMA_TO_DEVICE);
+ ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
+ (void *)xfer->tx_buf,
+ xfer->len, DMA_TO_DEVICE,
+ attrs);
if (ret != 0)
return ret;
+
+ xfer->tx_sg_mapped = true;
}
if (xfer->rx_buf != NULL) {
- ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
- xfer->rx_buf, xfer->len,
- DMA_FROM_DEVICE);
+ ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
+ xfer->rx_buf, xfer->len,
+ DMA_FROM_DEVICE, attrs);
if (ret != 0) {
- spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
- DMA_TO_DEVICE);
+ spi_unmap_buf_attrs(ctlr, tx_dev,
+ &xfer->tx_sg, DMA_TO_DEVICE,
+ attrs);
+
return ret;
}
+
+ xfer->rx_sg_mapped = true;
}
}
+ /* No transfer has been mapped, bail out with success */
+ if (ret)
+ return 0;
- ctlr->cur_msg_mapped = true;
+ ctlr->cur_rx_dma_dev = rx_dev;
+ ctlr->cur_tx_dma_dev = tx_dev;
return 0;
}
static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
+ struct device *rx_dev = ctlr->cur_rx_dma_dev;
+ struct device *tx_dev = ctlr->cur_tx_dma_dev;
struct spi_transfer *xfer;
- struct device *tx_dev, *rx_dev;
- if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
- return 0;
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ /* The sync has already been done after each transfer. */
+ unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
+
+ if (xfer->rx_sg_mapped)
+ spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
+ DMA_FROM_DEVICE, attrs);
+ xfer->rx_sg_mapped = false;
+
+ if (xfer->tx_sg_mapped)
+ spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
+ DMA_TO_DEVICE, attrs);
+ xfer->tx_sg_mapped = false;
+ }
- if (ctlr->dma_tx)
- tx_dev = ctlr->dma_tx->device->dev;
- else
- tx_dev = ctlr->dev.parent;
+ return 0;
+}
- if (ctlr->dma_rx)
- rx_dev = ctlr->dma_rx->device->dev;
- else
- rx_dev = ctlr->dev.parent;
+static void spi_dma_sync_for_device(struct spi_controller *ctlr,
+ struct spi_transfer *xfer)
+{
+ struct device *rx_dev = ctlr->cur_rx_dma_dev;
+ struct device *tx_dev = ctlr->cur_tx_dma_dev;
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- if (!ctlr->can_dma(ctlr, msg->spi, xfer))
- continue;
+ if (xfer->tx_sg_mapped)
+ dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+ if (xfer->rx_sg_mapped)
+ dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+}
- spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
- spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
- }
+static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
+ struct spi_transfer *xfer)
+{
+ struct device *rx_dev = ctlr->cur_rx_dma_dev;
+ struct device *tx_dev = ctlr->cur_tx_dma_dev;
- return 0;
+ if (xfer->rx_sg_mapped)
+ dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+ if (xfer->tx_sg_mapped)
+ dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
@@ -964,6 +1345,16 @@ static inline int __spi_unmap_msg(struct spi_controller *ctlr,
{
return 0;
}
+
+static void spi_dma_sync_for_device(struct spi_controller *ctrl,
+ struct spi_transfer *xfer)
+{
+}
+
+static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
+ struct spi_transfer *xfer)
+{
+}
#endif /* !CONFIG_HAS_DMA */
static inline int spi_unmap_msg(struct spi_controller *ctlr,
@@ -991,7 +1382,8 @@ static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
void *tmp;
unsigned int max_tx, max_rx;
- if (ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) {
+ if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
+ && !(msg->spi->mode & SPI_3WIRE)) {
max_tx = 0;
max_rx = 0;
@@ -1006,11 +1398,10 @@ static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
if (max_tx) {
tmp = krealloc(ctlr->dummy_tx, max_tx,
- GFP_KERNEL | GFP_DMA);
+ GFP_KERNEL | GFP_DMA | __GFP_ZERO);
if (!tmp)
return -ENOMEM;
ctlr->dummy_tx = tmp;
- memset(tmp, 0, max_tx);
}
if (max_rx) {
@@ -1024,6 +1415,8 @@ static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
if (max_tx || max_rx) {
list_for_each_entry(xfer, &msg->transfers,
transfer_list) {
+ if (!xfer->len)
+ continue;
if (!xfer->tx_buf)
xfer->tx_buf = ctlr->dummy_tx;
if (!xfer->rx_buf)
@@ -1039,20 +1432,34 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
struct spi_message *msg,
struct spi_transfer *xfer)
{
- struct spi_statistics *statm = &ctlr->statistics;
- struct spi_statistics *stats = &msg->spi->statistics;
- unsigned long long ms = 1;
+ struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
+ struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
+ u32 speed_hz = xfer->speed_hz;
+ unsigned long long ms;
- if (spi_controller_is_slave(ctlr)) {
+ if (spi_controller_is_target(ctlr)) {
if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
return -EINTR;
}
} else {
- ms = 8LL * 1000LL * xfer->len;
- do_div(ms, xfer->speed_hz);
- ms += ms + 200; /* some tolerance */
+ if (!speed_hz)
+ speed_hz = 100000;
+ /*
+ * For each byte we wait for 8 cycles of the SPI clock.
+ * Since speed is defined in Hz and we want milliseconds,
+ * use respective multiplier, but before the division,
+ * otherwise we may get 0 for short transfers.
+ */
+ ms = 8LL * MSEC_PER_SEC * xfer->len;
+ do_div(ms, speed_hz);
+
+ /*
+ * Increase it twice and add 200 ms tolerance, use
+ * predefined maximum in case of overflow.
+ */
+ ms += ms + 200;
if (ms > UINT_MAX)
ms = UINT_MAX;
@@ -1066,10 +1473,115 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
"SPI transfer timed out\n");
return -ETIMEDOUT;
}
+
+ if (xfer->error & SPI_TRANS_FAIL_IO)
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void _spi_transfer_delay_ns(u32 ns)
+{
+ if (!ns)
+ return;
+ if (ns <= NSEC_PER_USEC) {
+ ndelay(ns);
+ } else {
+ u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
+
+ fsleep(us);
}
+}
+
+int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
+{
+ u32 delay = _delay->value;
+ u32 unit = _delay->unit;
+ u32 hz;
+
+ if (!delay)
+ return 0;
+
+ switch (unit) {
+ case SPI_DELAY_UNIT_USECS:
+ delay *= NSEC_PER_USEC;
+ break;
+ case SPI_DELAY_UNIT_NSECS:
+ /* Nothing to do here */
+ break;
+ case SPI_DELAY_UNIT_SCK:
+ /* Clock cycles need to be obtained from spi_transfer */
+ if (!xfer)
+ return -EINVAL;
+ /*
+ * If there is unknown effective speed, approximate it
+ * by underestimating with half of the requested Hz.
+ */
+ hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
+ if (!hz)
+ return -EINVAL;
+
+ /* Convert delay to nanoseconds */
+ delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return delay;
+}
+EXPORT_SYMBOL_GPL(spi_delay_to_ns);
+
+int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
+{
+ int delay;
+
+ might_sleep();
+
+ if (!_delay)
+ return -EINVAL;
+
+ delay = spi_delay_to_ns(_delay, xfer);
+ if (delay < 0)
+ return delay;
+
+ _spi_transfer_delay_ns(delay);
return 0;
}
+EXPORT_SYMBOL_GPL(spi_delay_exec);
+
+static void _spi_transfer_cs_change_delay(struct spi_message *msg,
+ struct spi_transfer *xfer)
+{
+ u32 default_delay_ns = 10 * NSEC_PER_USEC;
+ u32 delay = xfer->cs_change_delay.value;
+ u32 unit = xfer->cs_change_delay.unit;
+ int ret;
+
+ /* Return early on "fast" mode - for everything but USECS */
+ if (!delay) {
+ if (unit == SPI_DELAY_UNIT_USECS)
+ _spi_transfer_delay_ns(default_delay_ns);
+ return;
+ }
+
+ ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
+ if (ret) {
+ dev_err_once(&msg->spi->dev,
+ "Use of unsupported delay unit %i, using default of %luus\n",
+ unit, default_delay_ns / NSEC_PER_USEC);
+ _spi_transfer_delay_ns(default_delay_ns);
+ }
+}
+
+void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
+ struct spi_transfer *xfer)
+{
+ _spi_transfer_cs_change_delay(msg, xfer);
+}
+EXPORT_SYMBOL_GPL(spi_transfer_cs_change_delay_exec);
/*
* spi_transfer_one_message - Default implementation of transfer_one_message()
@@ -1084,10 +1596,11 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
struct spi_transfer *xfer;
bool keep_cs = false;
int ret = 0;
- struct spi_statistics *statm = &ctlr->statistics;
- struct spi_statistics *stats = &msg->spi->statistics;
+ struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
+ struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
- spi_set_cs(msg->spi, true);
+ xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
+ spi_set_cs(msg->spi, !xfer->cs_off, false);
SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
@@ -1095,14 +1608,31 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
trace_spi_transfer_start(msg, xfer);
- spi_statistics_add_transfer_stats(statm, xfer, ctlr);
- spi_statistics_add_transfer_stats(stats, xfer, ctlr);
+ spi_statistics_add_transfer_stats(statm, xfer, msg);
+ spi_statistics_add_transfer_stats(stats, xfer, msg);
- if (xfer->tx_buf || xfer->rx_buf) {
+ if (!ctlr->ptp_sts_supported) {
+ xfer->ptp_sts_word_pre = 0;
+ ptp_read_system_prets(xfer->ptp_sts);
+ }
+
+ if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
reinit_completion(&ctlr->xfer_completion);
+fallback_pio:
+ spi_dma_sync_for_device(ctlr, xfer);
ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
if (ret < 0) {
+ spi_dma_sync_for_cpu(ctlr, xfer);
+
+ if ((xfer->tx_sg_mapped || xfer->rx_sg_mapped) &&
+ (xfer->error & SPI_TRANS_FAIL_NO_START)) {
+ __spi_unmap_msg(ctlr, msg);
+ ctlr->fallback = true;
+ xfer->error &= ~SPI_TRANS_FAIL_NO_START;
+ goto fallback_pio;
+ }
+
SPI_STATISTICS_INCREMENT_FIELD(statm,
errors);
SPI_STATISTICS_INCREMENT_FIELD(stats,
@@ -1117,6 +1647,8 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
if (ret < 0)
msg->status = ret;
}
+
+ spi_dma_sync_for_cpu(ctlr, xfer);
} else {
if (xfer->len)
dev_err(&msg->spi->dev,
@@ -1124,29 +1656,32 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
xfer->len);
}
+ if (!ctlr->ptp_sts_supported) {
+ ptp_read_system_postts(xfer->ptp_sts);
+ xfer->ptp_sts_word_post = xfer->len;
+ }
+
trace_spi_transfer_stop(msg, xfer);
if (msg->status != -EINPROGRESS)
goto out;
- if (xfer->delay_usecs) {
- u16 us = xfer->delay_usecs;
-
- if (us <= 10)
- udelay(us);
- else
- usleep_range(us, us + DIV_ROUND_UP(us, 10));
- }
+ spi_transfer_delay_exec(xfer);
if (xfer->cs_change) {
if (list_is_last(&xfer->transfer_list,
&msg->transfers)) {
keep_cs = true;
} else {
- spi_set_cs(msg->spi, false);
- udelay(10);
- spi_set_cs(msg->spi, true);
+ if (!xfer->cs_off)
+ spi_set_cs(msg->spi, false, false);
+ _spi_transfer_cs_change_delay(msg, xfer);
+ if (!list_next_entry(xfer, transfer_list)->cs_off)
+ spi_set_cs(msg->spi, true, false);
}
+ } else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
+ xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
+ spi_set_cs(msg->spi, xfer->cs_off, false);
}
msg->actual_length += xfer->len;
@@ -1154,7 +1689,7 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
out:
if (ret != 0 || !keep_cs)
- spi_set_cs(msg->spi, false);
+ spi_set_cs(msg->spi, false, false);
if (msg->status == -EINPROGRESS)
msg->status = ret;
@@ -1162,8 +1697,6 @@ out:
if (msg->status && ctlr->handle_err)
ctlr->handle_err(ctlr, msg);
- spi_res_release(ctlr, msg);
-
spi_finalize_current_message(ctlr);
return ret;
@@ -1183,12 +1716,120 @@ void spi_finalize_current_transfer(struct spi_controller *ctlr)
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
+static void spi_idle_runtime_pm(struct spi_controller *ctlr)
+{
+ if (ctlr->auto_runtime_pm) {
+ pm_runtime_put_autosuspend(ctlr->dev.parent);
+ }
+}
+
+static int __spi_pump_transfer_message(struct spi_controller *ctlr,
+ struct spi_message *msg, bool was_busy)
+{
+ struct spi_transfer *xfer;
+ int ret;
+
+ if (!was_busy && ctlr->auto_runtime_pm) {
+ ret = pm_runtime_get_sync(ctlr->dev.parent);
+ if (ret < 0) {
+ pm_runtime_put_noidle(ctlr->dev.parent);
+ dev_err(&ctlr->dev, "Failed to power device: %d\n",
+ ret);
+
+ msg->status = ret;
+ spi_finalize_current_message(ctlr);
+
+ return ret;
+ }
+ }
+
+ if (!was_busy)
+ trace_spi_controller_busy(ctlr);
+
+ if (!was_busy && ctlr->prepare_transfer_hardware) {
+ ret = ctlr->prepare_transfer_hardware(ctlr);
+ if (ret) {
+ dev_err(&ctlr->dev,
+ "failed to prepare transfer hardware: %d\n",
+ ret);
+
+ if (ctlr->auto_runtime_pm)
+ pm_runtime_put(ctlr->dev.parent);
+
+ msg->status = ret;
+ spi_finalize_current_message(ctlr);
+
+ return ret;
+ }
+ }
+
+ trace_spi_message_start(msg);
+
+ if (ctlr->prepare_message) {
+ ret = ctlr->prepare_message(ctlr, msg);
+ if (ret) {
+ dev_err(&ctlr->dev, "failed to prepare message: %d\n",
+ ret);
+ msg->status = ret;
+ spi_finalize_current_message(ctlr);
+ return ret;
+ }
+ msg->prepared = true;
+ }
+
+ ret = spi_map_msg(ctlr, msg);
+ if (ret) {
+ msg->status = ret;
+ spi_finalize_current_message(ctlr);
+ return ret;
+ }
+
+ if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ xfer->ptp_sts_word_pre = 0;
+ ptp_read_system_prets(xfer->ptp_sts);
+ }
+ }
+
+ /*
+ * Drivers implementation of transfer_one_message() must arrange for
+ * spi_finalize_current_message() to get called. Most drivers will do
+ * this in the calling context, but some don't. For those cases, a
+ * completion is used to guarantee that this function does not return
+ * until spi_finalize_current_message() is done accessing
+ * ctlr->cur_msg.
+ * Use of the following two flags enable to opportunistically skip the
+ * use of the completion since its use involves expensive spin locks.
+ * In case of a race with the context that calls
+ * spi_finalize_current_message() the completion will always be used,
+ * due to strict ordering of these flags using barriers.
+ */
+ WRITE_ONCE(ctlr->cur_msg_incomplete, true);
+ WRITE_ONCE(ctlr->cur_msg_need_completion, false);
+ reinit_completion(&ctlr->cur_msg_completion);
+ smp_wmb(); /* Make these available to spi_finalize_current_message() */
+
+ ret = ctlr->transfer_one_message(ctlr, msg);
+ if (ret) {
+ dev_err(&ctlr->dev,
+ "failed to transfer one message from queue\n");
+ return ret;
+ }
+
+ WRITE_ONCE(ctlr->cur_msg_need_completion, true);
+ smp_mb(); /* See spi_finalize_current_message()... */
+ if (READ_ONCE(ctlr->cur_msg_incomplete))
+ wait_for_completion(&ctlr->cur_msg_completion);
+
+ return 0;
+}
+
/**
- * __spi_pump_messages - function which processes spi message queue
+ * __spi_pump_messages - function which processes SPI message queue
* @ctlr: controller to process queue for
* @in_kthread: true if we are in the context of the message pump thread
*
- * This function checks if there is any spi message in the queue that
+ * This function checks if there is any SPI message in the queue that
* needs processing and if so call out to the driver to initialize hardware
* and transfer each message.
*
@@ -1198,43 +1839,42 @@ EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
*/
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
- unsigned long flags;
+ struct spi_message *msg;
bool was_busy = false;
+ unsigned long flags;
int ret;
+ /* Take the I/O mutex */
+ mutex_lock(&ctlr->io_mutex);
+
/* Lock queue */
spin_lock_irqsave(&ctlr->queue_lock, flags);
/* Make sure we are not already running a message */
- if (ctlr->cur_msg) {
- spin_unlock_irqrestore(&ctlr->queue_lock, flags);
- return;
- }
-
- /* If another context is idling the device then defer */
- if (ctlr->idling) {
- kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
- spin_unlock_irqrestore(&ctlr->queue_lock, flags);
- return;
- }
+ if (ctlr->cur_msg)
+ goto out_unlock;
/* Check if the queue is idle */
if (list_empty(&ctlr->queue) || !ctlr->running) {
- if (!ctlr->busy) {
- spin_unlock_irqrestore(&ctlr->queue_lock, flags);
- return;
- }
+ if (!ctlr->busy)
+ goto out_unlock;
- /* Only do teardown in the thread */
+ /* Defer any non-atomic teardown to the thread */
if (!in_kthread) {
- kthread_queue_work(&ctlr->kworker,
- &ctlr->pump_messages);
- spin_unlock_irqrestore(&ctlr->queue_lock, flags);
- return;
+ if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
+ !ctlr->unprepare_transfer_hardware) {
+ spi_idle_runtime_pm(ctlr);
+ ctlr->busy = false;
+ ctlr->queue_empty = true;
+ trace_spi_controller_idle(ctlr);
+ } else {
+ kthread_queue_work(ctlr->kworker,
+ &ctlr->pump_messages);
+ }
+ goto out_unlock;
}
ctlr->busy = false;
- ctlr->idling = true;
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
kfree(ctlr->dummy_rx);
@@ -1245,92 +1885,41 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
ctlr->unprepare_transfer_hardware(ctlr))
dev_err(&ctlr->dev,
"failed to unprepare transfer hardware\n");
- if (ctlr->auto_runtime_pm) {
- pm_runtime_mark_last_busy(ctlr->dev.parent);
- pm_runtime_put_autosuspend(ctlr->dev.parent);
- }
+ spi_idle_runtime_pm(ctlr);
trace_spi_controller_idle(ctlr);
spin_lock_irqsave(&ctlr->queue_lock, flags);
- ctlr->idling = false;
- spin_unlock_irqrestore(&ctlr->queue_lock, flags);
- return;
+ ctlr->queue_empty = true;
+ goto out_unlock;
}
/* Extract head of queue */
- ctlr->cur_msg =
- list_first_entry(&ctlr->queue, struct spi_message, queue);
+ msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
+ ctlr->cur_msg = msg;
- list_del_init(&ctlr->cur_msg->queue);
+ list_del_init(&msg->queue);
if (ctlr->busy)
was_busy = true;
else
ctlr->busy = true;
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
- mutex_lock(&ctlr->io_mutex);
+ ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
+ kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
- if (!was_busy && ctlr->auto_runtime_pm) {
- ret = pm_runtime_get_sync(ctlr->dev.parent);
- if (ret < 0) {
- pm_runtime_put_noidle(ctlr->dev.parent);
- dev_err(&ctlr->dev, "Failed to power device: %d\n",
- ret);
- mutex_unlock(&ctlr->io_mutex);
- return;
- }
- }
-
- if (!was_busy)
- trace_spi_controller_busy(ctlr);
-
- if (!was_busy && ctlr->prepare_transfer_hardware) {
- ret = ctlr->prepare_transfer_hardware(ctlr);
- if (ret) {
- dev_err(&ctlr->dev,
- "failed to prepare transfer hardware\n");
-
- if (ctlr->auto_runtime_pm)
- pm_runtime_put(ctlr->dev.parent);
- mutex_unlock(&ctlr->io_mutex);
- return;
- }
- }
-
- trace_spi_message_start(ctlr->cur_msg);
-
- if (ctlr->prepare_message) {
- ret = ctlr->prepare_message(ctlr, ctlr->cur_msg);
- if (ret) {
- dev_err(&ctlr->dev, "failed to prepare message: %d\n",
- ret);
- ctlr->cur_msg->status = ret;
- spi_finalize_current_message(ctlr);
- goto out;
- }
- ctlr->cur_msg_prepared = true;
- }
-
- ret = spi_map_msg(ctlr, ctlr->cur_msg);
- if (ret) {
- ctlr->cur_msg->status = ret;
- spi_finalize_current_message(ctlr);
- goto out;
- }
-
- ret = ctlr->transfer_one_message(ctlr, ctlr->cur_msg);
- if (ret) {
- dev_err(&ctlr->dev,
- "failed to transfer one message from queue\n");
- goto out;
- }
+ ctlr->cur_msg = NULL;
+ ctlr->fallback = false;
-out:
mutex_unlock(&ctlr->io_mutex);
/* Prod the scheduler in case transfer_one() was busy waiting */
if (!ret)
cond_resched();
+ return;
+
+out_unlock:
+ spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+ mutex_unlock(&ctlr->io_mutex);
}
/**
@@ -1345,20 +1934,124 @@ static void spi_pump_messages(struct kthread_work *work)
__spi_pump_messages(ctlr, true);
}
-static int spi_init_queue(struct spi_controller *ctlr)
+/**
+ * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp
+ * @ctlr: Pointer to the spi_controller structure of the driver
+ * @xfer: Pointer to the transfer being timestamped
+ * @progress: How many words (not bytes) have been transferred so far
+ * @irqs_off: If true, will disable IRQs and preemption for the duration of the
+ * transfer, for less jitter in time measurement. Only compatible
+ * with PIO drivers. If true, must follow up with
+ * spi_take_timestamp_post or otherwise system will crash.
+ * WARNING: for fully predictable results, the CPU frequency must
+ * also be under control (governor).
+ *
+ * This is a helper for drivers to collect the beginning of the TX timestamp
+ * for the requested byte from the SPI transfer. The frequency with which this
+ * function must be called (once per word, once for the whole transfer, once
+ * per batch of words etc) is arbitrary as long as the @tx buffer offset is
+ * greater than or equal to the requested byte at the time of the call. The
+ * timestamp is only taken once, at the first such call. It is assumed that
+ * the driver advances its @tx buffer pointer monotonically.
+ */
+void spi_take_timestamp_pre(struct spi_controller *ctlr,
+ struct spi_transfer *xfer,
+ size_t progress, bool irqs_off)
+{
+ if (!xfer->ptp_sts)
+ return;
+
+ if (xfer->timestamped)
+ return;
+
+ if (progress > xfer->ptp_sts_word_pre)
+ return;
+
+ /* Capture the resolution of the timestamp */
+ xfer->ptp_sts_word_pre = progress;
+
+ if (irqs_off) {
+ local_irq_save(ctlr->irq_flags);
+ preempt_disable();
+ }
+
+ ptp_read_system_prets(xfer->ptp_sts);
+}
+EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
+
+/**
+ * spi_take_timestamp_post - helper to collect the end of the TX timestamp
+ * @ctlr: Pointer to the spi_controller structure of the driver
+ * @xfer: Pointer to the transfer being timestamped
+ * @progress: How many words (not bytes) have been transferred so far
+ * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
+ *
+ * This is a helper for drivers to collect the end of the TX timestamp for
+ * the requested byte from the SPI transfer. Can be called with an arbitrary
+ * frequency: only the first call where @tx exceeds or is equal to the
+ * requested word will be timestamped.
+ */
+void spi_take_timestamp_post(struct spi_controller *ctlr,
+ struct spi_transfer *xfer,
+ size_t progress, bool irqs_off)
{
- struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+ if (!xfer->ptp_sts)
+ return;
+
+ if (xfer->timestamped)
+ return;
+
+ if (progress < xfer->ptp_sts_word_post)
+ return;
+
+ ptp_read_system_postts(xfer->ptp_sts);
+
+ if (irqs_off) {
+ local_irq_restore(ctlr->irq_flags);
+ preempt_enable();
+ }
+
+ /* Capture the resolution of the timestamp */
+ xfer->ptp_sts_word_post = progress;
+ xfer->timestamped = 1;
+}
+EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
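A usage sketch of how the pre/post pair brackets the words a PIO driver pushes to its FIFO; foo_fifo_write() and all other foo_* names are assumptions, not taken from this file:

static int foo_spi_transfer_one(struct spi_controller *ctlr,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	const u8 *tx = xfer->tx_buf;
	size_t i;

	for (i = 0; i < xfer->len; i++) {
		/* Arms the PTP snapshot once @progress reaches ptp_sts_word_pre */
		spi_take_timestamp_pre(ctlr, xfer, i, false);
		foo_fifo_write(ctlr, tx ? tx[i] : 0);	/* assumed FIFO helper */
		/* Closes the snapshot once @progress reaches ptp_sts_word_post */
		spi_take_timestamp_post(ctlr, xfer, i, false);
	}
	return 0;
}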
+
+/**
+ * spi_set_thread_rt - set the controller to pump at realtime priority
+ * @ctlr: controller to boost priority of
+ *
+ * This can be called because the controller requested realtime priority
+ * (by setting the ->rt value before calling spi_register_controller()) or
+ * because a device on the bus said that its transfers needed realtime
+ * priority.
+ *
+ * NOTE: at the moment if any device on a bus says it needs realtime then
+ * the thread will be at realtime priority for all transfers on that
+ * controller. If this eventually becomes a problem we may see if we can
+ * find a way to boost the priority only temporarily during relevant
+ * transfers.
+ */
+static void spi_set_thread_rt(struct spi_controller *ctlr)
+{
+ dev_info(&ctlr->dev,
+ "will run message pump with realtime priority\n");
+ sched_set_fifo(ctlr->kworker->task);
+}
+
+static int spi_init_queue(struct spi_controller *ctlr)
+{
ctlr->running = false;
ctlr->busy = false;
+ ctlr->queue_empty = true;
- kthread_init_worker(&ctlr->kworker);
- ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker,
- "%s", dev_name(&ctlr->dev));
- if (IS_ERR(ctlr->kworker_task)) {
- dev_err(&ctlr->dev, "failed to create message pump task\n");
- return PTR_ERR(ctlr->kworker_task);
+ ctlr->kworker = kthread_run_worker(0, dev_name(&ctlr->dev));
+ if (IS_ERR(ctlr->kworker)) {
+ dev_err(&ctlr->dev, "failed to create message pump kworker\n");
+ return PTR_ERR(ctlr->kworker);
}
+
kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
/*
@@ -1368,11 +2061,8 @@ static int spi_init_queue(struct spi_controller *ctlr)
* request and the scheduling of the message pump thread. Without this
* setting the message pump thread will remain at default priority.
*/
- if (ctlr->rt) {
- dev_info(&ctlr->dev,
- "will run message pump with realtime priority\n");
- sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, &param);
- }
+ if (ctlr->rt)
+ spi_set_thread_rt(ctlr);
return 0;
}
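A controller driver opts in simply by setting ctlr->rt = true before spi_register_controller(); the queue initialization above then promotes the freshly created kworker via spi_set_thread_rt().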
@@ -1392,7 +2082,7 @@ struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
struct spi_message *next;
unsigned long flags;
- /* get a pointer to the next message, if any */
+ /* Get a pointer to the next message, if any */
spin_lock_irqsave(&ctlr->queue_lock, flags);
next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
queue);
@@ -1402,6 +2092,44 @@ struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
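A hedged sketch of one way a driver implementing its own transfer_one_message() might use this peek; every foo_* helper is an assumption, not taken from this file:

static int foo_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	foo_run_transfers(ctlr, msg);		/* assumed hardware I/O */

	msg->status = 0;
	spi_finalize_current_message(ctlr);

	/* Keep the block clocked if another message is already queued */
	if (!spi_get_next_queued_message(ctlr))
		foo_clocks_off(ctlr);

	return 0;
}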
+/*
+ * __spi_unoptimize_message - shared implementation of spi_unoptimize_message()
+ * and spi_maybe_unoptimize_message()
+ * @msg: the message to unoptimize
+ *
+ * Peripheral drivers should use spi_unoptimize_message() and callers inside
+ * core should use spi_maybe_unoptimize_message() rather than calling this
+ * function directly.
+ *
+ * It is not valid to call this on a message that is not currently optimized.
+ */
+static void __spi_unoptimize_message(struct spi_message *msg)
+{
+ struct spi_controller *ctlr = msg->spi->controller;
+
+ if (ctlr->unoptimize_message)
+ ctlr->unoptimize_message(msg);
+
+ spi_res_release(ctlr, msg);
+
+ msg->optimized = false;
+ msg->opt_state = NULL;
+}
+
+/*
+ * spi_maybe_unoptimize_message - unoptimize msg not managed by a peripheral
+ * @msg: the message to unoptimize
+ *
+ * This function is used to unoptimize a message if and only if it was
+ * optimized by the core (via spi_maybe_optimize_message()).
+ */
+static void spi_maybe_unoptimize_message(struct spi_message *msg)
+{
+ if (!msg->pre_optimized && msg->optimized &&
+ !msg->spi->controller->defer_optimize_message)
+ __spi_unoptimize_message(msg);
+}
+
/**
* spi_finalize_current_message() - the current message is complete
* @ctlr: the controller to return the message to
@@ -1411,17 +2139,26 @@ EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
*/
void spi_finalize_current_message(struct spi_controller *ctlr)
{
+ struct spi_transfer *xfer;
struct spi_message *mesg;
- unsigned long flags;
int ret;
- spin_lock_irqsave(&ctlr->queue_lock, flags);
mesg = ctlr->cur_msg;
- spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+
+ if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
+ list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
+ ptp_read_system_postts(xfer->ptp_sts);
+ xfer->ptp_sts_word_post = xfer->len;
+ }
+ }
+
+ if (unlikely(ctlr->ptp_sts_supported))
+ list_for_each_entry(xfer, &mesg->transfers, transfer_list)
+ WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
spi_unmap_msg(ctlr, mesg);
- if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
+ if (mesg->prepared && ctlr->unprepare_message) {
ret = ctlr->unprepare_message(ctlr, mesg);
if (ret) {
dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
@@ -1429,11 +2166,14 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
}
}
- spin_lock_irqsave(&ctlr->queue_lock, flags);
- ctlr->cur_msg = NULL;
- ctlr->cur_msg_prepared = false;
- kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
- spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+ mesg->prepared = false;
+
+ spi_maybe_unoptimize_message(mesg);
+
+ WRITE_ONCE(ctlr->cur_msg_incomplete, false);
+ smp_mb(); /* See __spi_pump_transfer_message()... */
+ if (READ_ONCE(ctlr->cur_msg_need_completion))
+ complete(&ctlr->cur_msg_completion);
trace_spi_message_done(mesg);
@@ -1458,18 +2198,15 @@ static int spi_start_queue(struct spi_controller *ctlr)
ctlr->cur_msg = NULL;
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
- kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
+ kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
return 0;
}
static int spi_stop_queue(struct spi_controller *ctlr)
{
+ unsigned int limit = 500;
unsigned long flags;
- unsigned limit = 500;
- int ret = 0;
-
- spin_lock_irqsave(&ctlr->queue_lock, flags);
/*
* This is a bit lame, but is optimized for the common execution path.
@@ -1477,24 +2214,18 @@ static int spi_stop_queue(struct spi_controller *ctlr)
* execution path (pump_messages) would be required to call wake_up or
* friends on every SPI message. Do this instead.
*/
- while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
+ do {
+ spin_lock_irqsave(&ctlr->queue_lock, flags);
+ if (list_empty(&ctlr->queue) && !ctlr->busy) {
+ ctlr->running = false;
+ spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+ return 0;
+ }
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
usleep_range(10000, 11000);
- spin_lock_irqsave(&ctlr->queue_lock, flags);
- }
-
- if (!list_empty(&ctlr->queue) || ctlr->busy)
- ret = -EBUSY;
- else
- ctlr->running = false;
+ } while (--limit);
- spin_unlock_irqrestore(&ctlr->queue_lock, flags);
-
- if (ret) {
- dev_warn(&ctlr->dev, "could not stop message queue\n");
- return ret;
- }
- return ret;
+ return -EBUSY;
}
static int spi_destroy_queue(struct spi_controller *ctlr)
@@ -1514,8 +2245,7 @@ static int spi_destroy_queue(struct spi_controller *ctlr)
return ret;
}
- kthread_flush_worker(&ctlr->kworker);
- kthread_stop(ctlr->kworker_task);
+ kthread_destroy_worker(ctlr->kworker);
return 0;
}
@@ -1537,8 +2267,9 @@ static int __spi_queued_transfer(struct spi_device *spi,
msg->status = -EINPROGRESS;
list_add_tail(&msg->queue, &ctlr->queue);
+ ctlr->queue_empty = false;
if (!ctlr->busy && need_pump)
- kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
+ kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
return 0;
@@ -1546,8 +2277,8 @@ static int __spi_queued_transfer(struct spi_device *spi,
/**
* spi_queued_transfer - transfer function for queued transfers
- * @spi: spi device which is requesting transfer
- * @msg: spi message which is to handled is queued to driver queue
+ * @spi: SPI device which is requesting transfer
+ * @msg: SPI message to be handled, which is queued to the driver queue
*
* Return: zero on success, else a negative error code.
*/
@@ -1604,27 +2335,46 @@ void spi_flush_queue(struct spi_controller *ctlr)
/*-------------------------------------------------------------------------*/
#if defined(CONFIG_OF)
+static void of_spi_parse_dt_cs_delay(struct device_node *nc,
+ struct spi_delay *delay, const char *prop)
+{
+ u32 value;
+
+ if (!of_property_read_u32(nc, prop, &value)) {
+ if (value > U16_MAX) {
+ delay->value = DIV_ROUND_UP(value, 1000);
+ delay->unit = SPI_DELAY_UNIT_USECS;
+ } else {
+ delay->value = value;
+ delay->unit = SPI_DELAY_UNIT_NSECS;
+ }
+ }
+}
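For example, a spi-cs-setup-delay-ns property of 100000 exceeds U16_MAX and is therefore stored as DIV_ROUND_UP(100000, 1000) = 100 in SPI_DELAY_UNIT_USECS, while a value of 500 fits and is kept verbatim in SPI_DELAY_UNIT_NSECS.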
+
static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
struct device_node *nc)
{
- u32 value;
- int rc;
+ u32 value, cs[SPI_DEVICE_CS_CNT_MAX];
+ int rc, idx;
/* Mode (clock phase/polarity/etc.) */
if (of_property_read_bool(nc, "spi-cpha"))
spi->mode |= SPI_CPHA;
if (of_property_read_bool(nc, "spi-cpol"))
spi->mode |= SPI_CPOL;
- if (of_property_read_bool(nc, "spi-cs-high"))
- spi->mode |= SPI_CS_HIGH;
if (of_property_read_bool(nc, "spi-3wire"))
spi->mode |= SPI_3WIRE;
if (of_property_read_bool(nc, "spi-lsb-first"))
spi->mode |= SPI_LSB_FIRST;
+ if (of_property_read_bool(nc, "spi-cs-high"))
+ spi->mode |= SPI_CS_HIGH;
/* Device DUAL/QUAD mode */
if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
switch (value) {
+ case 0:
+ spi->mode |= SPI_NO_TX;
+ break;
case 1:
break;
case 2:
@@ -1646,6 +2396,9 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
switch (value) {
+ case 0:
+ spi->mode |= SPI_NO_RX;
+ break;
case 1:
break;
case 2:
@@ -1665,7 +2418,7 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
}
}
- if (spi_controller_is_slave(ctlr)) {
+ if (spi_controller_is_target(ctlr)) {
if (!of_node_name_eq(nc, "slave")) {
dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
nc);
@@ -1675,22 +2428,38 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
}
/* Device address */
- rc = of_property_read_u32(nc, "reg", &value);
- if (rc) {
+ rc = of_property_read_variable_u32_array(nc, "reg", &cs[0], 1,
+ SPI_DEVICE_CS_CNT_MAX);
+ if (rc < 0) {
dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
nc, rc);
return rc;
}
- spi->chip_select = value;
- /* Device speed */
- rc = of_property_read_u32(nc, "spi-max-frequency", &value);
- if (rc) {
- dev_err(&ctlr->dev,
- "%pOF has no valid 'spi-max-frequency' property (%d)\n", nc, rc);
- return rc;
+ if ((of_property_present(nc, "parallel-memories")) &&
+ (!(ctlr->flags & SPI_CONTROLLER_MULTI_CS))) {
+ dev_err(&ctlr->dev, "SPI controller doesn't support multi CS\n");
+ return -EINVAL;
}
- spi->max_speed_hz = value;
+
+ spi->num_chipselect = rc;
+ for (idx = 0; idx < rc; idx++)
+ spi_set_chipselect(spi, idx, cs[idx]);
+
+ /*
+ * By default spi->chip_select[0] will hold the physical CS number,
+ * so set bit 0 in spi->cs_index_mask.
+ */
+ spi->cs_index_mask = BIT(0);
+
+ /* Device speed */
+ if (!of_property_read_u32(nc, "spi-max-frequency", &value))
+ spi->max_speed_hz = value;
+
+ /* Device CS delays */
+ of_spi_parse_dt_cs_delay(nc, &spi->cs_setup, "spi-cs-setup-delay-ns");
+ of_spi_parse_dt_cs_delay(nc, &spi->cs_hold, "spi-cs-hold-delay-ns");
+ of_spi_parse_dt_cs_delay(nc, &spi->cs_inactive, "spi-cs-inactive-delay-ns");
return 0;
}
@@ -1710,8 +2479,8 @@ of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
}
/* Select device driver */
- rc = of_modalias_node(nc, spi->modalias,
- sizeof(spi->modalias));
+ rc = of_alias_from_compatible(nc, spi->modalias,
+ sizeof(spi->modalias));
if (rc < 0) {
dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
goto err_out;
@@ -1723,7 +2492,8 @@ of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
/* Store a pointer to the node in the device structure */
of_node_get(nc);
- spi->dev.of_node = nc;
+
+ device_set_node(&spi->dev, of_fwnode_handle(nc));
/* Register the new device */
rc = spi_add_device(spi);
@@ -1746,16 +2516,13 @@ err_out:
* @ctlr: Pointer to spi_controller device
*
* Registers an spi_device for each child node of controller node which
- * represents a valid SPI slave.
+ * represents a valid SPI target device.
*/
static void of_register_spi_devices(struct spi_controller *ctlr)
{
struct spi_device *spi;
struct device_node *nc;
- if (!ctlr->dev.of_node)
- return;
-
for_each_available_child_of_node(ctlr->dev.of_node, nc) {
if (of_node_test_and_set_flag(nc, OF_POPULATED))
continue;
@@ -1771,10 +2538,118 @@ static void of_register_spi_devices(struct spi_controller *ctlr)
static void of_register_spi_devices(struct spi_controller *ctlr) { }
#endif
+/**
+ * spi_new_ancillary_device() - Register ancillary SPI device
+ * @spi: Pointer to the main SPI device registering the ancillary device
+ * @chip_select: Chip Select of the ancillary device
+ *
+ * Register an ancillary SPI device; for example, some chips have a chip-select
+ * for normal device usage and another one for setup/firmware upload.
+ *
+ * This may only be called from the main SPI device's probe routine.
+ *
+ * Return: the new spi_device on success; an ERR_PTR() on failure
+ */
+struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
+ u8 chip_select)
+{
+ struct spi_controller *ctlr = spi->controller;
+ struct spi_device *ancillary;
+ int rc;
+
+ /* Alloc an spi_device */
+ ancillary = spi_alloc_device(ctlr);
+ if (!ancillary) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
+
+ /* Use provided chip-select for ancillary device */
+ spi_set_chipselect(ancillary, 0, chip_select);
+
+ /* Take over SPI mode/speed from SPI main device */
+ ancillary->max_speed_hz = spi->max_speed_hz;
+ ancillary->mode = spi->mode;
+ /*
+ * By default spi->chip_select[0] will hold the physical CS number,
+ * so set bit 0 in spi->cs_index_mask.
+ */
+ ancillary->cs_index_mask = BIT(0);
+
+ WARN_ON(!mutex_is_locked(&ctlr->add_lock));
+
+ /* Register the new device */
+ rc = __spi_add_device(ancillary);
+ if (rc) {
+ dev_err(&spi->dev, "failed to register ancillary device\n");
+ goto err_out;
+ }
+
+ return ancillary;
+
+err_out:
+ spi_dev_put(ancillary);
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(spi_new_ancillary_device);
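A minimal sketch of the intended call site; the chip-select number 1 and the foo_* names are assumptions, not taken from this file:

static int foo_probe(struct spi_device *spi)
{
	struct spi_device *fw_cs;

	/* Second chip select of the same chip, used only for firmware upload */
	fw_cs = spi_new_ancillary_device(spi, 1);
	if (IS_ERR(fw_cs))
		return PTR_ERR(fw_cs);

	return foo_upload_firmware(fw_cs);	/* assumed helper */
}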
+
#ifdef CONFIG_ACPI
-static void acpi_spi_parse_apple_properties(struct spi_device *spi)
+struct acpi_spi_lookup {
+ struct spi_controller *ctlr;
+ u32 max_speed_hz;
+ u32 mode;
+ int irq;
+ u8 bits_per_word;
+ u8 chip_select;
+ int n;
+ int index;
+};
+
+static int acpi_spi_count(struct acpi_resource *ares, void *data)
+{
+ struct acpi_resource_spi_serialbus *sb;
+ int *count = data;
+
+ if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
+ return 1;
+
+ sb = &ares->data.spi_serial_bus;
+ if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI)
+ return 1;
+
+ *count = *count + 1;
+
+ return 1;
+}
+
+/**
+ * acpi_spi_count_resources - Count the number of SpiSerialBus resources
+ * @adev: ACPI device
+ *
+ * Return: the number of SpiSerialBus resources in the ACPI device's
+ * resource list; or a negative error code.
+ */
+int acpi_spi_count_resources(struct acpi_device *adev)
+{
+ LIST_HEAD(r);
+ int count = 0;
+ int ret;
+
+ ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count);
+ if (ret < 0)
+ return ret;
+
+ acpi_dev_free_resource_list(&r);
+
+ return count;
+}
+EXPORT_SYMBOL_GPL(acpi_spi_count_resources);
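A peripheral driver handling a multi-instance ACPI node typically feeds this count into an index loop over acpi_spi_device_alloc(); see the sketch after that function below.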
+
+static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
+ struct acpi_spi_lookup *lookup)
{
- struct acpi_device *dev = ACPI_COMPANION(&spi->dev);
const union acpi_object *obj;
if (!x86_apple_machine)
@@ -1782,35 +2657,65 @@ static void acpi_spi_parse_apple_properties(struct spi_device *spi)
if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
&& obj->buffer.length >= 4)
- spi->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
+ lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
&& obj->buffer.length == 8)
- spi->bits_per_word = *(u64 *)obj->buffer.pointer;
+ lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
&& obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
- spi->mode |= SPI_LSB_FIRST;
+ lookup->mode |= SPI_LSB_FIRST;
if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
&& obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
- spi->mode |= SPI_CPOL;
+ lookup->mode |= SPI_CPOL;
if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
&& obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
- spi->mode |= SPI_CPHA;
+ lookup->mode |= SPI_CPHA;
}
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
- struct spi_device *spi = data;
- struct spi_controller *ctlr = spi->controller;
+ struct acpi_spi_lookup *lookup = data;
+ struct spi_controller *ctlr = lookup->ctlr;
if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
struct acpi_resource_spi_serialbus *sb;
+ acpi_handle parent_handle;
+ acpi_status status;
sb = &ares->data.spi_serial_bus;
if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
+
+ if (lookup->index != -1 && lookup->n++ != lookup->index)
+ return 1;
+
+ status = acpi_get_handle(NULL,
+ sb->resource_source.string_ptr,
+ &parent_handle);
+
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ if (ctlr) {
+ if (!device_match_acpi_handle(ctlr->dev.parent, parent_handle))
+ return -ENODEV;
+ } else {
+ struct acpi_device *adev;
+
+ adev = acpi_fetch_acpi_dev(parent_handle);
+ if (!adev)
+ return -ENODEV;
+
+ ctlr = acpi_spi_find_controller_by_adev(adev);
+ if (!ctlr)
+ return -EPROBE_DEFER;
+
+ lookup->ctlr = ctlr;
+ }
+
/*
* ACPI DeviceSelection numbering is handled by the
* host controller driver in Windows and can vary
@@ -1823,67 +2728,138 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
sb->device_selection);
if (cs < 0)
return cs;
- spi->chip_select = cs;
+ lookup->chip_select = cs;
} else {
- spi->chip_select = sb->device_selection;
+ lookup->chip_select = sb->device_selection;
}
- spi->max_speed_hz = sb->connection_speed;
+ lookup->max_speed_hz = sb->connection_speed;
+ lookup->bits_per_word = sb->data_bit_length;
if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
- spi->mode |= SPI_CPHA;
+ lookup->mode |= SPI_CPHA;
if (sb->clock_polarity == ACPI_SPI_START_HIGH)
- spi->mode |= SPI_CPOL;
+ lookup->mode |= SPI_CPOL;
if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
- spi->mode |= SPI_CS_HIGH;
+ lookup->mode |= SPI_CS_HIGH;
}
- } else if (spi->irq < 0) {
+ } else if (lookup->irq < 0) {
struct resource r;
if (acpi_dev_resource_interrupt(ares, 0, &r))
- spi->irq = r.start;
+ lookup->irq = r.start;
}
/* Always tell the ACPI core to skip this resource */
return 1;
}
-static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
- struct acpi_device *adev)
+/**
+ * acpi_spi_device_alloc - Allocate an SPI device, and fill it in with ACPI information
+ * @ctlr: controller to which the SPI device belongs
+ * @adev: ACPI Device for the SPI device
+ * @index: Index of the SPI resource inside the ACPI Node
+ *
+ * This should be used to allocate a new SPI device from an ACPI Device node.
+ * The caller is responsible for calling spi_add_device() to register the SPI
+ * device.
+ *
+ * If @ctlr is set to NULL, the controller for the SPI device will be looked up
+ * using the resource.
+ * If @index is set to -1, the index is not used.
+ * Note: If @index is -1, @ctlr must be set.
+ *
+ * Return: a pointer to the new device, or ERR_PTR on error.
+ */
+struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
+ struct acpi_device *adev,
+ int index)
{
+ acpi_handle parent_handle = NULL;
struct list_head resource_list;
+ struct acpi_spi_lookup lookup = {};
struct spi_device *spi;
int ret;
- if (acpi_bus_get_status(adev) || !adev->status.present ||
- acpi_device_enumerated(adev))
- return AE_OK;
+ if (!ctlr && index == -1)
+ return ERR_PTR(-EINVAL);
- spi = spi_alloc_device(ctlr);
+ lookup.ctlr = ctlr;
+ lookup.irq = -1;
+ lookup.index = index;
+ lookup.n = 0;
+
+ INIT_LIST_HEAD(&resource_list);
+ ret = acpi_dev_get_resources(adev, &resource_list,
+ acpi_spi_add_resource, &lookup);
+ acpi_dev_free_resource_list(&resource_list);
+
+ if (ret < 0)
+ /* Found SPI in _CRS but it points to another controller */
+ return ERR_PTR(ret);
+
+ if (!lookup.max_speed_hz &&
+ ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
+ device_match_acpi_handle(lookup.ctlr->dev.parent, parent_handle)) {
+ /* Apple does not use _CRS but nested devices for SPI target devices */
+ acpi_spi_parse_apple_properties(adev, &lookup);
+ }
+
+ if (!lookup.max_speed_hz)
+ return ERR_PTR(-ENODEV);
+
+ spi = spi_alloc_device(lookup.ctlr);
if (!spi) {
- dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n",
+ dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n",
dev_name(&adev->dev));
- return AE_NO_MEMORY;
+ return ERR_PTR(-ENOMEM);
}
+ spi_set_chipselect(spi, 0, lookup.chip_select);
+
ACPI_COMPANION_SET(&spi->dev, adev);
- spi->irq = -1;
+ spi->max_speed_hz = lookup.max_speed_hz;
+ spi->mode |= lookup.mode;
+ spi->irq = lookup.irq;
+ spi->bits_per_word = lookup.bits_per_word;
+ /*
+ * By default spi->chip_select[0] will hold the physical CS number,
+ * so set bit 0 in spi->cs_index_mask.
+ */
+ spi->cs_index_mask = BIT(0);
- INIT_LIST_HEAD(&resource_list);
- ret = acpi_dev_get_resources(adev, &resource_list,
- acpi_spi_add_resource, spi);
- acpi_dev_free_resource_list(&resource_list);
+ return spi;
+}
+EXPORT_SYMBOL_GPL(acpi_spi_device_alloc);
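Putting the two ACPI helpers together, a hedged enumeration sketch; the "foo" modalias and the error-handling granularity are assumptions:

static int foo_probe_all(struct acpi_device *adev)
{
	int i, count;

	count = acpi_spi_count_resources(adev);
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		struct spi_device *spi;

		/* ctlr == NULL: let the lookup follow ResourceSource */
		spi = acpi_spi_device_alloc(NULL, adev, i);
		if (IS_ERR(spi))
			return PTR_ERR(spi);

		strscpy(spi->modalias, "foo", sizeof(spi->modalias));
		if (spi_add_device(spi)) {
			spi_dev_put(spi);
			return -ENODEV;
		}
	}
	return 0;
}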
- acpi_spi_parse_apple_properties(spi);
+static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
+ struct acpi_device *adev)
+{
+ struct spi_device *spi;
- if (ret < 0 || !spi->max_speed_hz) {
- spi_dev_put(spi);
+ if (acpi_bus_get_status(adev) || !adev->status.present ||
+ acpi_device_enumerated(adev))
return AE_OK;
+
+ spi = acpi_spi_device_alloc(ctlr, adev, -1);
+ if (IS_ERR(spi)) {
+ if (PTR_ERR(spi) == -ENOMEM)
+ return AE_NO_MEMORY;
+ else
+ return AE_OK;
}
acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
sizeof(spi->modalias));
+ /*
+ * This gets re-tried in spi_probe() for -EPROBE_DEFER handling in case
+ * the GPIO controller does not have a driver yet. This needs to be done
+ * here too, because this call sets the GPIO direction and/or bias.
+ * Setting these needs to be done even if there is no driver, in which
+ * case spi_probe() will never get called.
+ * TODO: ideally the setup of the GPIO should be handled in a generic
+ * manner in the ACPI/gpiolib core code.
+ */
if (spi->irq < 0)
spi->irq = acpi_dev_gpio_irq_get(adev, 0);
@@ -1903,15 +2879,17 @@ static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
void *data, void **return_value)
{
+ struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
struct spi_controller *ctlr = data;
- struct acpi_device *adev;
- if (acpi_bus_get_device(handle, &adev))
+ if (!adev)
return AE_OK;
return acpi_register_spi_device(ctlr, adev);
}
+#define SPI_ACPI_ENUMERATE_MAX_DEPTH 32
+
static void acpi_register_spi_devices(struct spi_controller *ctlr)
{
acpi_status status;
@@ -1921,10 +2899,11 @@ static void acpi_register_spi_devices(struct spi_controller *ctlr)
if (!handle)
return;
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ SPI_ACPI_ENUMERATE_MAX_DEPTH,
acpi_spi_add_device, NULL, ctlr, NULL);
if (ACPI_FAILURE(status))
- dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
+ dev_warn(&ctlr->dev, "failed to enumerate SPI target devices\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
@@ -1938,50 +2917,45 @@ static void spi_controller_release(struct device *dev)
kfree(ctlr);
}
-static struct class spi_master_class = {
+static const struct class spi_controller_class = {
.name = "spi_master",
- .owner = THIS_MODULE,
.dev_release = spi_controller_release,
- .dev_groups = spi_master_groups,
+ .dev_groups = spi_controller_groups,
};
#ifdef CONFIG_SPI_SLAVE
/**
- * spi_slave_abort - abort the ongoing transfer request on an SPI slave
- * controller
+ * spi_target_abort - abort the ongoing transfer request on an SPI target controller
* @spi: device used for the current transfer
*/
-int spi_slave_abort(struct spi_device *spi)
+int spi_target_abort(struct spi_device *spi)
{
struct spi_controller *ctlr = spi->controller;
- if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
- return ctlr->slave_abort(ctlr);
+ if (spi_controller_is_target(ctlr) && ctlr->target_abort)
+ return ctlr->target_abort(ctlr);
return -ENOTSUPP;
}
-EXPORT_SYMBOL_GPL(spi_slave_abort);
+EXPORT_SYMBOL_GPL(spi_target_abort);
-static int match_true(struct device *dev, void *data)
-{
- return 1;
-}
-
-static ssize_t spi_slave_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct spi_controller *ctlr = container_of(dev, struct spi_controller,
dev);
struct device *child;
+ int ret;
+
+ child = device_find_any_child(&ctlr->dev);
+ ret = sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
+ put_device(child);
- child = device_find_child(&ctlr->dev, NULL, match_true);
- return sprintf(buf, "%s\n",
- child ? to_spi_device(child)->modalias : NULL);
+ return ret;
}
-static ssize_t spi_slave_store(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t count)
+static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct spi_controller *ctlr = container_of(dev, struct spi_controller,
dev);
@@ -1994,20 +2968,20 @@ static ssize_t spi_slave_store(struct device *dev,
if (rc != 1 || !name[0])
return -EINVAL;
- child = device_find_child(&ctlr->dev, NULL, match_true);
+ child = device_find_any_child(&ctlr->dev);
if (child) {
- /* Remove registered slave */
+ /* Remove registered target device */
device_unregister(child);
put_device(child);
}
if (strcmp(name, "(null)")) {
- /* Register new slave */
+ /* Register new target device */
spi = spi_alloc_device(ctlr);
if (!spi)
return -ENOMEM;
- strlcpy(spi->modalias, name, sizeof(spi->modalias));
+ strscpy(spi->modalias, name, sizeof(spi->modalias));
rc = spi_add_device(spi);
if (rc) {
@@ -2019,41 +2993,42 @@ static ssize_t spi_slave_store(struct device *dev,
return count;
}
-static DEVICE_ATTR(slave, 0644, spi_slave_show, spi_slave_store);
+static DEVICE_ATTR_RW(slave);
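In practice this lets a target device be instantiated from userspace, e.g. by writing a driver name such as "spidev" to /sys/class/spi_slave/spi<N>/slave, and removed again by writing "(null)".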
-static struct attribute *spi_slave_attrs[] = {
+static struct attribute *spi_target_attrs[] = {
&dev_attr_slave.attr,
NULL,
};
-static const struct attribute_group spi_slave_group = {
- .attrs = spi_slave_attrs,
+static const struct attribute_group spi_target_group = {
+ .attrs = spi_target_attrs,
};
-static const struct attribute_group *spi_slave_groups[] = {
+static const struct attribute_group *spi_target_groups[] = {
&spi_controller_statistics_group,
- &spi_slave_group,
+ &spi_target_group,
NULL,
};
-static struct class spi_slave_class = {
+static const struct class spi_target_class = {
.name = "spi_slave",
- .owner = THIS_MODULE,
.dev_release = spi_controller_release,
- .dev_groups = spi_slave_groups,
+ .dev_groups = spi_target_groups,
};
#else
-extern struct class spi_slave_class; /* dummy */
+extern struct class spi_target_class; /* dummy */
#endif
/**
- * __spi_alloc_controller - allocate an SPI master or slave controller
+ * __spi_alloc_controller - allocate an SPI host or target controller
* @dev: the controller, possibly using the platform_bus
* @size: how much zeroed driver-private data to allocate; the pointer to this
- * memory is in the driver_data field of the returned device,
- * accessible with spi_controller_get_devdata().
- * @slave: flag indicating whether to allocate an SPI master (false) or SPI
- * slave (true) controller
+ * memory is in the driver_data field of the returned device, accessible
+ * with spi_controller_get_devdata(); the memory is cacheline aligned;
+ * drivers granting DMA access to portions of their private data need to
+ * round up @size using ALIGN(size, dma_get_cache_alignment()).
+ * @target: flag indicating whether to allocate an SPI host (false) or SPI target (true)
+ * controller
* Context: can sleep
*
* This call is used only by SPI controller drivers, which are the
@@ -2070,72 +3045,158 @@ extern struct class spi_slave_class; /* dummy */
* Return: the SPI controller structure on success, else NULL.
*/
struct spi_controller *__spi_alloc_controller(struct device *dev,
- unsigned int size, bool slave)
+ unsigned int size, bool target)
{
struct spi_controller *ctlr;
+ size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
if (!dev)
return NULL;
- ctlr = kzalloc(size + sizeof(*ctlr), GFP_KERNEL);
+ ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
if (!ctlr)
return NULL;
device_initialize(&ctlr->dev);
+ INIT_LIST_HEAD(&ctlr->queue);
+ spin_lock_init(&ctlr->queue_lock);
+ spin_lock_init(&ctlr->bus_lock_spinlock);
+ mutex_init(&ctlr->bus_lock_mutex);
+ mutex_init(&ctlr->io_mutex);
+ mutex_init(&ctlr->add_lock);
ctlr->bus_num = -1;
ctlr->num_chipselect = 1;
- ctlr->slave = slave;
- if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
- ctlr->dev.class = &spi_slave_class;
+ ctlr->target = target;
+ if (IS_ENABLED(CONFIG_SPI_SLAVE) && target)
+ ctlr->dev.class = &spi_target_class;
else
- ctlr->dev.class = &spi_master_class;
+ ctlr->dev.class = &spi_controller_class;
ctlr->dev.parent = dev;
pm_suspend_ignore_children(&ctlr->dev, true);
- spi_controller_set_devdata(ctlr, &ctlr[1]);
+ spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
return ctlr;
}
EXPORT_SYMBOL_GPL(__spi_alloc_controller);
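A sketch of the ALIGN() usage the kernel-doc asks for when the private data is a DMA target; struct foo_priv, the probe wrapper, and the spi_alloc_host() convenience wrapper are assumptions:

static int foo_spi_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	struct foo_priv *priv;

	/* Private data is DMA'd into, so round it up per the kernel-doc */
	ctlr = spi_alloc_host(&pdev->dev,
			      ALIGN(sizeof(*priv), dma_get_cache_alignment()));
	if (!ctlr)
		return -ENOMEM;

	priv = spi_controller_get_devdata(ctlr);
	/* ... hardware setup, then spi_register_controller(ctlr) ... */
	return 0;
}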
-#ifdef CONFIG_OF
-static int of_spi_register_master(struct spi_controller *ctlr)
+static void devm_spi_release_controller(struct device *dev, void *ctlr)
{
- int nb, i, *cs;
- struct device_node *np = ctlr->dev.of_node;
+ spi_controller_put(*(struct spi_controller **)ctlr);
+}
- if (!np)
- return 0;
+/**
+ * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
+ * @dev: physical device of SPI controller
+ * @size: how much zeroed driver-private data to allocate
+ * @target: whether to allocate an SPI host (false) or SPI target (true) controller
+ * Context: can sleep
+ *
+ * Allocate an SPI controller and automatically release a reference on it
+ * when @dev is unbound from its driver. Drivers are thus relieved from
+ * having to call spi_controller_put().
+ *
+ * The arguments to this function are identical to __spi_alloc_controller().
+ *
+ * Return: the SPI controller structure on success, else NULL.
+ */
+struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
+ unsigned int size,
+ bool target)
+{
+ struct spi_controller **ptr, *ctlr;
- nb = of_gpio_named_count(np, "cs-gpios");
- ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
+ ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return NULL;
- /* Return error only for an incorrectly formed cs-gpios property */
- if (nb == 0 || nb == -ENOENT)
- return 0;
- else if (nb < 0)
+ ctlr = __spi_alloc_controller(dev, size, target);
+ if (ctlr) {
+ ctlr->devm_allocated = true;
+ *ptr = ctlr;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return ctlr;
+}
+EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
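The devm variant shortens the error handling accordingly; a minimal sketch, assuming the devm_spi_alloc_host() wrapper and a hypothetical struct foo_priv:

static int foo_spi_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;

	ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(struct foo_priv));
	if (!ctlr)
		return -ENOMEM;

	/* No spi_controller_put() on any error path: devres drops the ref */
	return devm_spi_register_controller(&pdev->dev, ctlr);
}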
+
+/**
+ * spi_get_gpio_descs() - grab chip select GPIOs for the controller
+ * @ctlr: The SPI controller to grab GPIO descriptors for
+ */
+static int spi_get_gpio_descs(struct spi_controller *ctlr)
+{
+ int nb, i;
+ struct gpio_desc **cs;
+ struct device *dev = &ctlr->dev;
+ unsigned long native_cs_mask = 0;
+ unsigned int num_cs_gpios = 0;
+
+ nb = gpiod_count(dev, "cs");
+ if (nb < 0) {
+ /* No GPIOs at all is fine, else return the error */
+ if (nb == -ENOENT)
+ return 0;
return nb;
+ }
- cs = devm_kcalloc(&ctlr->dev, ctlr->num_chipselect, sizeof(int),
- GFP_KERNEL);
- ctlr->cs_gpios = cs;
+ ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
- if (!ctlr->cs_gpios)
+ cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
+ GFP_KERNEL);
+ if (!cs)
return -ENOMEM;
+ ctlr->cs_gpiods = cs;
- for (i = 0; i < ctlr->num_chipselect; i++)
- cs[i] = -ENOENT;
+ for (i = 0; i < nb; i++) {
+ /*
+ * Most chipselects are active low, the inverted
+ * semantics are handled by special quirks in gpiolib,
+ * so initializing them GPIOD_OUT_LOW here means
+ * "unasserted", in most cases this will drive the physical
+ * line high.
+ */
+ cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(cs[i]))
+ return PTR_ERR(cs[i]);
- for (i = 0; i < nb; i++)
- cs[i] = of_get_named_gpio(np, "cs-gpios", i);
+ if (cs[i]) {
+ /*
+ * If we find a CS GPIO, name it after the device and
+ * chip select line.
+ */
+ char *gpioname;
+
+ gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
+ dev_name(dev), i);
+ if (!gpioname)
+ return -ENOMEM;
+ gpiod_set_consumer_name(cs[i], gpioname);
+ num_cs_gpios++;
+ continue;
+ }
+
+ if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
+ dev_err(dev, "Invalid native chip select %d\n", i);
+ return -EINVAL;
+ }
+ native_cs_mask |= BIT(i);
+ }
+
+ ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
+
+ if ((ctlr->flags & SPI_CONTROLLER_GPIO_SS) && num_cs_gpios &&
+ ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
+ dev_err(dev, "No unused native chip select available\n");
+ return -EINVAL;
+ }
return 0;
}
-#else
-static int of_spi_register_master(struct spi_controller *ctlr)
-{
- return 0;
-}
-#endif
static int spi_controller_check_ops(struct spi_controller *ctlr)
{
@@ -2143,24 +3204,37 @@ static int spi_controller_check_ops(struct spi_controller *ctlr)
	 * The controller may implement only the high-level SPI-memory-like
	 * operations if it does not support regular SPI transfers, and this is
	 * a valid use case.
- * If ->mem_ops is NULL, we request that at least one of the
- * ->transfer_xxx() method be implemented.
+ * If ->mem_ops or ->mem_ops->exec_op is NULL, we request that at least
+	 * one of the ->transfer_xxx() methods be implemented.
*/
- if (ctlr->mem_ops) {
- if (!ctlr->mem_ops->exec_op)
- return -EINVAL;
- } else if (!ctlr->transfer && !ctlr->transfer_one &&
+ if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
+ if (!ctlr->transfer && !ctlr->transfer_one &&
!ctlr->transfer_one_message) {
- return -EINVAL;
+ return -EINVAL;
+ }
}
return 0;
}
+/* Allocate dynamic bus number using Linux idr */
+static int spi_controller_id_alloc(struct spi_controller *ctlr, int start, int end)
+{
+ int id;
+
+ mutex_lock(&board_lock);
+ id = idr_alloc(&spi_controller_idr, ctlr, start, end, GFP_KERNEL);
+ mutex_unlock(&board_lock);
+ if (WARN(id < 0, "couldn't get idr"))
+ return id == -ENOSPC ? -EBUSY : id;
+ ctlr->bus_num = id;
+ return 0;
+}
+
/**
- * spi_register_controller - register SPI master or slave controller
- * @ctlr: initialized master, originally from spi_alloc_master() or
- * spi_alloc_slave()
+ * spi_register_controller - register SPI host or target controller
+ * @ctlr: initialized controller, originally from spi_alloc_host() or
+ * spi_alloc_target()
* Context: can sleep
*
* SPI controllers connect to their drivers using some non-SPI bus,
@@ -2184,8 +3258,9 @@ int spi_register_controller(struct spi_controller *ctlr)
{
struct device *dev = ctlr->dev.parent;
struct boardinfo *bi;
- int status = -ENODEV;
- int id, first_dynamic;
+ int first_dynamic;
+ int status;
+ int idx;
if (!dev)
return -ENODEV;
@@ -2198,39 +3273,14 @@ int spi_register_controller(struct spi_controller *ctlr)
if (status)
return status;
- if (!spi_controller_is_slave(ctlr)) {
- status = of_spi_register_master(ctlr);
+ if (ctlr->bus_num < 0)
+ ctlr->bus_num = of_alias_get_id(ctlr->dev.of_node, "spi");
+ if (ctlr->bus_num >= 0) {
+ /* Devices with a fixed bus num must check-in with the num */
+ status = spi_controller_id_alloc(ctlr, ctlr->bus_num, ctlr->bus_num + 1);
if (status)
return status;
}
-
- /* even if it's just one always-selected device, there must
- * be at least one chipselect
- */
- if (ctlr->num_chipselect == 0)
- return -EINVAL;
- if (ctlr->bus_num >= 0) {
- /* devices with a fixed bus num must check-in with the num */
- mutex_lock(&board_lock);
- id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
- ctlr->bus_num + 1, GFP_KERNEL);
- mutex_unlock(&board_lock);
- if (WARN(id < 0, "couldn't get idr"))
- return id == -ENOSPC ? -EBUSY : id;
- ctlr->bus_num = id;
- } else if (ctlr->dev.of_node) {
- /* allocate dynamic bus number using Linux idr */
- id = of_alias_get_id(ctlr->dev.of_node, "spi");
- if (id >= 0) {
- ctlr->bus_num = id;
- mutex_lock(&board_lock);
- id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
- ctlr->bus_num + 1, GFP_KERNEL);
- mutex_unlock(&board_lock);
- if (WARN(id < 0, "couldn't get idr"))
- return id == -ENOSPC ? -EBUSY : id;
- }
- }
if (ctlr->bus_num < 0) {
first_dynamic = of_alias_get_highest_id("spi");
if (first_dynamic < 0)
@@ -2238,38 +3288,51 @@ int spi_register_controller(struct spi_controller *ctlr)
else
first_dynamic++;
- mutex_lock(&board_lock);
- id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
- 0, GFP_KERNEL);
- mutex_unlock(&board_lock);
- if (WARN(id < 0, "couldn't get idr"))
- return id;
- ctlr->bus_num = id;
+ status = spi_controller_id_alloc(ctlr, first_dynamic, 0);
+ if (status)
+ return status;
}
- INIT_LIST_HEAD(&ctlr->queue);
- spin_lock_init(&ctlr->queue_lock);
- spin_lock_init(&ctlr->bus_lock_spinlock);
- mutex_init(&ctlr->bus_lock_mutex);
- mutex_init(&ctlr->io_mutex);
ctlr->bus_lock_flag = 0;
init_completion(&ctlr->xfer_completion);
+ init_completion(&ctlr->cur_msg_completion);
if (!ctlr->max_dma_len)
ctlr->max_dma_len = INT_MAX;
- /* register the device, then userspace will see it.
- * registration fails if the bus ID is in use.
+ /*
+ * Register the device, then userspace will see it.
+ * Registration fails if the bus ID is in use.
*/
dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
- status = device_add(&ctlr->dev);
- if (status < 0) {
- /* free bus id */
- mutex_lock(&board_lock);
- idr_remove(&spi_master_idr, ctlr->bus_num);
- mutex_unlock(&board_lock);
- goto done;
+
+ if (!spi_controller_is_target(ctlr) && ctlr->use_gpio_descriptors) {
+ status = spi_get_gpio_descs(ctlr);
+ if (status)
+ goto free_bus_id;
+ /*
+ * A controller using GPIO descriptors always
+ * supports SPI_CS_HIGH if need be.
+ */
+ ctlr->mode_bits |= SPI_CS_HIGH;
+ }
+
+ /*
+ * Even if it's just one always-selected device, there must
+ * be at least one chipselect.
+ */
+ if (!ctlr->num_chipselect) {
+ status = -EINVAL;
+ goto free_bus_id;
}
+
+ /* Setting last_cs to SPI_INVALID_CS means no chip selected */
+ for (idx = 0; idx < SPI_DEVICE_CS_CNT_MAX; idx++)
+ ctlr->last_cs[idx] = SPI_INVALID_CS;
+
+ status = device_add(&ctlr->dev);
+ if (status < 0)
+ goto free_bus_id;
dev_dbg(dev, "registered %s %s\n",
- spi_controller_is_slave(ctlr) ? "slave" : "master",
+ spi_controller_is_target(ctlr) ? "target" : "host",
dev_name(&ctlr->dev));
/*
@@ -2283,15 +3346,16 @@ int spi_register_controller(struct spi_controller *ctlr)
status = spi_controller_initialize_queue(ctlr);
if (status) {
device_del(&ctlr->dev);
- /* free bus id */
- mutex_lock(&board_lock);
- idr_remove(&spi_master_idr, ctlr->bus_num);
- mutex_unlock(&board_lock);
- goto done;
+ goto free_bus_id;
}
}
- /* add statistics */
- spin_lock_init(&ctlr->statistics.lock);
+ /* Add statistics */
+ ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev);
+ if (!ctlr->pcpu_statistics) {
+ dev_err(dev, "Error allocating per-cpu statistics\n");
+ status = -ENOMEM;
+ goto destroy_queue;
+ }
mutex_lock(&board_lock);
list_add_tail(&ctlr->list, &spi_controller_list);
@@ -2302,7 +3366,14 @@ int spi_register_controller(struct spi_controller *ctlr)
/* Register devices from the device tree and ACPI */
of_register_spi_devices(ctlr);
acpi_register_spi_devices(ctlr);
-done:
+ return status;
+
+destroy_queue:
+ spi_destroy_queue(ctlr);
+free_bus_id:
+ mutex_lock(&board_lock);
+ idr_remove(&spi_controller_idr, ctlr->bus_num);
+ mutex_unlock(&board_lock);
return status;
}
EXPORT_SYMBOL_GPL(spi_register_controller);
@@ -2313,11 +3384,10 @@ static void devm_spi_unregister(struct device *dev, void *res)
}
/**
- * devm_spi_register_controller - register managed SPI master or slave
- * controller
+ * devm_spi_register_controller - register managed SPI host or target controller
* @dev: device managing SPI controller
- * @ctlr: initialized controller, originally from spi_alloc_master() or
- * spi_alloc_slave()
+ * @ctlr: initialized controller, originally from spi_alloc_host() or
+ * spi_alloc_target()
* Context: can sleep
*
* Register a SPI device as with spi_register_controller() which will
@@ -2354,7 +3424,7 @@ static int __unregister(struct device *dev, void *null)
}
/**
- * spi_unregister_controller - unregister SPI master or slave controller
+ * spi_unregister_controller - unregister SPI host or target controller
* @ctlr: the controller being unregistered
* Context: can sleep
*
@@ -2369,11 +3439,16 @@ void spi_unregister_controller(struct spi_controller *ctlr)
{
struct spi_controller *found;
int id = ctlr->bus_num;
- int dummy;
+
+ /* Prevent addition of new devices, unregister existing ones */
+ if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
+ mutex_lock(&ctlr->add_lock);
+
+ device_for_each_child(&ctlr->dev, NULL, __unregister);
/* First make sure that this controller was ever added */
mutex_lock(&board_lock);
- found = idr_find(&spi_master_idr, id);
+ found = idr_find(&spi_controller_idr, id);
mutex_unlock(&board_lock);
if (ctlr->queued) {
if (spi_destroy_queue(ctlr))
@@ -2383,170 +3458,75 @@ void spi_unregister_controller(struct spi_controller *ctlr)
list_del(&ctlr->list);
mutex_unlock(&board_lock);
- dummy = device_for_each_child(&ctlr->dev, NULL, __unregister);
- device_unregister(&ctlr->dev);
- /* free bus id */
+ device_del(&ctlr->dev);
+
+ /* Free bus id */
mutex_lock(&board_lock);
if (found == ctlr)
- idr_remove(&spi_master_idr, id);
+ idr_remove(&spi_controller_idr, id);
mutex_unlock(&board_lock);
-}
-EXPORT_SYMBOL_GPL(spi_unregister_controller);
-
-int spi_controller_suspend(struct spi_controller *ctlr)
-{
- int ret;
- /* Basically no-ops for non-queued controllers */
- if (!ctlr->queued)
- return 0;
-
- ret = spi_stop_queue(ctlr);
- if (ret)
- dev_err(&ctlr->dev, "queue stop failed\n");
+ if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
+ mutex_unlock(&ctlr->add_lock);
- return ret;
-}
-EXPORT_SYMBOL_GPL(spi_controller_suspend);
-
-int spi_controller_resume(struct spi_controller *ctlr)
-{
- int ret;
-
- if (!ctlr->queued)
- return 0;
-
- ret = spi_start_queue(ctlr);
- if (ret)
- dev_err(&ctlr->dev, "queue restart failed\n");
-
- return ret;
+ /*
+ * Release the last reference on the controller if its driver
+ * has not yet been converted to devm_spi_alloc_host/target().
+ */
+ if (!ctlr->devm_allocated)
+ put_device(&ctlr->dev);
}
-EXPORT_SYMBOL_GPL(spi_controller_resume);
+EXPORT_SYMBOL_GPL(spi_unregister_controller);
-static int __spi_controller_match(struct device *dev, const void *data)
+static inline int __spi_check_suspended(const struct spi_controller *ctlr)
{
- struct spi_controller *ctlr;
- const u16 *bus_num = data;
-
- ctlr = container_of(dev, struct spi_controller, dev);
- return ctlr->bus_num == *bus_num;
+ return ctlr->flags & SPI_CONTROLLER_SUSPENDED ? -ESHUTDOWN : 0;
}
-/**
- * spi_busnum_to_master - look up master associated with bus_num
- * @bus_num: the master's bus number
- * Context: can sleep
- *
- * This call may be used with devices that are registered after
- * arch init time. It returns a refcounted pointer to the relevant
- * spi_controller (which the caller must release), or NULL if there is
- * no such master registered.
- *
- * Return: the SPI master structure on success, else NULL.
- */
-struct spi_controller *spi_busnum_to_master(u16 bus_num)
+static inline void __spi_mark_suspended(struct spi_controller *ctlr)
{
- struct device *dev;
- struct spi_controller *ctlr = NULL;
-
- dev = class_find_device(&spi_master_class, NULL, &bus_num,
- __spi_controller_match);
- if (dev)
- ctlr = container_of(dev, struct spi_controller, dev);
- /* reference got in class_find_device */
- return ctlr;
+ mutex_lock(&ctlr->bus_lock_mutex);
+ ctlr->flags |= SPI_CONTROLLER_SUSPENDED;
+ mutex_unlock(&ctlr->bus_lock_mutex);
}
-EXPORT_SYMBOL_GPL(spi_busnum_to_master);
-/*-------------------------------------------------------------------------*/
-
-/* Core methods for SPI resource management */
-
-/**
- * spi_res_alloc - allocate a spi resource that is life-cycle managed
- * during the processing of a spi_message while using
- * spi_transfer_one
- * @spi: the spi device for which we allocate memory
- * @release: the release code to execute for this resource
- * @size: size to alloc and return
- * @gfp: GFP allocation flags
- *
- * Return: the pointer to the allocated data
- *
- * This may get enhanced in the future to allocate from a memory pool
- * of the @spi_device or @spi_controller to avoid repeated allocations.
- */
-void *spi_res_alloc(struct spi_device *spi,
- spi_res_release_t release,
- size_t size, gfp_t gfp)
+static inline void __spi_mark_resumed(struct spi_controller *ctlr)
{
- struct spi_res *sres;
-
- sres = kzalloc(sizeof(*sres) + size, gfp);
- if (!sres)
- return NULL;
-
- INIT_LIST_HEAD(&sres->entry);
- sres->release = release;
-
- return sres->data;
+ mutex_lock(&ctlr->bus_lock_mutex);
+ ctlr->flags &= ~SPI_CONTROLLER_SUSPENDED;
+ mutex_unlock(&ctlr->bus_lock_mutex);
}
-EXPORT_SYMBOL_GPL(spi_res_alloc);
-/**
- * spi_res_free - free an spi resource
- * @res: pointer to the custom data of a resource
- *
- */
-void spi_res_free(void *res)
+int spi_controller_suspend(struct spi_controller *ctlr)
{
- struct spi_res *sres = container_of(res, struct spi_res, data);
-
- if (!res)
- return;
-
- WARN_ON(!list_empty(&sres->entry));
- kfree(sres);
-}
-EXPORT_SYMBOL_GPL(spi_res_free);
+ int ret = 0;
-/**
- * spi_res_add - add a spi_res to the spi_message
- * @message: the spi message
- * @res: the spi_resource
- */
-void spi_res_add(struct spi_message *message, void *res)
-{
- struct spi_res *sres = container_of(res, struct spi_res, data);
+ /* Basically no-ops for non-queued controllers */
+ if (ctlr->queued) {
+ ret = spi_stop_queue(ctlr);
+ if (ret)
+ dev_err(&ctlr->dev, "queue stop failed\n");
+ }
- WARN_ON(!list_empty(&sres->entry));
- list_add_tail(&sres->entry, &message->resources);
+ __spi_mark_suspended(ctlr);
+ return ret;
}
-EXPORT_SYMBOL_GPL(spi_res_add);
+EXPORT_SYMBOL_GPL(spi_controller_suspend);
-/**
- * spi_res_release - release all spi resources for this message
- * @ctlr: the @spi_controller
- * @message: the @spi_message
- */
-void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
+int spi_controller_resume(struct spi_controller *ctlr)
{
- struct spi_res *res;
-
- while (!list_empty(&message->resources)) {
- res = list_last_entry(&message->resources,
- struct spi_res, entry);
-
- if (res->release)
- res->release(ctlr, message, res->data);
+ int ret = 0;
- list_del(&res->entry);
+ __spi_mark_resumed(ctlr);
- kfree(res);
+ if (ctlr->queued) {
+ ret = spi_start_queue(ctlr);
+ if (ret)
+ dev_err(&ctlr->dev, "queue restart failed\n");
}
+ return ret;
}
-EXPORT_SYMBOL_GPL(spi_res_release);
+EXPORT_SYMBOL_GPL(spi_controller_resume);
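A hedged sketch of system-sleep callbacks built on these helpers; note that once marked suspended, synchronous transfers are refused with -ESHUTDOWN via __spi_check_suspended(). The foo_hw_* helpers are assumptions:

static int foo_spi_suspend(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	int ret;

	ret = spi_controller_suspend(ctlr);	/* drains and stops the queue */
	if (ret)
		return ret;

	foo_hw_off(ctlr);			/* assumed clock/power gating */
	return 0;
}

static int foo_spi_resume(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);

	foo_hw_on(ctlr);
	return spi_controller_resume(ctlr);	/* clears SUSPENDED, restarts */
}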
/*-------------------------------------------------------------------------*/
@@ -2559,14 +3539,14 @@ static void __spi_replace_transfers_release(struct spi_controller *ctlr,
struct spi_replaced_transfers *rxfer = res;
size_t i;
- /* call extra callback if requested */
+ /* Call extra callback if requested */
if (rxfer->release)
rxfer->release(ctlr, msg, res);
- /* insert replaced transfers back into the message */
+ /* Insert replaced transfers back into the message */
list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
- /* remove the formerly inserted entries */
+ /* Remove the formerly inserted entries */
for (i = 0; i < rxfer->inserted; i++)
list_del(&rxfer->inserted_transfers[i].transfer_list);
}
@@ -2586,7 +3566,7 @@ static void __spi_replace_transfers_release(struct spi_controller *ctlr,
* Returns: pointer to @spi_replaced_transfers,
* PTR_ERR(...) in case of errors.
*/
-struct spi_replaced_transfers *spi_replace_transfers(
+static struct spi_replaced_transfers *spi_replace_transfers(
struct spi_message *msg,
struct spi_transfer *xfer_first,
size_t remove,
@@ -2599,187 +3579,182 @@ struct spi_replaced_transfers *spi_replace_transfers(
struct spi_transfer *xfer;
size_t i;
- /* allocate the structure using spi_res */
+ /* Allocate the structure using spi_res */
rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
- insert * sizeof(struct spi_transfer)
- + sizeof(struct spi_replaced_transfers)
+ struct_size(rxfer, inserted_transfers, insert)
+ extradatasize,
gfp);
if (!rxfer)
return ERR_PTR(-ENOMEM);
- /* the release code to invoke before running the generic release */
+ /* The release code to invoke before running the generic release */
rxfer->release = release;
- /* assign extradata */
+ /* Assign extradata */
if (extradatasize)
rxfer->extradata =
&rxfer->inserted_transfers[insert];
- /* init the replaced_transfers list */
+ /* Init the replaced_transfers list */
INIT_LIST_HEAD(&rxfer->replaced_transfers);
- /* assign the list_entry after which we should reinsert
+ /*
+ * Assign the list_entry after which we should reinsert
	 * the @replaced_transfers - it may be spi_message.transfers!
*/
rxfer->replaced_after = xfer_first->transfer_list.prev;
- /* remove the requested number of transfers */
+ /* Remove the requested number of transfers */
for (i = 0; i < remove; i++) {
- /* if the entry after replaced_after it is msg->transfers
+ /*
+		 * If the entry after replaced_after is msg->transfers
* then we have been requested to remove more transfers
- * than are in the list
+ * than are in the list.
*/
if (rxfer->replaced_after->next == &msg->transfers) {
dev_err(&msg->spi->dev,
"requested to remove more spi_transfers than are available\n");
- /* insert replaced transfers back into the message */
+ /* Insert replaced transfers back into the message */
list_splice(&rxfer->replaced_transfers,
rxfer->replaced_after);
- /* free the spi_replace_transfer structure */
+ /* Free the spi_replace_transfer structure... */
spi_res_free(rxfer);
- /* and return with an error */
+ /* ...and return with an error */
return ERR_PTR(-EINVAL);
}
- /* remove the entry after replaced_after from list of
- * transfers and add it to list of replaced_transfers
+ /*
+		 * Remove the entry after replaced_after from the list of
+		 * transfers and add it to the list of replaced_transfers.
*/
list_move_tail(rxfer->replaced_after->next,
&rxfer->replaced_transfers);
}
- /* create copy of the given xfer with identical settings
- * based on the first transfer to get removed
+ /*
+	 * Create a copy of the given xfer with identical settings
+ * based on the first transfer to get removed.
*/
for (i = 0; i < insert; i++) {
- /* we need to run in reverse order */
+ /* We need to run in reverse order */
xfer = &rxfer->inserted_transfers[insert - 1 - i];
- /* copy all spi_transfer data */
+ /* Copy all spi_transfer data */
memcpy(xfer, xfer_first, sizeof(*xfer));
- /* add to list */
+ /* Add to list */
list_add(&xfer->transfer_list, rxfer->replaced_after);
- /* clear cs_change and delay_usecs for all but the last */
+ /* Clear cs_change and delay for all but the last */
if (i) {
xfer->cs_change = false;
- xfer->delay_usecs = 0;
+ xfer->delay.value = 0;
}
}
- /* set up inserted */
+ /* Set up inserted... */
rxfer->inserted = insert;
- /* and register it with spi_res/spi_message */
+ /* ...and register it with spi_res/spi_message */
spi_res_add(msg, rxfer);
return rxfer;
}
-EXPORT_SYMBOL_GPL(spi_replace_transfers);
static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
struct spi_message *msg,
struct spi_transfer **xferp,
- size_t maxsize,
- gfp_t gfp)
+ size_t maxsize)
{
struct spi_transfer *xfer = *xferp, *xfers;
struct spi_replaced_transfers *srt;
size_t offset;
size_t count, i;
- /* warn once about this fact that we are splitting a transfer */
- dev_warn_once(&msg->spi->dev,
- "spi_transfer of length %i exceed max length of %zu - needed to split transfers\n",
- xfer->len, maxsize);
-
- /* calculate how many we have to replace */
+ /* Calculate how many we have to replace */
count = DIV_ROUND_UP(xfer->len, maxsize);
- /* create replacement */
- srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
+ /* Create replacement */
+ srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, GFP_KERNEL);
if (IS_ERR(srt))
return PTR_ERR(srt);
xfers = srt->inserted_transfers;
- /* now handle each of those newly inserted spi_transfers
- * note that the replacements spi_transfers all are preset
+ /*
+ * Now handle each of those newly inserted spi_transfers.
+	 * Note that the replacement spi_transfers are all preset
* to the same values as *xferp, so tx_buf, rx_buf and len
* are all identical (as well as most others)
* so we just have to fix up len and the pointers.
- *
- * this also includes support for the depreciated
- * spi_message.is_dma_mapped interface
*/
- /* the first transfer just needs the length modified, so we
- * run it outside the loop
+ /*
+ * The first transfer just needs the length modified, so we
+ * run it outside the loop.
*/
xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
- /* all the others need rx_buf/tx_buf also set */
+ /* All the others need rx_buf/tx_buf also set */
for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
- /* update rx_buf, tx_buf and dma */
+ /* Update rx_buf, tx_buf and DMA */
if (xfers[i].rx_buf)
xfers[i].rx_buf += offset;
- if (xfers[i].rx_dma)
- xfers[i].rx_dma += offset;
if (xfers[i].tx_buf)
xfers[i].tx_buf += offset;
- if (xfers[i].tx_dma)
- xfers[i].tx_dma += offset;
- /* update length */
+ /* Update length */
xfers[i].len = min(maxsize, xfers[i].len - offset);
}
- /* we set up xferp to the last entry we have inserted,
- * so that we skip those already split transfers
+ /*
+ * We set up xferp to the last entry we have inserted,
+ * so that we skip those already split transfers.
*/
*xferp = &xfers[count - 1];
- /* increment statistics counters */
- SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
+ /* Increment statistics counters */
+ SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics,
transfers_split_maxsize);
- SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
+ SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics,
transfers_split_maxsize);
return 0;
}
/**
- * spi_split_tranfers_maxsize - split spi transfers into multiple transfers
- * when an individual transfer exceeds a
- * certain size
+ * spi_split_transfers_maxsize - split spi transfers into multiple transfers
+ * when an individual transfer exceeds a
+ * certain size
* @ctlr: the @spi_controller for this transfer
* @msg: the @spi_message to transform
	 * @maxsize: the maximum length a transfer may have before it gets split
- * @gfp: GFP allocation flags
+ *
+ * This function allocates resources that are automatically freed during the
+ * SPI message unoptimize phase, so this function should only be called from
+ * optimize_message callbacks.
*
* Return: status of transformation
*/
int spi_split_transfers_maxsize(struct spi_controller *ctlr,
struct spi_message *msg,
- size_t maxsize,
- gfp_t gfp)
+ size_t maxsize)
{
struct spi_transfer *xfer;
int ret;
- /* iterate over the transfer_list,
+ /*
+ * Iterate over the transfer_list,
* but note that xfer is advanced to the last transfer inserted
* to avoid checking sizes again unnecessarily (also xfer does
- * potentiall belong to a different list by the time the
- * replacement has happened
+ * potentially belong to a different list by the time the
+ * replacement has happened).
*/
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
if (xfer->len > maxsize) {
ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
- maxsize, gfp);
+ maxsize);
if (ret)
return ret;
}
@@ -2789,9 +3764,55 @@ int spi_split_transfers_maxsize(struct spi_controller *ctlr,
}
EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
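A hedged sketch of the intended call site given the unoptimize-phase note above, assuming the controller's optimize_message hook receives the spi_message; SZ_64K (linux/sizes.h) stands in for a real controller limit:

static int foo_optimize_message(struct spi_message *msg)
{
	/* Split anything larger than the (assumed) 64 KiB DMA limit */
	return spi_split_transfers_maxsize(msg->spi->controller, msg, SZ_64K);
}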
+
+/**
+ * spi_split_transfers_maxwords - split SPI transfers into multiple transfers
+ * when an individual transfer exceeds a
+ * certain number of SPI words
+ * @ctlr: the @spi_controller for this transfer
+ * @msg: the @spi_message to transform
+ * @maxwords: the number of words to limit each transfer to
+ *
+ * This function allocates resources that are automatically freed during the
+ * SPI message unoptimize phase, so this function should only be called from
+ * optimize_message callbacks.
+ *
+ * Return: status of transformation
+ */
+int spi_split_transfers_maxwords(struct spi_controller *ctlr,
+ struct spi_message *msg,
+ size_t maxwords)
+{
+ struct spi_transfer *xfer;
+
+ /*
+ * Iterate over the transfer_list,
+ * but note that xfer is advanced to the last transfer inserted
+ * to avoid checking sizes again unnecessarily (also xfer does
+ * potentially belong to a different list by the time the
+ * replacement has happened).
+ */
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ size_t maxsize;
+ int ret;
+
+ maxsize = maxwords * spi_bpw_to_bytes(xfer->bits_per_word);
+ if (xfer->len > maxsize) {
+ ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
+ maxsize);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_split_transfers_maxwords);
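
The word-based variant suits hardware whose limit is expressed in words rather than bytes; a hypothetical controller capped at 256 words per transfer could use it from the same kind of callback:

static int bar_spi_optimize_message(struct spi_message *msg)
{
        /* Assumed hardware limit: at most 256 words per transfer */
        return spi_split_transfers_maxwords(msg->spi->controller, msg, 256);
}
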
+
/*-------------------------------------------------------------------------*/
-/* Core methods for SPI controller protocol drivers. Some of the
+/*
+ * Core methods for SPI controller protocol drivers. Some of the
* other core methods are currently defined as inline functions.
*/
@@ -2810,6 +3831,36 @@ static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
}
/**
+ * spi_set_cs_timing - configure CS setup, hold, and inactive delays
+ * @spi: the device that requires specific CS timing configuration
+ *
+ * Return: zero on success, else a negative error code.
+ */
+static int spi_set_cs_timing(struct spi_device *spi)
+{
+ struct device *parent = spi->controller->dev.parent;
+ int status = 0;
+
+ if (spi->controller->set_cs_timing && !spi_get_csgpiod(spi, 0)) {
+ if (spi->controller->auto_runtime_pm) {
+ status = pm_runtime_get_sync(parent);
+ if (status < 0) {
+ pm_runtime_put_noidle(parent);
+ dev_err(&spi->controller->dev, "Failed to power device: %d\n",
+ status);
+ return status;
+ }
+
+ status = spi->controller->set_cs_timing(spi);
+ pm_runtime_put_autosuspend(parent);
+ } else {
+ status = spi->controller->set_cs_timing(spi);
+ }
+ }
+ return status;
+}
+
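
The delays consumed by ->set_cs_timing() come from the spi_device itself, so a peripheral driver would normally fill them in before calling spi_setup(). A rough sketch, assuming the cs_setup/cs_hold spi_delay fields and SPI_DELAY_UNIT_NSECS from <linux/spi/spi.h>:

static int foo_apply_cs_timing(struct spi_device *spi)
{
        /* Request 10 ns of CS setup and hold time (hypothetical values) */
        spi->cs_setup.value = 10;
        spi->cs_setup.unit = SPI_DELAY_UNIT_NSECS;
        spi->cs_hold.value = 10;
        spi->cs_hold.unit = SPI_DELAY_UNIT_NSECS;

        return spi_setup(spi);
}
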
+/**
* spi_setup - setup SPI mode and clock rate
* @spi: the device whose settings are being modified
* Context: can sleep, and no requests are queued to the device
@@ -2820,7 +3871,7 @@ static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
* changes those settings, and must be called from a context that can sleep.
* Except for SPI_CS_HIGH, which takes effect immediately, the changes take
* effect the next time the device is selected and data is transferred to
- * or from it. When this function returns, the spi device is deselected.
+ * or from it. When this function returns, the SPI device is deselected.
*
* Note that this call will fail if the protocol driver specifies an option
* that the underlying controller or its driver does not support. For
@@ -2834,26 +3885,37 @@ int spi_setup(struct spi_device *spi)
unsigned bad_bits, ugly_bits;
int status;
- /* check mode to prevent that DUAL and QUAD set at the same time
+ /*
+ * Check mode to prevent any two of DUAL, QUAD and NO_MOSI/MISO from
+ * being set at the same time.
*/
- if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
- ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
+ if ((hweight_long(spi->mode &
+ (SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) ||
+ (hweight_long(spi->mode &
+ (SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) {
dev_err(&spi->dev,
- "setup: can not select dual and quad at the same time\n");
+ "setup: can not select any two of dual, quad and no-rx/tx at the same time\n");
return -EINVAL;
}
- /* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden
- */
+ /* If it is SPI_3WIRE mode, DUAL and QUAD should be forbidden */
if ((spi->mode & SPI_3WIRE) && (spi->mode &
(SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
return -EINVAL;
- /* help drivers fail *cleanly* when they need options
- * that aren't supported with their current controller
+ /* Check against conflicting MOSI idle configuration */
+ if ((spi->mode & SPI_MOSI_IDLE_LOW) && (spi->mode & SPI_MOSI_IDLE_HIGH)) {
+ dev_err(&spi->dev,
+ "setup: MOSI configured to idle low and high at the same time.\n");
+ return -EINVAL;
+ }
+ /*
+ * Help drivers fail *cleanly* when they need options
+ * that aren't supported with their current controller.
* SPI_CS_WORD has a fallback software implementation,
* so it is ignored here.
*/
- bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD);
+ bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
+ SPI_NO_TX | SPI_NO_RX);
ugly_bits = bad_bits &
(SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
@@ -2870,24 +3932,76 @@ int spi_setup(struct spi_device *spi)
return -EINVAL;
}
- if (!spi->bits_per_word)
+ if (!spi->bits_per_word) {
spi->bits_per_word = 8;
+ } else {
+ /*
+ * Some controllers may not support the default 8 bits-per-word,
+ * so only perform the check when this is explicitly provided.
+ */
+ status = __spi_validate_bits_per_word(spi->controller,
+ spi->bits_per_word);
+ if (status)
+ return status;
+ }
- status = __spi_validate_bits_per_word(spi->controller,
- spi->bits_per_word);
- if (status)
- return status;
-
- if (!spi->max_speed_hz)
+ if (spi->controller->max_speed_hz &&
+ (!spi->max_speed_hz ||
+ spi->max_speed_hz > spi->controller->max_speed_hz))
spi->max_speed_hz = spi->controller->max_speed_hz;
- if (spi->controller->setup)
+ mutex_lock(&spi->controller->io_mutex);
+
+ if (spi->controller->setup) {
status = spi->controller->setup(spi);
+ if (status) {
+ mutex_unlock(&spi->controller->io_mutex);
+ dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
+ status);
+ return status;
+ }
+ }
- spi_set_cs(spi, false);
+ status = spi_set_cs_timing(spi);
+ if (status) {
+ mutex_unlock(&spi->controller->io_mutex);
+ return status;
+ }
+
+ if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
+ status = pm_runtime_resume_and_get(spi->controller->dev.parent);
+ if (status < 0) {
+ mutex_unlock(&spi->controller->io_mutex);
+ dev_err(&spi->controller->dev, "Failed to power device: %d\n",
+ status);
+ return status;
+ }
- dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
- (int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
+ /*
+ * We do not want to return a positive value from pm_runtime_get,
+ * there are many instances of devices calling spi_setup() and
+ * checking for a non-zero return value instead of a negative
+ * return value.
+ */
+ status = 0;
+
+ spi_set_cs(spi, false, true);
+ pm_runtime_put_autosuspend(spi->controller->dev.parent);
+ } else {
+ spi_set_cs(spi, false, true);
+ }
+
+ mutex_unlock(&spi->controller->io_mutex);
+
+ if (spi->rt && !spi->controller->rt) {
+ spi->controller->rt = true;
+ spi_set_thread_rt(spi->controller);
+ }
+
+ trace_spi_setup(spi, status);
+
+ dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
+ spi->mode & SPI_MODE_X_MASK,
(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
(spi->mode & SPI_3WIRE) ? "3wire, " : "",
@@ -2899,6 +4013,26 @@ int spi_setup(struct spi_device *spi)
}
EXPORT_SYMBOL_GPL(spi_setup);
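
For reference, the usual caller is a peripheral driver's probe() path, which adjusts the device's mode, word size and clock ceiling before any transfer happens. A minimal sketch with hypothetical values:

static int foo_probe(struct spi_device *spi)
{
        spi->mode |= SPI_MODE_3;        /* CPOL=1, CPHA=1 (device-specific) */
        spi->bits_per_word = 16;        /* hypothetical word size */
        spi->max_speed_hz = 1000000;    /* cap the bus clock at 1 MHz */

        /* Fails cleanly if the controller cannot provide these options */
        return spi_setup(spi);
}
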
+static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
+ struct spi_device *spi)
+{
+ int delay1, delay2;
+
+ delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
+ if (delay1 < 0)
+ return delay1;
+
+ delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
+ if (delay2 < 0)
+ return delay2;
+
+ if (delay1 < delay2)
+ memcpy(&xfer->word_delay, &spi->word_delay,
+ sizeof(xfer->word_delay));
+
+ return 0;
+}
+
static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
struct spi_controller *ctlr = spi->controller;
@@ -2908,36 +4042,10 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
if (list_empty(&message->transfers))
return -EINVAL;
- /* If an SPI controller does not support toggling the CS line on each
- * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
- * for the CS line, we can emulate the CS-per-word hardware function by
- * splitting transfers into one-word transfers and ensuring that
- * cs_change is set for each transfer.
- */
- if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
- gpio_is_valid(spi->cs_gpio))) {
- size_t maxsize;
- int ret;
-
- maxsize = (spi->bits_per_word + 7) / 8;
-
- /* spi_split_transfers_maxsize() requires message->spi */
- message->spi = spi;
-
- ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
- GFP_KERNEL);
- if (ret)
- return ret;
-
- list_for_each_entry(xfer, &message->transfers, transfer_list) {
- /* don't change cs_change on the last entry in the list */
- if (list_is_last(&xfer->transfer_list, &message->transfers))
- break;
- xfer->cs_change = 1;
- }
- }
+ message->spi = spi;
- /* Half-duplex links include original MicroWire, and ones with
+ /*
+ * Half-duplex links include original MicroWire, and ones with
* only one data pin like SPI_3WIRE (switches direction) or where
* either MOSI or MISO is missing. They can also be caused by
* software limitations.
@@ -2956,22 +4064,23 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
}
}
- /**
+ /*
* Set transfer bits_per_word and max speed as spi device default if
* it is not set for this transfer.
* Set transfer tx_nbits and rx_nbits as single transfer default
* (SPI_NBITS_SINGLE) if it is not set for this transfer.
+ * Ensure transfer word_delay is at least as long as that required by
+ * device itself.
*/
message->frame_length = 0;
list_for_each_entry(xfer, &message->transfers, transfer_list) {
+ xfer->effective_speed_hz = 0;
message->frame_length += xfer->len;
if (!xfer->bits_per_word)
xfer->bits_per_word = spi->bits_per_word;
if (!xfer->speed_hz)
xfer->speed_hz = spi->max_speed_hz;
- if (!xfer->speed_hz)
- xfer->speed_hz = ctlr->max_speed_hz;
if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
xfer->speed_hz = ctlr->max_speed_hz;
@@ -2979,9 +4088,16 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
return -EINVAL;
+ /* DDR mode is supported only if the controller has dtr_caps set;
+ * otherwise SDR mode is assumed for SPI and QSPI controllers.
+ * Note: this applies only to QSPI controllers.
+ */
+ if (xfer->dtr_mode && !ctlr->dtr_caps)
+ return -EINVAL;
+
/*
* SPI transfer length should be multiple of SPI word size
- * where SPI word size should be power-of-two multiple
+ * where SPI word size should be power-of-two multiple.
*/
if (xfer->bits_per_word <= 8)
w_size = 1;
@@ -3002,33 +4118,58 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
xfer->tx_nbits = SPI_NBITS_SINGLE;
if (xfer->rx_buf && !xfer->rx_nbits)
xfer->rx_nbits = SPI_NBITS_SINGLE;
- /* check transfer tx/rx_nbits:
+ /*
+ * Check transfer tx/rx_nbits:
* 1. check the value matches one of single, dual and quad
* 2. check tx/rx_nbits match the mode in spi_device
*/
if (xfer->tx_buf) {
+ if (spi->mode & SPI_NO_TX)
+ return -EINVAL;
if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
xfer->tx_nbits != SPI_NBITS_DUAL &&
- xfer->tx_nbits != SPI_NBITS_QUAD)
+ xfer->tx_nbits != SPI_NBITS_QUAD &&
+ xfer->tx_nbits != SPI_NBITS_OCTAL)
return -EINVAL;
if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
- !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
+ !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL)))
return -EINVAL;
if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
- !(spi->mode & SPI_TX_QUAD))
+ !(spi->mode & (SPI_TX_QUAD | SPI_TX_OCTAL)))
+ return -EINVAL;
+ if ((xfer->tx_nbits == SPI_NBITS_OCTAL) &&
+ !(spi->mode & SPI_TX_OCTAL))
return -EINVAL;
}
- /* check transfer rx_nbits */
+ /* Check transfer rx_nbits */
if (xfer->rx_buf) {
+ if (spi->mode & SPI_NO_RX)
+ return -EINVAL;
if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
xfer->rx_nbits != SPI_NBITS_DUAL &&
- xfer->rx_nbits != SPI_NBITS_QUAD)
+ xfer->rx_nbits != SPI_NBITS_QUAD &&
+ xfer->rx_nbits != SPI_NBITS_OCTAL)
return -EINVAL;
if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
- !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
+ !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
return -EINVAL;
if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
- !(spi->mode & SPI_RX_QUAD))
+ !(spi->mode & (SPI_RX_QUAD | SPI_RX_OCTAL)))
+ return -EINVAL;
+ if ((xfer->rx_nbits == SPI_NBITS_OCTAL) &&
+ !(spi->mode & SPI_RX_OCTAL))
+ return -EINVAL;
+ }
+
+ if (_spi_xfer_word_delay_update(xfer, spi))
+ return -EINVAL;
+
+ /* Make sure controller supports required offload features. */
+ if (xfer->offload_flags) {
+ if (!message->offload)
+ return -EINVAL;
+
+ if (xfer->offload_flags & ~message->offload->xfer_flags)
return -EINVAL;
}
}
@@ -3038,9 +4179,186 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
return 0;
}
+/*
+ * spi_split_transfers - generic handling of transfer splitting
+ * @msg: the message to split
+ *
+ * Under certain conditions, an SPI controller may not support arbitrary
+ * transfer sizes or other features required by a peripheral. This function
+ * will split the transfers in the message into smaller transfers that are
+ * supported by the controller.
+ *
+ * Controllers with special requirements not covered here can also split
+ * transfers in the optimize_message() callback.
+ *
+ * Context: can sleep
+ * Return: zero on success, else a negative error code
+ */
+static int spi_split_transfers(struct spi_message *msg)
+{
+ struct spi_controller *ctlr = msg->spi->controller;
+ struct spi_transfer *xfer;
+ int ret;
+
+ /*
+ * If an SPI controller does not support toggling the CS line on each
+ * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
+ * for the CS line, we can emulate the CS-per-word hardware function by
+ * splitting transfers into one-word transfers and ensuring that
+ * cs_change is set for each transfer.
+ */
+ if ((msg->spi->mode & SPI_CS_WORD) &&
+ (!(ctlr->mode_bits & SPI_CS_WORD) || spi_is_csgpiod(msg->spi))) {
+ ret = spi_split_transfers_maxwords(ctlr, msg, 1);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ /* Don't change cs_change on the last entry in the list */
+ if (list_is_last(&xfer->transfer_list, &msg->transfers))
+ break;
+
+ xfer->cs_change = 1;
+ }
+ } else {
+ ret = spi_split_transfers_maxsize(ctlr, msg,
+ spi_max_transfer_size(msg->spi));
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * __spi_optimize_message - shared implementation for spi_optimize_message()
+ * and spi_maybe_optimize_message()
+ * @spi: the device that will be used for the message
+ * @msg: the message to optimize
+ *
+ * Peripheral drivers will call spi_optimize_message() and the spi core will
+ * call spi_maybe_optimize_message() instead of calling this directly.
+ *
+ * It is not valid to call this on a message that has already been optimized.
+ *
+ * Return: zero on success, else a negative error code
+ */
+static int __spi_optimize_message(struct spi_device *spi,
+ struct spi_message *msg)
+{
+ struct spi_controller *ctlr = spi->controller;
+ int ret;
+
+ ret = __spi_validate(spi, msg);
+ if (ret)
+ return ret;
+
+ ret = spi_split_transfers(msg);
+ if (ret)
+ return ret;
+
+ if (ctlr->optimize_message) {
+ ret = ctlr->optimize_message(msg);
+ if (ret) {
+ spi_res_release(ctlr, msg);
+ return ret;
+ }
+ }
+
+ msg->optimized = true;
+
+ return 0;
+}
+
+/*
+ * spi_maybe_optimize_message - optimize message if it isn't already pre-optimized
+ * @spi: the device that will be used for the message
+ * @msg: the message to optimize
+ * Return: zero on success, else a negative error code
+ */
+static int spi_maybe_optimize_message(struct spi_device *spi,
+ struct spi_message *msg)
+{
+ if (spi->controller->defer_optimize_message) {
+ msg->spi = spi;
+ return 0;
+ }
+
+ if (msg->pre_optimized)
+ return 0;
+
+ return __spi_optimize_message(spi, msg);
+}
+
+/**
+ * spi_optimize_message - do any one-time validation and setup for a SPI message
+ * @spi: the device that will be used for the message
+ * @msg: the message to optimize
+ *
+ * Peripheral drivers that reuse the same message repeatedly may call this to
+ * perform as much message prep as possible once, rather than repeating it each
+ * time a message transfer is performed, to improve throughput and reduce CPU
+ * usage.
+ *
+ * Once a message has been optimized, it cannot be modified with the exception
+ * of updating the contents of any xfer->tx_buf (the pointer can't be changed,
+ * only the data in the memory it points to).
+ *
+ * Calls to this function must be balanced with calls to spi_unoptimize_message()
+ * to avoid leaking resources.
+ *
+ * Context: can sleep
+ * Return: zero on success, else a negative error code
+ */
+int spi_optimize_message(struct spi_device *spi, struct spi_message *msg)
+{
+ int ret;
+
+ /*
+ * Pre-optimization is not supported and optimization is deferred, e.g.
+ * when using spi-mux.
+ */
+ if (spi->controller->defer_optimize_message)
+ return 0;
+
+ ret = __spi_optimize_message(spi, msg);
+ if (ret)
+ return ret;
+
+ /*
+ * This flag indicates that the peripheral driver called spi_optimize_message()
+ * and therefore we shouldn't unoptimize the message automatically when
+ * finalizing it, but rather wait until spi_unoptimize_message() is called
+ * by the peripheral driver.
+ */
+ msg->pre_optimized = true;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_optimize_message);
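
A sketch of the intended usage pattern for a peripheral driver that re-submits the same message from a hot path; all names below are hypothetical, and the balancing spi_unoptimize_message() call goes in the teardown path:

struct foo_priv {
        struct spi_device *spi;
        struct spi_message msg;
        struct spi_transfer xfer;
        u8 buf[4];
};

static int foo_init_message(struct foo_priv *priv)
{
        priv->xfer.tx_buf = priv->buf;
        priv->xfer.len = sizeof(priv->buf);
        spi_message_init_with_transfers(&priv->msg, &priv->xfer, 1);

        /* One-time validation and transfer splitting */
        return spi_optimize_message(priv->spi, &priv->msg);
}

static int foo_send(struct foo_priv *priv)
{
        /* Only the data behind xfer.tx_buf may change between calls */
        return spi_sync(priv->spi, &priv->msg);
}

static void foo_teardown(struct foo_priv *priv)
{
        spi_unoptimize_message(&priv->msg);
}
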
+
+/**
+ * spi_unoptimize_message - releases any resources allocated by spi_optimize_message()
+ * @msg: the message to unoptimize
+ *
+ * Calls to this function must be balanced with calls to spi_optimize_message().
+ *
+ * Context: can sleep
+ */
+void spi_unoptimize_message(struct spi_message *msg)
+{
+ if (msg->spi->controller->defer_optimize_message)
+ return;
+
+ __spi_unoptimize_message(msg);
+ msg->pre_optimized = false;
+}
+EXPORT_SYMBOL_GPL(spi_unoptimize_message);
+
static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
struct spi_controller *ctlr = spi->controller;
+ struct spi_transfer *xfer;
/*
* Some controllers do not support doing regular SPI transfers. Return
@@ -3049,21 +4367,54 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
if (!ctlr->transfer)
return -ENOTSUPP;
- message->spi = spi;
-
- SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
- SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
+ SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async);
+ SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async);
trace_spi_message_submit(message);
+ if (!ctlr->ptp_sts_supported) {
+ list_for_each_entry(xfer, &message->transfers, transfer_list) {
+ xfer->ptp_sts_word_pre = 0;
+ ptp_read_system_prets(xfer->ptp_sts);
+ }
+ }
+
return ctlr->transfer(spi, message);
}
+static void devm_spi_unoptimize_message(void *msg)
+{
+ spi_unoptimize_message(msg);
+}
+
+/**
+ * devm_spi_optimize_message - managed version of spi_optimize_message()
+ * @dev: the device that manages @msg (usually @spi->dev)
+ * @spi: the device that will be used for the message
+ * @msg: the message to optimize
+ * Return: zero on success, else a negative error code
+ *
+ * spi_unoptimize_message() will automatically be called when the device is
+ * removed.
+ */
+int devm_spi_optimize_message(struct device *dev, struct spi_device *spi,
+ struct spi_message *msg)
+{
+ int ret;
+
+ ret = spi_optimize_message(spi, msg);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, devm_spi_unoptimize_message, msg);
+}
+EXPORT_SYMBOL_GPL(devm_spi_optimize_message);
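
The managed variant simply ties the unoptimize step to the device's lifetime; e.g. from a probe() function, reusing the hypothetical foo_priv layout from the earlier sketch:

static int bar_probe(struct spi_device *spi)
{
        struct foo_priv *priv;

        priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->spi = spi;
        priv->xfer.tx_buf = priv->buf;
        priv->xfer.len = sizeof(priv->buf);
        spi_message_init_with_transfers(&priv->msg, &priv->xfer, 1);

        /* spi_unoptimize_message() is called automatically on removal */
        return devm_spi_optimize_message(&spi->dev, spi, &priv->msg);
}
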
+
/**
* spi_async - asynchronous SPI transfer
* @spi: device with which data will be exchanged
* @message: describes the data transfers, including completion callback
- * Context: any (irqs may be blocked, etc)
+ * Context: any (IRQs may be blocked, etc)
*
* This call may be used in_irq and other contexts which can't sleep,
* as well as from task contexts which can sleep.
@@ -3096,8 +4447,8 @@ int spi_async(struct spi_device *spi, struct spi_message *message)
int ret;
unsigned long flags;
- ret = __spi_validate(spi, message);
- if (ret != 0)
+ ret = spi_maybe_optimize_message(spi, message);
+ if (ret)
return ret;
spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
@@ -3113,61 +4464,41 @@ int spi_async(struct spi_device *spi, struct spi_message *message)
}
EXPORT_SYMBOL_GPL(spi_async);
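
Since the completion callback is invoked in a context that cannot sleep, callers typically just record status and signal a waiter. A rough sketch with hypothetical names, assuming the earlier foo_priv also carries a struct completion named done:

static void foo_msg_complete(void *context)
{
        struct foo_priv *priv = context;

        /* priv->msg.status is valid from this point on */
        complete(&priv->done);
}

static int foo_submit(struct foo_priv *priv)
{
        priv->msg.complete = foo_msg_complete;
        priv->msg.context = priv;

        return spi_async(priv->spi, &priv->msg);
}
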
-/**
- * spi_async_locked - version of spi_async with exclusive bus usage
- * @spi: device with which data will be exchanged
- * @message: describes the data transfers, including completion callback
- * Context: any (irqs may be blocked, etc)
- *
- * This call may be used in_irq and other contexts which can't sleep,
- * as well as from task contexts which can sleep.
- *
- * The completion callback is invoked in a context which can't sleep.
- * Before that invocation, the value of message->status is undefined.
- * When the callback is issued, message->status holds either zero (to
- * indicate complete success) or a negative error code. After that
- * callback returns, the driver which issued the transfer request may
- * deallocate the associated memory; it's no longer in use by any SPI
- * core or controller driver code.
- *
- * Note that although all messages to a spi_device are handled in
- * FIFO order, messages may go to different devices in other orders.
- * Some device might be higher priority, or have various "hard" access
- * time requirements, for example.
- *
- * On detection of any fault during the transfer, processing of
- * the entire message is aborted, and the device is deselected.
- * Until returning from the associated message completion callback,
- * no other spi_message queued to that device will be processed.
- * (This rule applies equally to all the synchronous transfer calls,
- * which are wrappers around this core asynchronous primitive.)
- *
- * Return: zero on success, else a negative error code.
- */
-int spi_async_locked(struct spi_device *spi, struct spi_message *message)
+static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg)
{
- struct spi_controller *ctlr = spi->controller;
+ bool was_busy;
int ret;
- unsigned long flags;
-
- ret = __spi_validate(spi, message);
- if (ret != 0)
- return ret;
- spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
+ mutex_lock(&ctlr->io_mutex);
- ret = __spi_async(spi, message);
+ was_busy = ctlr->busy;
- spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
+ ctlr->cur_msg = msg;
+ ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
+ if (ret)
+ dev_err(&ctlr->dev, "noqueue transfer failed\n");
+ ctlr->cur_msg = NULL;
+ ctlr->fallback = false;
- return ret;
+ if (!was_busy) {
+ kfree(ctlr->dummy_rx);
+ ctlr->dummy_rx = NULL;
+ kfree(ctlr->dummy_tx);
+ ctlr->dummy_tx = NULL;
+ if (ctlr->unprepare_transfer_hardware &&
+ ctlr->unprepare_transfer_hardware(ctlr))
+ dev_err(&ctlr->dev,
+ "failed to unprepare transfer hardware\n");
+ spi_idle_runtime_pm(ctlr);
+ }
+ mutex_unlock(&ctlr->io_mutex);
}
-EXPORT_SYMBOL_GPL(spi_async_locked);
/*-------------------------------------------------------------------------*/
-/* Utility methods for SPI protocol drivers, layered on
+/*
+ * Utility methods for SPI protocol drivers, layered on
* top of the core. Some other utility methods are defined as
* inline functions.
*/
@@ -3180,54 +4511,62 @@ static void spi_complete(void *arg)
static int __spi_sync(struct spi_device *spi, struct spi_message *message)
{
DECLARE_COMPLETION_ONSTACK(done);
+ unsigned long flags;
int status;
struct spi_controller *ctlr = spi->controller;
- unsigned long flags;
- status = __spi_validate(spi, message);
- if (status != 0)
- return status;
+ if (__spi_check_suspended(ctlr)) {
+ dev_warn_once(&spi->dev, "Attempted to sync while suspended\n");
+ return -ESHUTDOWN;
+ }
- message->complete = spi_complete;
- message->context = &done;
- message->spi = spi;
+ status = spi_maybe_optimize_message(spi, message);
+ if (status)
+ return status;
- SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
- SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
+ SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
+ SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync);
- /* If we're not using the legacy transfer method then we will
- * try to transfer in the calling context so special case.
- * This code would be less tricky if we could remove the
- * support for driver implemented message queues.
+ /*
+ * Checking queue_empty here only guarantees async/sync message
+ * ordering when coming from the same context. It does not need to
+ * guard against reentrancy from a different context. The io_mutex
+ * will catch those cases.
*/
- if (ctlr->transfer == spi_queued_transfer) {
- spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
+ if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
+ message->actual_length = 0;
+ message->status = -EINPROGRESS;
trace_spi_message_submit(message);
- status = __spi_queued_transfer(spi, message, false);
+ SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate);
+ SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate);
- spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
- } else {
- status = spi_async_locked(spi, message);
+ __spi_transfer_message_noqueue(ctlr, message);
+
+ return message->status;
}
- if (status == 0) {
- /* Push out the messages in the calling context if we
- * can.
- */
- if (ctlr->transfer == spi_queued_transfer) {
- SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
- spi_sync_immediate);
- SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
- spi_sync_immediate);
- __spi_pump_messages(ctlr, false);
- }
+ /*
+ * There are messages in the async queue that could have originated
+ * from the same context, so we need to preserve ordering.
+ * Therefore we send the message to the async queue and wait until it
+ * is completed.
+ */
+ message->complete = spi_complete;
+ message->context = &done;
+
+ spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
+ status = __spi_async(spi, message);
+ spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
+ if (status == 0) {
wait_for_completion(&done);
status = message->status;
}
+ message->complete = NULL;
message->context = NULL;
+
return status;
}
@@ -3288,7 +4627,7 @@ EXPORT_SYMBOL_GPL(spi_sync_locked);
/**
* spi_bus_lock - obtain a lock for exclusive SPI bus usage
- * @ctlr: SPI bus master that should be locked for exclusive bus access
+ * @ctlr: SPI bus controller that should be locked for exclusive bus access
* Context: can sleep
*
* This call may only be used from a context that may sleep. The sleep
@@ -3311,7 +4650,7 @@ int spi_bus_lock(struct spi_controller *ctlr)
ctlr->bus_lock_flag = 1;
spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
- /* mutex remains locked until spi_bus_unlock is called */
+ /* Mutex remains locked until spi_bus_unlock() is called */
return 0;
}
@@ -3319,7 +4658,7 @@ EXPORT_SYMBOL_GPL(spi_bus_lock);
/**
* spi_bus_unlock - release the lock for exclusive SPI bus usage
- * @ctlr: SPI bus master that was locked for exclusive bus access
+ * @ctlr: SPI bus controller that was locked for exclusive bus access
* Context: can sleep
*
* This call may only be used from a context that may sleep. The sleep
@@ -3340,7 +4679,7 @@ int spi_bus_unlock(struct spi_controller *ctlr)
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
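
A sketch of how a peripheral driver would use the bus lock to keep two messages back to back on the bus, with no other device's traffic in between (hypothetical helper, error handling trimmed):

static int foo_read_modify_write(struct spi_device *spi,
                                 struct spi_message *rd,
                                 struct spi_message *wr)
{
        struct spi_controller *ctlr = spi->controller;
        int ret;

        spi_bus_lock(ctlr);
        ret = spi_sync_locked(spi, rd);
        if (!ret)
                ret = spi_sync_locked(spi, wr);
        spi_bus_unlock(ctlr);

        return ret;
}
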
-/* portable code must never pass more than 32 bytes */
+/* Portable code must never pass more than 32 bytes */
#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
static u8 *buf;
@@ -3348,9 +4687,9 @@ static u8 *buf;
/**
* spi_write_then_read - SPI synchronous write followed by read
* @spi: device with which data will be exchanged
- * @txbuf: data to be written (need not be dma-safe)
+ * @txbuf: data to be written (need not be DMA-safe)
* @n_tx: size of txbuf, in bytes
- * @rxbuf: buffer into which data will be read (need not be dma-safe)
+ * @rxbuf: buffer into which data will be read (need not be DMA-safe)
* @n_rx: size of rxbuf, in bytes
* Context: can sleep
*
@@ -3359,10 +4698,9 @@ static u8 *buf;
* is zero for success, else a negative errno status code.
* This call may only be used from a context that may sleep.
*
- * Parameters to this routine are always copied using a small buffer;
- * portable code should never use this for more than 32 bytes.
+ * Parameters to this routine are always copied using a small buffer.
* Performance-sensitive or bulk transfer code should instead use
- * spi_{async,sync}() calls with dma-safe buffers.
+ * spi_{async,sync}() calls with DMA-safe buffers.
*
* Return: zero on success, else a negative error code.
*/
@@ -3377,7 +4715,8 @@ int spi_write_then_read(struct spi_device *spi,
struct spi_transfer x[2];
u8 *local_buf;
- /* Use preallocated DMA-safe buffer if we can. We can't avoid
+ /*
+ * Use preallocated DMA-safe buffer if we can. We can't avoid
* copying here, (as a pure convenience thing), but we can
* keep heap costs out of the hot path unless someone else is
* using the pre-allocated buffer or the transfer is too large.
@@ -3406,7 +4745,7 @@ int spi_write_then_read(struct spi_device *spi,
x[0].tx_buf = local_buf;
x[1].rx_buf = local_buf + n_tx;
- /* do the i/o */
+ /* Do the I/O */
status = spi_sync(spi, &message);
if (status == 0)
memcpy(rxbuf, x[1].rx_buf, n_rx);
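
In practice this helper is most often wrapped in small register accessors; e.g. for a hypothetical device with an 8-bit register address followed by an 8-bit value:

static int foo_read_reg(struct spi_device *spi, u8 reg, u8 *val)
{
        /* Write the address, then clock in one byte of data */
        return spi_write_then_read(spi, &reg, 1, val, 1);
}
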
@@ -3422,42 +4761,27 @@ EXPORT_SYMBOL_GPL(spi_write_then_read);
/*-------------------------------------------------------------------------*/
-#if IS_ENABLED(CONFIG_OF)
-static int __spi_of_device_match(struct device *dev, void *data)
+#if IS_ENABLED(CONFIG_OF_DYNAMIC)
+/* Must call put_device() when done with the returned spi_device */
+static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
- return dev->of_node == data;
-}
+ struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
-/* must call put_device() when done with returned spi_device device */
-struct spi_device *of_find_spi_device_by_node(struct device_node *node)
-{
- struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
- __spi_of_device_match);
return dev ? to_spi_device(dev) : NULL;
}
-EXPORT_SYMBOL_GPL(of_find_spi_device_by_node);
-#endif /* IS_ENABLED(CONFIG_OF) */
-#if IS_ENABLED(CONFIG_OF_DYNAMIC)
-static int __spi_of_controller_match(struct device *dev, const void *data)
-{
- return dev->of_node == data;
-}
-
-/* the spi controllers are not using spi_bus, so we find it with another way */
+/* The SPI controllers are not using spi_bus, so we find them another way */
static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
{
struct device *dev;
- dev = class_find_device(&spi_master_class, NULL, node,
- __spi_of_controller_match);
+ dev = class_find_device_by_of_node(&spi_controller_class, node);
if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
- dev = class_find_device(&spi_slave_class, NULL, node,
- __spi_of_controller_match);
+ dev = class_find_device_by_of_node(&spi_target_class, node);
if (!dev)
return NULL;
- /* reference got in class_find_device */
+ /* Reference got in class_find_device */
return container_of(dev, struct spi_controller, dev);
}
@@ -3472,13 +4796,18 @@ static int of_spi_notify(struct notifier_block *nb, unsigned long action,
case OF_RECONFIG_CHANGE_ADD:
ctlr = of_find_spi_controller_by_node(rd->dn->parent);
if (ctlr == NULL)
- return NOTIFY_OK; /* not for us */
+ return NOTIFY_OK; /* Not for us */
if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
put_device(&ctlr->dev);
return NOTIFY_OK;
}
+ /*
+ * Clear the flag before adding the device so that fw_devlink
+ * doesn't skip adding consumers to this device.
+ */
+ rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
spi = of_register_spi_device(ctlr, rd->dn);
put_device(&ctlr->dev);
@@ -3491,19 +4820,19 @@ static int of_spi_notify(struct notifier_block *nb, unsigned long action,
break;
case OF_RECONFIG_CHANGE_REMOVE:
- /* already depopulated? */
+ /* Already depopulated? */
if (!of_node_check_flag(rd->dn, OF_POPULATED))
return NOTIFY_OK;
- /* find our device by node */
+ /* Find our device by node */
spi = of_find_spi_device_by_node(rd->dn);
if (spi == NULL)
- return NOTIFY_OK; /* no? not meant for us */
+ return NOTIFY_OK; /* No? not meant for us */
- /* unregister takes one ref away */
+ /* Unregister takes one ref away */
spi_unregister_device(spi);
- /* and put the reference of the find */
+ /* And put the reference of the find */
put_device(&spi->dev);
break;
}
@@ -3521,36 +4850,31 @@ extern struct notifier_block spi_of_notifier;
#if IS_ENABLED(CONFIG_ACPI)
static int spi_acpi_controller_match(struct device *dev, const void *data)
{
- return ACPI_COMPANION(dev->parent) == data;
-}
-
-static int spi_acpi_device_match(struct device *dev, void *data)
-{
- return ACPI_COMPANION(dev) == data;
+ return device_match_acpi_dev(dev->parent, data);
}
-static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
+struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
{
struct device *dev;
- dev = class_find_device(&spi_master_class, NULL, adev,
+ dev = class_find_device(&spi_controller_class, NULL, adev,
spi_acpi_controller_match);
if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
- dev = class_find_device(&spi_slave_class, NULL, adev,
+ dev = class_find_device(&spi_target_class, NULL, adev,
spi_acpi_controller_match);
if (!dev)
return NULL;
return container_of(dev, struct spi_controller, dev);
}
+EXPORT_SYMBOL_GPL(acpi_spi_find_controller_by_adev);
static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
{
struct device *dev;
- dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match);
-
- return dev ? to_spi_device(dev) : NULL;
+ dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
+ return to_spi_device(dev);
}
static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
@@ -3562,7 +4886,7 @@ static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
switch (value) {
case ACPI_RECONFIG_DEVICE_ADD:
- ctlr = acpi_spi_find_controller_by_adev(adev->parent);
+ ctlr = acpi_spi_find_controller_by_adev(acpi_dev_parent(adev));
if (!ctlr)
break;
@@ -3606,12 +4930,12 @@ static int __init spi_init(void)
if (status < 0)
goto err1;
- status = class_register(&spi_master_class);
+ status = class_register(&spi_controller_class);
if (status < 0)
goto err2;
if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
- status = class_register(&spi_slave_class);
+ status = class_register(&spi_target_class);
if (status < 0)
goto err3;
}
@@ -3624,7 +4948,7 @@ static int __init spi_init(void)
return 0;
err3:
- class_unregister(&spi_master_class);
+ class_unregister(&spi_controller_class);
err2:
bus_unregister(&spi_bus_type);
err1:
@@ -3634,12 +4958,12 @@ err0:
return status;
}
-/* board_info is normally registered in arch_initcall(),
- * but even essential drivers wait till later
+/*
+ * A board_info is normally registered in arch_initcall(),
+ * but even essential drivers wait till later.
*
- * REVISIT only boardinfo really needs static linking. the rest (device and
- * driver registration) _could_ be dynamically linked (modular) ... costs
+ * REVISIT only boardinfo really needs static linking. The rest (device and
+ * driver registration) _could_ be dynamically linked (modular) ... Costs
* include needing to have boardinfo data structures be much more public.
*/
postcore_initcall(spi_init);
-