author		Arnd Bergmann <arnd@arndb.de>	2022-02-25 17:05:31 +0100
committer	Arnd Bergmann <arnd@arndb.de>	2022-02-25 17:05:31 +0100
commit		b610c55bdfb95b84e010129527f01a662a218d67 (patch)
tree		837770f4434a49f3a45b40cd65ac7d94688796d1 /drivers/firmware
parent		c8812c2a0815b0969414d9d224a1c6e652159e75 (diff)
parent		38a0e5b735d6152d334d2f94b925a1c8a93bd7eb (diff)
Merge tag 'scmi-updates-5.18' of git://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux into arm/drivers
Arm SCMI firmware interface updates for v5.18

A few of the main additions include:

- Support for an OP-TEE based SCMI transport, to enable using the SCMI
  service provided by OP-TEE on some platforms
- Support for atomic SCMI transports, which enables a few SCMI
  transactions to be completed in atomic context. This involves other
  associated refactoring work. It also marks SMC and OP-TEE as atomic
  transports, as their commands are completed once the call returns.
- Support for polling mode in the SCMI VirtIO transport, in order to
  support atomic operations
- Support for atomic clock operations based on the availability of
  atomic capability in the underlying SCMI transport

Other changes involve some trace and log enhancements and miscellaneous
bug fixes.

* tag 'scmi-updates-5.18' of git://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux: (28 commits)
  clk: scmi: Support atomic clock enable/disable API
  firmware: arm_scmi: Add support for clock_enable_latency
  firmware: arm_scmi: Add atomic support to clock protocol
  firmware: arm_scmi: Support optional system wide atomic-threshold-us
  dt-bindings: firmware: arm,scmi: Add atomic-threshold-us optional property
  firmware: arm_scmi: Add atomic mode support to virtio transport
  firmware: arm_scmi: Review virtio free_list handling
  firmware: arm_scmi: Add a virtio channel refcount
  firmware: arm_scmi: Disable ftrace for Clang Thumb2 builds
  firmware: arm_scmi: Add new parameter to mark_txdone
  firmware: arm_scmi: Add atomic mode support to smc transport
  firmware: arm_scmi: Add support for atomic transports
  firmware: arm_scmi: Make optee support sync_cmds_completed_on_ret
  firmware: arm_scmi: Make smc support sync_cmds_completed_on_ret
  firmware: arm_scmi: Add sync_cmds_completed_on_ret transport flag
  firmware: arm_scmi: Make smc transport use common completions
  firmware: arm_scmi: Add configurable polling mode for transports
  firmware: arm_scmi: Use new trace event scmi_xfer_response_wait
  include: trace: Add new scmi_xfer_response_wait event
  firmware: arm_scmi: Refactor message response path
  ...

Link: https://lore.kernel.org/r/20220222201742.3338589-1-sudeep.holla@arm.com
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'drivers/firmware')
-rw-r--r--  drivers/firmware/arm_scmi/Kconfig   |  56
-rw-r--r--  drivers/firmware/arm_scmi/Makefile  |   8
-rw-r--r--  drivers/firmware/arm_scmi/clock.c   |  34
-rw-r--r--  drivers/firmware/arm_scmi/common.h  |  26
-rw-r--r--  drivers/firmware/arm_scmi/driver.c  | 234
-rw-r--r--  drivers/firmware/arm_scmi/mailbox.c |   3
-rw-r--r--  drivers/firmware/arm_scmi/optee.c   | 567
-rw-r--r--  drivers/firmware/arm_scmi/smc.c     |  98
-rw-r--r--  drivers/firmware/arm_scmi/virtio.c  | 613
9 files changed, 1466 insertions, 173 deletions
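
As a quick orientation before the diff itself: the series exposes the transport's atomic capability to SCMI drivers through a new operation on the SCMI handle. Below is only an illustrative sketch of how a hypothetical consumer driver might query it at probe time; the "foo_" names are invented here, while scmi_device, scmi_handle and is_transport_atomic() are introduced by this series.

/*
 * Hypothetical consumer sketch (not part of this series): query whether
 * the underlying transport supports atomic operation and, if so, read
 * the optional system wide threshold configured via "atomic-threshold-us".
 */
static int foo_scmi_probe(struct scmi_device *sdev)
{
	unsigned int atomic_threshold_us = 0;
	const struct scmi_handle *handle = sdev->handle;

	if (!handle)
		return -ENODEV;

	if (handle->is_transport_atomic(handle, &atomic_threshold_us))
		dev_info(&sdev->dev, "atomic transport, threshold %u us\n",
			 atomic_threshold_us);

	/* ...pick atomic or sleeping SCMI operations accordingly... */

	return 0;
}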
diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig
index 3d7081e84853..7794bd41eaa0 100644
--- a/drivers/firmware/arm_scmi/Kconfig
+++ b/drivers/firmware/arm_scmi/Kconfig
@@ -54,6 +54,18 @@ config ARM_SCMI_TRANSPORT_MAILBOX
If you want the ARM SCMI PROTOCOL stack to include support for a
transport based on mailboxes, answer Y.
+config ARM_SCMI_TRANSPORT_OPTEE
+ bool "SCMI transport based on OP-TEE service"
+ depends on OPTEE=y || OPTEE=ARM_SCMI_PROTOCOL
+ select ARM_SCMI_HAVE_TRANSPORT
+ select ARM_SCMI_HAVE_SHMEM
+ default y
+ help
+ This enables the OP-TEE service based transport for SCMI.
+
+ If you want the ARM SCMI PROTOCOL stack to include support for a
+ transport based on OP-TEE SCMI service, answer Y.
+
config ARM_SCMI_TRANSPORT_SMC
bool "SCMI transport based on SMC"
depends on HAVE_ARM_SMCCC_DISCOVERY
@@ -66,6 +78,20 @@ config ARM_SCMI_TRANSPORT_SMC
If you want the ARM SCMI PROTOCOL stack to include support for a
transport based on SMC, answer Y.
+config ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE
+ bool "Enable atomic mode support for SCMI SMC transport"
+ depends on ARM_SCMI_TRANSPORT_SMC
+ help
+ Enable support of atomic operation for SCMI SMC based transport.
+
+ If you want the SCMI SMC based transport to operate in atomic
+ mode, avoiding any kind of sleeping behaviour for selected
+ transactions on the TX path, answer Y.
+ Enabling atomic mode operations allows any SCMI driver using this
+ transport to optionally ask for atomic SCMI transactions and operate
+ in atomic context too, at the price of using a number of busy-waiting
+ primitives all over instead. If unsure say N.
+
config ARM_SCMI_TRANSPORT_VIRTIO
bool "SCMI transport based on VirtIO"
depends on VIRTIO=y || VIRTIO=ARM_SCMI_PROTOCOL
@@ -77,6 +103,36 @@ config ARM_SCMI_TRANSPORT_VIRTIO
If you want the ARM SCMI PROTOCOL stack to include support for a
transport based on VirtIO, answer Y.
+config ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE
+ bool "SCMI VirtIO transport Version 1 compliance"
+ depends on ARM_SCMI_TRANSPORT_VIRTIO
+ default y
+ help
+ This enforces strict compliance with VirtIO Version 1 specification.
+
+ If you want the ARM SCMI VirtIO transport layer to refuse to work
+ with Legacy VirtIO backends and instead support only VirtIO Version 1
+ devices (or above), answer Y.
+
+ If you want instead to support also old Legacy VirtIO backends (like
+ the ones implemented by kvmtool) and let the core Kernel VirtIO layer
+ take care of the needed conversions, say N.
+
+config ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE
+ bool "Enable atomic mode for SCMI VirtIO transport"
+ depends on ARM_SCMI_TRANSPORT_VIRTIO
+ help
+ Enable support of atomic operation for SCMI VirtIO based transport.
+
+ If you want the SCMI VirtIO based transport to operate in atomic
+ mode, avoiding any kind of sleeping behaviour for selected
+ transactions on the TX path, answer Y.
+
+ Enabling atomic mode operations allows any SCMI driver using this
+ transport to optionally ask for atomic SCMI transactions and operate
+ in atomic context too, at the price of using a number of busy-waiting
+ primitives all over instead. If unsure say N.
+
endif #ARM_SCMI_PROTOCOL
config ARM_SCMI_POWER_DOMAIN
diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile
index 1dcf123d64ab..8d4afadda38c 100644
--- a/drivers/firmware/arm_scmi/Makefile
+++ b/drivers/firmware/arm_scmi/Makefile
@@ -6,8 +6,16 @@ scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_MAILBOX) += mailbox.o
scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += smc.o
scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o
scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO) += virtio.o
+scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_OPTEE) += optee.o
scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o
scmi-module-objs := $(scmi-bus-y) $(scmi-driver-y) $(scmi-protocols-y) \
$(scmi-transport-y)
obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-module.o
obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o
+
+ifeq ($(CONFIG_THUMB2_KERNEL)$(CONFIG_CC_IS_CLANG),yy)
+# The use of R7 in the SMCCC conflicts with the compiler's use of R7 as a frame
+# pointer in Thumb2 mode, which is forcibly enabled by Clang when profiling
+# hooks are inserted via the -pg switch.
+CFLAGS_REMOVE_smc.o += $(CC_FLAGS_FTRACE)
+endif
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index 35b56c8ba0c0..cf6fed6dec77 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -27,7 +27,8 @@ struct scmi_msg_resp_clock_protocol_attributes {
struct scmi_msg_resp_clock_attributes {
__le32 attributes;
#define CLOCK_ENABLE BIT(0)
- u8 name[SCMI_MAX_STR_SIZE];
+ u8 name[SCMI_MAX_STR_SIZE];
+ __le32 clock_enable_latency;
};
struct scmi_clock_set_config {
@@ -116,10 +117,15 @@ static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
attr = t->rx.buf;
ret = ph->xops->do_xfer(ph, t);
- if (!ret)
+ if (!ret) {
strlcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE);
- else
+ /* Is the optional field clock_enable_latency provided? */
+ if (t->rx.len == sizeof(*attr))
+ clk->enable_latency =
+ le32_to_cpu(attr->clock_enable_latency);
+ } else {
clk->name[0] = '\0';
+ }
ph->xops->xfer_put(ph, t);
return ret;
@@ -273,7 +279,7 @@ static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph,
static int
scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
- u32 config)
+ u32 config, bool atomic)
{
int ret;
struct scmi_xfer *t;
@@ -284,6 +290,8 @@ scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
if (ret)
return ret;
+ t->hdr.poll_completion = atomic;
+
cfg = t->tx.buf;
cfg->id = cpu_to_le32(clk_id);
cfg->attributes = cpu_to_le32(config);
@@ -296,12 +304,24 @@ scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id)
{
- return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE);
+ return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE, false);
}
static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id)
{
- return scmi_clock_config_set(ph, clk_id, 0);
+ return scmi_clock_config_set(ph, clk_id, 0, false);
+}
+
+static int scmi_clock_enable_atomic(const struct scmi_protocol_handle *ph,
+ u32 clk_id)
+{
+ return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE, true);
+}
+
+static int scmi_clock_disable_atomic(const struct scmi_protocol_handle *ph,
+ u32 clk_id)
+{
+ return scmi_clock_config_set(ph, clk_id, 0, true);
}
static int scmi_clock_count_get(const struct scmi_protocol_handle *ph)
@@ -330,6 +350,8 @@ static const struct scmi_clk_proto_ops clk_proto_ops = {
.rate_set = scmi_clock_rate_set,
.enable = scmi_clock_enable,
.disable = scmi_clock_disable,
+ .enable_atomic = scmi_clock_enable_atomic,
+ .disable_atomic = scmi_clock_disable_atomic,
};
static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
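
The clock protocol changes above pair with the "clk: scmi" commit listed in the merge: the clock framework side decides, per clock, whether the atomic variants may be used. The following is a minimal, hypothetical helper sketching that decision; the "foo_" name and parameters are illustrative, while enable/enable_atomic and the platform-reported enable latency are the interfaces added above.

/*
 * Hedged sketch: use the atomic clock operation only when the
 * platform-reported enable latency fits within the optional system wide
 * atomic threshold.
 */
static int foo_clk_enable(const struct scmi_clk_proto_ops *clk_ops,
			  const struct scmi_protocol_handle *ph, u32 clk_id,
			  u32 enable_latency_us, u32 atomic_threshold_us,
			  bool transport_is_atomic)
{
	if (transport_is_atomic && enable_latency_us <= atomic_threshold_us)
		return clk_ops->enable_atomic(ph, clk_id);

	return clk_ops->enable(ph, clk_id);
}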
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index dea1bfbe1052..4fda84bfab42 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -339,11 +339,16 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id);
* @dev: Reference to device in the SCMI hierarchy corresponding to this
* channel
* @handle: Pointer to SCMI entity handle
+ * @no_completion_irq: Flag to indicate that this channel has no completion
+ * interrupt mechanism for synchronous commands.
+ * This can be dynamically set by transports at run-time
+ * inside their provided .chan_setup().
* @transport_info: Transport layer related information
*/
struct scmi_chan_info {
struct device *dev;
struct scmi_handle *handle;
+ bool no_completion_irq;
void *transport_info;
};
@@ -373,7 +378,8 @@ struct scmi_transport_ops {
unsigned int (*get_max_msg)(struct scmi_chan_info *base_cinfo);
int (*send_message)(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer);
- void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret);
+ void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret,
+ struct scmi_xfer *xfer);
void (*fetch_response)(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer);
void (*fetch_notification)(struct scmi_chan_info *cinfo,
@@ -402,6 +408,18 @@ struct scmi_device *scmi_child_dev_find(struct device *parent,
* be pending simultaneously in the system. May be overridden by the
* get_max_msg op.
* @max_msg_size: Maximum size of data per message that can be handled.
+ * @force_polling: Flag to force this whole transport to use SCMI core polling
+ * mechanism instead of completion interrupts even if available.
+ * @sync_cmds_completed_on_ret: Flag to indicate that the transport assures
+ * synchronous-command messages are atomically
+ * completed on .send_message: no need to poll
+ * actively waiting for a response.
+ * Used by core internally only when polling is
+ * selected as a waiting for reply method: i.e.
+ * if a completion irq was found use that anyway.
+ * @atomic_enabled: Flag to indicate that this transport, which is assured not
+ * to sleep anywhere on the TX path, can be used in atomic mode
+ * when requested.
*/
struct scmi_desc {
int (*transport_init)(void);
@@ -410,6 +428,9 @@ struct scmi_desc {
int max_rx_timeout_ms;
int max_msg;
int max_msg_size;
+ const bool force_polling;
+ const bool sync_cmds_completed_on_ret;
+ const bool atomic_enabled;
};
#ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX
@@ -421,6 +442,9 @@ extern const struct scmi_desc scmi_smc_desc;
#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO
extern const struct scmi_desc scmi_virtio_desc;
#endif
+#ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE
+extern const struct scmi_desc scmi_optee_desc;
+#endif
void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv);
void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id);
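
To illustrate how the new descriptor fields and the extended .mark_txdone() signature are meant to be consumed, here is a minimal sketch of what a hypothetical transport ("foo", not part of this series) might declare; the flag values mirror those used by the SMC and OP-TEE transports later in this diff.

static void foo_mark_txdone(struct scmi_chan_info *cinfo, int ret,
			    struct scmi_xfer *xfer)
{
	/* Release per-channel resources once the reply has been fetched. */
}

static const struct scmi_transport_ops scmi_foo_ops = {
	.mark_txdone = foo_mark_txdone,
	/* .chan_setup, .send_message, .fetch_response, ... */
};

static const struct scmi_desc scmi_foo_desc = {
	.ops = &scmi_foo_ops,
	.max_rx_timeout_ms = 30,
	.max_msg = 20,
	.max_msg_size = 128,
	/* Replies are already in shared memory when .send_message returns. */
	.sync_cmds_completed_on_ret = true,
	/* The TX path never sleeps, so atomic mode may be requested. */
	.atomic_enabled = true,
};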
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index b406b3f78f46..7436c475e708 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -131,6 +131,12 @@ struct scmi_protocol_instance {
* MAX_PROTOCOLS_IMP elements allocated by the base protocol
* @active_protocols: IDR storing device_nodes for protocols actually defined
* in the DT and confirmed as implemented by fw.
+ * @atomic_threshold: Optional system wide DT-configured threshold, expressed
+ * in microseconds, for atomic operations.
+ * Only SCMI synchronous commands reported by the platform
+ * to have an execution latency less than or equal to the
+ * threshold should be considered for atomic mode operation:
+ * the final decision is left up to the SCMI drivers.
* @notify_priv: Pointer to private data structure specific to notifications.
* @node: List head
* @users: Number of users of this instance
@@ -149,6 +155,7 @@ struct scmi_info {
struct mutex protocols_mtx;
u8 *protocols_imp;
struct idr active_protocols;
+ unsigned int atomic_threshold;
void *notify_priv;
struct list_head node;
int users;
@@ -609,6 +616,25 @@ static inline void scmi_clear_channel(struct scmi_info *info,
info->desc->ops->clear_channel(cinfo);
}
+static inline bool is_polling_required(struct scmi_chan_info *cinfo,
+ struct scmi_info *info)
+{
+ return cinfo->no_completion_irq || info->desc->force_polling;
+}
+
+static inline bool is_transport_polling_capable(struct scmi_info *info)
+{
+ return info->desc->ops->poll_done ||
+ info->desc->sync_cmds_completed_on_ret;
+}
+
+static inline bool is_polling_enabled(struct scmi_chan_info *cinfo,
+ struct scmi_info *info)
+{
+ return is_polling_required(cinfo, info) &&
+ is_transport_polling_capable(info);
+}
+
static void scmi_handle_notification(struct scmi_chan_info *cinfo,
u32 msg_hdr, void *priv)
{
@@ -629,7 +655,8 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
unpack_scmi_header(msg_hdr, &xfer->hdr);
if (priv)
- xfer->priv = priv;
+ /* Ensure order between xfer->priv store and following ops */
+ smp_store_mb(xfer->priv, priv);
info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
xfer);
scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
@@ -661,7 +688,8 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
xfer->rx.len = info->desc->max_msg_size;
if (priv)
- xfer->priv = priv;
+ /* Ensure order between xfer->priv store and following ops */
+ smp_store_mb(xfer->priv, priv);
info->desc->ops->fetch_response(cinfo, xfer);
trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
@@ -724,8 +752,6 @@ static void xfer_put(const struct scmi_protocol_handle *ph,
__scmi_xfer_put(&info->tx_minfo, xfer);
}
-#define SCMI_MAX_POLL_TO_NS (100 * NSEC_PER_USEC)
-
static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer, ktime_t stop)
{
@@ -741,6 +767,79 @@ static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
}
/**
+ * scmi_wait_for_message_response - A helper to group all the possible ways of
+ * waiting for a synchronous message response.
+ *
+ * @cinfo: SCMI channel info
+ * @xfer: Reference to the transfer being waited for.
+ *
+ * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
+ * configuration flags like xfer->hdr.poll_completion.
+ *
+ * Return: 0 on Success, error otherwise.
+ */
+static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+ struct device *dev = info->dev;
+ int ret = 0, timeout_ms = info->desc->max_rx_timeout_ms;
+
+ trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id,
+ xfer->hdr.protocol_id, xfer->hdr.seq,
+ timeout_ms,
+ xfer->hdr.poll_completion);
+
+ if (xfer->hdr.poll_completion) {
+ /*
+ * Real polling is needed only if transport has NOT declared
+ * itself to support synchronous commands replies.
+ */
+ if (!info->desc->sync_cmds_completed_on_ret) {
+ /*
+ * Poll on xfer using transport provided .poll_done();
+ * assumes no completion interrupt was available.
+ */
+ ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms);
+
+ spin_until_cond(scmi_xfer_done_no_timeout(cinfo,
+ xfer, stop));
+ if (ktime_after(ktime_get(), stop)) {
+ dev_err(dev,
+ "timed out in resp(caller: %pS) - polling\n",
+ (void *)_RET_IP_);
+ ret = -ETIMEDOUT;
+ }
+ }
+
+ if (!ret) {
+ unsigned long flags;
+
+ /*
+ * Do not fetch_response if an out-of-order delayed
+ * response is being processed.
+ */
+ spin_lock_irqsave(&xfer->lock, flags);
+ if (xfer->state == SCMI_XFER_SENT_OK) {
+ info->desc->ops->fetch_response(cinfo, xfer);
+ xfer->state = SCMI_XFER_RESP_OK;
+ }
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ }
+ } else {
+ /* And we wait for the response. */
+ if (!wait_for_completion_timeout(&xfer->done,
+ msecs_to_jiffies(timeout_ms))) {
+ dev_err(dev, "timed out in resp(caller: %pS)\n",
+ (void *)_RET_IP_);
+ ret = -ETIMEDOUT;
+ }
+ }
+
+ return ret;
+}
+
+/**
* do_xfer() - Do one transfer
*
* @ph: Pointer to SCMI protocol handle
@@ -754,18 +853,26 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
struct scmi_xfer *xfer)
{
int ret;
- int timeout;
const struct scmi_protocol_instance *pi = ph_to_pi(ph);
struct scmi_info *info = handle_to_scmi_info(pi->handle);
struct device *dev = info->dev;
struct scmi_chan_info *cinfo;
- if (xfer->hdr.poll_completion && !info->desc->ops->poll_done) {
+ /* Check for polling request on custom command xfers at first */
+ if (xfer->hdr.poll_completion && !is_transport_polling_capable(info)) {
dev_warn_once(dev,
"Polling mode is not supported by transport.\n");
return -EINVAL;
}
+ cinfo = idr_find(&info->tx_idr, pi->proto->id);
+ if (unlikely(!cinfo))
+ return -EINVAL;
+
+ /* True ONLY if also supported by transport. */
+ if (is_polling_enabled(cinfo, info))
+ xfer->hdr.poll_completion = true;
+
/*
* Initialise protocol id now from protocol handle to avoid it being
* overridden by mistake (or malice) by the protocol code mangling with
@@ -774,10 +881,6 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
xfer->hdr.protocol_id = pi->proto->id;
reinit_completion(&xfer->done);
- cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
- if (unlikely(!cinfo))
- return -EINVAL;
-
trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
xfer->hdr.protocol_id, xfer->hdr.seq,
xfer->hdr.poll_completion);
@@ -798,41 +901,12 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
return ret;
}
- if (xfer->hdr.poll_completion) {
- ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);
-
- spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));
- if (ktime_before(ktime_get(), stop)) {
- unsigned long flags;
-
- /*
- * Do not fetch_response if an out-of-order delayed
- * response is being processed.
- */
- spin_lock_irqsave(&xfer->lock, flags);
- if (xfer->state == SCMI_XFER_SENT_OK) {
- info->desc->ops->fetch_response(cinfo, xfer);
- xfer->state = SCMI_XFER_RESP_OK;
- }
- spin_unlock_irqrestore(&xfer->lock, flags);
- } else {
- ret = -ETIMEDOUT;
- }
- } else {
- /* And we wait for the response. */
- timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
- if (!wait_for_completion_timeout(&xfer->done, timeout)) {
- dev_err(dev, "timed out in resp(caller: %pS)\n",
- (void *)_RET_IP_);
- ret = -ETIMEDOUT;
- }
- }
-
+ ret = scmi_wait_for_message_response(cinfo, xfer);
if (!ret && xfer->hdr.status)
ret = scmi_to_linux_errno(xfer->hdr.status);
if (info->desc->ops->mark_txdone)
- info->desc->ops->mark_txdone(cinfo, ret);
+ info->desc->ops->mark_txdone(cinfo, ret, xfer);
trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
xfer->hdr.protocol_id, xfer->hdr.seq, ret);
@@ -858,6 +932,20 @@ static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
* @ph: Pointer to SCMI protocol handle
* @xfer: Transfer to initiate and wait for response
*
+ * Using asynchronous commands in atomic/polling mode should be avoided since
+ * it could cause long busy-waiting here, so ignore polling for the delayed
+ * response and WARN if it was requested for this command transaction since
+ * upper layers should refrain from issuing such kind of requests.
+ *
+ * The only other option would have been to refrain from using any asynchronous
+ * command even if made available, when an atomic transport is detected, and
+ * instead forcibly use the synchronous version (thing that can be easily
+ * attained at the protocol layer), but this would also have led to longer
+ * stalls of the channel for synchronous commands and possibly timeouts.
+ * (in other words there is usually a good reason if a platform provides an
+ * asynchronous version of a command and we should prefer to use it...just not
+ * when using atomic/polling mode)
+ *
* Return: -ETIMEDOUT in case of no delayed response, if transmit error,
* return corresponding error, else if all goes well, return 0.
*/
@@ -869,12 +957,24 @@ static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
xfer->async_done = &async_response;
+ /*
+ * Delayed responses should not be polled, so an async command should
+ * not have been used when requiring an atomic/poll context; WARN and
+ * perform instead a sleeping wait.
+ * (Note Async + IgnoreDelayedResponses are sent via do_xfer)
+ */
+ WARN_ON_ONCE(xfer->hdr.poll_completion);
+
ret = do_xfer(ph, xfer);
if (!ret) {
- if (!wait_for_completion_timeout(xfer->async_done, timeout))
+ if (!wait_for_completion_timeout(xfer->async_done, timeout)) {
+ dev_err(ph->dev,
+ "timed out in delayed resp(caller: %pS)\n",
+ (void *)_RET_IP_);
ret = -ETIMEDOUT;
- else if (xfer->hdr.status)
+ } else if (xfer->hdr.status) {
ret = scmi_to_linux_errno(xfer->hdr.status);
+ }
}
xfer->async_done = NULL;
@@ -1308,6 +1408,29 @@ static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id)
WARN_ON(ret);
}
+/**
+ * scmi_is_transport_atomic - Method to check if underlying transport for an
+ * SCMI instance is configured as atomic.
+ *
+ * @handle: A reference to the SCMI platform instance.
+ * @atomic_threshold: An optional return value for the system wide currently
+ * configured threshold for atomic operations.
+ *
+ * Return: True if transport is configured as atomic
+ */
+static bool scmi_is_transport_atomic(const struct scmi_handle *handle,
+ unsigned int *atomic_threshold)
+{
+ bool ret;
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ ret = info->desc->atomic_enabled && is_transport_polling_capable(info);
+ if (ret && atomic_threshold)
+ *atomic_threshold = info->atomic_threshold;
+
+ return ret;
+}
+
static inline
struct scmi_handle *scmi_handle_get_from_info_unlocked(struct scmi_info *info)
{
@@ -1499,6 +1622,16 @@ static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
if (ret)
return ret;
+ if (tx && is_polling_required(cinfo, info)) {
+ if (is_transport_polling_capable(info))
+ dev_info(dev,
+ "Enabled polling mode TX channel - prot_id:%d\n",
+ prot_id);
+ else
+ dev_warn(dev,
+ "Polling mode NOT supported by transport.\n");
+ }
+
idr_alloc:
ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
if (ret != prot_id) {
@@ -1836,6 +1969,14 @@ static int scmi_probe(struct platform_device *pdev)
handle->devm_protocol_get = scmi_devm_protocol_get;
handle->devm_protocol_put = scmi_devm_protocol_put;
+ /* System wide atomic threshold for atomic ops .. if any */
+ if (!of_property_read_u32(np, "atomic-threshold-us",
+ &info->atomic_threshold))
+ dev_info(dev,
+ "SCMI System wide atomic threshold set to %d us\n",
+ info->atomic_threshold);
+ handle->is_transport_atomic = scmi_is_transport_atomic;
+
if (desc->ops->link_supplier) {
ret = desc->ops->link_supplier(dev);
if (ret)
@@ -1853,6 +1994,10 @@ static int scmi_probe(struct platform_device *pdev)
if (scmi_notification_init(handle))
dev_err(dev, "SCMI Notifications NOT available.\n");
+ if (info->desc->atomic_enabled && !is_transport_polling_capable(info))
+ dev_err(dev,
+ "Transport is not polling capable. Atomic mode not supported.\n");
+
/*
* Trigger SCMI Base protocol initialization.
* It's mandatory and won't be ever released/deinit until the
@@ -1994,6 +2139,9 @@ static const struct of_device_id scmi_of_match[] = {
#ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX
{ .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
#endif
+#ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE
+ { .compatible = "linaro,scmi-optee", .data = &scmi_optee_desc },
+#endif
#ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC
{ .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
#endif
diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
index e09eb12bf421..08ff4d110beb 100644
--- a/drivers/firmware/arm_scmi/mailbox.c
+++ b/drivers/firmware/arm_scmi/mailbox.c
@@ -140,7 +140,8 @@ static int mailbox_send_message(struct scmi_chan_info *cinfo,
return ret;
}
-static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret)
+static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret,
+ struct scmi_xfer *__unused)
{
struct scmi_mailbox *smbox = cinfo->transport_info;
diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c
new file mode 100644
index 000000000000..734f1eeee161
--- /dev/null
+++ b/drivers/firmware/arm_scmi/optee.c
@@ -0,0 +1,567 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019-2021 Linaro Ltd.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include <linux/uuid.h>
+#include <uapi/linux/tee.h>
+
+#include "common.h"
+
+#define SCMI_OPTEE_MAX_MSG_SIZE 128
+
+enum scmi_optee_pta_cmd {
+ /*
+ * PTA_SCMI_CMD_CAPABILITIES - Get channel capabilities
+ *
+ * [out] value[0].a: Capability bit mask (enum pta_scmi_caps)
+ * [out] value[0].b: Extended capabilities or 0
+ */
+ PTA_SCMI_CMD_CAPABILITIES = 0,
+
+ /*
+ * PTA_SCMI_CMD_PROCESS_SMT_CHANNEL - Process SCMI message in SMT buffer
+ *
+ * [in] value[0].a: Channel handle
+ *
+ * Shared memory used for SCMI message/response exchange is expected
+ * to be already identified and bound to the channel handle in both
+ * the SCMI agent and the SCMI server (OP-TEE) parts.
+ * The memory uses SMT header to carry SCMI meta-data (protocol ID and
+ * protocol message ID).
+ */
+ PTA_SCMI_CMD_PROCESS_SMT_CHANNEL = 1,
+
+ /*
+ * PTA_SCMI_CMD_PROCESS_SMT_CHANNEL_MESSAGE - Process SMT/SCMI message
+ *
+ * [in] value[0].a: Channel handle
+ * [in/out] memref[1]: Message/response buffer (SMT and SCMI payload)
+ *
+ * Shared memory used for SCMI message/response is an SMT buffer
+ * referenced by param[1]. It shall be 128 bytes large to fit the response
+ * payload whatever the message payload size.
+ * The memory uses SMT header to carry SCMI meta-data (protocol ID and
+ * protocol message ID).
+ */
+ PTA_SCMI_CMD_PROCESS_SMT_CHANNEL_MESSAGE = 2,
+
+ /*
+ * PTA_SCMI_CMD_GET_CHANNEL - Get channel handle
+ *
+ * SCMI shm information is 0 if the agent expects to use OP-TEE regular SHM
+ *
+ * [in] value[0].a: Channel identifier
+ * [out] value[0].a: Returned channel handle
+ * [in] value[0].b: Requested capabilities mask (enum pta_scmi_caps)
+ */
+ PTA_SCMI_CMD_GET_CHANNEL = 3,
+};
+
+/*
+ * OP-TEE SCMI service capabilities bit flags (32bit)
+ *
+ * PTA_SCMI_CAPS_SMT_HEADER
+ * When set, OP-TEE supports command using SMT header protocol (SCMI shmem) in
+ * shared memory buffers to carry SCMI protocol synchronisation information.
+ */
+#define PTA_SCMI_CAPS_NONE 0
+#define PTA_SCMI_CAPS_SMT_HEADER BIT(0)
+
+/**
+ * struct scmi_optee_channel - Description of an OP-TEE SCMI channel
+ *
+ * @channel_id: OP-TEE channel ID used for this transport
+ * @tee_session: TEE session identifier
+ * @caps: OP-TEE SCMI channel capabilities
+ * @mu: Mutex protection on channel access
+ * @cinfo: SCMI channel information
+ * @shmem: Virtual base address of the shared memory
+ * @tee_shm: Reference to TEE shared memory or NULL if using static shmem
+ * @link: Reference in agent's channel list
+ */
+struct scmi_optee_channel {
+ u32 channel_id;
+ u32 tee_session;
+ u32 caps;
+ struct mutex mu;
+ struct scmi_chan_info *cinfo;
+ struct scmi_shared_mem __iomem *shmem;
+ struct tee_shm *tee_shm;
+ struct list_head link;
+};
+
+/**
+ * struct scmi_optee_agent - OP-TEE transport private data
+ *
+ * @dev: Device used for communication with TEE
+ * @tee_ctx: TEE context used for communication
+ * @caps: Supported channel capabilities
+ * @mu: Mutex for protection of @channel_list
+ * @channel_list: List of all created channels for the agent
+ */
+struct scmi_optee_agent {
+ struct device *dev;
+ struct tee_context *tee_ctx;
+ u32 caps;
+ struct mutex mu;
+ struct list_head channel_list;
+};
+
+/* There can be only 1 SCMI service in OP-TEE we connect to */
+static struct scmi_optee_agent *scmi_optee_private;
+
+/* Forward reference to scmi_optee transport initialization */
+static int scmi_optee_init(void);
+
+/* Open a session toward SCMI OP-TEE service with REE_KERNEL identity */
+static int open_session(struct scmi_optee_agent *agent, u32 *tee_session)
+{
+ struct device *dev = agent->dev;
+ struct tee_client_device *scmi_pta = to_tee_client_device(dev);
+ struct tee_ioctl_open_session_arg arg = { };
+ int ret;
+
+ memcpy(arg.uuid, scmi_pta->id.uuid.b, TEE_IOCTL_UUID_LEN);
+ arg.clnt_login = TEE_IOCTL_LOGIN_REE_KERNEL;
+
+ ret = tee_client_open_session(agent->tee_ctx, &arg, NULL);
+ if (ret < 0 || arg.ret) {
+ dev_err(dev, "Can't open tee session: %d / %#x\n", ret, arg.ret);
+ return -EOPNOTSUPP;
+ }
+
+ *tee_session = arg.session;
+
+ return 0;
+}
+
+static void close_session(struct scmi_optee_agent *agent, u32 tee_session)
+{
+ tee_client_close_session(agent->tee_ctx, tee_session);
+}
+
+static int get_capabilities(struct scmi_optee_agent *agent)
+{
+ struct tee_ioctl_invoke_arg arg = { };
+ struct tee_param param[1] = { };
+ u32 caps;
+ u32 tee_session;
+ int ret;
+
+ ret = open_session(agent, &tee_session);
+ if (ret)
+ return ret;
+
+ arg.func = PTA_SCMI_CMD_CAPABILITIES;
+ arg.session = tee_session;
+ arg.num_params = 1;
+
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT;
+
+ ret = tee_client_invoke_func(agent->tee_ctx, &arg, param);
+
+ close_session(agent, tee_session);
+
+ if (ret < 0 || arg.ret) {
+ dev_err(agent->dev, "Can't get capabilities: %d / %#x\n", ret, arg.ret);
+ return -EOPNOTSUPP;
+ }
+
+ caps = param[0].u.value.a;
+
+ if (!(caps & PTA_SCMI_CAPS_SMT_HEADER)) {
+ dev_err(agent->dev, "OP-TEE SCMI PTA doesn't support SMT\n");
+ return -EOPNOTSUPP;
+ }
+
+ agent->caps = caps;
+
+ return 0;
+}
+
+static int get_channel(struct scmi_optee_channel *channel)
+{
+ struct device *dev = scmi_optee_private->dev;
+ struct tee_ioctl_invoke_arg arg = { };
+ struct tee_param param[1] = { };
+ unsigned int caps = PTA_SCMI_CAPS_SMT_HEADER;
+ int ret;
+
+ arg.func = PTA_SCMI_CMD_GET_CHANNEL;
+ arg.session = channel->tee_session;
+ arg.num_params = 1;
+
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT;
+ param[0].u.value.a = channel->channel_id;
+ param[0].u.value.b = caps;
+
+ ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param);
+
+ if (ret || arg.ret) {
+ dev_err(dev, "Can't get channel with caps %#x: %d / %#x\n", caps, ret, arg.ret);
+ return -EOPNOTSUPP;
+ }
+
+ /* From now on use the channel identifier provided by the OP-TEE SCMI service */
+ channel->channel_id = param[0].u.value.a;
+ channel->caps = caps;
+
+ return 0;
+}
+
+static int invoke_process_smt_channel(struct scmi_optee_channel *channel)
+{
+ struct tee_ioctl_invoke_arg arg = { };
+ struct tee_param param[2] = { };
+ int ret;
+
+ arg.session = channel->tee_session;
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+ param[0].u.value.a = channel->channel_id;
+
+ if (channel->tee_shm) {
+ param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT;
+ param[1].u.memref.shm = channel->tee_shm;
+ param[1].u.memref.size = SCMI_OPTEE_MAX_MSG_SIZE;
+ arg.num_params = 2;
+ arg.func = PTA_SCMI_CMD_PROCESS_SMT_CHANNEL_MESSAGE;
+ } else {
+ arg.num_params = 1;
+ arg.func = PTA_SCMI_CMD_PROCESS_SMT_CHANNEL;
+ }
+
+ ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param);
+ if (ret < 0 || arg.ret) {
+ dev_err(scmi_optee_private->dev, "Can't invoke channel %u: %d / %#x\n",
+ channel->channel_id, ret, arg.ret);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int scmi_optee_link_supplier(struct device *dev)
+{
+ if (!scmi_optee_private) {
+ if (scmi_optee_init())
+ dev_dbg(dev, "Optee bus not yet ready\n");
+
+ /* Wait for optee bus */
+ return -EPROBE_DEFER;
+ }
+
+ if (!device_link_add(dev, scmi_optee_private->dev, DL_FLAG_AUTOREMOVE_CONSUMER)) {
+ dev_err(dev, "Adding link to supplier optee device failed\n");
+ return -ECANCELED;
+ }
+
+ return 0;
+}
+
+static bool scmi_optee_chan_available(struct device *dev, int idx)
+{
+ u32 channel_id;
+
+ return !of_property_read_u32_index(dev->of_node, "linaro,optee-channel-id",
+ idx, &channel_id);
+}
+
+static void scmi_optee_clear_channel(struct scmi_chan_info *cinfo)
+{
+ struct scmi_optee_channel *channel = cinfo->transport_info;
+
+ shmem_clear_channel(channel->shmem);
+}
+
+static int setup_static_shmem(struct device *dev, struct scmi_chan_info *cinfo,
+ struct scmi_optee_channel *channel)
+{
+ struct device_node *np;
+ resource_size_t size;
+ struct resource res;
+ int ret;
+
+ np = of_parse_phandle(cinfo->dev->of_node, "shmem", 0);
+ if (!of_device_is_compatible(np, "arm,scmi-shmem")) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret) {
+ dev_err(dev, "Failed to get SCMI Tx shared memory\n");
+ goto out;
+ }
+
+ size = resource_size(&res);
+
+ channel->shmem = devm_ioremap(dev, res.start, size);
+ if (!channel->shmem) {
+ dev_err(dev, "Failed to ioremap SCMI Tx shared memory\n");
+ ret = -EADDRNOTAVAIL;
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ of_node_put(np);
+
+ return ret;
+}
+
+static int setup_shmem(struct device *dev, struct scmi_chan_info *cinfo,
+ struct scmi_optee_channel *channel)
+{
+ if (of_find_property(cinfo->dev->of_node, "shmem", NULL))
+ return setup_static_shmem(dev, cinfo, channel);
+ else
+ return -ENOMEM;
+}
+
+static int scmi_optee_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, bool tx)
+{
+ struct scmi_optee_channel *channel;
+ uint32_t channel_id;
+ int ret;
+
+ if (!tx)
+ return -ENODEV;
+
+ channel = devm_kzalloc(dev, sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return -ENOMEM;
+
+ ret = of_property_read_u32_index(cinfo->dev->of_node, "linaro,optee-channel-id",
+ 0, &channel_id);
+ if (ret)
+ return ret;
+
+ cinfo->transport_info = channel;
+ channel->cinfo = cinfo;
+ channel->channel_id = channel_id;
+ mutex_init(&channel->mu);
+
+ ret = setup_shmem(dev, cinfo, channel);
+ if (ret)
+ return ret;
+
+ ret = open_session(scmi_optee_private, &channel->tee_session);
+ if (ret)
+ goto err_free_shm;
+
+ ret = get_channel(channel);
+ if (ret)
+ goto err_close_sess;
+
+ /* Enable polling */
+ cinfo->no_completion_irq = true;
+
+ mutex_lock(&scmi_optee_private->mu);
+ list_add(&channel->link, &scmi_optee_private->channel_list);
+ mutex_unlock(&scmi_optee_private->mu);
+
+ return 0;
+
+err_close_sess:
+ close_session(scmi_optee_private, channel->tee_session);
+err_free_shm:
+ if (channel->tee_shm)
+ tee_shm_free(channel->tee_shm);
+
+ return ret;
+}
+
+static int scmi_optee_chan_free(int id, void *p, void *data)
+{
+ struct scmi_chan_info *cinfo = p;
+ struct scmi_optee_channel *channel = cinfo->transport_info;
+
+ mutex_lock(&scmi_optee_private->mu);
+ list_del(&channel->link);
+ mutex_unlock(&scmi_optee_private->mu);
+
+ close_session(scmi_optee_private, channel->tee_session);
+
+ if (channel->tee_shm) {
+ tee_shm_free(channel->tee_shm);
+ channel->tee_shm = NULL;
+ }
+
+ cinfo->transport_info = NULL;
+ channel->cinfo = NULL;
+
+ scmi_free_channel(cinfo, data, id);
+
+ return 0;
+}
+
+static struct scmi_shared_mem *get_channel_shm(struct scmi_optee_channel *chan,
+ struct scmi_xfer *xfer)
+{
+ if (!chan)
+ return NULL;
+
+ return chan->shmem;
+}
+
+
+static int scmi_optee_send_message(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_optee_channel *channel = cinfo->transport_info;
+ struct scmi_shared_mem *shmem = get_channel_shm(channel, xfer);
+ int ret;
+
+ mutex_lock(&channel->mu);
+ shmem_tx_prepare(shmem, xfer);
+
+ ret = invoke_process_smt_channel(channel);
+ if (ret)
+ mutex_unlock(&channel->mu);
+
+ return ret;
+}
+
+static void scmi_optee_fetch_response(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_optee_channel *channel = cinfo->transport_info;
+ struct scmi_shared_mem *shmem = get_channel_shm(channel, xfer);
+
+ shmem_fetch_response(shmem, xfer);
+}
+
+static void scmi_optee_mark_txdone(struct scmi_chan_info *cinfo, int ret,
+ struct scmi_xfer *__unused)
+{
+ struct scmi_optee_channel *channel = cinfo->transport_info;
+
+ mutex_unlock(&channel->mu);
+}
+
+static struct scmi_transport_ops scmi_optee_ops = {
+ .link_supplier = scmi_optee_link_supplier,
+ .chan_available = scmi_optee_chan_available,
+ .chan_setup = scmi_optee_chan_setup,
+ .chan_free = scmi_optee_chan_free,
+ .send_message = scmi_optee_send_message,
+ .mark_txdone = scmi_optee_mark_txdone,
+ .fetch_response = scmi_optee_fetch_response,
+ .clear_channel = scmi_optee_clear_channel,
+};
+
+static int scmi_optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
+{
+ return ver->impl_id == TEE_IMPL_ID_OPTEE;
+}
+
+static int scmi_optee_service_probe(struct device *dev)
+{
+ struct scmi_optee_agent *agent;
+ struct tee_context *tee_ctx;
+ int ret;
+
+ /* Only one SCMI OP-TEE device allowed */
+ if (scmi_optee_private) {
+ dev_err(dev, "An SCMI OP-TEE device was already initialized: only one allowed\n");
+ return -EBUSY;
+ }
+
+ tee_ctx = tee_client_open_context(NULL, scmi_optee_ctx_match, NULL, NULL);
+ if (IS_ERR(tee_ctx))
+ return -ENODEV;
+
+ agent = devm_kzalloc(dev, sizeof(*agent), GFP_KERNEL);
+ if (!agent) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ agent->dev = dev;
+ agent->tee_ctx = tee_ctx;
+ INIT_LIST_HEAD(&agent->channel_list);
+ mutex_init(&agent->mu);
+
+ ret = get_capabilities(agent);
+ if (ret)
+ goto err;
+
+ /* Ensure agent resources are all visible before scmi_optee_private is */
+ smp_mb();
+ scmi_optee_private = agent;
+
+ return 0;
+
+err:
+ tee_client_close_context(tee_ctx);
+
+ return ret;
+}
+
+static int scmi_optee_service_remove(struct device *dev)
+{
+ struct scmi_optee_agent *agent = scmi_optee_private;
+
+ if (!scmi_optee_private)
+ return -EINVAL;
+
+ if (!list_empty(&scmi_optee_private->channel_list))
+ return -EBUSY;
+
+ /* Ensure cleared reference is visible before resources are released */
+ smp_store_mb(scmi_optee_private, NULL);
+
+ tee_client_close_context(agent->tee_ctx);
+
+ return 0;
+}
+
+static const struct tee_client_device_id scmi_optee_service_id[] = {
+ {
+ UUID_INIT(0xa8cfe406, 0xd4f5, 0x4a2e,
+ 0x9f, 0x8d, 0xa2, 0x5d, 0xc7, 0x54, 0xc0, 0x99)
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(tee, scmi_optee_service_id);
+
+static struct tee_client_driver scmi_optee_driver = {
+ .id_table = scmi_optee_service_id,
+ .driver = {
+ .name = "scmi-optee",
+ .bus = &tee_bus_type,
+ .probe = scmi_optee_service_probe,
+ .remove = scmi_optee_service_remove,
+ },
+};
+
+static int scmi_optee_init(void)
+{
+ return driver_register(&scmi_optee_driver.driver);
+}
+
+static void scmi_optee_exit(void)
+{
+ if (scmi_optee_private)
+ driver_unregister(&scmi_optee_driver.driver);
+}
+
+const struct scmi_desc scmi_optee_desc = {
+ .transport_exit = scmi_optee_exit,
+ .ops = &scmi_optee_ops,
+ .max_rx_timeout_ms = 30,
+ .max_msg = 20,
+ .max_msg_size = SCMI_OPTEE_MAX_MSG_SIZE,
+ .sync_cmds_completed_on_ret = true,
+};
diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c
index 4effecc3bb46..745acfdd0b3d 100644
--- a/drivers/firmware/arm_scmi/smc.c
+++ b/drivers/firmware/arm_scmi/smc.c
@@ -7,6 +7,7 @@
*/
#include <linux/arm-smccc.h>
+#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -14,6 +15,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/processor.h>
#include <linux/slab.h>
#include "common.h"
@@ -23,26 +25,29 @@
*
* @cinfo: SCMI channel info
* @shmem: Transmit/Receive shared memory area
- * @shmem_lock: Lock to protect access to Tx/Rx shared memory area
+ * @shmem_lock: Lock to protect access to Tx/Rx shared memory area.
+ * Used when NOT operating in atomic mode.
+ * @inflight: Atomic flag to protect access to Tx/Rx shared memory area.
+ * Used when operating in atomic mode.
* @func_id: smc/hvc call function id
- * @irq: Optional; employed when platforms indicates msg completion by intr.
- * @tx_complete: Optional, employed only when irq is valid.
*/
struct scmi_smc {
struct scmi_chan_info *cinfo;
struct scmi_shared_mem __iomem *shmem;
+ /* Protect access to shmem area */
struct mutex shmem_lock;
+#define INFLIGHT_NONE MSG_TOKEN_MAX
+ atomic_t inflight;
u32 func_id;
- int irq;
- struct completion tx_complete;
};
static irqreturn_t smc_msg_done_isr(int irq, void *data)
{
struct scmi_smc *scmi_info = data;
- complete(&scmi_info->tx_complete);
+ scmi_rx_callback(scmi_info->cinfo,
+ shmem_read_header(scmi_info->shmem), NULL);
return IRQ_HANDLED;
}
@@ -57,6 +62,41 @@ static bool smc_chan_available(struct device *dev, int idx)
return true;
}
+static inline void smc_channel_lock_init(struct scmi_smc *scmi_info)
+{
+ if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE))
+ atomic_set(&scmi_info->inflight, INFLIGHT_NONE);
+ else
+ mutex_init(&scmi_info->shmem_lock);
+}
+
+static bool smc_xfer_inflight(struct scmi_xfer *xfer, atomic_t *inflight)
+{
+ int ret;
+
+ ret = atomic_cmpxchg(inflight, INFLIGHT_NONE, xfer->hdr.seq);
+
+ return ret == INFLIGHT_NONE;
+}
+
+static inline void
+smc_channel_lock_acquire(struct scmi_smc *scmi_info,
+ struct scmi_xfer *xfer __maybe_unused)
+{
+ if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE))
+ spin_until_cond(smc_xfer_inflight(xfer, &scmi_info->inflight));
+ else
+ mutex_lock(&scmi_info->shmem_lock);
+}
+
+static inline void smc_channel_lock_release(struct scmi_smc *scmi_info)
+{
+ if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE))
+ atomic_set(&scmi_info->inflight, INFLIGHT_NONE);
+ else
+ mutex_unlock(&scmi_info->shmem_lock);
+}
+
static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
bool tx)
{
@@ -111,13 +151,13 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
dev_err(dev, "failed to setup SCMI smc irq\n");
return ret;
}
- init_completion(&scmi_info->tx_complete);
- scmi_info->irq = irq;
+ } else {
+ cinfo->no_completion_irq = true;
}
scmi_info->func_id = func_id;
scmi_info->cinfo = cinfo;
- mutex_init(&scmi_info->shmem_lock);
+ smc_channel_lock_init(scmi_info);
cinfo->transport_info = scmi_info;
return 0;
@@ -142,26 +182,22 @@ static int smc_send_message(struct scmi_chan_info *cinfo,
struct scmi_smc *scmi_info = cinfo->transport_info;
struct arm_smccc_res res;
- mutex_lock(&scmi_info->shmem_lock);
+ /*
+ * Channel will be released only once response has been
+ * surely fully retrieved, so after .mark_txdone()
+ */
+ smc_channel_lock_acquire(scmi_info, xfer);
shmem_tx_prepare(scmi_info->shmem, xfer);
- if (scmi_info->irq)
- reinit_completion(&scmi_info->tx_complete);
-
arm_smccc_1_1_invoke(scmi_info->func_id, 0, 0, 0, 0, 0, 0, 0, &res);
- if (scmi_info->irq)
- wait_for_completion(&scmi_info->tx_complete);
-
- scmi_rx_callback(scmi_info->cinfo,
- shmem_read_header(scmi_info->shmem), NULL);
-
- mutex_unlock(&scmi_info->shmem_lock);
-
/* Only SMCCC_RET_NOT_SUPPORTED is valid error code */
- if (res.a0)
+ if (res.a0) {
+ smc_channel_lock_release(scmi_info);
return -EOPNOTSUPP;
+ }
+
return 0;
}
@@ -173,12 +209,12 @@ static void smc_fetch_response(struct scmi_chan_info *cinfo,
shmem_fetch_response(scmi_info->shmem, xfer);
}
-static bool
-smc_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
+static void smc_mark_txdone(struct scmi_chan_info *cinfo, int ret,
+ struct scmi_xfer *__unused)
{
struct scmi_smc *scmi_info = cinfo->transport_info;
- return shmem_poll_done(scmi_info->shmem, xfer);
+ smc_channel_lock_release(scmi_info);
}
static const struct scmi_transport_ops scmi_smc_ops = {
@@ -186,8 +222,8 @@ static const struct scmi_transport_ops scmi_smc_ops = {
.chan_setup = smc_chan_setup,
.chan_free = smc_chan_free,
.send_message = smc_send_message,
+ .mark_txdone = smc_mark_txdone,
.fetch_response = smc_fetch_response,
- .poll_done = smc_poll_done,
};
const struct scmi_desc scmi_smc_desc = {
@@ -195,4 +231,14 @@ const struct scmi_desc scmi_smc_desc = {
.max_rx_timeout_ms = 30,
.max_msg = 20,
.max_msg_size = 128,
+ /*
+ * Setting .sync_cmds_completed_on_ret to true for SMC assumes that,
+ * once the SMC instruction has completed successfully, the issued
+ * SCMI command would have been already fully processed by the SCMI
+ * platform firmware and so any possible response value expected
+ * for the issued command will be immediately ready to be fetched
+ * from the shared memory area.
+ */
+ .sync_cmds_completed_on_ret = true,
+ .atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE),
};
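
The atomic mode of the SMC transport replaces the sleeping mutex with a busy-wait on an atomic "inflight" token, so the whole TX path can run in atomic context. Reduced to its essence, and with purely illustrative names, the claim/release pattern looks like the sketch below.

#include <linux/atomic.h>
#include <linux/processor.h>

#define EXAMPLE_FREE	-1	/* channel currently unclaimed */

static atomic_t example_inflight = ATOMIC_INIT(EXAMPLE_FREE);

static bool example_try_claim(int token)
{
	/* Succeeds only if the channel was free; never sleeps. */
	return atomic_cmpxchg(&example_inflight, EXAMPLE_FREE, token) ==
	       EXAMPLE_FREE;
}

static void example_claim(int token)
{
	/* Busy-wait until this context owns the channel. */
	spin_until_cond(example_try_claim(token));
}

static void example_release(void)
{
	atomic_set(&example_inflight, EXAMPLE_FREE);
}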
diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c
index eefcc4146749..14709dbc96a1 100644
--- a/drivers/firmware/arm_scmi/virtio.c
+++ b/drivers/firmware/arm_scmi/virtio.c
@@ -3,8 +3,8 @@
* Virtio Transport driver for Arm System Control and Management Interface
* (SCMI).
*
- * Copyright (C) 2020-2021 OpenSynergy.
- * Copyright (C) 2021 ARM Ltd.
+ * Copyright (C) 2020-2022 OpenSynergy.
+ * Copyright (C) 2021-2022 ARM Ltd.
*/
/**
@@ -17,7 +17,9 @@
* virtqueue. Access to each virtqueue is protected by spinlocks.
*/
+#include <linux/completion.h>
#include <linux/errno.h>
+#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
@@ -27,6 +29,7 @@
#include "common.h"
+#define VIRTIO_MAX_RX_TIMEOUT_MS 60000
#define VIRTIO_SCMI_MAX_MSG_SIZE 128 /* Value may be increased. */
#define VIRTIO_SCMI_MAX_PDU_SIZE \
(VIRTIO_SCMI_MAX_MSG_SIZE + SCMI_MSG_MAX_PROT_OVERHEAD)
@@ -37,25 +40,46 @@
*
* @vqueue: Associated virtqueue
* @cinfo: SCMI Tx or Rx channel
+ * @free_lock: Protects access to the @free_list.
* @free_list: List of unused scmi_vio_msg, maintained for Tx channels only
+ * @deferred_tx_work: Worker for TX deferred replies processing
+ * @deferred_tx_wq: Workqueue for TX deferred replies
+ * @pending_lock: Protects access to the @pending_cmds_list.
+ * @pending_cmds_list: List of pre-fetched commands queued for later processing
* @is_rx: Whether channel is an Rx channel
- * @ready: Whether transport user is ready to hear about channel
* @max_msg: Maximum number of pending messages for this channel.
- * @lock: Protects access to all members except ready.
- * @ready_lock: Protects access to ready. If required, it must be taken before
- * lock.
+ * @lock: Protects access to all members except users, free_list and
+ * pending_cmds_list.
+ * @shutdown_done: A reference to a completion used when freeing this channel.
+ * @users: A reference count to currently active users of this channel.
*/
struct scmi_vio_channel {
struct virtqueue *vqueue;
struct scmi_chan_info *cinfo;
+ /* lock to protect access to the free list. */
+ spinlock_t free_lock;
struct list_head free_list;
+ /* lock to protect access to the pending list. */
+ spinlock_t pending_lock;
+ struct list_head pending_cmds_list;
+ struct work_struct deferred_tx_work;
+ struct workqueue_struct *deferred_tx_wq;
bool is_rx;
- bool ready;
unsigned int max_msg;
- /* lock to protect access to all members except ready. */
+ /*
+ * Lock to protect access to all members except users, free_list and
+ * pending_cmds_list
+ */
spinlock_t lock;
- /* lock to rotects access to ready flag. */
- spinlock_t ready_lock;
+ struct completion *shutdown_done;
+ refcount_t users;
+};
+
+enum poll_states {
+ VIO_MSG_NOT_POLLED,
+ VIO_MSG_POLL_TIMEOUT,
+ VIO_MSG_POLLING,
+ VIO_MSG_POLL_DONE,
};
/**
@@ -65,29 +89,154 @@ struct scmi_vio_channel {
* @input: SDU used for (delayed) responses and notifications
* @list: List which scmi_vio_msg may be part of
* @rx_len: Input SDU size in bytes, once input has been received
+ * @poll_idx: Last used index registered for polling purposes if this message
+ * transaction reply was configured for polling.
+ * @poll_status: Polling state for this message.
+ * @poll_lock: A lock to protect @poll_status
+ * @users: A reference count to track this message users and avoid premature
+ * freeing (and reuse) when polling and IRQ execution paths interleave.
*/
struct scmi_vio_msg {
struct scmi_msg_payld *request;
struct scmi_msg_payld *input;
struct list_head list;
unsigned int rx_len;
+ unsigned int poll_idx;
+ enum poll_states poll_status;
+ /* Lock to protect access to poll_status */
+ spinlock_t poll_lock;
+ refcount_t users;
};
/* Only one SCMI VirtIO device can possibly exist */
static struct virtio_device *scmi_vdev;
+static void scmi_vio_channel_ready(struct scmi_vio_channel *vioch,
+ struct scmi_chan_info *cinfo)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vioch->lock, flags);
+ cinfo->transport_info = vioch;
+ /* Indirectly setting channel not available any more */
+ vioch->cinfo = cinfo;
+ spin_unlock_irqrestore(&vioch->lock, flags);
+
+ refcount_set(&vioch->users, 1);
+}
+
+static inline bool scmi_vio_channel_acquire(struct scmi_vio_channel *vioch)
+{
+ return refcount_inc_not_zero(&vioch->users);
+}
+
+static inline void scmi_vio_channel_release(struct scmi_vio_channel *vioch)
+{
+ if (refcount_dec_and_test(&vioch->users)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&vioch->lock, flags);
+ if (vioch->shutdown_done) {
+ vioch->cinfo = NULL;
+ complete(vioch->shutdown_done);
+ }
+ spin_unlock_irqrestore(&vioch->lock, flags);
+ }
+}
+
+static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch)
+{
+ unsigned long flags;
+ DECLARE_COMPLETION_ONSTACK(vioch_shutdown_done);
+ void *deferred_wq = NULL;
+
+ /*
+ * Prepare to wait for the last release if not already released
+ * or in progress.
+ */
+ spin_lock_irqsave(&vioch->lock, flags);
+ if (!vioch->cinfo || vioch->shutdown_done) {
+ spin_unlock_irqrestore(&vioch->lock, flags);
+ return;
+ }
+
+ vioch->shutdown_done = &vioch_shutdown_done;
+ virtio_break_device(vioch->vqueue->vdev);
+ if (!vioch->is_rx && vioch->deferred_tx_wq) {
+ deferred_wq = vioch->deferred_tx_wq;
+ /* Cannot be kicked anymore after this...*/
+ vioch->deferred_tx_wq = NULL;
+ }
+ spin_unlock_irqrestore(&vioch->lock, flags);
+
+ if (deferred_wq)
+ destroy_workqueue(deferred_wq);
+
+ scmi_vio_channel_release(vioch);
+
+ /* Let any possibly concurrent RX path release the channel */
+ wait_for_completion(vioch->shutdown_done);
+}
+
+/* Assumed to be called with the vio channel already acquired */
+static struct scmi_vio_msg *
+scmi_virtio_get_free_msg(struct scmi_vio_channel *vioch)
+{
+ unsigned long flags;
+ struct scmi_vio_msg *msg;
+
+ spin_lock_irqsave(&vioch->free_lock, flags);
+ if (list_empty(&vioch->free_list)) {
+ spin_unlock_irqrestore(&vioch->free_lock, flags);
+ return NULL;
+ }
+
+ msg = list_first_entry(&vioch->free_list, typeof(*msg), list);
+ list_del_init(&msg->list);
+ spin_unlock_irqrestore(&vioch->free_lock, flags);
+
+ /* Still no users, no need to acquire poll_lock */
+ msg->poll_status = VIO_MSG_NOT_POLLED;
+ refcount_set(&msg->users, 1);
+
+ return msg;
+}
+
+static inline bool scmi_vio_msg_acquire(struct scmi_vio_msg *msg)
+{
+ return refcount_inc_not_zero(&msg->users);
+}
+
+/* Assumed to be called with the vio channel already acquired */
+static inline bool scmi_vio_msg_release(struct scmi_vio_channel *vioch,
+ struct scmi_vio_msg *msg)
+{
+ bool ret;
+
+ ret = refcount_dec_and_test(&msg->users);
+ if (ret) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&vioch->free_lock, flags);
+ list_add_tail(&msg->list, &vioch->free_list);
+ spin_unlock_irqrestore(&vioch->free_lock, flags);
+ }
+
+ return ret;
+}
+
static bool scmi_vio_have_vq_rx(struct virtio_device *vdev)
{
return virtio_has_feature(vdev, VIRTIO_SCMI_F_P2A_CHANNELS);
}
static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
- struct scmi_vio_msg *msg,
- struct device *dev)
+ struct scmi_vio_msg *msg)
{
struct scatterlist sg_in;
int rc;
unsigned long flags;
+ struct device *dev = &vioch->vqueue->vdev->dev;
sg_init_one(&sg_in, msg->input, VIRTIO_SCMI_MAX_PDU_SIZE);
@@ -95,7 +244,7 @@ static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC);
if (rc)
- dev_err_once(dev, "failed to add to virtqueue (%d)\n", rc);
+ dev_err(dev, "failed to add to RX virtqueue (%d)\n", rc);
else
virtqueue_kick(vioch->vqueue);
@@ -104,22 +253,22 @@ static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
return rc;
}
+/*
+ * Assumed to be called with the channel already acquired or not ready at all;
+ * vioch->lock MUST NOT have been already acquired.
+ */
static void scmi_finalize_message(struct scmi_vio_channel *vioch,
struct scmi_vio_msg *msg)
{
- if (vioch->is_rx) {
- scmi_vio_feed_vq_rx(vioch, msg, vioch->cinfo->dev);
- } else {
- /* Here IRQs are assumed to be already disabled by the caller */
- spin_lock(&vioch->lock);
- list_add(&msg->list, &vioch->free_list);
- spin_unlock(&vioch->lock);
- }
+ if (vioch->is_rx)
+ scmi_vio_feed_vq_rx(vioch, msg);
+ else
+ scmi_vio_msg_release(vioch, msg);
}
static void scmi_vio_complete_cb(struct virtqueue *vqueue)
{
- unsigned long ready_flags;
+ unsigned long flags;
unsigned int length;
struct scmi_vio_channel *vioch;
struct scmi_vio_msg *msg;
@@ -130,27 +279,25 @@ static void scmi_vio_complete_cb(struct virtqueue *vqueue)
vioch = &((struct scmi_vio_channel *)vqueue->vdev->priv)[vqueue->index];
for (;;) {
- spin_lock_irqsave(&vioch->ready_lock, ready_flags);
+ if (!scmi_vio_channel_acquire(vioch))
+ return;
- if (!vioch->ready) {
- if (!cb_enabled)
- (void)virtqueue_enable_cb(vqueue);
- goto unlock_ready_out;
- }
-
- /* IRQs already disabled here no need to irqsave */
- spin_lock(&vioch->lock);
+ spin_lock_irqsave(&vioch->lock, flags);
if (cb_enabled) {
virtqueue_disable_cb(vqueue);
cb_enabled = false;
}
+
msg = virtqueue_get_buf(vqueue, &length);
if (!msg) {
- if (virtqueue_enable_cb(vqueue))
- goto unlock_out;
+ if (virtqueue_enable_cb(vqueue)) {
+ spin_unlock_irqrestore(&vioch->lock, flags);
+ scmi_vio_channel_release(vioch);
+ return;
+ }
cb_enabled = true;
}
- spin_unlock(&vioch->lock);
+ spin_unlock_irqrestore(&vioch->lock, flags);
if (msg) {
msg->rx_len = length;
@@ -161,19 +308,57 @@ static void scmi_vio_complete_cb(struct virtqueue *vqueue)
}
/*
- * Release ready_lock and re-enable IRQs between loop iterations
- * to allow virtio_chan_free() to possibly kick in and set the
- * flag vioch->ready to false even in between processing of
- * messages, so as to force outstanding messages to be ignored
- * when system is shutting down.
+ * Release vio channel between loop iterations to allow
+ * virtio_chan_free() to eventually fully release it when
+ * shutting down; in such a case, any outstanding message will
+ * be ignored since this loop will bail out at the next
+ * iteration.
+ */
+ scmi_vio_channel_release(vioch);
+ }
+}
+
+static void scmi_vio_deferred_tx_worker(struct work_struct *work)
+{
+ unsigned long flags;
+ struct scmi_vio_channel *vioch;
+ struct scmi_vio_msg *msg, *tmp;
+
+ vioch = container_of(work, struct scmi_vio_channel, deferred_tx_work);
+
+ if (!scmi_vio_channel_acquire(vioch))
+ return;
+
+ /*
+ * Process pre-fetched messages: these could be non-polled messages or
+ * late timed-out replies to polled messages dequeued by chance while
+ * polling for some other messages: this worker is in charge of processing
+ * the valid, non-expired messages and, in any case, of finally freeing them all.
+ */
+ spin_lock_irqsave(&vioch->pending_lock, flags);
+
+ /* Scan the list of possibly pre-fetched messages during polling. */
+ list_for_each_entry_safe(msg, tmp, &vioch->pending_cmds_list, list) {
+ list_del(&msg->list);
+
+ /*
+ * Channel is acquired here (cannot vanish) and this message
+ * is no longer processed elsewhere, so no poll_lock is needed.
*/
- spin_unlock_irqrestore(&vioch->ready_lock, ready_flags);
+ if (msg->poll_status == VIO_MSG_NOT_POLLED)
+ scmi_rx_callback(vioch->cinfo,
+ msg_read_header(msg->input), msg);
+
+ /* Free the processed message once done */
+ scmi_vio_msg_release(vioch, msg);
}
-unlock_out:
- spin_unlock(&vioch->lock);
-unlock_ready_out:
- spin_unlock_irqrestore(&vioch->ready_lock, ready_flags);
+ spin_unlock_irqrestore(&vioch->pending_lock, flags);
+
+ /* Process possibly still pending messages */
+ scmi_vio_complete_cb(vioch->vqueue);
+
+ scmi_vio_channel_release(vioch);
}
static const char *const scmi_vio_vqueue_names[] = { "tx", "rx" };
@@ -193,8 +378,8 @@ static unsigned int virtio_get_max_msg(struct scmi_chan_info *base_cinfo)
static int virtio_link_supplier(struct device *dev)
{
if (!scmi_vdev) {
- dev_notice_once(dev,
- "Deferring probe after not finding a bound scmi-virtio device\n");
+ dev_notice(dev,
+ "Deferring probe after not finding a bound scmi-virtio device\n");
return -EPROBE_DEFER;
}
@@ -234,7 +419,6 @@ static bool virtio_chan_available(struct device *dev, int idx)
static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
bool tx)
{
- unsigned long flags;
struct scmi_vio_channel *vioch;
int index = tx ? VIRTIO_SCMI_VQ_TX : VIRTIO_SCMI_VQ_RX;
int i;
@@ -244,6 +428,19 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
vioch = &((struct scmi_vio_channel *)scmi_vdev->priv)[index];
+ /* Setup a deferred worker for polling. */
+ if (tx && !vioch->deferred_tx_wq) {
+ vioch->deferred_tx_wq =
+ alloc_workqueue(dev_name(&scmi_vdev->dev),
+ WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS,
+ 0);
+ if (!vioch->deferred_tx_wq)
+ return -ENOMEM;
+
+ INIT_WORK(&vioch->deferred_tx_work,
+ scmi_vio_deferred_tx_worker);
+ }
+
for (i = 0; i < vioch->max_msg; i++) {
struct scmi_vio_msg *msg;
@@ -257,6 +454,8 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
GFP_KERNEL);
if (!msg->request)
return -ENOMEM;
+ spin_lock_init(&msg->poll_lock);
+ refcount_set(&msg->users, 1);
}
msg->input = devm_kzalloc(cinfo->dev, VIRTIO_SCMI_MAX_PDU_SIZE,
@@ -264,44 +463,23 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
if (!msg->input)
return -ENOMEM;
- if (tx) {
- spin_lock_irqsave(&vioch->lock, flags);
- list_add_tail(&msg->list, &vioch->free_list);
- spin_unlock_irqrestore(&vioch->lock, flags);
- } else {
- scmi_vio_feed_vq_rx(vioch, msg, cinfo->dev);
- }
+ scmi_finalize_message(vioch, msg);
}
- spin_lock_irqsave(&vioch->lock, flags);
- cinfo->transport_info = vioch;
- /* Indirectly setting channel not available any more */
- vioch->cinfo = cinfo;
- spin_unlock_irqrestore(&vioch->lock, flags);
-
- spin_lock_irqsave(&vioch->ready_lock, flags);
- vioch->ready = true;
- spin_unlock_irqrestore(&vioch->ready_lock, flags);
+ scmi_vio_channel_ready(vioch, cinfo);
return 0;
}
static int virtio_chan_free(int id, void *p, void *data)
{
- unsigned long flags;
struct scmi_chan_info *cinfo = p;
struct scmi_vio_channel *vioch = cinfo->transport_info;
- spin_lock_irqsave(&vioch->ready_lock, flags);
- vioch->ready = false;
- spin_unlock_irqrestore(&vioch->ready_lock, flags);
+ scmi_vio_channel_cleanup_sync(vioch);
scmi_free_channel(cinfo, data, id);
- spin_lock_irqsave(&vioch->lock, flags);
- vioch->cinfo = NULL;
- spin_unlock_irqrestore(&vioch->lock, flags);
-
return 0;
}
@@ -316,33 +494,56 @@ static int virtio_send_message(struct scmi_chan_info *cinfo,
int rc;
struct scmi_vio_msg *msg;
- spin_lock_irqsave(&vioch->lock, flags);
+ if (!scmi_vio_channel_acquire(vioch))
+ return -EINVAL;
- if (list_empty(&vioch->free_list)) {
- spin_unlock_irqrestore(&vioch->lock, flags);
+ msg = scmi_virtio_get_free_msg(vioch);
+ if (!msg) {
+ scmi_vio_channel_release(vioch);
return -EBUSY;
}
- msg = list_first_entry(&vioch->free_list, typeof(*msg), list);
- list_del(&msg->list);
-
msg_tx_prepare(msg->request, xfer);
sg_init_one(&sg_out, msg->request, msg_command_size(xfer));
sg_init_one(&sg_in, msg->input, msg_response_size(xfer));
+ spin_lock_irqsave(&vioch->lock, flags);
+
+ /*
+ * If polling was requested for this transaction:
+ * - retrieve last used index (will be used as polling reference)
+ * - bind the polled message to the xfer via .priv
+ * - grab an additional msg refcount for the poll-path
+ */
+ if (xfer->hdr.poll_completion) {
+ msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue);
+ /* Still no users, no need to acquire poll_lock */
+ msg->poll_status = VIO_MSG_POLLING;
+ scmi_vio_msg_acquire(msg);
+ /* Ensure initialized msg is visibly bound to xfer */
+ smp_store_mb(xfer->priv, msg);
+ }
+
rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC);
- if (rc) {
- list_add(&msg->list, &vioch->free_list);
- dev_err_once(vioch->cinfo->dev,
- "%s() failed to add to virtqueue (%d)\n", __func__,
- rc);
- } else {
+ if (rc)
+ dev_err(vioch->cinfo->dev,
+ "failed to add to TX virtqueue (%d)\n", rc);
+ else
virtqueue_kick(vioch->vqueue);
- }
spin_unlock_irqrestore(&vioch->lock, flags);
+ if (rc) {
+ /* Ensure order between xfer->priv clear and vq feeding */
+ smp_store_mb(xfer->priv, NULL);
+ if (xfer->hdr.poll_completion)
+ scmi_vio_msg_release(vioch, msg);
+ scmi_vio_msg_release(vioch, msg);
+ }
+
+ scmi_vio_channel_release(vioch);
+
return rc;
}
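
The send path above follows the standard virtio TX idiom for a command/response pair: one device-readable scatterlist for the request followed by one device-writable scatterlist for the response, queued with a single virtqueue_add_sgs() call. A minimal, illustrative-only sketch; my_send() and its parameters are hypothetical names.

#include <linux/scatterlist.h>
#include <linux/virtio.h>

static int my_send(struct virtqueue *vq, void *req, size_t req_len,
		   void *resp, size_t resp_len, void *token)
{
	struct scatterlist sg_out, sg_in;
	struct scatterlist *sgs[] = { &sg_out, &sg_in };
	int rc;

	sg_init_one(&sg_out, req, req_len);	/* out: driver -> device */
	sg_init_one(&sg_in, resp, resp_len);	/* in: device -> driver */

	/*
	 * One readable sg followed by one writable sg; 'token' is what
	 * virtqueue_get_buf() later returns once the device uses the buffers.
	 */
	rc = virtqueue_add_sgs(vq, sgs, 1, 1, token, GFP_ATOMIC);
	if (!rc)
		virtqueue_kick(vq);

	return rc;
}
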
@@ -351,10 +552,8 @@ static void virtio_fetch_response(struct scmi_chan_info *cinfo,
{
struct scmi_vio_msg *msg = xfer->priv;
- if (msg) {
+ if (msg)
msg_fetch_response(msg->input, msg->rx_len, xfer);
- xfer->priv = NULL;
- }
}
static void virtio_fetch_notification(struct scmi_chan_info *cinfo,
@@ -362,10 +561,225 @@ static void virtio_fetch_notification(struct scmi_chan_info *cinfo,
{
struct scmi_vio_msg *msg = xfer->priv;
- if (msg) {
+ if (msg)
msg_fetch_notification(msg->input, msg->rx_len, max_len, xfer);
- xfer->priv = NULL;
+}
+
+/**
+ * virtio_mark_txdone - Mark transmission done
+ *
+ * Free only completed polling transfer messages.
+ *
+ * Note that in the SCMI VirtIO transport we never explicitly release still
+ * outstanding but timed-out messages by forcibly re-adding them to the
+ * free-list inside the TX code path; we instead let IRQ/RX callbacks, or the
+ * TX deferred worker, eventually clean up such messages once a late reply is
+ * finally received and discarded (if it ever is).
+ *
+ * This approach was deemed preferable since those pending timed-out buffers are
+ * still effectively owned by the SCMI platform VirtIO device even after timeout
+ * expiration: forcibly freeing and reusing them before they have been returned
+ * explicitly by the SCMI platform could lead to subtle bugs due to message
+ * corruption.
+ * An SCMI platform VirtIO device which never returns message buffers is
+ * broken anyway and will quickly lead to exhaustion of available messages.
+ *
+ * For this same reason, here, we take care to free only the polled messages
+ * that have actually been replied to (and only if they were not, by chance,
+ * already processed on the IRQ path - the initial scmi_vio_msg_release() takes
+ * care of this), and also any timed-out polled message that indeed appears to
+ * have been at least dequeued from the virtqueues (VIO_MSG_POLL_DONE): this is
+ * needed since such messages won't be freed elsewhere. Any other polled
+ * message is marked as VIO_MSG_POLL_TIMEOUT.
+ *
+ * Possible late replies to timed-out polled messages will eventually be freed
+ * by RX callbacks if delivered on the IRQ path or by the deferred TX worker if
+ * dequeued on some other polling path.
+ *
+ * @cinfo: SCMI channel info
+ * @ret: Transmission return code
+ * @xfer: Transfer descriptor
+ */
+static void virtio_mark_txdone(struct scmi_chan_info *cinfo, int ret,
+ struct scmi_xfer *xfer)
+{
+ unsigned long flags;
+ struct scmi_vio_channel *vioch = cinfo->transport_info;
+ struct scmi_vio_msg *msg = xfer->priv;
+
+ if (!msg || !scmi_vio_channel_acquire(vioch))
+ return;
+
+ /* Ensure msg is unbound from xfer anyway at this point */
+ smp_store_mb(xfer->priv, NULL);
+
+ /* Must be a polled xfer and not already freed on the IRQ path */
+ if (!xfer->hdr.poll_completion || scmi_vio_msg_release(vioch, msg)) {
+ scmi_vio_channel_release(vioch);
+ return;
}
+
+ spin_lock_irqsave(&msg->poll_lock, flags);
+ /* Do not free timed-out polled messages if they are still in flight */
+ if (ret != -ETIMEDOUT || msg->poll_status == VIO_MSG_POLL_DONE)
+ scmi_vio_msg_release(vioch, msg);
+ else if (msg->poll_status == VIO_MSG_POLLING)
+ msg->poll_status = VIO_MSG_POLL_TIMEOUT;
+ spin_unlock_irqrestore(&msg->poll_lock, flags);
+
+ scmi_vio_channel_release(vioch);
+}
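
The msg 'users' refcount initialized in virtio_chan_setup() is what lets mark_txdone, the IRQ/RX path and the deferred worker share ownership of a message safely: whoever drops the last reference recycles it. The scmi_vio_msg_acquire()/scmi_vio_msg_release() bodies sit outside this excerpt, so the following is only a generic sketch of that shared-ownership scheme, not the actual helpers; my_msg_get()/my_msg_put() are hypothetical names.

#include <linux/refcount.h>

struct my_msg {
	refcount_t users;
};

static void my_msg_get(struct my_msg *m)
{
	refcount_inc(&m->users);
}

/* Returns true when the last reference was dropped and the message recycled */
static bool my_msg_put(struct my_msg *m)
{
	if (!refcount_dec_and_test(&m->users))
		return false;

	/* Last user: re-arm the refcount and, e.g., put it back on a free list */
	refcount_set(&m->users, 1);
	return true;
}
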
+
+/**
+ * virtio_poll_done - Provide polling support for VirtIO transport
+ *
+ * @cinfo: SCMI channel info
+ * @xfer: Reference to the transfer being polled for.
+ *
+ * VirtIO core provides a polling mechanism based only on last used indexes:
+ * this means that it is possible to poll the virtqueues waiting for something
+ * new to arrive from the host side, but the only way to check if the freshly
+ * arrived buffer was indeed what we were waiting for is to compare the newly
+ * arrived message descriptor with the one we are polling on.
+ *
+ * As a consequence, we may happen to dequeue something different from the
+ * buffer we were poll-waiting for: if that is the case, such early-fetched
+ * buffers are then added to the @pending_cmds_list for later processing by a
+ * dedicated deferred worker.
+ *
+ * So, basically, once something new is spotted we proceed to de-queue all the
+ * freshly received used buffers until we find the one we were polling for, or
+ * until we have 'seemingly' emptied the virtqueue; if some buffers are still
+ * pending in the vqueue at the end of the polling loop (possible due to
+ * inherent races in the virtqueue handling mechanisms), we similarly kick the
+ * deferred worker and let it process those, to avoid looping indefinitely in
+ * the .poll_done busy-waiting helper.
+ *
+ * Finally, we also delegate to the deferred worker the final freeing of any
+ * timed-out reply to a polled message that we happen to dequeue.
+ *
+ * Note that, since we do NOT have a per-message suppress-notification
+ * mechanism, the message we are polling for could alternatively be delivered
+ * via the usual IRQ callbacks on another core which happens to have IRQs
+ * enabled while we are actively polling for it here: in such a case it will be
+ * handled as such by scmi_rx_callback() and the polling loop in the SCMI Core
+ * TX path will be transparently terminated anyway.
+ *
+ * Return: True once polling has successfully completed.
+ */
+static bool virtio_poll_done(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ bool pending, found = false;
+ unsigned int length, any_prefetched = 0;
+ unsigned long flags;
+ struct scmi_vio_msg *next_msg, *msg = xfer->priv;
+ struct scmi_vio_channel *vioch = cinfo->transport_info;
+
+ if (!msg)
+ return true;
+
+ /*
+ * Already processed by another polling loop on another CPU?
+ *
+ * Note that this message is acquired on the poll path so cannot vanish
+ * while inside this loop iteration even if concurrently processed on
+ * the IRQ path.
+ *
+ * Avoid acquiring poll_lock, since poll_status can be changed in a
+ * relevant manner only later in this same thread of execution: any
+ * other possible change made concurrently by other polling loops or by
+ * a reply delivered on the IRQ path has no meaningful impact on this
+ * loop iteration: in other words, it is harmless to allow this possible
+ * race, and it lets us avoid spinlocking with IRQs off in this initial
+ * part of the polling loop.
+ */
+ if (msg->poll_status == VIO_MSG_POLL_DONE)
+ return true;
+
+ if (!scmi_vio_channel_acquire(vioch))
+ return true;
+
+ /* Has the cmdq index moved at all? */
+ pending = virtqueue_poll(vioch->vqueue, msg->poll_idx);
+ if (!pending) {
+ scmi_vio_channel_release(vioch);
+ return false;
+ }
+
+ spin_lock_irqsave(&vioch->lock, flags);
+ virtqueue_disable_cb(vioch->vqueue);
+
+ /*
+ * Process all new messages till the polled-for message is found OR
+ * the vqueue is empty.
+ */
+ while ((next_msg = virtqueue_get_buf(vioch->vqueue, &length))) {
+ bool next_msg_done = false;
+
+ /*
+ * Mark any dequeued buffer message as VIO_MSG_POLL_DONE so
+ * that it can be properly freed even on timeout in mark_txdone.
+ */
+ spin_lock(&next_msg->poll_lock);
+ if (next_msg->poll_status == VIO_MSG_POLLING) {
+ next_msg->poll_status = VIO_MSG_POLL_DONE;
+ next_msg_done = true;
+ }
+ spin_unlock(&next_msg->poll_lock);
+
+ next_msg->rx_len = length;
+ /* Is this the message we were polling for? */
+ if (next_msg == msg) {
+ found = true;
+ break;
+ } else if (next_msg_done) {
+ /* Skip the rest if this was another polled msg */
+ continue;
+ }
+
+ /*
+ * Enqueue for later processing any non-polled message and any
+ * timed-out polled one that we happen to have dequeued.
+ */
+ spin_lock(&next_msg->poll_lock);
+ if (next_msg->poll_status == VIO_MSG_NOT_POLLED ||
+ next_msg->poll_status == VIO_MSG_POLL_TIMEOUT) {
+ spin_unlock(&next_msg->poll_lock);
+
+ any_prefetched++;
+ spin_lock(&vioch->pending_lock);
+ list_add_tail(&next_msg->list,
+ &vioch->pending_cmds_list);
+ spin_unlock(&vioch->pending_lock);
+ } else {
+ spin_unlock(&next_msg->poll_lock);
+ }
+ }
+
+ /*
+ * When the polling loop has successfully terminated, anything that was
+ * queued in the meantime will be served by the deferred worker OR by
+ * the normal IRQ callback OR by other polling loops.
+ *
+ * If we are still looking for the polled reply, the polling index has
+ * to be updated to the current vqueue last used index.
+ */
+ if (found) {
+ pending = !virtqueue_enable_cb(vioch->vqueue);
+ } else {
+ msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue);
+ pending = virtqueue_poll(vioch->vqueue, msg->poll_idx);
+ }
+
+ if (vioch->deferred_tx_wq && (any_prefetched || pending))
+ queue_work(vioch->deferred_tx_wq, &vioch->deferred_tx_work);
+
+ spin_unlock_irqrestore(&vioch->lock, flags);
+
+ scmi_vio_channel_release(vioch);
+
+ return found;
}
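
virtio_poll_done() above is built on three virtio polling primitives: virtqueue_enable_cb_prepare() to snapshot the last-used index, virtqueue_poll() to check whether anything new has arrived since that snapshot, and virtqueue_get_buf() (with callbacks disabled) to dequeue it. A simplified, illustrative-only sketch of that sequence as a single shot; my_poll_once() is a hypothetical name and omits the locking and message matching done above.

#include <linux/virtio.h>

static void *my_poll_once(struct virtqueue *vq, unsigned int *len)
{
	unsigned int opaque = virtqueue_enable_cb_prepare(vq);
	void *buf = NULL;

	if (virtqueue_poll(vq, opaque)) {	/* anything new since the snapshot? */
		virtqueue_disable_cb(vq);	/* keep callbacks off while dequeuing */
		buf = virtqueue_get_buf(vq, len);
	}

	return buf;
}
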
static const struct scmi_transport_ops scmi_virtio_ops = {
@@ -377,6 +791,8 @@ static const struct scmi_transport_ops scmi_virtio_ops = {
.send_message = virtio_send_message,
.fetch_response = virtio_fetch_response,
.fetch_notification = virtio_fetch_notification,
+ .mark_txdone = virtio_mark_txdone,
+ .poll_done = virtio_poll_done,
};
static int scmi_vio_probe(struct virtio_device *vdev)
@@ -417,8 +833,10 @@ static int scmi_vio_probe(struct virtio_device *vdev)
unsigned int sz;
spin_lock_init(&channels[i].lock);
- spin_lock_init(&channels[i].ready_lock);
+ spin_lock_init(&channels[i].free_lock);
INIT_LIST_HEAD(&channels[i].free_list);
+ spin_lock_init(&channels[i].pending_lock);
+ INIT_LIST_HEAD(&channels[i].pending_cmds_list);
channels[i].vqueue = vqs[i];
sz = virtqueue_get_vring_size(channels[i].vqueue);
@@ -427,10 +845,10 @@ static int scmi_vio_probe(struct virtio_device *vdev)
sz /= DESCRIPTORS_PER_TX_MSG;
if (sz > MSG_TOKEN_MAX) {
- dev_info_once(dev,
- "%s virtqueue could hold %d messages. Only %ld allowed to be pending.\n",
- channels[i].is_rx ? "rx" : "tx",
- sz, MSG_TOKEN_MAX);
+ dev_info(dev,
+ "%s virtqueue could hold %d messages. Only %ld allowed to be pending.\n",
+ channels[i].is_rx ? "rx" : "tx",
+ sz, MSG_TOKEN_MAX);
sz = MSG_TOKEN_MAX;
}
channels[i].max_msg = sz;
@@ -460,12 +878,13 @@ static void scmi_vio_remove(struct virtio_device *vdev)
static int scmi_vio_validate(struct virtio_device *vdev)
{
+#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE
if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
dev_err(&vdev->dev,
"device does not comply with spec version 1.x\n");
return -EINVAL;
}
-
+#endif
return 0;
}
@@ -503,7 +922,9 @@ const struct scmi_desc scmi_virtio_desc = {
.transport_init = virtio_scmi_init,
.transport_exit = virtio_scmi_exit,
.ops = &scmi_virtio_ops,
- .max_rx_timeout_ms = 60000, /* for non-realtime virtio devices */
+ /* for non-realtime virtio devices */
+ .max_rx_timeout_ms = VIRTIO_MAX_RX_TIMEOUT_MS,
.max_msg = 0, /* overridden by virtio_get_max_msg() */
.max_msg_size = VIRTIO_SCMI_MAX_MSG_SIZE,
+ .atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE),
};