Diffstat (limited to 'drivers/firmware/arm_scmi/driver.c')
 -rw-r--r--  drivers/firmware/arm_scmi/driver.c  |  236
 1 file changed, 192 insertions(+), 44 deletions(-)
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index b406b3f78f46..46118300a4d1 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -131,6 +131,12 @@ struct scmi_protocol_instance {
* MAX_PROTOCOLS_IMP elements allocated by the base protocol
* @active_protocols: IDR storing device_nodes for protocols actually defined
* in the DT and confirmed as implemented by fw.
+ * @atomic_threshold: Optional system-wide DT-configured threshold, expressed
+ * in microseconds, for atomic operations.
+ * Only SCMI synchronous commands reported by the platform to have an
+ * execution latency less than or equal to this threshold should be
+ * considered for atomic mode operation: the final decision is left to
+ * the individual SCMI drivers.
* @notify_priv: Pointer to private data structure specific to notifications.
* @node: List head
* @users: Number of users of this instance
@@ -149,6 +155,7 @@ struct scmi_info {
struct mutex protocols_mtx;
u8 *protocols_imp;
struct idr active_protocols;
+ unsigned int atomic_threshold;
void *notify_priv;
struct list_head node;
int users;
@@ -609,6 +616,25 @@ static inline void scmi_clear_channel(struct scmi_info *info,
info->desc->ops->clear_channel(cinfo);
}
+static inline bool is_polling_required(struct scmi_chan_info *cinfo,
+ struct scmi_info *info)
+{
+ return cinfo->no_completion_irq || info->desc->force_polling;
+}
+
+static inline bool is_transport_polling_capable(struct scmi_info *info)
+{
+ return info->desc->ops->poll_done ||
+ info->desc->sync_cmds_completed_on_ret;
+}
+
+static inline bool is_polling_enabled(struct scmi_chan_info *cinfo,
+ struct scmi_info *info)
+{
+ return is_polling_required(cinfo, info) &&
+ is_transport_polling_capable(info);
+}
+
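The three predicates above compose as follows: polling is required when the channel lacks a completion IRQ or the transport forces it, it is possible when the transport exposes .poll_done() or completes synchronous commands on return, and it is enabled only when both hold. A minimal caller sketch, mirroring the checks this patch adds to do_xfer() and scmi_chan_setup() below (all names as in this patch):

    if (is_polling_required(cinfo, info) && !is_transport_polling_capable(info))
        /* setup-time diagnostic: polling needed but unsupported */
        dev_warn(dev, "Polling mode NOT supported by transport.\n");
    else if (is_polling_enabled(cinfo, info))
        /* force polling on this transfer */
        xfer->hdr.poll_completion = true;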
static void scmi_handle_notification(struct scmi_chan_info *cinfo,
u32 msg_hdr, void *priv)
{
@@ -629,7 +655,8 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
unpack_scmi_header(msg_hdr, &xfer->hdr);
if (priv)
- xfer->priv = priv;
+ /* Ensure order between xfer->priv store and following ops */
+ smp_store_mb(xfer->priv, priv);
info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
xfer);
scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
@@ -661,7 +688,8 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
xfer->rx.len = info->desc->max_msg_size;
if (priv)
- xfer->priv = priv;
+ /* Ensure order between xfer->priv store and following ops */
+ smp_store_mb(xfer->priv, priv);
info->desc->ops->fetch_response(cinfo, xfer);
trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
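smp_store_mb() in the two hunks above performs the store and then a full memory barrier, so the write to xfer->priv is ordered before the subsequent fetch_notification()/fetch_response() and notification dispatch; per the kernel's memory-barriers documentation it is roughly equivalent to:

    /* Roughly what smp_store_mb(xfer->priv, priv) expands to: */
    WRITE_ONCE(xfer->priv, priv);
    smp_mb();    /* order the store against all later loads and stores */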
@@ -724,8 +752,6 @@ static void xfer_put(const struct scmi_protocol_handle *ph,
__scmi_xfer_put(&info->tx_minfo, xfer);
}
-#define SCMI_MAX_POLL_TO_NS (100 * NSEC_PER_USEC)
-
static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer, ktime_t stop)
{
@@ -741,6 +767,79 @@ static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
}
/**
+ * scmi_wait_for_message_response - A helper to group all the possible ways of
+ * waiting for a synchronous message response.
+ *
+ * @cinfo: SCMI channel info
+ * @xfer: Reference to the transfer being waited for.
+ *
+ * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
+ * configuration flags like xfer->hdr.poll_completion.
+ *
+ * Return: 0 on success, error otherwise.
+ */
+static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+ struct device *dev = info->dev;
+ int ret = 0, timeout_ms = info->desc->max_rx_timeout_ms;
+
+ trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id,
+ xfer->hdr.protocol_id, xfer->hdr.seq,
+ timeout_ms,
+ xfer->hdr.poll_completion);
+
+ if (xfer->hdr.poll_completion) {
+ /*
+ * Real polling is needed only if transport has NOT declared
+ * itself to support synchronous command replies.
+ */
+ if (!info->desc->sync_cmds_completed_on_ret) {
+ /*
+ * Poll on xfer using transport provided .poll_done();
+ * assumes no completion interrupt was available.
+ */
+ ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms);
+
+ spin_until_cond(scmi_xfer_done_no_timeout(cinfo,
+ xfer, stop));
+ if (ktime_after(ktime_get(), stop)) {
+ dev_err(dev,
+ "timed out in resp(caller: %pS) - polling\n",
+ (void *)_RET_IP_);
+ ret = -ETIMEDOUT;
+ }
+ }
+
+ if (!ret) {
+ unsigned long flags;
+
+ /*
+ * Do not fetch_response if an out-of-order delayed
+ * response is being processed.
+ */
+ spin_lock_irqsave(&xfer->lock, flags);
+ if (xfer->state == SCMI_XFER_SENT_OK) {
+ info->desc->ops->fetch_response(cinfo, xfer);
+ xfer->state = SCMI_XFER_RESP_OK;
+ }
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ }
+ } else {
+ /* And we wait for the response. */
+ if (!wait_for_completion_timeout(&xfer->done,
+ msecs_to_jiffies(timeout_ms))) {
+ dev_err(dev, "timed out in resp(caller: %pS)\n",
+ (void *)_RET_IP_);
+ ret = -ETIMEDOUT;
+ }
+ }
+
+ return ret;
+}
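In condensed form, the strategy selected by scmi_wait_for_message_response() above can be summarized as follows (a sketch only; the two helpers are hypothetical stand-ins for the inline code):

    if (xfer->hdr.poll_completion) {
        if (!info->desc->sync_cmds_completed_on_ret)
            /* hypothetical helper naming the spin_until_cond() busy-wait */
            ret = busy_wait_on_poll_done(cinfo, xfer, timeout_ms);
        if (!ret)
            /* hypothetical helper naming the locked SCMI_XFER_SENT_OK check */
            fetch_response_if_still_pending(cinfo, xfer);
    } else {
        /* sleeping wait on the completion, as before */
        if (!wait_for_completion_timeout(&xfer->done,
                                         msecs_to_jiffies(timeout_ms)))
            ret = -ETIMEDOUT;
    }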
+
+/**
* do_xfer() - Do one transfer
*
* @ph: Pointer to SCMI protocol handle
@@ -754,18 +853,26 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
struct scmi_xfer *xfer)
{
int ret;
- int timeout;
const struct scmi_protocol_instance *pi = ph_to_pi(ph);
struct scmi_info *info = handle_to_scmi_info(pi->handle);
struct device *dev = info->dev;
struct scmi_chan_info *cinfo;
- if (xfer->hdr.poll_completion && !info->desc->ops->poll_done) {
+ /* Check first for a polling request on custom command xfers */
+ if (xfer->hdr.poll_completion && !is_transport_polling_capable(info)) {
dev_warn_once(dev,
"Polling mode is not supported by transport.\n");
return -EINVAL;
}
+ cinfo = idr_find(&info->tx_idr, pi->proto->id);
+ if (unlikely(!cinfo))
+ return -EINVAL;
+
+ /* True ONLY if also supported by transport. */
+ if (is_polling_enabled(cinfo, info))
+ xfer->hdr.poll_completion = true;
+
/*
* Initialise protocol id now from protocol handle to avoid it being
* overridden by mistake (or malice) by the protocol code mangling with
@@ -774,10 +881,6 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
xfer->hdr.protocol_id = pi->proto->id;
reinit_completion(&xfer->done);
- cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
- if (unlikely(!cinfo))
- return -EINVAL;
-
trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
xfer->hdr.protocol_id, xfer->hdr.seq,
xfer->hdr.poll_completion);
@@ -798,41 +901,12 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
return ret;
}
- if (xfer->hdr.poll_completion) {
- ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);
-
- spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));
- if (ktime_before(ktime_get(), stop)) {
- unsigned long flags;
-
- /*
- * Do not fetch_response if an out-of-order delayed
- * response is being processed.
- */
- spin_lock_irqsave(&xfer->lock, flags);
- if (xfer->state == SCMI_XFER_SENT_OK) {
- info->desc->ops->fetch_response(cinfo, xfer);
- xfer->state = SCMI_XFER_RESP_OK;
- }
- spin_unlock_irqrestore(&xfer->lock, flags);
- } else {
- ret = -ETIMEDOUT;
- }
- } else {
- /* And we wait for the response. */
- timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
- if (!wait_for_completion_timeout(&xfer->done, timeout)) {
- dev_err(dev, "timed out in resp(caller: %pS)\n",
- (void *)_RET_IP_);
- ret = -ETIMEDOUT;
- }
- }
-
+ ret = scmi_wait_for_message_response(cinfo, xfer);
if (!ret && xfer->hdr.status)
ret = scmi_to_linux_errno(xfer->hdr.status);
if (info->desc->ops->mark_txdone)
- info->desc->ops->mark_txdone(cinfo, ret);
+ info->desc->ops->mark_txdone(cinfo, ret, xfer);
trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
xfer->hdr.protocol_id, xfer->hdr.seq, ret);
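Note the widened mark_txdone() signature: transports now also receive the completed xfer, so backends needing per-transfer state at TX-done time can reach it. A hypothetical backend hook might look like this (names are illustrative only; transport_info is the existing per-channel private pointer):

    static void my_mark_txdone(struct scmi_chan_info *cinfo, int ret,
                               struct scmi_xfer *xfer)
    {
        struct my_chan *ch = cinfo->transport_info;    /* hypothetical type */

        /* per-xfer resources can now be released here if needed */
        complete(&ch->tx_done);
    }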
@@ -858,6 +932,20 @@ static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
* @ph: Pointer to SCMI protocol handle
* @xfer: Transfer to initiate and wait for response
*
+ * Using asynchronous commands in atomic/polling mode should be avoided, since
+ * it could cause long busy-waiting here; polling is therefore ignored for the
+ * delayed response, and a WARN is emitted if it was requested for this command
+ * transaction, since upper layers should refrain from issuing such requests.
+ *
+ * The only other option would have been to refrain from using any asynchronous
+ * command, even when one is made available, once an atomic transport is
+ * detected, and to forcibly use the synchronous version instead (something
+ * easily attained at the protocol layer); but this would also have led to
+ * longer stalls of the channel for synchronous commands and possibly to
+ * timeouts. (In other words, there is usually a good reason if a platform
+ * provides an asynchronous version of a command, and we should prefer to use
+ * it... just not in atomic/polling mode.)
+ *
* Return: -ETIMEDOUT in case of no delayed response, if transmit error,
* return corresponding error, else if all goes well, return 0.
*/
@@ -869,12 +957,24 @@ static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
xfer->async_done = &async_response;
+ /*
+ * Delayed responses should not be polled, so an async command should
+ * not have been used when an atomic/poll context is required; WARN and
+ * perform a sleeping wait instead.
+ * (Note: Async + IgnoreDelayedResponses requests are sent via do_xfer.)
+ */
+ WARN_ON_ONCE(xfer->hdr.poll_completion);
+
ret = do_xfer(ph, xfer);
if (!ret) {
- if (!wait_for_completion_timeout(xfer->async_done, timeout))
+ if (!wait_for_completion_timeout(xfer->async_done, timeout)) {
+ dev_err(ph->dev,
+ "timed out in delayed resp(caller: %pS)\n",
+ (void *)_RET_IP_);
ret = -ETIMEDOUT;
- else if (xfer->hdr.status)
+ } else if (xfer->hdr.status) {
ret = scmi_to_linux_errno(xfer->hdr.status);
+ }
}
xfer->async_done = NULL;
@@ -1308,6 +1408,29 @@ static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id)
WARN_ON(ret);
}
+/**
+ * scmi_is_transport_atomic - Method to check if underlying transport for an
+ * SCMI instance is configured as atomic.
+ *
+ * @handle: A reference to the SCMI platform instance.
+ * @atomic_threshold: An optional return value for the currently configured
+ * system-wide threshold for atomic operations.
+ *
+ * Return: True if transport is configured as atomic
+ */
+static bool scmi_is_transport_atomic(const struct scmi_handle *handle,
+ unsigned int *atomic_threshold)
+{
+ bool ret;
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ ret = info->desc->atomic_enabled && is_transport_polling_capable(info);
+ if (ret && atomic_threshold)
+ *atomic_threshold = info->atomic_threshold;
+
+ return ret;
+}
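An SCMI driver would typically consume this through the handle method registered below. A hedged usage sketch (the threshold comparison and the 100 us figure are illustrative assumptions, not part of this patch):

    /* Hypothetical SCMI driver probe fragment */
    unsigned int threshold_us = 0;

    if (handle->is_transport_atomic(handle, &threshold_us) &&
        threshold_us >= 100 /* assumed typical command latency, in us */)
        pr_info("enabling atomic ops (threshold %u us)\n", threshold_us);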
+
static inline
struct scmi_handle *scmi_handle_get_from_info_unlocked(struct scmi_info *info)
{
@@ -1499,6 +1622,16 @@ static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
if (ret)
return ret;
+ if (tx && is_polling_required(cinfo, info)) {
+ if (is_transport_polling_capable(info))
+ dev_info(dev,
+ "Enabled polling mode TX channel - prot_id:%d\n",
+ prot_id);
+ else
+ dev_warn(dev,
+ "Polling mode NOT supported by transport.\n");
+ }
+
idr_alloc:
ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
if (ret != prot_id) {
@@ -1836,6 +1969,14 @@ static int scmi_probe(struct platform_device *pdev)
handle->devm_protocol_get = scmi_devm_protocol_get;
handle->devm_protocol_put = scmi_devm_protocol_put;
+ /* System-wide atomic threshold for atomic ops, if any */
+ if (!of_property_read_u32(np, "atomic-threshold-us",
+ &info->atomic_threshold))
+ dev_info(dev,
+ "SCMI System wide atomic threshold set to %d us\n",
+ info->atomic_threshold);
+ handle->is_transport_atomic = scmi_is_transport_atomic;
+
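For reference, a minimal sketch of how the parsed property might appear in a devicetree (node layout and the 30 us value are illustrative assumptions; only the atomic-threshold-us property name and the arm,scmi-smc compatible come from this patch):

    firmware {
        scmi {
            compatible = "arm,scmi-smc";
            atomic-threshold-us = <30>;    /* illustrative value */
        };
    };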
if (desc->ops->link_supplier) {
ret = desc->ops->link_supplier(dev);
if (ret)
@@ -1853,6 +1994,10 @@ static int scmi_probe(struct platform_device *pdev)
if (scmi_notification_init(handle))
dev_err(dev, "SCMI Notifications NOT available.\n");
+ if (info->desc->atomic_enabled && !is_transport_polling_capable(info))
+ dev_err(dev,
+ "Transport is not polling capable. Atomic mode not supported.\n");
+
/*
* Trigger SCMI Base protocol initialization.
* It's mandatory and won't be ever released/deinit until the
@@ -1994,6 +2139,9 @@ static const struct of_device_id scmi_of_match[] = {
#ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX
{ .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
#endif
+#ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE
+ { .compatible = "linaro,scmi-optee", .data = &scmi_optee_desc },
+#endif
#ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC
{ .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
#endif
@@ -2112,7 +2260,7 @@ static void __exit scmi_driver_exit(void)
}
module_exit(scmi_driver_exit);
-MODULE_ALIAS("platform: arm-scmi");
+MODULE_ALIAS("platform:arm-scmi");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI protocol driver");
MODULE_LICENSE("GPL v2");