Diffstat (limited to 'drivers/firmware/arm_scmi/driver.c')
| -rw-r--r-- | drivers/firmware/arm_scmi/driver.c | 301 |
1 file changed, 183 insertions, 118 deletions
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 1f53ca1f87e3..5caa9191a8d1 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -11,7 +11,7 @@ * various power domain DVFS including the core/cluster, certain system * clocks configuration, thermal sensors and many others. * - * Copyright (C) 2018-2024 ARM Ltd. + * Copyright (C) 2018-2025 ARM Ltd. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -24,6 +24,7 @@ #include <linux/io.h> #include <linux/io-64-nonatomic-hi-lo.h> #include <linux/kernel.h> +#include <linux/kmod.h> #include <linux/ktime.h> #include <linux/hashtable.h> #include <linux/list.h> @@ -37,12 +38,15 @@ #include "common.h" #include "notify.h" +#include "quirks.h" #include "raw_mode.h" #define CREATE_TRACE_POINTS #include <trace/events/scmi.h> +#define SCMI_VENDOR_MODULE_ALIAS_FMT "scmi-protocol-0x%02x-%s" + static DEFINE_IDA(scmi_id); static DEFINE_XARRAY(scmi_protocols); @@ -112,22 +116,6 @@ struct scmi_protocol_instance { #define ph_to_pi(h) container_of(h, struct scmi_protocol_instance, ph) /** - * struct scmi_debug_info - Debug common info - * @top_dentry: A reference to the top debugfs dentry - * @name: Name of this SCMI instance - * @type: Type of this SCMI instance - * @is_atomic: Flag to state if the transport of this instance is atomic - * @counters: An array of atomic_c's used for tracking statistics (if enabled) - */ -struct scmi_debug_info { - struct dentry *top_dentry; - const char *name; - const char *type; - bool is_atomic; - atomic_t counters[SCMI_DEBUG_COUNTERS_LAST]; -}; - -/** * struct scmi_info - Structure representing a SCMI instance * * @id: A sequence number starting from zero identifying this instance @@ -186,6 +174,7 @@ struct scmi_info { }; #define handle_to_scmi_info(h) container_of(h, struct scmi_info, handle) +#define tx_minfo_to_scmi_info(h) container_of(h, struct scmi_info, tx_minfo) #define bus_nb_to_scmi_info(nb) container_of(nb, struct scmi_info, bus_nb) #define req_nb_to_scmi_info(nb) container_of(nb, struct scmi_info, dev_req_nb) @@ -276,6 +265,44 @@ scmi_vendor_protocol_lookup(int protocol_id, char *vendor_id, } static const struct scmi_protocol * +scmi_vendor_protocol_get(int protocol_id, struct scmi_revision_info *version) +{ + const struct scmi_protocol *proto; + + proto = scmi_vendor_protocol_lookup(protocol_id, version->vendor_id, + version->sub_vendor_id, + version->impl_ver); + if (!proto) { + int ret; + + pr_debug("Looking for '" SCMI_VENDOR_MODULE_ALIAS_FMT "'\n", + protocol_id, version->vendor_id); + + /* Note that vendor_id is mandatory for vendor protocols */ + ret = request_module(SCMI_VENDOR_MODULE_ALIAS_FMT, + protocol_id, version->vendor_id); + if (ret) { + pr_warn("Problem loading module for protocol 0x%x\n", + protocol_id); + return NULL; + } + + /* Lookup again, once modules loaded */ + proto = scmi_vendor_protocol_lookup(protocol_id, + version->vendor_id, + version->sub_vendor_id, + version->impl_ver); + } + + if (proto) + pr_info("Loaded SCMI Vendor Protocol 0x%x - %s %s %X\n", + protocol_id, proto->vendor_id ?: "", + proto->sub_vendor_id ?: "", proto->impl_ver); + + return proto; +} + +static const struct scmi_protocol * scmi_protocol_get(int protocol_id, struct scmi_revision_info *version) { const struct scmi_protocol *proto = NULL; @@ -283,10 +310,8 @@ scmi_protocol_get(int protocol_id, struct scmi_revision_info *version) if (protocol_id < SCMI_PROTOCOL_VENDOR_BASE) proto = xa_load(&scmi_protocols, protocol_id); else - proto = 
scmi_vendor_protocol_lookup(protocol_id, - version->vendor_id, - version->sub_vendor_id, - version->impl_ver); + proto = scmi_vendor_protocol_get(protocol_id, version); + if (!proto || !try_module_get(proto->owner)) { pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id); return NULL; @@ -294,11 +319,6 @@ scmi_protocol_get(int protocol_id, struct scmi_revision_info *version) pr_debug("Found SCMI Protocol 0x%x\n", protocol_id); - if (protocol_id >= SCMI_PROTOCOL_VENDOR_BASE) - pr_info("Loaded SCMI Vendor Protocol 0x%x - %s %s %X\n", - protocol_id, proto->vendor_id ?: "", - proto->sub_vendor_id ?: "", proto->impl_ver); - return proto; } @@ -366,7 +386,9 @@ int scmi_protocol_register(const struct scmi_protocol *proto) return ret; } - pr_debug("Registered SCMI Protocol 0x%x\n", proto->id); + pr_debug("Registered SCMI Protocol 0x%x - %s %s 0x%08X\n", + proto->id, proto->vendor_id, proto->sub_vendor_id, + proto->impl_ver); return 0; } @@ -403,14 +425,8 @@ static void scmi_create_protocol_devices(struct device_node *np, struct scmi_info *info, int prot_id, const char *name) { - struct scmi_device *sdev; - mutex_lock(&info->devreq_mtx); - sdev = scmi_device_create(np, info->dev, prot_id, name); - if (name && !sdev) - dev_err(info->dev, - "failed to create device for protocol 0x%X (%s)\n", - prot_id, name); + scmi_device_create(np, info->dev, prot_id, name); mutex_unlock(&info->devreq_mtx); } @@ -572,9 +588,14 @@ static inline void scmi_xfer_inflight_register_unlocked(struct scmi_xfer *xfer, struct scmi_xfers_info *minfo) { + /* In this context minfo will be tx_minfo due to the xfer pending */ + struct scmi_info *info = tx_minfo_to_scmi_info(minfo); + /* Set in-flight */ set_bit(xfer->hdr.seq, minfo->xfer_alloc_table); hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq); + scmi_inc_count(info->dbg, XFERS_INFLIGHT); + xfer->pending = true; } @@ -776,10 +797,15 @@ __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer) spin_lock_irqsave(&minfo->xfer_lock, flags); if (refcount_dec_and_test(&xfer->users)) { if (xfer->pending) { + struct scmi_info *info = tx_minfo_to_scmi_info(minfo); + scmi_xfer_token_clear(minfo, xfer); hash_del(&xfer->node); xfer->pending = false; + + scmi_dec_count(info->dbg, XFERS_INFLIGHT); } + xfer->flags = 0; hlist_add_head(&xfer->node, &minfo->free_xfers); } spin_unlock_irqrestore(&minfo->xfer_lock, flags); @@ -798,8 +824,6 @@ void scmi_xfer_raw_put(const struct scmi_handle *handle, struct scmi_xfer *xfer) { struct scmi_info *info = handle_to_scmi_info(handle); - xfer->flags &= ~SCMI_XFER_FLAG_IS_RAW; - xfer->flags &= ~SCMI_XFER_FLAG_CHAN_SET; return __scmi_xfer_put(&info->tx_minfo, xfer); } @@ -993,7 +1017,7 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr) spin_unlock_irqrestore(&minfo->xfer_lock, flags); scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNEXPECTED); - scmi_inc_count(info->dbg->counters, ERR_MSG_UNEXPECTED); + scmi_inc_count(info->dbg, ERR_MSG_UNEXPECTED); return xfer; } @@ -1021,7 +1045,7 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr) msg_type, xfer_id, msg_hdr, xfer->state); scmi_bad_message_trace(cinfo, msg_hdr, MSG_INVALID); - scmi_inc_count(info->dbg->counters, ERR_MSG_INVALID); + scmi_inc_count(info->dbg, ERR_MSG_INVALID); /* On error the refcount incremented above has to be dropped */ __scmi_xfer_put(minfo, xfer); @@ -1066,7 +1090,7 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo, PTR_ERR(xfer)); scmi_bad_message_trace(cinfo, msg_hdr, MSG_NOMEM); - 
scmi_inc_count(info->dbg->counters, ERR_MSG_NOMEM); + scmi_inc_count(info->dbg, ERR_MSG_NOMEM); scmi_clear_channel(info, cinfo); return; @@ -1082,7 +1106,7 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo, trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id, xfer->hdr.id, "NOTI", xfer->hdr.seq, xfer->hdr.status, xfer->rx.buf, xfer->rx.len); - scmi_inc_count(info->dbg->counters, NOTIFICATION_OK); + scmi_inc_count(info->dbg, NOTIFICATION_OK); scmi_notify(cinfo->handle, xfer->hdr.protocol_id, xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts); @@ -1142,10 +1166,10 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo, if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) { scmi_clear_channel(info, cinfo); complete(xfer->async_done); - scmi_inc_count(info->dbg->counters, DELAYED_RESPONSE_OK); + scmi_inc_count(info->dbg, DELAYED_RESPONSE_OK); } else { complete(&xfer->done); - scmi_inc_count(info->dbg->counters, RESPONSE_OK); + scmi_inc_count(info->dbg, RESPONSE_OK); } if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) { @@ -1154,7 +1178,8 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo, * RX path since it will be already queued at the end of the TX * poll loop. */ - if (!xfer->hdr.poll_completion) + if (!xfer->hdr.poll_completion || + xfer->hdr.type == MSG_TYPE_DELAYED_RESP) scmi_raw_message_report(info->raw, xfer, SCMI_RAW_REPLY_QUEUE, cinfo->id); @@ -1212,7 +1237,8 @@ static void xfer_put(const struct scmi_protocol_handle *ph, } static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo, - struct scmi_xfer *xfer, ktime_t stop) + struct scmi_xfer *xfer, ktime_t stop, + bool *ooo) { struct scmi_info *info = handle_to_scmi_info(cinfo->handle); @@ -1221,7 +1247,7 @@ static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo, * in case of out-of-order receptions of delayed responses */ return info->desc->ops->poll_done(cinfo, xfer) || - try_wait_for_completion(&xfer->done) || + (*ooo = try_wait_for_completion(&xfer->done)) || ktime_after(ktime_get(), stop); } @@ -1238,20 +1264,22 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc, * itself to support synchronous commands replies. */ if (!desc->sync_cmds_completed_on_ret) { + bool ooo = false; + /* * Poll on xfer using transport provided .poll_done(); * assumes no completion interrupt was available. 
*/ ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms); - spin_until_cond(scmi_xfer_done_no_timeout(cinfo, - xfer, stop)); - if (ktime_after(ktime_get(), stop)) { + spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, + stop, &ooo)); + if (!ooo && !info->desc->ops->poll_done(cinfo, xfer)) { dev_err(dev, "timed out in resp(caller: %pS) - polling\n", (void *)_RET_IP_); ret = -ETIMEDOUT; - scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_POLLED_TIMEOUT); + scmi_inc_count(info->dbg, XFERS_RESPONSE_POLLED_TIMEOUT); } } @@ -1276,7 +1304,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc, "RESP" : "resp", xfer->hdr.seq, xfer->hdr.status, xfer->rx.buf, xfer->rx.len); - scmi_inc_count(info->dbg->counters, RESPONSE_POLLED_OK); + scmi_inc_count(info->dbg, RESPONSE_POLLED_OK); if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) { scmi_raw_message_report(info->raw, xfer, @@ -1291,7 +1319,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc, dev_err(dev, "timed out in resp(caller: %pS)\n", (void *)_RET_IP_); ret = -ETIMEDOUT; - scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_TIMEOUT); + scmi_inc_count(info->dbg, XFERS_RESPONSE_TIMEOUT); } } @@ -1375,13 +1403,13 @@ static int do_xfer(const struct scmi_protocol_handle *ph, !is_transport_polling_capable(info->desc)) { dev_warn_once(dev, "Polling mode is not supported by transport.\n"); - scmi_inc_count(info->dbg->counters, SENT_FAIL_POLLING_UNSUPPORTED); + scmi_inc_count(info->dbg, SENT_FAIL_POLLING_UNSUPPORTED); return -EINVAL; } cinfo = idr_find(&info->tx_idr, pi->proto->id); if (unlikely(!cinfo)) { - scmi_inc_count(info->dbg->counters, SENT_FAIL_CHANNEL_NOT_FOUND); + scmi_inc_count(info->dbg, SENT_FAIL_CHANNEL_NOT_FOUND); return -EINVAL; } /* True ONLY if also supported by transport. */ @@ -1398,7 +1426,8 @@ static int do_xfer(const struct scmi_protocol_handle *ph, trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id, xfer->hdr.protocol_id, xfer->hdr.seq, - xfer->hdr.poll_completion); + xfer->hdr.poll_completion, + scmi_inflight_count(&info->handle)); /* Clear any stale status */ xfer->hdr.status = SCMI_SUCCESS; @@ -1415,26 +1444,27 @@ static int do_xfer(const struct scmi_protocol_handle *ph, ret = info->desc->ops->send_message(cinfo, xfer); if (ret < 0) { dev_dbg(dev, "Failed to send message %d\n", ret); - scmi_inc_count(info->dbg->counters, SENT_FAIL); + scmi_inc_count(info->dbg, SENT_FAIL); return ret; } trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id, xfer->hdr.id, "CMND", xfer->hdr.seq, xfer->hdr.status, xfer->tx.buf, xfer->tx.len); - scmi_inc_count(info->dbg->counters, SENT_OK); + scmi_inc_count(info->dbg, SENT_OK); ret = scmi_wait_for_message_response(cinfo, xfer); if (!ret && xfer->hdr.status) { ret = scmi_to_linux_errno(xfer->hdr.status); - scmi_inc_count(info->dbg->counters, ERR_PROTOCOL); + scmi_inc_count(info->dbg, ERR_PROTOCOL); } if (info->desc->ops->mark_txdone) info->desc->ops->mark_txdone(cinfo, ret, xfer); trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id, - xfer->hdr.protocol_id, xfer->hdr.seq, ret); + xfer->hdr.protocol_id, xfer->hdr.seq, ret, + scmi_inflight_count(&info->handle)); return ret; } @@ -1699,6 +1729,39 @@ static int scmi_common_get_max_msg_size(const struct scmi_protocol_handle *ph) } /** + * scmi_protocol_msg_check - Check protocol message attributes + * + * @ph: A reference to the protocol handle. + * @message_id: The ID of the message to check. 
+ * @attributes: A parameter to optionally return the retrieved message + * attributes, in case of Success. + * + * An helper to check protocol message attributes for a specific protocol + * and message pair. + * + * Return: 0 on SUCCESS + */ +static int scmi_protocol_msg_check(const struct scmi_protocol_handle *ph, + u32 message_id, u32 *attributes) +{ + int ret; + struct scmi_xfer *t; + + ret = xfer_get_init(ph, PROTOCOL_MESSAGE_ATTRIBUTES, + sizeof(__le32), 0, &t); + if (ret) + return ret; + + put_unaligned_le32(message_id, t->tx.buf); + ret = do_xfer(ph, t); + if (!ret && attributes) + *attributes = get_unaligned_le32(t->rx.buf); + xfer_put(ph, t); + + return ret; +} + +/** * struct scmi_iterator - Iterator descriptor * @msg: A reference to the message TX buffer; filled by @prepare_message with * a proper custom command payload for each multi-part command request. @@ -1830,6 +1893,13 @@ struct scmi_msg_resp_desc_fc { __le32 db_preserve_hmask; }; +#define QUIRK_PERF_FC_FORCE \ + ({ \ + if (pi->proto->id == SCMI_PROTOCOL_PERF && \ + message_id == 0x8 /* PERF_LEVEL_GET */) \ + attributes |= BIT(0); \ + }) + static void scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph, u8 describe_id, u32 message_id, u32 valid_size, @@ -1839,6 +1909,7 @@ scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph, int ret; u32 flags; u64 phys_addr; + u32 attributes; u8 size; void __iomem *addr; struct scmi_xfer *t; @@ -1847,6 +1918,16 @@ scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph, struct scmi_msg_resp_desc_fc *resp; const struct scmi_protocol_instance *pi = ph_to_pi(ph); + /* Check if the MSG_ID supports fastchannel */ + ret = scmi_protocol_msg_check(ph, message_id, &attributes); + SCMI_QUIRK(perf_level_get_fc_force, QUIRK_PERF_FC_FORCE); + if (ret || !MSG_SUPPORTS_FASTCHANNEL(attributes)) { + dev_dbg(ph->dev, + "Skip FC init for 0x%02X/%d domain:%d - ret:%d\n", + pi->proto->id, message_id, domain, ret); + return; + } + if (!p_addr) { ret = -EINVAL; goto err_out; @@ -1961,50 +2042,7 @@ static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db) else if (db->width == 4) SCMI_PROTO_FC_RING_DB(32); else /* db->width == 8 */ -#ifdef CONFIG_64BIT SCMI_PROTO_FC_RING_DB(64); -#else - { - u64 val = 0; - - if (db->mask) - val = ioread64_hi_lo(db->addr) & db->mask; - iowrite64_hi_lo(db->set | val, db->addr); - } -#endif -} - -/** - * scmi_protocol_msg_check - Check protocol message attributes - * - * @ph: A reference to the protocol handle. - * @message_id: The ID of the message to check. - * @attributes: A parameter to optionally return the retrieved message - * attributes, in case of Success. - * - * An helper to check protocol message attributes for a specific protocol - * and message pair. 
- * - * Return: 0 on SUCCESS - */ -static int scmi_protocol_msg_check(const struct scmi_protocol_handle *ph, - u32 message_id, u32 *attributes) -{ - int ret; - struct scmi_xfer *t; - - ret = xfer_get_init(ph, PROTOCOL_MESSAGE_ATTRIBUTES, - sizeof(__le32), 0, &t); - if (ret) - return ret; - - put_unaligned_le32(message_id, t->tx.buf); - ret = do_xfer(ph, t); - if (!ret && attributes) - *attributes = get_unaligned_le32(t->rx.buf); - xfer_put(ph, t); - - return ret; } static const struct scmi_proto_helpers_ops helpers_ops = { @@ -2799,9 +2837,8 @@ static int scmi_bus_notifier(struct notifier_block *nb, struct scmi_info *info = bus_nb_to_scmi_info(nb); struct scmi_device *sdev = to_scmi_dev(data); - /* Skip transport devices and devices of different SCMI instances */ - if (!strncmp(sdev->name, "__scmi_transport_device", 23) || - sdev->dev.parent != info->dev) + /* Skip devices of different SCMI instances */ + if (sdev->dev.parent != info->dev) return NOTIFY_DONE; switch (action) { @@ -2870,6 +2907,7 @@ static const char * const dbg_counter_strs[] = { "err_msg_invalid", "err_msg_nomem", "err_protocol", + "xfers_inflight", }; static ssize_t reset_all_on_write(struct file *filp, const char __user *buf, @@ -2989,9 +3027,6 @@ static int scmi_debugfs_raw_mode_setup(struct scmi_info *info) u8 channels[SCMI_MAX_CHANNELS] = {}; DECLARE_BITMAP(protos, SCMI_MAX_CHANNELS) = {}; - if (!info->dbg) - return -EINVAL; - /* Enumerate all channels to collect their ids */ idr_for_each_entry(&info->tx_idr, cinfo, id) { /* @@ -3028,7 +3063,7 @@ static const struct scmi_desc *scmi_transport_setup(struct device *dev) int ret; trans = dev_get_platdata(dev); - if (!trans || !trans->desc || !trans->supplier || !trans->core_ops) + if (!trans || !trans->supplier || !trans->core_ops) return NULL; if (!device_link_add(dev, trans->supplier, DL_FLAG_AUTOREMOVE_CONSUMER)) { @@ -3043,33 +3078,45 @@ static const struct scmi_desc *scmi_transport_setup(struct device *dev) dev_info(dev, "Using %s\n", dev_driver_string(trans->supplier)); ret = of_property_read_u32(dev->of_node, "arm,max-rx-timeout-ms", - &trans->desc->max_rx_timeout_ms); + &trans->desc.max_rx_timeout_ms); if (ret && ret != -EINVAL) dev_err(dev, "Malformed arm,max-rx-timeout-ms DT property.\n"); ret = of_property_read_u32(dev->of_node, "arm,max-msg-size", - &trans->desc->max_msg_size); + &trans->desc.max_msg_size); if (ret && ret != -EINVAL) dev_err(dev, "Malformed arm,max-msg-size DT property.\n"); ret = of_property_read_u32(dev->of_node, "arm,max-msg", - &trans->desc->max_msg); + &trans->desc.max_msg); if (ret && ret != -EINVAL) dev_err(dev, "Malformed arm,max-msg DT property.\n"); dev_info(dev, "SCMI max-rx-timeout: %dms / max-msg-size: %dbytes / max-msg: %d\n", - trans->desc->max_rx_timeout_ms, trans->desc->max_msg_size, - trans->desc->max_msg); + trans->desc.max_rx_timeout_ms, trans->desc.max_msg_size, + trans->desc.max_msg); /* System wide atomic threshold for atomic ops .. 
if any */ if (!of_property_read_u32(dev->of_node, "atomic-threshold-us", - &trans->desc->atomic_threshold)) + &trans->desc.atomic_threshold)) dev_info(dev, "SCMI System wide atomic threshold set to %u us\n", - trans->desc->atomic_threshold); + trans->desc.atomic_threshold); - return trans->desc; + return &trans->desc; +} + +static void scmi_enable_matching_quirks(struct scmi_info *info) +{ + struct scmi_revision_info *rev = &info->version; + + dev_dbg(info->dev, "Looking for quirks matching: %s/%s/0x%08X\n", + rev->vendor_id, rev->sub_vendor_id, rev->impl_ver); + + /* Enable applicable quirks */ + scmi_quirks_enable(info->dev, rev->vendor_id, + rev->sub_vendor_id, rev->impl_ver); } static int scmi_probe(struct platform_device *pdev) @@ -3151,7 +3198,7 @@ static int scmi_probe(struct platform_device *pdev) if (!info->dbg) dev_warn(dev, "Failed to setup SCMI debugfs.\n"); - if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) { + if (info->dbg && IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) { ret = scmi_debugfs_raw_mode_setup(info); if (!coex) { if (ret) @@ -3193,6 +3240,8 @@ static int scmi_probe(struct platform_device *pdev) list_add_tail(&info->node, &scmi_list); mutex_unlock(&scmi_list_mutex); + scmi_enable_matching_quirks(info); + for_each_available_child_of_node(np, child) { u32 prot_id; @@ -3333,7 +3382,7 @@ static struct platform_driver scmi_driver = { .dev_groups = versions_groups, }, .probe = scmi_probe, - .remove_new = scmi_remove, + .remove = scmi_remove, }; static struct dentry *scmi_debugfs_init(void) @@ -3349,8 +3398,24 @@ static struct dentry *scmi_debugfs_init(void) return d; } +int scmi_inflight_count(const struct scmi_handle *handle) +{ + if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) { + struct scmi_info *info = handle_to_scmi_info(handle); + + if (!info->dbg) + return 0; + + return atomic_read(&info->dbg->counters[XFERS_INFLIGHT]); + } else { + return 0; + } +} + static int __init scmi_driver_init(void) { + scmi_quirks_initialize(); + /* Bail out if no SCMI transport was configured */ if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT))) return -EINVAL; |
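
The new scmi_vendor_protocol_get() fallback calls request_module() with the SCMI_VENDOR_MODULE_ALIAS_FMT alias ("scmi-protocol-0x%02x-%s"), built from the protocol ID and the vendor string reported by the platform firmware. A vendor protocol packaged as a module therefore only has to advertise a matching MODULE_ALIAS to be autoloaded on demand. The sketch below illustrates that under stated assumptions: the 0x80 protocol ID, the "VENDORX"/"SUBVENDX" strings and the empty instance_init() body are placeholders, and the scmi_protocol field names follow the driver's internal protocols.h; only scmi_protocol_register()/scmi_protocol_unregister() and the alias format are taken from this diff.

#include <linux/module.h>

#include "protocols.h"  /* struct scmi_protocol, scmi_protocol_{register,unregister}() */

#define SCMI_PROTOCOL_VENDOR_X  0x80    /* example vendor protocol ID (placeholder) */

static int vendor_x_protocol_init(const struct scmi_protocol_handle *ph)
{
        /* Negotiate the protocol version, allocate private data, ... */
        return 0;
}

static const struct scmi_protocol vendor_x_protocol = {
        .id             = SCMI_PROTOCOL_VENDOR_X,
        .owner          = THIS_MODULE,
        .instance_init  = vendor_x_protocol_init,
        /* Real protocols also set .ops, .events and .supported_version. */
        .vendor_id      = "VENDORX",    /* must match the firmware-reported vendor */
        .sub_vendor_id  = "SUBVENDX",
        .impl_ver       = 0x00010000,
};

static int __init vendor_x_init(void)
{
        return scmi_protocol_register(&vendor_x_protocol);
}
module_init(vendor_x_init);

static void __exit vendor_x_exit(void)
{
        scmi_protocol_unregister(&vendor_x_protocol);
}
module_exit(vendor_x_exit);

/*
 * The alias must match SCMI_VENDOR_MODULE_ALIAS_FMT so that the
 * request_module() fallback in scmi_vendor_protocol_get() can resolve
 * this module from the protocol ID and the vendor string.
 */
MODULE_ALIAS("scmi-protocol-0x80-VENDORX");
MODULE_LICENSE("GPL");

With that alias in place, a lookup miss for protocol 0x80 on a platform reporting vendor "VENDORX" triggers the module load, after which the second scmi_vendor_protocol_lookup() pass succeeds.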
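
scmi_enable_matching_quirks() and the SCMI_QUIRK(perf_level_get_fc_force, QUIRK_PERF_FC_FORCE) hook come from the new quirks framework (quirks.h, now included at the top of the file), which is not part of this diff. The following is only a plausible sketch of how such a hook could be wired with static keys: the quirk key name mirrors the one used above, while the "SomeVendor" match and the version threshold are invented for illustration; the real drivers/firmware/arm_scmi/quirks.{h,c} may differ in detail.

#include <linux/device.h>
#include <linux/jump_label.h>
#include <linux/string.h>
#include <linux/types.h>

/* One static key per known quirk, disabled by default. */
DEFINE_STATIC_KEY_FALSE(scmi_quirk_perf_level_get_fc_force);

/*
 * Execute the quirk body only when the matching static key has been
 * enabled for this platform's firmware.
 */
#define SCMI_QUIRK(_qn, _blk)                                           \
        do {                                                            \
                if (static_branch_unlikely(&scmi_quirk_ ## _qn))        \
                        (_blk);                                         \
        } while (0)

/*
 * Called from scmi_enable_matching_quirks() once the firmware vendor,
 * sub-vendor and implementation version are known.
 */
void scmi_quirks_enable(struct device *dev, const char *vendor,
                        const char *sub_vendor, u32 impl_ver)
{
        /* Placeholder match; real code would also check sub_vendor/version ranges. */
        if (vendor && !strcmp(vendor, "SomeVendor") && impl_ver < 0x00020000)
                static_branch_enable(&scmi_quirk_perf_level_get_fc_force);
}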
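
QUIRK_PERF_FC_FORCE forces BIT(0) in the attributes returned by scmi_protocol_msg_check() for PERF_LEVEL_GET, which implies that the MSG_SUPPORTS_FASTCHANNEL() test used in scmi_common_fastchannel_init() checks bit[0] of the PROTOCOL_MESSAGE_ATTRIBUTES response. A definition along these lines is assumed; the real helper is not shown in this diff.

#include <linux/bits.h>

/* Assumed helper: bit[0] of PROTOCOL_MESSAGE_ATTRIBUTES flags fastchannel support. */
#define MSG_SUPPORTS_FASTCHANNEL(attrs)         ((attrs) & BIT(0))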
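
Lastly, struct scmi_debug_info is deleted from driver.c, the counter helpers now take info->dbg directly (scmi_inc_count(info->dbg, ...), plus the new scmi_dec_count() calls around the xfers_inflight counter), so the struct and helpers are presumably shared through common.h in roughly the shape below. The struct body is copied from the hunk that removes it here; the NULL check and the IS_ENABLED() guard in the helpers are assumptions inferred from scmi_inflight_count() at the end of the patch.

#include <linux/atomic.h>

/* Moved out of driver.c by this patch; SCMI_DEBUG_COUNTERS_LAST and the
 * counter IDs (XFERS_INFLIGHT, SENT_OK, ...) come from common.h. */
struct scmi_debug_info {
        struct dentry *top_dentry;
        const char *name;
        const char *type;
        bool is_atomic;
        atomic_t counters[SCMI_DEBUG_COUNTERS_LAST];
};

static inline void scmi_inc_count(struct scmi_debug_info *dbg, int stat)
{
        if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS) && dbg)
                atomic_inc(&dbg->counters[stat]);
}

static inline void scmi_dec_count(struct scmi_debug_info *dbg, int stat)
{
        if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS) && dbg)
                atomic_dec(&dbg->counters[stat]);
}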
