Diffstat (limited to 'drivers/firmware')
-rw-r--r--   drivers/firmware/imx/imx-scu-irq.c        |  32
-rw-r--r--   drivers/firmware/imx/imx-scu.c            |  11
-rw-r--r--   drivers/firmware/stratix10-rsu.c          | 279
-rw-r--r--   drivers/firmware/stratix10-svc.c          | 761
-rw-r--r--   drivers/firmware/ti_sci.c                 | 155
-rw-r--r--   drivers/firmware/ti_sci.h                 |   7
-rw-r--r--   drivers/firmware/xilinx/Makefile          |   2
-rw-r--r--   drivers/firmware/xilinx/zynqmp-debug.c    |  13
-rw-r--r--   drivers/firmware/xilinx/zynqmp-ufs.c      | 118
-rw-r--r--   drivers/firmware/xilinx/zynqmp.c          | 160
10 files changed, 1322 insertions, 216 deletions
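For context before the diff itself: the stratix10-svc.c changes below export a new asynchronous request API (stratix10_svc_add_async_client(), stratix10_svc_async_send(), stratix10_svc_async_poll() and stratix10_svc_async_done()), and stratix10-rsu.c is converted to use it via its new rsu_send_async_msg() helper. The following is only a minimal sketch of that send/poll/done calling pattern as it appears in the patch; the function names example_async_request()/example_async_cb(), the 1 s wait and the 1 ms poll delay are illustrative assumptions, and the channel is assumed to have been obtained with stratix10_svc_request_channel_byname() and registered with stratix10_svc_add_async_client() beforehand.

/*
 * Sketch of the async calling pattern introduced in this series,
 * loosely mirroring rsu_send_async_msg() in stratix10-rsu.c.
 * Names and timeouts here are illustrative, not from the patch.
 */
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/firmware/intel/stratix10-svc-client.h>

static void example_async_cb(void *ptr)
{
	complete(ptr);			/* wake the waiter when the SDM responds */
}

static int example_async_request(struct stratix10_svc_chan *chan)
{
	struct stratix10_svc_client_msg msg = { .command = COMMAND_RSU_STATUS };
	struct stratix10_svc_cb_data data = {0};
	DECLARE_COMPLETION_ONSTACK(done);
	void *handle = NULL;
	int ret;

	/* Queue the request; the handle identifies this transaction. */
	ret = stratix10_svc_async_send(chan, &msg, &handle,
				       example_async_cb, &done);
	if (ret)
		return ret;

	/* Wait for the completion callback, then poll for the result. */
	wait_for_completion_timeout(&done, msecs_to_jiffies(1000));

	do {
		ret = stratix10_svc_async_poll(chan, handle, &data);
		if (ret == -EAGAIN)
			msleep(1);	/* still in progress, retry */
	} while (ret == -EAGAIN);

	if (!ret && data.status)
		ret = -EFAULT;		/* SDM reported a mailbox error */

	stratix10_svc_async_done(chan, handle);	/* release the transaction */
	return ret;
}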
diff --git a/drivers/firmware/imx/imx-scu-irq.c b/drivers/firmware/imx/imx-scu-irq.c index 6125cccc9ba7..a68d38f89254 100644 --- a/drivers/firmware/imx/imx-scu-irq.c +++ b/drivers/firmware/imx/imx-scu-irq.c @@ -203,6 +203,18 @@ int imx_scu_enable_general_irq_channel(struct device *dev) struct mbox_chan *ch; int ret = 0, i = 0; + if (!of_parse_phandle_with_args(dev->of_node, "mboxes", + "#mbox-cells", 0, &spec)) { + i = of_alias_get_id(spec.np, "mu"); + of_node_put(spec.np); + } + + /* use mu1 as general mu irq channel if failed */ + if (i < 0) + i = 1; + + mu_resource_id = IMX_SC_R_MU_0A + i; + ret = imx_scu_get_handle(&imx_sc_irq_ipc_handle); if (ret) return ret; @@ -214,27 +226,16 @@ int imx_scu_enable_general_irq_channel(struct device *dev) cl->dev = dev; cl->rx_callback = imx_scu_irq_callback; + INIT_WORK(&imx_sc_irq_work, imx_scu_irq_work_handler); + /* SCU general IRQ uses general interrupt channel 3 */ ch = mbox_request_channel_byname(cl, "gip3"); if (IS_ERR(ch)) { ret = PTR_ERR(ch); dev_err(dev, "failed to request mbox chan gip3, ret %d\n", ret); - devm_kfree(dev, cl); - return ret; + goto free_cl; } - INIT_WORK(&imx_sc_irq_work, imx_scu_irq_work_handler); - - if (!of_parse_phandle_with_args(dev->of_node, "mboxes", - "#mbox-cells", 0, &spec)) - i = of_alias_get_id(spec.np, "mu"); - - /* use mu1 as general mu irq channel if failed */ - if (i < 0) - i = 1; - - mu_resource_id = IMX_SC_R_MU_0A + i; - /* Create directory under /sysfs/firmware */ wakeup_obj = kobject_create_and_add("scu_wakeup_source", firmware_kobj); if (!wakeup_obj) { @@ -253,7 +254,8 @@ int imx_scu_enable_general_irq_channel(struct device *dev) free_ch: mbox_free_channel(ch); +free_cl: + devm_kfree(dev, cl); return ret; } -EXPORT_SYMBOL(imx_scu_enable_general_irq_channel); diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c index 8c28e25ddc8a..67b267a7408a 100644 --- a/drivers/firmware/imx/imx-scu.c +++ b/drivers/firmware/imx/imx-scu.c @@ -73,9 +73,9 @@ static int imx_sc_linux_errmap[IMX_SC_ERR_LAST] = { -EACCES, /* IMX_SC_ERR_NOACCESS */ -EACCES, /* IMX_SC_ERR_LOCKED */ -ERANGE, /* IMX_SC_ERR_UNAVAILABLE */ - -EEXIST, /* IMX_SC_ERR_NOTFOUND */ - -EPERM, /* IMX_SC_ERR_NOPOWER */ - -EPIPE, /* IMX_SC_ERR_IPC */ + -ENOENT, /* IMX_SC_ERR_NOTFOUND */ + -ENODEV, /* IMX_SC_ERR_NOPOWER */ + -ECOMM, /* IMX_SC_ERR_IPC */ -EBUSY, /* IMX_SC_ERR_BUSY */ -EIO, /* IMX_SC_ERR_FAIL */ }; @@ -324,7 +324,9 @@ static int imx_scu_probe(struct platform_device *pdev) } sc_ipc->dev = dev; - mutex_init(&sc_ipc->lock); + ret = devm_mutex_init(dev, &sc_ipc->lock); + if (ret) + return ret; init_completion(&sc_ipc->done); imx_sc_ipc_handle = sc_ipc; @@ -352,6 +354,7 @@ static struct platform_driver imx_scu_driver = { .driver = { .name = "imx-scu", .of_match_table = imx_scu_match, + .suppress_bind_attrs = true, }, .probe = imx_scu_probe, }; diff --git a/drivers/firmware/stratix10-rsu.c b/drivers/firmware/stratix10-rsu.c index 1ea39a0a76c7..41da07c445a6 100644 --- a/drivers/firmware/stratix10-rsu.c +++ b/drivers/firmware/stratix10-rsu.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2018-2019, Intel Corporation + * Copyright (C) 2025, Altera Corporation */ #include <linux/arm-smccc.h> @@ -14,11 +15,9 @@ #include <linux/firmware/intel/stratix10-svc-client.h> #include <linux/string.h> #include <linux/sysfs.h> +#include <linux/delay.h> -#define RSU_STATE_MASK GENMASK_ULL(31, 0) -#define RSU_VERSION_MASK GENMASK_ULL(63, 32) -#define RSU_ERROR_LOCATION_MASK GENMASK_ULL(31, 0) -#define 
RSU_ERROR_DETAIL_MASK GENMASK_ULL(63, 32) +#define RSU_ERASE_SIZE_MASK GENMASK_ULL(63, 32) #define RSU_DCMF0_MASK GENMASK_ULL(31, 0) #define RSU_DCMF1_MASK GENMASK_ULL(63, 32) #define RSU_DCMF2_MASK GENMASK_ULL(31, 0) @@ -35,7 +34,8 @@ #define INVALID_DCMF_STATUS 0xFFFFFFFF #define INVALID_SPT_ADDRESS 0x0 -#define RSU_GET_SPT_CMD 0x5A +#define RSU_RETRY_SLEEP_MS (1U) +#define RSU_ASYNC_MSG_RETRY (3U) #define RSU_GET_SPT_RESP_LEN (4 * sizeof(unsigned int)) typedef void (*rsu_callback)(struct stratix10_svc_client *client, @@ -64,7 +64,6 @@ typedef void (*rsu_callback)(struct stratix10_svc_client *client, * @max_retry: the preset max retry value * @spt0_address: address of spt0 * @spt1_address: address of spt1 - * @get_spt_response_buf: response from sdm for get_spt command */ struct stratix10_rsu_priv { struct stratix10_svc_chan *chan; @@ -99,47 +98,32 @@ struct stratix10_rsu_priv { unsigned long spt0_address; unsigned long spt1_address; - - unsigned int *get_spt_response_buf; }; +typedef void (*rsu_async_callback)(struct device *dev, + struct stratix10_rsu_priv *priv, struct stratix10_svc_cb_data *data); + /** - * rsu_status_callback() - Status callback from Intel Service Layer - * @client: pointer to service client + * rsu_async_status_callback() - Status callback from rsu_async_send() + * @dev: pointer to device object + * @priv: pointer to priv object * @data: pointer to callback data structure * - * Callback from Intel service layer for RSU status request. Status is - * only updated after a system reboot, so a get updated status call is - * made during driver probe. + * Callback from rsu_async_send() to get the system rsu error status. */ -static void rsu_status_callback(struct stratix10_svc_client *client, - struct stratix10_svc_cb_data *data) +static void rsu_async_status_callback(struct device *dev, + struct stratix10_rsu_priv *priv, + struct stratix10_svc_cb_data *data) { - struct stratix10_rsu_priv *priv = client->priv; - struct arm_smccc_res *res = (struct arm_smccc_res *)data->kaddr1; - - if (data->status == BIT(SVC_STATUS_OK)) { - priv->status.version = FIELD_GET(RSU_VERSION_MASK, - res->a2); - priv->status.state = FIELD_GET(RSU_STATE_MASK, res->a2); - priv->status.fail_image = res->a1; - priv->status.current_image = res->a0; - priv->status.error_location = - FIELD_GET(RSU_ERROR_LOCATION_MASK, res->a3); - priv->status.error_details = - FIELD_GET(RSU_ERROR_DETAIL_MASK, res->a3); - } else { - dev_err(client->dev, "COMMAND_RSU_STATUS returned 0x%lX\n", - res->a0); - priv->status.version = 0; - priv->status.state = 0; - priv->status.fail_image = 0; - priv->status.current_image = 0; - priv->status.error_location = 0; - priv->status.error_details = 0; - } - - complete(&priv->completion); + struct arm_smccc_1_2_regs *res = (struct arm_smccc_1_2_regs *)data->kaddr1; + + priv->status.current_image = res->a2; + priv->status.fail_image = res->a3; + priv->status.state = res->a4; + priv->status.version = res->a5; + priv->status.error_location = res->a7; + priv->status.error_details = res->a8; + priv->retry_counter = res->a9; } /** @@ -163,32 +147,6 @@ static void rsu_command_callback(struct stratix10_svc_client *client, complete(&priv->completion); } -/** - * rsu_retry_callback() - Callback from Intel service layer for getting - * the current image's retry counter from the firmware - * @client: pointer to client - * @data: pointer to callback data structure - * - * Callback from Intel service layer for retry counter, which is used by - * user to know how many times the images is still 
allowed to reload - * itself before giving up and starting RSU fail-over flow. - */ -static void rsu_retry_callback(struct stratix10_svc_client *client, - struct stratix10_svc_cb_data *data) -{ - struct stratix10_rsu_priv *priv = client->priv; - unsigned int *counter = (unsigned int *)data->kaddr1; - - if (data->status == BIT(SVC_STATUS_OK)) - priv->retry_counter = *counter; - else if (data->status == BIT(SVC_STATUS_NO_SUPPORT)) - dev_warn(client->dev, "Secure FW doesn't support retry\n"); - else - dev_err(client->dev, "Failed to get retry counter %lu\n", - BIT(data->status)); - - complete(&priv->completion); -} /** * rsu_max_retry_callback() - Callback from Intel service layer for getting @@ -270,34 +228,19 @@ static void rsu_dcmf_status_callback(struct stratix10_svc_client *client, complete(&priv->completion); } -static void rsu_get_spt_callback(struct stratix10_svc_client *client, - struct stratix10_svc_cb_data *data) +/** + * rsu_async_get_spt_table_callback() - Callback to be used by the rsu_async_send() + * to retrieve the SPT table information. + * @dev: pointer to device object + * @priv: pointer to priv object + * @data: pointer to callback data structure + */ +static void rsu_async_get_spt_table_callback(struct device *dev, + struct stratix10_rsu_priv *priv, + struct stratix10_svc_cb_data *data) { - struct stratix10_rsu_priv *priv = client->priv; - unsigned long *mbox_err = (unsigned long *)data->kaddr1; - unsigned long *resp_len = (unsigned long *)data->kaddr2; - - if (data->status != BIT(SVC_STATUS_OK) || (*mbox_err) || - (*resp_len != RSU_GET_SPT_RESP_LEN)) - goto error; - - priv->spt0_address = priv->get_spt_response_buf[0]; - priv->spt0_address <<= 32; - priv->spt0_address |= priv->get_spt_response_buf[1]; - - priv->spt1_address = priv->get_spt_response_buf[2]; - priv->spt1_address <<= 32; - priv->spt1_address |= priv->get_spt_response_buf[3]; - - goto complete; - -error: - dev_err(client->dev, "failed to get SPTs\n"); - -complete: - stratix10_svc_free_memory(priv->chan, priv->get_spt_response_buf); - priv->get_spt_response_buf = NULL; - complete(&priv->completion); + priv->spt0_address = *((unsigned long *)data->kaddr1); + priv->spt1_address = *((unsigned long *)data->kaddr2); } /** @@ -329,14 +272,6 @@ static int rsu_send_msg(struct stratix10_rsu_priv *priv, if (arg) msg.arg[0] = arg; - if (command == COMMAND_MBOX_SEND_CMD) { - msg.arg[1] = 0; - msg.payload = NULL; - msg.payload_length = 0; - msg.payload_output = priv->get_spt_response_buf; - msg.payload_length_output = RSU_GET_SPT_RESP_LEN; - } - ret = stratix10_svc_send(priv->chan, &msg); if (ret < 0) goto status_done; @@ -362,6 +297,95 @@ status_done: return ret; } +/** + * soc64_async_callback() - Callback from Intel service layer for async requests + * @ptr: pointer to the completion object + */ +static void soc64_async_callback(void *ptr) +{ + if (ptr) + complete(ptr); +} + +/** + * rsu_send_async_msg() - send an async message to Intel service layer + * @dev: pointer to device object + * @priv: pointer to rsu private data + * @command: RSU status or update command + * @arg: the request argument, notify status + * @callback: function pointer for the callback (status or update) + */ +static int rsu_send_async_msg(struct device *dev, struct stratix10_rsu_priv *priv, + enum stratix10_svc_command_code command, + unsigned long arg, + rsu_async_callback callback) +{ + struct stratix10_svc_client_msg msg = {0}; + struct stratix10_svc_cb_data data = {0}; + struct completion completion; + int status, index, ret; + void *handle 
= NULL; + + msg.command = command; + msg.arg[0] = arg; + + init_completion(&completion); + + for (index = 0; index < RSU_ASYNC_MSG_RETRY; index++) { + status = stratix10_svc_async_send(priv->chan, &msg, + &handle, soc64_async_callback, + &completion); + if (status == 0) + break; + dev_warn(dev, "Failed to send async message\n"); + msleep(RSU_RETRY_SLEEP_MS); + } + + if (status && !handle) { + dev_err(dev, "Failed to send async message\n"); + return -ETIMEDOUT; + } + + ret = wait_for_completion_io_timeout(&completion, RSU_TIMEOUT); + if (ret > 0) + dev_dbg(dev, "Received async interrupt\n"); + else if (ret == 0) + dev_dbg(dev, "Timeout occurred. Trying to poll the response\n"); + + for (index = 0; index < RSU_ASYNC_MSG_RETRY; index++) { + status = stratix10_svc_async_poll(priv->chan, handle, &data); + if (status == -EAGAIN) { + dev_dbg(dev, "Async message is still in progress\n"); + } else if (status < 0) { + dev_alert(dev, "Failed to poll async message\n"); + ret = -ETIMEDOUT; + } else if (status == 0) { + ret = 0; + break; + } + msleep(RSU_RETRY_SLEEP_MS); + } + + if (ret) { + dev_err(dev, "Failed to get async response\n"); + goto status_done; + } + + if (data.status == 0) { + ret = 0; + if (callback) + callback(dev, priv, &data); + } else { + dev_err(dev, "%s returned 0x%x from SDM\n", __func__, + data.status); + ret = -EFAULT; + } + +status_done: + stratix10_svc_async_done(priv->chan, handle); + return ret; +} + /* * This driver exposes some optional features of the Intel Stratix 10 SoC FPGA. * The sysfs interfaces exposed here are FPGA Remote System Update (RSU) @@ -454,8 +478,7 @@ static ssize_t max_retry_show(struct device *dev, if (!priv) return -ENODEV; - return scnprintf(buf, sizeof(priv->max_retry), - "0x%08x\n", priv->max_retry); + return sysfs_emit(buf, "0x%08x\n", priv->max_retry); } static ssize_t dcmf0_show(struct device *dev, @@ -597,27 +620,20 @@ static ssize_t notify_store(struct device *dev, if (ret) return ret; - ret = rsu_send_msg(priv, COMMAND_RSU_NOTIFY, - status, rsu_command_callback); + ret = rsu_send_async_msg(dev, priv, COMMAND_RSU_NOTIFY, status, NULL); if (ret) { dev_err(dev, "Error, RSU notify returned %i\n", ret); return ret; } /* to get the updated state */ - ret = rsu_send_msg(priv, COMMAND_RSU_STATUS, - 0, rsu_status_callback); + ret = rsu_send_async_msg(dev, priv, COMMAND_RSU_STATUS, 0, + rsu_async_status_callback); if (ret) { dev_err(dev, "Error, getting RSU status %i\n", ret); return ret; } - ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0, rsu_retry_callback); - if (ret) { - dev_err(dev, "Error, getting RSU retry %i\n", ret); - return ret; - } - return count; } @@ -632,7 +648,7 @@ static ssize_t spt0_address_show(struct device *dev, if (priv->spt0_address == INVALID_SPT_ADDRESS) return -EIO; - return scnprintf(buf, PAGE_SIZE, "0x%08lx\n", priv->spt0_address); + return sysfs_emit(buf, "0x%08lx\n", priv->spt0_address); } static ssize_t spt1_address_show(struct device *dev, @@ -646,7 +662,7 @@ static ssize_t spt1_address_show(struct device *dev, if (priv->spt1_address == INVALID_SPT_ADDRESS) return -EIO; - return scnprintf(buf, PAGE_SIZE, "0x%08lx\n", priv->spt1_address); + return sysfs_emit(buf, "0x%08lx\n", priv->spt1_address); } static DEVICE_ATTR_RO(current_image); @@ -737,12 +753,19 @@ static int stratix10_rsu_probe(struct platform_device *pdev) return PTR_ERR(priv->chan); } + ret = stratix10_svc_add_async_client(priv->chan, false); + if (ret) { + dev_err(dev, "failed to add async client\n"); + stratix10_svc_free_channel(priv->chan); + return ret; + } 
+ init_completion(&priv->completion); platform_set_drvdata(pdev, priv); /* get the initial state from firmware */ - ret = rsu_send_msg(priv, COMMAND_RSU_STATUS, - 0, rsu_status_callback); + ret = rsu_send_async_msg(dev, priv, COMMAND_RSU_STATUS, 0, + rsu_async_status_callback); if (ret) { dev_err(dev, "Error, getting RSU status %i\n", ret); stratix10_svc_free_channel(priv->chan); @@ -763,12 +786,6 @@ static int stratix10_rsu_probe(struct platform_device *pdev) stratix10_svc_free_channel(priv->chan); } - ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0, rsu_retry_callback); - if (ret) { - dev_err(dev, "Error, getting RSU retry %i\n", ret); - stratix10_svc_free_channel(priv->chan); - } - ret = rsu_send_msg(priv, COMMAND_RSU_MAX_RETRY, 0, rsu_max_retry_callback); if (ret) { @@ -776,18 +793,12 @@ static int stratix10_rsu_probe(struct platform_device *pdev) stratix10_svc_free_channel(priv->chan); } - priv->get_spt_response_buf = - stratix10_svc_allocate_memory(priv->chan, RSU_GET_SPT_RESP_LEN); - if (IS_ERR(priv->get_spt_response_buf)) { - dev_err(dev, "failed to allocate get spt buffer\n"); - } else { - ret = rsu_send_msg(priv, COMMAND_MBOX_SEND_CMD, - RSU_GET_SPT_CMD, rsu_get_spt_callback); - if (ret) { - dev_err(dev, "Error, getting SPT table %i\n", ret); - stratix10_svc_free_channel(priv->chan); - } + ret = rsu_send_async_msg(dev, priv, COMMAND_RSU_GET_SPT_TABLE, 0, + rsu_async_get_spt_table_callback); + if (ret) { + dev_err(dev, "Error, getting SPT table %i\n", ret); + stratix10_svc_free_channel(priv->chan); } return ret; diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c index 00f58e27f6de..515b948ff320 100644 --- a/drivers/firmware/stratix10-svc.c +++ b/drivers/firmware/stratix10-svc.c @@ -1,11 +1,15 @@ // SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2017-2018, Intel Corporation + * Copyright (C) 2025, Altera Corporation */ +#include <linux/atomic.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/genalloc.h> +#include <linux/hashtable.h> +#include <linux/idr.h> #include <linux/io.h> #include <linux/kfifo.h> #include <linux/kthread.h> @@ -34,7 +38,7 @@ * timeout is set to 30 seconds (30 * 1000) at Intel Stratix10 SoC. */ #define SVC_NUM_DATA_IN_FIFO 32 -#define SVC_NUM_CHANNEL 3 +#define SVC_NUM_CHANNEL 4 #define FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS 200 #define FPGA_CONFIG_STATUS_TIMEOUT_SEC 30 #define BYTE_TO_WORD_SIZE 4 @@ -43,6 +47,55 @@ #define STRATIX10_RSU "stratix10-rsu" #define INTEL_FCS "intel-fcs" +/* Maximum number of SDM client IDs. */ +#define MAX_SDM_CLIENT_IDS 16 +/* Client ID for SIP Service Version 1. */ +#define SIP_SVC_V1_CLIENT_ID 0x1 +/* Maximum number of SDM job IDs. */ +#define MAX_SDM_JOB_IDS 16 +/* Number of bits used for asynchronous transaction hashing. */ +#define ASYNC_TRX_HASH_BITS 3 +/* + * Total number of transaction IDs, which is a combination of + * client ID and job ID. + */ +#define TOTAL_TRANSACTION_IDS \ + (MAX_SDM_CLIENT_IDS * MAX_SDM_JOB_IDS) + +/* Minimum major version of the ATF for Asynchronous transactions. 
*/ +#define ASYNC_ATF_MINIMUM_MAJOR_VERSION 0x3 +/* Minimum minor version of the ATF for Asynchronous transactions.*/ +#define ASYNC_ATF_MINIMUM_MINOR_VERSION 0x0 + +/* Job ID field in the transaction ID */ +#define STRATIX10_JOB_FIELD GENMASK(3, 0) +/* Client ID field in the transaction ID */ +#define STRATIX10_CLIENT_FIELD GENMASK(7, 4) +/* Transaction ID mask for Stratix10 service layer */ +#define STRATIX10_TRANS_ID_FIELD GENMASK(7, 0) + +/* Macro to extract the job ID from a transaction ID. */ +#define STRATIX10_GET_JOBID(transaction_id) \ + (FIELD_GET(STRATIX10_JOB_FIELD, transaction_id)) +/* Macro to set the job ID in a transaction ID. */ +#define STRATIX10_SET_JOBID(jobid) \ + (FIELD_PREP(STRATIX10_JOB_FIELD, jobid)) +/* Macro to set the client ID in a transaction ID. */ +#define STRATIX10_SET_CLIENTID(clientid) \ + (FIELD_PREP(STRATIX10_CLIENT_FIELD, clientid)) +/* Macro to set a transaction ID using a client ID and a job ID. */ +#define STRATIX10_SET_TRANSACTIONID(clientid, jobid) \ + (STRATIX10_SET_CLIENTID(clientid) | STRATIX10_SET_JOBID(jobid)) +/* Macro to set a transaction ID for SIP SMC Async transactions */ +#define STRATIX10_SIP_SMC_SET_TRANSACTIONID_X1(transaction_id) \ + (FIELD_PREP(STRATIX10_TRANS_ID_FIELD, transaction_id)) + +/* 10-bit mask for extracting the SDM status code */ +#define STRATIX10_SDM_STATUS_MASK GENMASK(9, 0) +/* Macro to get the SDM mailbox error status */ +#define STRATIX10_GET_SDM_STATUS_CODE(status) \ + (FIELD_GET(STRATIX10_SDM_STATUS_MASK, status)) + typedef void (svc_invoke_fn)(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, @@ -52,6 +105,7 @@ struct stratix10_svc_chan; /** * struct stratix10_svc - svc private data * @stratix10_svc_rsu: pointer to stratix10 RSU device + * @intel_svc_fcs: pointer to the FCS device */ struct stratix10_svc { struct platform_device *stratix10_svc_rsu; @@ -63,7 +117,7 @@ struct stratix10_svc { * @sync_complete: state for a completion * @addr: physical address of shared memory block * @size: size of shared memory block - * @invoke_fn: function to issue secure monitor or hypervisor call + * @invoke_fn: service clients to handle secure monitor or hypervisor calls * * This struct is used to save physical address and size of shared memory * block. The shared memory blocked is allocated by secure monitor software @@ -122,6 +176,74 @@ struct stratix10_svc_data { }; /** + * struct stratix10_svc_async_handler - Asynchronous handler for Stratix10 + * service layer + * @transaction_id: Unique identifier for the transaction + * @achan: Pointer to the asynchronous channel structure + * @cb_arg: Argument to be passed to the callback function + * @cb: Callback function to be called upon completion + * @msg: Pointer to the client message structure + * @next: Node in the hash list + * @res: Response structure to store result from the secure firmware + * + * This structure is used to handle asynchronous transactions in the + * Stratix10 service layer. It maintains the necessary information + * for processing and completing asynchronous requests. 
+ */ + +struct stratix10_svc_async_handler { + u8 transaction_id; + struct stratix10_async_chan *achan; + void *cb_arg; + async_callback_t cb; + struct stratix10_svc_client_msg *msg; + struct hlist_node next; + struct arm_smccc_1_2_regs res; +}; + +/** + * struct stratix10_async_chan - Structure representing an asynchronous channel + * @async_client_id: Unique client identifier for the asynchronous operation + * @job_id_pool: Pointer to the job ID pool associated with this channel + */ + +struct stratix10_async_chan { + unsigned long async_client_id; + struct ida job_id_pool; +}; + +/** + * struct stratix10_async_ctrl - Control structure for Stratix10 + * asynchronous operations + * @initialized: Flag indicating whether the control structure has + * been initialized + * @invoke_fn: Function pointer for invoking Stratix10 service calls + * to EL3 secure firmware + * @async_id_pool: Pointer to the ID pool used for asynchronous + * operations + * @common_achan_refcount: Atomic reference count for the common + * asynchronous channel usage + * @common_async_chan: Pointer to the common asynchronous channel + * structure + * @trx_list_lock: Spinlock for protecting the transaction list + * operations + * @trx_list: Hash table for managing asynchronous transactions + */ + +struct stratix10_async_ctrl { + bool initialized; + void (*invoke_fn)(struct stratix10_async_ctrl *actrl, + const struct arm_smccc_1_2_regs *args, + struct arm_smccc_1_2_regs *res); + struct ida async_id_pool; + atomic_t common_achan_refcount; + struct stratix10_async_chan *common_async_chan; + /* spinlock to protect trx_list hash table */ + spinlock_t trx_list_lock; + DECLARE_HASHTABLE(trx_list, ASYNC_TRX_HASH_BITS); +}; + +/** * struct stratix10_svc_controller - service controller * @dev: device * @chans: array of service channels @@ -135,6 +257,7 @@ struct stratix10_svc_data { * @svc_fifo_lock: protect access to service message data queue * @invoke_fn: function to issue secure monitor call or hypervisor call * @svc: manages the list of client svc drivers + * @actrl: async control structure * * This struct is used to create communication channels for service clients, to * handle secure monitor or hypervisor call. @@ -152,6 +275,7 @@ struct stratix10_svc_controller { spinlock_t svc_fifo_lock; svc_invoke_fn *invoke_fn; struct stratix10_svc *svc; + struct stratix10_async_ctrl actrl; }; /** @@ -160,20 +284,28 @@ struct stratix10_svc_controller { * @scl: pointer to service client which owns the channel * @name: service client name associated with the channel * @lock: protect access to the channel + * @async_chan: reference to asynchronous channel object for this channel * - * This struct is used by service client to communicate with service layer, each - * service client has its own channel created by service controller. + * This struct is used by service client to communicate with service layer. + * Each service client has its own channel created by service controller. 
*/ struct stratix10_svc_chan { struct stratix10_svc_controller *ctrl; struct stratix10_svc_client *scl; char *name; spinlock_t lock; + struct stratix10_async_chan *async_chan; }; static LIST_HEAD(svc_ctrl); static LIST_HEAD(svc_data_mem); +/* + * svc_mem_lock protects access to the svc_data_mem list for + * concurrent multi-client operations + */ +static DEFINE_MUTEX(svc_mem_lock); + /** * svc_pa_to_va() - translate physical address to virtual address * @addr: to be translated physical address @@ -186,6 +318,7 @@ static void *svc_pa_to_va(unsigned long addr) struct stratix10_svc_data_mem *pmem; pr_debug("claim back P-addr=0x%016x\n", (unsigned int)addr); + guard(mutex)(&svc_mem_lock); list_for_each_entry(pmem, &svc_data_mem, node) if (pmem->paddr == addr) return pmem->vaddr; @@ -343,6 +476,8 @@ static void svc_thread_recv_status_ok(struct stratix10_svc_data *p_data, case COMMAND_RSU_MAX_RETRY: case COMMAND_RSU_DCMF_STATUS: case COMMAND_FIRMWARE_VERSION: + case COMMAND_HWMON_READTEMP: + case COMMAND_HWMON_READVOLT: cb_data->status = BIT(SVC_STATUS_OK); cb_data->kaddr1 = &res.a1; break; @@ -527,7 +662,17 @@ static int svc_normal_to_secure_thread(void *data) a1 = (unsigned long)pdata->paddr; a2 = 0; break; - + /* for HWMON */ + case COMMAND_HWMON_READTEMP: + a0 = INTEL_SIP_SMC_HWMON_READTEMP; + a1 = pdata->arg[0]; + a2 = 0; + break; + case COMMAND_HWMON_READVOLT: + a0 = INTEL_SIP_SMC_HWMON_READVOLT; + a1 = pdata->arg[0]; + a2 = 0; + break; /* for polling */ case COMMAND_POLL_SERVICE_STATUS: a0 = INTEL_SIP_SMC_SERVICE_COMPLETED; @@ -925,6 +1070,591 @@ struct stratix10_svc_chan *stratix10_svc_request_channel_byname( EXPORT_SYMBOL_GPL(stratix10_svc_request_channel_byname); /** + * stratix10_svc_add_async_client - Add an asynchronous client to the + * Stratix10 service channel. + * @chan: Pointer to the Stratix10 service channel structure. + * @use_unique_clientid: Boolean flag indicating whether to use a + * unique client ID. + * + * This function adds an asynchronous client to the specified + * Stratix10 service channel. If the `use_unique_clientid` flag is + * set to true, a unique client ID is allocated for the asynchronous + * channel. Otherwise, a common asynchronous channel is used. + * + * Return: 0 on success, or a negative error code on failure: + * -EINVAL if the channel is NULL or the async controller is + * not initialized. + * -EALREADY if the async channel is already allocated. + * -ENOMEM if memory allocation fails. + * Other negative values if ID allocation fails. 
+ */ +int stratix10_svc_add_async_client(struct stratix10_svc_chan *chan, + bool use_unique_clientid) +{ + struct stratix10_svc_controller *ctrl; + struct stratix10_async_ctrl *actrl; + struct stratix10_async_chan *achan; + int ret = 0; + + if (!chan) + return -EINVAL; + + ctrl = chan->ctrl; + actrl = &ctrl->actrl; + + if (!actrl->initialized) { + dev_err(ctrl->dev, "Async controller not initialized\n"); + return -EINVAL; + } + + if (chan->async_chan) { + dev_err(ctrl->dev, "async channel already allocated\n"); + return -EALREADY; + } + + if (use_unique_clientid && + atomic_read(&actrl->common_achan_refcount) > 0) { + chan->async_chan = actrl->common_async_chan; + atomic_inc(&actrl->common_achan_refcount); + return 0; + } + + achan = kzalloc(sizeof(*achan), GFP_KERNEL); + if (!achan) + return -ENOMEM; + + ida_init(&achan->job_id_pool); + + ret = ida_alloc_max(&actrl->async_id_pool, MAX_SDM_CLIENT_IDS, + GFP_KERNEL); + if (ret < 0) { + dev_err(ctrl->dev, + "Failed to allocate async client id\n"); + ida_destroy(&achan->job_id_pool); + kfree(achan); + return ret; + } + + achan->async_client_id = ret; + chan->async_chan = achan; + + if (use_unique_clientid && + atomic_read(&actrl->common_achan_refcount) == 0) { + actrl->common_async_chan = achan; + atomic_inc(&actrl->common_achan_refcount); + } + + return 0; +} +EXPORT_SYMBOL_GPL(stratix10_svc_add_async_client); + +/** + * stratix10_svc_remove_async_client - Remove an asynchronous client + * from the Stratix10 service + * channel. + * @chan: Pointer to the Stratix10 service channel structure. + * + * This function removes an asynchronous client associated with the + * given service channel. It checks if the channel and the + * asynchronous channel are valid, and then proceeds to decrement + * the reference count for the common asynchronous channel if + * applicable. If the reference count reaches zero, it destroys the + * job ID pool and deallocates the asynchronous client ID. For + * non-common asynchronous channels, it directly destroys the job ID + * pool, deallocates the asynchronous client ID, and frees the + * memory allocated for the asynchronous channel. + * + * Return: 0 on success, -EINVAL if the channel or asynchronous + * channel is invalid. + */ +int stratix10_svc_remove_async_client(struct stratix10_svc_chan *chan) +{ + struct stratix10_svc_controller *ctrl; + struct stratix10_async_ctrl *actrl; + struct stratix10_async_chan *achan; + + if (!chan) + return -EINVAL; + + ctrl = chan->ctrl; + actrl = &ctrl->actrl; + achan = chan->async_chan; + + if (!achan) { + dev_err(ctrl->dev, "async channel not allocated\n"); + return -EINVAL; + } + + if (achan == actrl->common_async_chan) { + atomic_dec(&actrl->common_achan_refcount); + if (atomic_read(&actrl->common_achan_refcount) == 0) { + ida_destroy(&achan->job_id_pool); + ida_free(&actrl->async_id_pool, + achan->async_client_id); + kfree(achan); + actrl->common_async_chan = NULL; + } + } else { + ida_destroy(&achan->job_id_pool); + ida_free(&actrl->async_id_pool, achan->async_client_id); + kfree(achan); + } + chan->async_chan = NULL; + + return 0; +} +EXPORT_SYMBOL_GPL(stratix10_svc_remove_async_client); + +/** + * stratix10_svc_async_send - Send an asynchronous message to the + * Stratix10 service + * @chan: Pointer to the service channel structure + * @msg: Pointer to the message to be sent + * @handler: Pointer to the handler for the asynchronous message + * used by caller for later reference. 
+ * @cb: Callback function to be called upon completion + * @cb_arg: Argument to be passed to the callback function + * + * This function sends an asynchronous message to the SDM mailbox in + * EL3 secure firmware. It performs various checks and setups, + * including allocating a job ID, setting up the transaction ID and + * packaging it to El3 firmware. The function handles different + * commands by setting up the appropriate arguments for the SMC call. + * If the SMC call is successful, the handler is set up and the + * function returns 0. If the SMC call fails, appropriate error + * handling is performed along with cleanup of resources. + * + * Return: 0 on success, -EINVAL for invalid argument, -ENOMEM if + * memory is not available, -EAGAIN if EL3 firmware is busy, -EBADF + * if the message is rejected by EL3 firmware and -EIO on other + * errors from EL3 firmware. + */ +int stratix10_svc_async_send(struct stratix10_svc_chan *chan, void *msg, + void **handler, async_callback_t cb, void *cb_arg) +{ + struct arm_smccc_1_2_regs args = { 0 }, res = { 0 }; + struct stratix10_svc_async_handler *handle = NULL; + struct stratix10_svc_client_msg *p_msg = + (struct stratix10_svc_client_msg *)msg; + struct stratix10_svc_controller *ctrl; + struct stratix10_async_ctrl *actrl; + struct stratix10_async_chan *achan; + int ret = 0; + + if (!chan || !msg || !handler) + return -EINVAL; + + achan = chan->async_chan; + ctrl = chan->ctrl; + actrl = &ctrl->actrl; + + if (!actrl->initialized) { + dev_err(ctrl->dev, "Async controller not initialized\n"); + return -EINVAL; + } + + if (!achan) { + dev_err(ctrl->dev, "Async channel not allocated\n"); + return -EINVAL; + } + + handle = kzalloc(sizeof(*handle), GFP_KERNEL); + if (!handle) + return -ENOMEM; + + ret = ida_alloc_max(&achan->job_id_pool, MAX_SDM_JOB_IDS, + GFP_KERNEL); + if (ret < 0) { + dev_err(ctrl->dev, "Failed to allocate job id\n"); + kfree(handle); + return -ENOMEM; + } + + handle->transaction_id = + STRATIX10_SET_TRANSACTIONID(achan->async_client_id, ret); + handle->cb = cb; + handle->msg = p_msg; + handle->cb_arg = cb_arg; + handle->achan = achan; + + /*set the transaction jobid in args.a1*/ + args.a1 = + STRATIX10_SIP_SMC_SET_TRANSACTIONID_X1(handle->transaction_id); + + switch (p_msg->command) { + case COMMAND_RSU_GET_SPT_TABLE: + args.a0 = INTEL_SIP_SMC_ASYNC_RSU_GET_SPT; + break; + case COMMAND_RSU_STATUS: + args.a0 = INTEL_SIP_SMC_ASYNC_RSU_GET_ERROR_STATUS; + break; + case COMMAND_RSU_NOTIFY: + args.a0 = INTEL_SIP_SMC_ASYNC_RSU_NOTIFY; + args.a2 = p_msg->arg[0]; + break; + default: + dev_err(ctrl->dev, "Invalid command ,%d\n", p_msg->command); + ret = -EINVAL; + goto deallocate_id; + } + + /** + * There is a chance that during the execution of async_send() + * in one core, an interrupt might be received in another core; + * to mitigate this we are adding the handle to the DB and then + * send the smc call. If the smc call is rejected or busy then + * we will deallocate the handle for the client to retry again. 
+ */ + scoped_guard(spinlock_bh, &actrl->trx_list_lock) { + hash_add(actrl->trx_list, &handle->next, + handle->transaction_id); + } + + actrl->invoke_fn(actrl, &args, &res); + + switch (res.a0) { + case INTEL_SIP_SMC_STATUS_OK: + dev_dbg(ctrl->dev, + "Async message sent with transaction_id 0x%02x\n", + handle->transaction_id); + *handler = handle; + return 0; + case INTEL_SIP_SMC_STATUS_BUSY: + dev_warn(ctrl->dev, "Mailbox is busy, try after some time\n"); + ret = -EAGAIN; + break; + case INTEL_SIP_SMC_STATUS_REJECTED: + dev_err(ctrl->dev, "Async message rejected\n"); + ret = -EBADF; + break; + default: + dev_err(ctrl->dev, + "Failed to send async message ,got status as %ld\n", + res.a0); + ret = -EIO; + } + + scoped_guard(spinlock_bh, &actrl->trx_list_lock) { + hash_del(&handle->next); + } + +deallocate_id: + ida_free(&achan->job_id_pool, + STRATIX10_GET_JOBID(handle->transaction_id)); + kfree(handle); + return ret; +} +EXPORT_SYMBOL_GPL(stratix10_svc_async_send); + +/** + * stratix10_svc_async_prepare_response - Prepare the response data for + * an asynchronous transaction. + * @chan: Pointer to the service channel structure. + * @handle: Pointer to the asynchronous handler structure. + * @data: Pointer to the callback data structure. + * + * This function prepares the response data for an asynchronous transaction. It + * extracts the response data from the SMC response structure and stores it in + * the callback data structure. The function also logs the completion of the + * asynchronous transaction. + * + * Return: 0 on success, -ENOENT if the command is invalid + */ +static int stratix10_svc_async_prepare_response(struct stratix10_svc_chan *chan, + struct stratix10_svc_async_handler *handle, + struct stratix10_svc_cb_data *data) +{ + struct stratix10_svc_client_msg *p_msg = + (struct stratix10_svc_client_msg *)handle->msg; + struct stratix10_svc_controller *ctrl = chan->ctrl; + + data->status = STRATIX10_GET_SDM_STATUS_CODE(handle->res.a1); + + switch (p_msg->command) { + case COMMAND_RSU_NOTIFY: + break; + case COMMAND_RSU_GET_SPT_TABLE: + data->kaddr1 = (void *)&handle->res.a2; + data->kaddr2 = (void *)&handle->res.a3; + break; + case COMMAND_RSU_STATUS: + /* COMMAND_RSU_STATUS has more elements than the cb_data + * can acomodate, so passing the response structure to the + * response function to be handled before done command is + * executed by the client. + */ + data->kaddr1 = (void *)&handle->res; + break; + + default: + dev_alert(ctrl->dev, "Invalid command\n ,%d", p_msg->command); + return -ENOENT; + } + dev_dbg(ctrl->dev, "Async message completed transaction_id 0x%02x\n", + handle->transaction_id); + return 0; +} + +/** + * stratix10_svc_async_poll - Polls the status of an asynchronous + * transaction. + * @chan: Pointer to the service channel structure. + * @tx_handle: Handle to the transaction being polled. + * @data: Pointer to the callback data structure. + * + * This function polls the status of an asynchronous transaction + * identified by the given transaction handle. It ensures that the + * necessary structures are initialized and valid before proceeding + * with the poll operation. The function sets up the necessary + * arguments for the SMC call, invokes the call, and prepares the + * response data if the call is successful. If the call fails, the + * function returns the error mapped to the SVC status error. 
+ * + * Return: 0 on success, -EINVAL if any input parameter is invalid, + * -EAGAIN if the transaction is still in progress, + * -EPERM if the command is invalid, or other negative + * error codes on failure. + */ +int stratix10_svc_async_poll(struct stratix10_svc_chan *chan, + void *tx_handle, + struct stratix10_svc_cb_data *data) +{ + struct stratix10_svc_async_handler *handle; + struct arm_smccc_1_2_regs args = { 0 }; + struct stratix10_svc_controller *ctrl; + struct stratix10_async_ctrl *actrl; + struct stratix10_async_chan *achan; + int ret; + + if (!chan || !tx_handle || !data) + return -EINVAL; + + ctrl = chan->ctrl; + actrl = &ctrl->actrl; + achan = chan->async_chan; + + if (!achan) { + dev_err(ctrl->dev, "Async channel not allocated\n"); + return -EINVAL; + } + + handle = (struct stratix10_svc_async_handler *)tx_handle; + scoped_guard(spinlock_bh, &actrl->trx_list_lock) { + if (!hash_hashed(&handle->next)) { + dev_err(ctrl->dev, "Invalid transaction handler"); + return -EINVAL; + } + } + + args.a0 = INTEL_SIP_SMC_ASYNC_POLL; + args.a1 = + STRATIX10_SIP_SMC_SET_TRANSACTIONID_X1(handle->transaction_id); + + actrl->invoke_fn(actrl, &args, &handle->res); + + /*clear data for response*/ + memset(data, 0, sizeof(*data)); + + if (handle->res.a0 == INTEL_SIP_SMC_STATUS_OK) { + ret = stratix10_svc_async_prepare_response(chan, handle, data); + if (ret) { + dev_err(ctrl->dev, "Error in preparation of response,%d\n", ret); + WARN_ON_ONCE(1); + } + return 0; + } else if (handle->res.a0 == INTEL_SIP_SMC_STATUS_BUSY) { + dev_dbg(ctrl->dev, "async message is still in progress\n"); + return -EAGAIN; + } + + dev_err(ctrl->dev, + "Failed to poll async message ,got status as %ld\n", + handle->res.a0); + return -EINVAL; +} +EXPORT_SYMBOL_GPL(stratix10_svc_async_poll); + +/** + * stratix10_svc_async_done - Completes an asynchronous transaction. + * @chan: Pointer to the service channel structure. + * @tx_handle: Handle to the transaction being completed. + * + * This function completes an asynchronous transaction identified by + * the given transaction handle. It ensures that the necessary + * structures are initialized and valid before proceeding with the + * completion operation. The function deallocates the transaction ID, + * frees the memory allocated for the handler, and removes the handler + * from the transaction list. + * + * Return: 0 on success, -EINVAL if any input parameter is invalid, + * or other negative error codes on failure. 
+ */ +int stratix10_svc_async_done(struct stratix10_svc_chan *chan, void *tx_handle) +{ + struct stratix10_svc_async_handler *handle; + struct stratix10_svc_controller *ctrl; + struct stratix10_async_chan *achan; + struct stratix10_async_ctrl *actrl; + + if (!chan || !tx_handle) + return -EINVAL; + + ctrl = chan->ctrl; + achan = chan->async_chan; + actrl = &ctrl->actrl; + + if (!achan) { + dev_err(ctrl->dev, "async channel not allocated\n"); + return -EINVAL; + } + + handle = (struct stratix10_svc_async_handler *)tx_handle; + scoped_guard(spinlock_bh, &actrl->trx_list_lock) { + if (!hash_hashed(&handle->next)) { + dev_err(ctrl->dev, "Invalid transaction handle"); + return -EINVAL; + } + hash_del(&handle->next); + } + ida_free(&achan->job_id_pool, + STRATIX10_GET_JOBID(handle->transaction_id)); + kfree(handle); + return 0; +} +EXPORT_SYMBOL_GPL(stratix10_svc_async_done); + +static inline void stratix10_smc_1_2(struct stratix10_async_ctrl *actrl, + const struct arm_smccc_1_2_regs *args, + struct arm_smccc_1_2_regs *res) +{ + arm_smccc_1_2_smc(args, res); +} + +/** + * stratix10_svc_async_init - Initialize the Stratix10 service + * controller for asynchronous operations. + * @controller: Pointer to the Stratix10 service controller structure. + * + * This function initializes the asynchronous service controller by + * setting up the necessary data structures and initializing the + * transaction list. + * + * Return: 0 on success, -EINVAL if the controller is NULL or already + * initialized, -ENOMEM if memory allocation fails, + * -EADDRINUSE if the client ID is already reserved, or other + * negative error codes on failure. + */ +static int stratix10_svc_async_init(struct stratix10_svc_controller *controller) +{ + struct stratix10_async_ctrl *actrl; + struct arm_smccc_res res; + struct device *dev; + int ret; + + if (!controller) + return -EINVAL; + + actrl = &controller->actrl; + + if (actrl->initialized) + return -EINVAL; + + dev = controller->dev; + + controller->invoke_fn(INTEL_SIP_SMC_SVC_VERSION, 0, 0, 0, 0, 0, 0, 0, &res); + if (res.a0 != INTEL_SIP_SMC_STATUS_OK || + !(res.a1 > ASYNC_ATF_MINIMUM_MAJOR_VERSION || + (res.a1 == ASYNC_ATF_MINIMUM_MAJOR_VERSION && + res.a2 >= ASYNC_ATF_MINIMUM_MINOR_VERSION))) { + dev_err(dev, + "Intel Service Layer Driver: ATF version is not compatible for async operation\n"); + return -EINVAL; + } + + actrl->invoke_fn = stratix10_smc_1_2; + + ida_init(&actrl->async_id_pool); + + /** + * SIP_SVC_V1_CLIENT_ID is used by V1/stratix10_svc_send() clients + * for communicating with SDM synchronously. We need to restrict + * this in V3/stratix10_svc_async_send() usage to distinguish + * between V1 and V3 messages in El3 firmware. + */ + ret = ida_alloc_range(&actrl->async_id_pool, SIP_SVC_V1_CLIENT_ID, + SIP_SVC_V1_CLIENT_ID, GFP_KERNEL); + if (ret < 0) { + dev_err(dev, + "Intel Service Layer Driver: Error on reserving SIP_SVC_V1_CLIENT_ID\n"); + ida_destroy(&actrl->async_id_pool); + actrl->invoke_fn = NULL; + return -EADDRINUSE; + } + + spin_lock_init(&actrl->trx_list_lock); + hash_init(actrl->trx_list); + atomic_set(&actrl->common_achan_refcount, 0); + + actrl->initialized = true; + return 0; +} + +/** + * stratix10_svc_async_exit - Clean up and exit the asynchronous + * service controller + * @ctrl: Pointer to the stratix10_svc_controller structure + * + * This function performs the necessary cleanup for the asynchronous + * service controller. It checks if the controller is valid and if it + * has been initialized. 
It then locks the transaction list and safely + * removes and deallocates each handler in the list. The function also + * removes any asynchronous clients associated with the controller's + * channels and destroys the asynchronous ID pool. Finally, it resets + * the asynchronous ID pool and invoke function pointers to NULL. + * + * Return: 0 on success, -EINVAL if the controller is invalid or not + * initialized. + */ +static int stratix10_svc_async_exit(struct stratix10_svc_controller *ctrl) +{ + struct stratix10_svc_async_handler *handler; + struct stratix10_async_ctrl *actrl; + struct hlist_node *tmp; + int i; + + if (!ctrl) + return -EINVAL; + + actrl = &ctrl->actrl; + + if (!actrl->initialized) + return -EINVAL; + + actrl->initialized = false; + + scoped_guard(spinlock_bh, &actrl->trx_list_lock) { + hash_for_each_safe(actrl->trx_list, i, tmp, handler, next) { + ida_free(&handler->achan->job_id_pool, + STRATIX10_GET_JOBID(handler->transaction_id)); + hash_del(&handler->next); + kfree(handler); + } + } + + for (i = 0; i < SVC_NUM_CHANNEL; i++) { + if (ctrl->chans[i].async_chan) { + stratix10_svc_remove_async_client(&ctrl->chans[i]); + ctrl->chans[i].async_chan = NULL; + } + } + + ida_destroy(&actrl->async_id_pool); + actrl->invoke_fn = NULL; + + return 0; +} + +/** * stratix10_svc_free_channel() - free service channel * @chan: service channel to be freed * @@ -992,6 +1722,7 @@ int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg) p_data->flag = ct->flags; } } else { + guard(mutex)(&svc_mem_lock); list_for_each_entry(p_mem, &svc_data_mem, node) if (p_mem->vaddr == p_msg->payload) { p_data->paddr = p_mem->paddr; @@ -1074,6 +1805,7 @@ void *stratix10_svc_allocate_memory(struct stratix10_svc_chan *chan, if (!pmem) return ERR_PTR(-ENOMEM); + guard(mutex)(&svc_mem_lock); va = gen_pool_alloc(genpool, s); if (!va) return ERR_PTR(-ENOMEM); @@ -1102,6 +1834,7 @@ EXPORT_SYMBOL_GPL(stratix10_svc_allocate_memory); void stratix10_svc_free_memory(struct stratix10_svc_chan *chan, void *kaddr) { struct stratix10_svc_data_mem *pmem; + guard(mutex)(&svc_mem_lock); list_for_each_entry(pmem, &svc_data_mem, node) if (pmem->vaddr == kaddr) { @@ -1176,11 +1909,18 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev) controller->invoke_fn = invoke_fn; init_completion(&controller->complete_status); + ret = stratix10_svc_async_init(controller); + if (ret) { + dev_dbg(dev, "Intel Service Layer Driver: Error on stratix10_svc_async_init %d\n", + ret); + goto err_destroy_pool; + } + fifo_size = sizeof(struct stratix10_svc_data) * SVC_NUM_DATA_IN_FIFO; ret = kfifo_alloc(&controller->svc_fifo, fifo_size, GFP_KERNEL); if (ret) { dev_err(dev, "failed to allocate FIFO\n"); - goto err_destroy_pool; + goto err_async_exit; } spin_lock_init(&controller->svc_fifo_lock); @@ -1199,6 +1939,11 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev) chans[2].name = SVC_CLIENT_FCS; spin_lock_init(&chans[2].lock); + chans[3].scl = NULL; + chans[3].ctrl = controller; + chans[3].name = SVC_CLIENT_HWMON; + spin_lock_init(&chans[3].lock); + list_add_tail(&controller->node, &svc_ctrl); platform_set_drvdata(pdev, controller); @@ -1250,6 +1995,8 @@ err_unregister_rsu_dev: platform_device_unregister(svc->stratix10_svc_rsu); err_free_kfifo: kfifo_free(&controller->svc_fifo); +err_async_exit: + stratix10_svc_async_exit(controller); err_destroy_pool: gen_pool_destroy(genpool); return ret; @@ -1260,6 +2007,8 @@ static void stratix10_svc_drv_remove(struct platform_device *pdev) struct 
stratix10_svc_controller *ctrl = platform_get_drvdata(pdev); struct stratix10_svc *svc = ctrl->svc; + stratix10_svc_async_exit(ctrl); + of_platform_depopulate(ctrl->dev); platform_device_unregister(svc->intel_svc_fcs); diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c index 49fd2ae01055..e027a2bd8f26 100644 --- a/drivers/firmware/ti_sci.c +++ b/drivers/firmware/ti_sci.c @@ -398,6 +398,9 @@ static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo, static inline int ti_sci_do_xfer(struct ti_sci_info *info, struct ti_sci_xfer *xfer) { + struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf; + bool response_expected = !!(hdr->flags & (TI_SCI_FLAG_REQ_ACK_ON_PROCESSED | + TI_SCI_FLAG_REQ_ACK_ON_RECEIVED)); int ret; int timeout; struct device *dev = info->dev; @@ -409,12 +412,12 @@ static inline int ti_sci_do_xfer(struct ti_sci_info *info, ret = 0; - if (system_state <= SYSTEM_RUNNING) { + if (response_expected && system_state <= SYSTEM_RUNNING) { /* And we wait for the response. */ timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms); if (!wait_for_completion_timeout(&xfer->done, timeout)) ret = -ETIMEDOUT; - } else { + } else if (response_expected) { /* * If we are !running, we cannot use wait_for_completion_timeout * during noirq phase, so we must manually poll the completion. @@ -1670,6 +1673,9 @@ fail: static int ti_sci_cmd_prepare_sleep(const struct ti_sci_handle *handle, u8 mode, u32 ctx_lo, u32 ctx_hi, u32 debug_flags) { + u32 msg_flags = mode == TISCI_MSG_VALUE_SLEEP_MODE_PARTIAL_IO ? + TI_SCI_FLAG_REQ_GENERIC_NORESPONSE : + TI_SCI_FLAG_REQ_ACK_ON_PROCESSED; struct ti_sci_info *info; struct ti_sci_msg_req_prepare_sleep *req; struct ti_sci_msg_hdr *resp; @@ -1686,7 +1692,7 @@ static int ti_sci_cmd_prepare_sleep(const struct ti_sci_handle *handle, u8 mode, dev = info->dev; xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PREPARE_SLEEP, - TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, + msg_flags, sizeof(*req), sizeof(*resp)); if (IS_ERR(xfer)) { ret = PTR_ERR(xfer); @@ -1706,11 +1712,12 @@ static int ti_sci_cmd_prepare_sleep(const struct ti_sci_handle *handle, u8 mode, goto fail; } - resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; - - if (!ti_sci_is_response_ack(resp)) { - dev_err(dev, "Failed to prepare sleep\n"); - ret = -ENODEV; + if (msg_flags == TI_SCI_FLAG_REQ_ACK_ON_PROCESSED) { + resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; + if (!ti_sci_is_response_ack(resp)) { + dev_err(dev, "Failed to prepare sleep\n"); + ret = -ENODEV; + } } fail: @@ -3664,6 +3671,78 @@ devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev, } EXPORT_SYMBOL_GPL(devm_ti_sci_get_resource); +/* + * Iterate all device nodes that have a wakeup-source property and check if one + * of the possible phandles points to a Partial-IO system state. If it + * does resolve the device node to an actual device and check if wakeup is + * enabled. + */ +static bool ti_sci_partial_io_wakeup_enabled(struct ti_sci_info *info) +{ + struct device_node *wakeup_node = NULL; + + for_each_node_with_property(wakeup_node, "wakeup-source") { + struct of_phandle_iterator it; + int err; + + of_for_each_phandle(&it, err, wakeup_node, "wakeup-source", NULL, 0) { + struct platform_device *pdev; + bool may_wakeup; + + /* + * Continue if idle-state-name is not off-wake. Return + * value is the index of the string which should be 0 if + * off-wake is present. 
+ */ + if (of_property_match_string(it.node, "idle-state-name", "off-wake")) + continue; + + pdev = of_find_device_by_node(wakeup_node); + if (!pdev) + continue; + + may_wakeup = device_may_wakeup(&pdev->dev); + put_device(&pdev->dev); + + if (may_wakeup) { + dev_dbg(info->dev, "%pOF identified as wakeup source for Partial-IO\n", + wakeup_node); + of_node_put(it.node); + of_node_put(wakeup_node); + return true; + } + } + } + + return false; +} + +static int ti_sci_sys_off_handler(struct sys_off_data *data) +{ + struct ti_sci_info *info = data->cb_data; + const struct ti_sci_handle *handle = &info->handle; + bool enter_partial_io = ti_sci_partial_io_wakeup_enabled(info); + int ret; + + if (!enter_partial_io) + return NOTIFY_DONE; + + dev_info(info->dev, "Entering Partial-IO because a powered wakeup-enabled device was found.\n"); + + ret = ti_sci_cmd_prepare_sleep(handle, TISCI_MSG_VALUE_SLEEP_MODE_PARTIAL_IO, 0, 0, 0); + if (ret) { + dev_err(info->dev, + "Failed to enter Partial-IO %pe, trying to do an emergency restart\n", + ERR_PTR(ret)); + emergency_restart(); + } + + mdelay(5000); + emergency_restart(); + + return NOTIFY_DONE; +} + static int tisci_reboot_handler(struct sys_off_data *data) { struct ti_sci_info *info = data->cb_data; @@ -3706,7 +3785,7 @@ static int ti_sci_prepare_system_suspend(struct ti_sci_info *info) } } -static int __maybe_unused ti_sci_suspend(struct device *dev) +static int ti_sci_suspend(struct device *dev) { struct ti_sci_info *info = dev_get_drvdata(dev); struct device *cpu_dev, *cpu_dev_max = NULL; @@ -3746,19 +3825,21 @@ static int __maybe_unused ti_sci_suspend(struct device *dev) return 0; } -static int __maybe_unused ti_sci_suspend_noirq(struct device *dev) +static int ti_sci_suspend_noirq(struct device *dev) { struct ti_sci_info *info = dev_get_drvdata(dev); int ret = 0; - ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_ENABLE); - if (ret) - return ret; + if (info->fw_caps & MSG_FLAG_CAPS_IO_ISOLATION) { + ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_ENABLE); + if (ret) + return ret; + } return 0; } -static int __maybe_unused ti_sci_resume_noirq(struct device *dev) +static int ti_sci_resume_noirq(struct device *dev) { struct ti_sci_info *info = dev_get_drvdata(dev); int ret = 0; @@ -3767,9 +3848,11 @@ static int __maybe_unused ti_sci_resume_noirq(struct device *dev) u8 pin; u8 mode; - ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_DISABLE); - if (ret) - return ret; + if (info->fw_caps & MSG_FLAG_CAPS_IO_ISOLATION) { + ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_DISABLE); + if (ret) + return ret; + } ret = ti_sci_msg_cmd_lpm_wake_reason(&info->handle, &source, &time, &pin, &mode); /* Do not fail to resume on error as the wake reason is not critical */ @@ -3780,7 +3863,7 @@ static int __maybe_unused ti_sci_resume_noirq(struct device *dev) return 0; } -static void __maybe_unused ti_sci_pm_complete(struct device *dev) +static void ti_sci_pm_complete(struct device *dev) { struct ti_sci_info *info = dev_get_drvdata(dev); @@ -3791,12 +3874,10 @@ static void __maybe_unused ti_sci_pm_complete(struct device *dev) } static const struct dev_pm_ops ti_sci_pm_ops = { -#ifdef CONFIG_PM_SLEEP - .suspend = ti_sci_suspend, - .suspend_noirq = ti_sci_suspend_noirq, - .resume_noirq = ti_sci_resume_noirq, - .complete = ti_sci_pm_complete, -#endif + .suspend = pm_sleep_ptr(ti_sci_suspend), + .suspend_noirq = pm_sleep_ptr(ti_sci_suspend_noirq), + .resume_noirq = 
pm_sleep_ptr(ti_sci_resume_noirq),
+ .complete = pm_sleep_ptr(ti_sci_pm_complete),
};
/* Description for K2G */
@@ -3928,11 +4009,12 @@ static int ti_sci_probe(struct platform_device *pdev)
}
ti_sci_msg_cmd_query_fw_caps(&info->handle, &info->fw_caps);
- dev_dbg(dev, "Detected firmware capabilities: %s%s%s%s\n",
+ dev_dbg(dev, "Detected firmware capabilities: %s%s%s%s%s\n",
info->fw_caps & MSG_FLAG_CAPS_GENERIC ? "Generic" : "",
info->fw_caps & MSG_FLAG_CAPS_LPM_PARTIAL_IO ? " Partial-IO" : "",
info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED ? " DM-Managed" : "",
- info->fw_caps & MSG_FLAG_CAPS_LPM_ABORT ? " LPM-Abort" : ""
+ info->fw_caps & MSG_FLAG_CAPS_LPM_ABORT ? " LPM-Abort" : "",
+ info->fw_caps & MSG_FLAG_CAPS_IO_ISOLATION ? " IO-Isolation" : ""
);
ti_sci_setup_ops(info);
@@ -3943,6 +4025,19 @@ static int ti_sci_probe(struct platform_device *pdev)
goto out;
}
+ if (info->fw_caps & MSG_FLAG_CAPS_LPM_PARTIAL_IO) {
+ ret = devm_register_sys_off_handler(dev,
+ SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_FIRMWARE,
+ ti_sci_sys_off_handler,
+ info);
+ if (ret) {
+ dev_err(dev, "Failed to register sys_off_handler %pe\n",
+ ERR_PTR(ret));
+ goto out;
+ }
+ }
+
dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n",
info->handle.version.abi_major, info->handle.version.abi_minor,
info->handle.version.firmware_revision,
@@ -3952,7 +4047,13 @@ static int ti_sci_probe(struct platform_device *pdev)
list_add_tail(&info->node, &ti_sci_list);
mutex_unlock(&ti_sci_list_mutex);
- return of_platform_populate(dev->of_node, NULL, NULL, dev);
+ ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (ret) {
+ dev_err(dev, "platform_populate failed %pe\n", ERR_PTR(ret));
+ goto out;
+ }
+ return 0;
+
out:
if (!IS_ERR(info->chan_tx))
mbox_free_channel(info->chan_tx);
diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h
index 701c416b2e78..91f234550c43 100644
--- a/drivers/firmware/ti_sci.h
+++ b/drivers/firmware/ti_sci.h
@@ -149,6 +149,7 @@ struct ti_sci_msg_req_reboot {
* MSG_FLAG_CAPS_LPM_PARTIAL_IO: Partial IO in LPM
* MSG_FLAG_CAPS_LPM_DM_MANAGED: LPM can be managed by DM
* MSG_FLAG_CAPS_LPM_ABORT: Abort entry to LPM
+ * MSG_FLAG_CAPS_IO_ISOLATION: IO Isolation support
*
* Response to a generic message with message type TI_SCI_MSG_QUERY_FW_CAPS
* providing currently available SOC/firmware capabilities. SoC that don't
@@ -160,6 +161,7 @@ struct ti_sci_msg_resp_query_fw_caps {
#define MSG_FLAG_CAPS_LPM_PARTIAL_IO TI_SCI_MSG_FLAG(4)
#define MSG_FLAG_CAPS_LPM_DM_MANAGED TI_SCI_MSG_FLAG(5)
#define MSG_FLAG_CAPS_LPM_ABORT TI_SCI_MSG_FLAG(9)
+#define MSG_FLAG_CAPS_IO_ISOLATION TI_SCI_MSG_FLAG(7)
#define MSG_MASK_CAPS_LPM GENMASK_ULL(4, 1)
u64 fw_caps;
} __packed;
@@ -595,6 +597,11 @@ struct ti_sci_msg_resp_get_clock_freq {
struct ti_sci_msg_req_prepare_sleep {
struct ti_sci_msg_hdr hdr;
+/*
+ * When sending prepare_sleep with MODE_PARTIAL_IO no response will be sent,
+ * no further steps are required.
+ */
+#define TISCI_MSG_VALUE_SLEEP_MODE_PARTIAL_IO 0x03
#define TISCI_MSG_VALUE_SLEEP_MODE_DM_MANAGED 0xfd
u8 mode;
u32 ctx_lo;
diff --git a/drivers/firmware/xilinx/Makefile b/drivers/firmware/xilinx/Makefile
index 875a53703c82..70f8f02f14a3 100644
--- a/drivers/firmware/xilinx/Makefile
+++ b/drivers/firmware/xilinx/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for Xilinx firmwares
-obj-$(CONFIG_ZYNQMP_FIRMWARE) += zynqmp.o
+obj-$(CONFIG_ZYNQMP_FIRMWARE) += zynqmp.o zynqmp-ufs.o
obj-$(CONFIG_ZYNQMP_FIRMWARE_DEBUG) += zynqmp-debug.o
diff --git a/drivers/firmware/xilinx/zynqmp-debug.c b/drivers/firmware/xilinx/zynqmp-debug.c
index 22853ae0efdf..36efb827f3da 100644
--- a/drivers/firmware/xilinx/zynqmp-debug.c
+++ b/drivers/firmware/xilinx/zynqmp-debug.c
@@ -3,6 +3,7 @@
* Xilinx Zynq MPSoC Firmware layer for debugfs APIs
*
* Copyright (C) 2014-2018 Xilinx, Inc.
+ * Copyright (C) 2022 - 2025 Advanced Micro Devices, Inc.
*
* Michal Simek <michal.simek@amd.com>
* Davorin Mista <davorin.mista@aggios.com>
@@ -38,6 +39,7 @@ static struct pm_api_info pm_api_list[] = {
PM_API(PM_RELEASE_NODE),
PM_API(PM_SET_REQUIREMENT),
PM_API(PM_GET_API_VERSION),
+ PM_API(PM_GET_NODE_STATUS),
PM_API(PM_REGISTER_NOTIFIER),
PM_API(PM_RESET_ASSERT),
PM_API(PM_RESET_GET_STATUS),
@@ -167,6 +169,17 @@ static int process_api_request(u32 pm_id, u64 *pm_api_arg, u32 *pm_api_ret)
pm_api_arg[3] ? pm_api_arg[3] :
ZYNQMP_PM_REQUEST_ACK_BLOCKING);
break;
+ case PM_GET_NODE_STATUS:
+ ret = zynqmp_pm_get_node_status(pm_api_arg[0],
+ &pm_api_ret[0],
+ &pm_api_ret[1],
+ &pm_api_ret[2]);
+ if (!ret)
+ sprintf(debugfs_buf,
+ "GET_NODE_STATUS:\n\tNodeId: %llu\n\tStatus: %u\n\tRequirements: %u\n\tUsage: %u\n",
+ pm_api_arg[0], pm_api_ret[0],
+ pm_api_ret[1], pm_api_ret[2]);
+ break;
case PM_REGISTER_NOTIFIER:
ret = zynqmp_pm_register_notifier(pm_api_arg[0],
pm_api_arg[1] ?
diff --git a/drivers/firmware/xilinx/zynqmp-ufs.c b/drivers/firmware/xilinx/zynqmp-ufs.c
new file mode 100644
index 000000000000..85da8a822f3a
--- /dev/null
+++ b/drivers/firmware/xilinx/zynqmp-ufs.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Firmware Layer for UFS APIs
+ *
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/module.h>
+
+/* Register Node IDs */
+#define PM_REGNODE_PMC_IOU_SLCR 0x30000002 /* PMC IOU SLCR */
+#define PM_REGNODE_EFUSE_CACHE 0x30000003 /* EFUSE Cache */
+
+/* Register Offsets for PMC IOU SLCR */
+#define SRAM_CSR_OFFSET 0x104C /* SRAM Control and Status */
+#define TXRX_CFGRDY_OFFSET 0x1054 /* M-PHY TX-RX Config ready */
+
+/* Masks for SRAM Control and Status Register */
+#define SRAM_CSR_INIT_DONE_MASK BIT(0) /* SRAM initialization done */
+#define SRAM_CSR_EXT_LD_DONE_MASK BIT(1) /* SRAM External load done */
+#define SRAM_CSR_BYPASS_MASK BIT(2) /* Bypass SRAM interface */
+
+/* Mask to check M-PHY TX-RX configuration readiness */
+#define TX_RX_CFG_RDY_MASK GENMASK(3, 0)
+
+/* Register Offsets for EFUSE Cache */
+#define UFS_CAL_1_OFFSET 0xBE8 /* UFS Calibration Value */
+
+/**
+ * zynqmp_pm_is_mphy_tx_rx_config_ready - check M-PHY TX-RX config readiness
+ * @is_ready: Store output status (true/false)
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_is_mphy_tx_rx_config_ready(bool *is_ready)
+{
+ u32 regval;
+ int ret;
+
+ if (!is_ready)
+ return -EINVAL;
+
+ ret = zynqmp_pm_sec_read_reg(PM_REGNODE_PMC_IOU_SLCR, TXRX_CFGRDY_OFFSET, &regval);
+ if (ret)
+ return ret;
+
+ regval &= TX_RX_CFG_RDY_MASK;
+ if (regval)
+ *is_ready = true;
+ else
+ *is_ready = false;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_is_mphy_tx_rx_config_ready);
+
+/**
+ * zynqmp_pm_is_sram_init_done - check SRAM initialization
+ * @is_done: Store output status (true/false)
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_is_sram_init_done(bool *is_done)
+{
+ u32 regval;
+ int ret;
+
+ if (!is_done)
+ return -EINVAL;
+
+ ret = zynqmp_pm_sec_read_reg(PM_REGNODE_PMC_IOU_SLCR, SRAM_CSR_OFFSET, &regval);
+ if (ret)
+ return ret;
+
+ regval &= SRAM_CSR_INIT_DONE_MASK;
+ if (regval)
+ *is_done = true;
+ else
+ *is_done = false;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_is_sram_init_done);
+
+/**
+ * zynqmp_pm_set_sram_bypass - Set SRAM bypass Control
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_set_sram_bypass(void)
+{
+ u32 sram_csr;
+ int ret;
+
+ ret = zynqmp_pm_sec_read_reg(PM_REGNODE_PMC_IOU_SLCR, SRAM_CSR_OFFSET, &sram_csr);
+ if (ret)
+ return ret;
+
+ sram_csr &= ~SRAM_CSR_EXT_LD_DONE_MASK;
+ sram_csr |= SRAM_CSR_BYPASS_MASK;
+
+ return zynqmp_pm_sec_mask_write_reg(PM_REGNODE_PMC_IOU_SLCR, SRAM_CSR_OFFSET,
+ GENMASK(2, 1), sram_csr);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_sram_bypass);
+
+/**
+ * zynqmp_pm_get_ufs_calibration_values - Read UFS calibration values
+ * @val: Store the calibration value
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_get_ufs_calibration_values(u32 *val)
+{
+ return zynqmp_pm_sec_read_reg(PM_REGNODE_EFUSE_CACHE, UFS_CAL_1_OFFSET, val);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_ufs_calibration_values);
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 02da3e48bc8f..ad811f40e059 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -3,7 +3,7 @@
* Xilinx Zynq MPSoC Firmware layer
*
* Copyright (C) 2014-2022 Xilinx, Inc.
- * Copyright (C) 2022 - 2024, Advanced Micro Devices, Inc.
+ * Copyright (C) 2022 - 2025 Advanced Micro Devices, Inc.
*
* Michal Simek <michal.simek@amd.com>
* Davorin Mista <davorin.mista@aggios.com>
@@ -72,6 +72,15 @@ struct pm_api_feature_data {
struct hlist_node hentry;
};
+struct platform_fw_data {
+ /*
+ * Family code for platform.
+ */
+ const u32 family_code;
+};
+
+static struct platform_fw_data *active_platform_fw_data;
+
static const struct mfd_cell firmware_devs[] = {
{
.name = "zynqmp_power_controller",
@@ -464,8 +473,6 @@ int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 *ret_payload, u32 num_args, ...)
static u32 pm_api_version;
static u32 pm_tz_version;
-static u32 pm_family_code;
-static u32 pm_sub_family_code;
int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset)
{
@@ -532,32 +539,18 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_get_chipid);
/**
* zynqmp_pm_get_family_info() - Get family info of platform
* @family: Returned family code value
- * @subfamily: Returned sub-family code value
*
* Return: Returns status, either success or error+reason
*/
-int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily)
+int zynqmp_pm_get_family_info(u32 *family)
{
- u32 ret_payload[PAYLOAD_ARG_CNT];
- u32 idcode;
- int ret;
+ if (!active_platform_fw_data)
+ return -ENODEV;
- /* Check is family or sub-family code already received */
- if (pm_family_code && pm_sub_family_code) {
- *family = pm_family_code;
- *subfamily = pm_sub_family_code;
- return 0;
- }
-
- ret = zynqmp_pm_invoke_fn(PM_GET_CHIPID, ret_payload, 0);
- if (ret < 0)
- return ret;
+ if (!family)
+ return -EINVAL;
- idcode = ret_payload[1];
- pm_family_code = FIELD_GET(FAMILY_CODE_MASK, idcode);
- pm_sub_family_code = FIELD_GET(SUB_FAMILY_CODE_MASK, idcode);
- *family = pm_family_code;
- *subfamily = pm_sub_family_code;
+ *family = active_platform_fw_data->family_code;
return 0;
}
@@ -1238,8 +1231,13 @@ int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param, u32 value)
{
int ret;
+ u32 pm_family_code;
+
+ ret = zynqmp_pm_get_family_info(&pm_family_code);
+ if (ret)
+ return ret;
- if (pm_family_code == ZYNQMP_FAMILY_CODE &&
+ if (pm_family_code == PM_ZYNQMP_FAMILY_CODE &&
param == PM_PINCTRL_CONFIG_TRI_STATE) {
ret = zynqmp_pm_feature(PM_PINCTRL_CONFIG_PARAM_SET);
if (ret < PM_PINCTRL_PARAM_SET_VERSION) {
@@ -1414,6 +1412,45 @@ int zynqmp_pm_set_tcm_config(u32 node_id, enum rpu_tcm_comb tcm_mode)
EXPORT_SYMBOL_GPL(zynqmp_pm_set_tcm_config);
/**
+ * zynqmp_pm_get_node_status - PM call to request a node's current power state
+ * @node: ID of the component or sub-system in question
+ * @status: Current operating state of the requested node
+ * @requirements: Current requirements asserted on the node,
+ * used for slave nodes only.
+ * @usage: Usage information, used for slave nodes only:
+ * PM_USAGE_NO_MASTER - No master is currently using
+ * the node
+ * PM_USAGE_CURRENT_MASTER - Only requesting master is
+ * currently using the node
+ * PM_USAGE_OTHER_MASTER - Only other masters are
+ * currently using the node
+ * PM_USAGE_BOTH_MASTERS - Both the current and at least
+ * one other master is currently
+ * using the node
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_get_node_status(const u32 node, u32 *const status,
+ u32 *const requirements, u32 *const usage)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!status || !requirements || !usage)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_GET_NODE_STATUS, ret_payload, 1, node);
+ if (ret_payload[0] == XST_PM_SUCCESS) {
+ *status = ret_payload[1];
+ *requirements = ret_payload[2];
+ *usage = ret_payload[3];
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_node_status);
+
+/**
* zynqmp_pm_force_pwrdwn - PM call to request for another PU or subsystem to
* be powered down forcefully
* @node: Node ID of the targeted PU or subsystem
@@ -1617,6 +1654,52 @@ int zynqmp_pm_get_feature_config(enum pm_feature_config_id id,
}
/**
+ * zynqmp_pm_sec_read_reg - PM call to securely read from given offset
+ * of the node
+ * @node_id: Node Id of the device
+ * @offset: Offset to be used (20-bit)
+ * @ret_value: Output data read from the given offset after
+ * firmware access policy is successfully enforced
+ *
+ * Return: Returns 0 on success or error value on failure
+ */
+int zynqmp_pm_sec_read_reg(u32 node_id, u32 offset, u32 *ret_value)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ u32 count = 1;
+ int ret;
+
+ if (!ret_value)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_IOCTL, ret_payload, 4, node_id, IOCTL_READ_REG,
+ offset, count);
+
+ *ret_value = ret_payload[1];
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_sec_read_reg);
+
+/**
+ * zynqmp_pm_sec_mask_write_reg - PM call to securely write to given offset
+ * of the node
+ * @node_id: Node Id of the device
+ * @offset: Offset to be used (20-bit)
+ * @mask: Mask to be used
+ * @value: Value to be written
+ *
+ * Return: Returns 0 on success or error value on failure
+ */
+int zynqmp_pm_sec_mask_write_reg(const u32 node_id, const u32 offset, u32 mask,
+ u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 5, node_id, IOCTL_MASK_WRITE_REG,
+ offset, mask, value);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_sec_mask_write_reg);
+
+/**
* zynqmp_pm_set_sd_config - PM call to set value of SD config registers
* @node: SD node ID
* @config: The config type of SD registers
@@ -2007,12 +2090,18 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct zynqmp_devinfo *devinfo;
+ u32 pm_family_code;
int ret;
ret = get_set_conduit_method(dev->of_node);
if (ret)
return ret;
+ /* Get platform-specific firmware data from device tree match */
+ active_platform_fw_data = (struct platform_fw_data *)device_get_match_data(dev);
+ if (!active_platform_fw_data)
+ return -EINVAL;
+
/* Get SiP SVC version number */
ret = zynqmp_pm_get_sip_svc_version(&sip_svc_version);
if (ret)
@@ -2045,8 +2134,8 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
pr_info("%s Platform Management API v%d.%d\n", __func__,
pm_api_version >> 16, pm_api_version & 0xFFFF);
- /* Get the Family code and sub family code of platform */
- ret = zynqmp_pm_get_family_info(&pm_family_code, &pm_sub_family_code);
+ /* Get the Family code of platform */
+ ret = zynqmp_pm_get_family_info(&pm_family_code);
if (ret < 0)
return ret;
@@ -2073,7 +2162,7 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
zynqmp_pm_api_debugfs_init();
- if (pm_family_code == VERSAL_FAMILY_CODE) {
+ if (pm_family_code != PM_ZYNQMP_FAMILY_CODE) {
em_dev = platform_device_register_data(&pdev->dev, "xlnx_event_manager",
-1, NULL, 0);
if (IS_ERR(em_dev))
@@ -2113,9 +2202,22 @@ static void zynqmp_firmware_sync_state(struct device *dev)
dev_warn(dev, "failed to release power management to firmware\n");
}
+static const struct platform_fw_data platform_fw_data_versal = {
+ .family_code = PM_VERSAL_FAMILY_CODE,
+};
+
+static const struct platform_fw_data platform_fw_data_versal_net = {
+ .family_code = PM_VERSAL_NET_FAMILY_CODE,
+};
+
+static const struct platform_fw_data platform_fw_data_zynqmp = {
+ .family_code = PM_ZYNQMP_FAMILY_CODE,
+};
+
static const struct of_device_id zynqmp_firmware_of_match[] = {
- {.compatible = "xlnx,zynqmp-firmware"},
- {.compatible = "xlnx,versal-firmware"},
+ {.compatible = "xlnx,zynqmp-firmware", .data = &platform_fw_data_zynqmp},
+ {.compatible = "xlnx,versal-firmware", .data = &platform_fw_data_versal},
+ {.compatible = "xlnx,versal-net-firmware", .data = &platform_fw_data_versal_net},
{},
};
MODULE_DEVICE_TABLE(of, zynqmp_firmware_of_match);
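Note on the ti_sci change above: when the firmware reports MSG_FLAG_CAPS_LPM_PARTIAL_IO, probe registers a power-off sys-off handler, and the ti_sci.h comment states that a prepare_sleep request in Partial-IO mode returns no response. The sketch below only shows the general shape of a callback accepted by devm_register_sys_off_handler(); the body of the real ti_sci_sys_off_handler is not part of this diff, so the callback name and its contents are illustrative assumptions.

#include <linux/notifier.h>
#include <linux/reboot.h>

/*
 * Shape of a SYS_OFF_MODE_POWER_OFF callback; data->cb_data is the pointer
 * passed as the last argument to devm_register_sys_off_handler().
 */
static int example_sys_off_handler(struct sys_off_data *data)
{
	void *driver_data = data->cb_data;

	/*
	 * A real handler would use driver_data to reach the firmware channel
	 * and request Partial-IO entry; per the ti_sci.h comment above, no
	 * response is expected for that request.
	 */
	(void)driver_data;

	return NOTIFY_DONE;
}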
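The helpers added in zynqmp-ufs.c are exported for a UFS host controller driver to consume. Below is a minimal consumer sketch, not taken from the patch: the wrapper name example_zynqmp_ufs_phy_init and the call ordering (readiness check, calibration read, SRAM bypass fallback) are assumptions for illustration.

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/firmware/xlnx-zynqmp.h>

static int example_zynqmp_ufs_phy_init(void)
{
	bool cfg_ready = false, sram_done = false;
	u32 cal;
	int ret;

	/* Check that the M-PHY TX/RX configuration is reported ready */
	ret = zynqmp_pm_is_mphy_tx_rx_config_ready(&cfg_ready);
	if (ret)
		return ret;
	if (!cfg_ready)
		return -EBUSY;

	/* Read the eFUSE calibration word; programming it is device specific */
	ret = zynqmp_pm_get_ufs_calibration_values(&cal);
	if (ret)
		return ret;

	/* If SRAM initialization has not completed, request the bypass path */
	ret = zynqmp_pm_is_sram_init_done(&sram_done);
	if (ret)
		return ret;
	if (!sram_done)
		ret = zynqmp_pm_set_sram_bypass();

	return ret;
}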
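The new zynqmp_pm_get_node_status() call returns three output values, mirroring the debugfs hook added in zynqmp-debug.c. A hedged usage sketch, with a placeholder node ID and device pointer:

#include <linux/device.h>
#include <linux/firmware/xlnx-zynqmp.h>

static void example_report_node(struct device *dev, u32 node_id)
{
	u32 status, requirements, usage;

	/* All three output pointers must be non-NULL or the call returns -EINVAL */
	if (!zynqmp_pm_get_node_status(node_id, &status, &requirements, &usage))
		dev_info(dev, "node %u: status %u, requirements %u, usage %u\n",
			 node_id, status, requirements, usage);
}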
