Diffstat (limited to 'drivers/firmware')
172 files changed, 29749 insertions, 2970 deletions
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index 71d8b26c4103..bbd2155d8483 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -31,7 +31,6 @@ config ARM_SCPI_PROTOCOL config ARM_SDE_INTERFACE bool "ARM Software Delegated Exception Interface (SDEI)" depends on ARM64 - depends on ACPI_APEI_GHES help The Software Delegated Exception Interface (SDEI) is an ARM standard for registering callbacks from the platform firmware @@ -106,7 +105,7 @@ config ISCSI_IBFT select ISCSI_BOOT_SYSFS select ISCSI_IBFT_FIND if X86 depends on ACPI && SCSI && SCSI_LOWLEVEL - default n + default n help This option enables support for detection and exposing of iSCSI Boot Firmware Table (iBFT) via sysfs to userspace. If you wish to @@ -212,9 +211,20 @@ config SYSFB_SIMPLEFB If unsure, say Y. +config TH1520_AON_PROTOCOL + tristate "Always-On firmware protocol" + depends on ARCH_THEAD || COMPILE_TEST + depends on MAILBOX + help + Power, clock, and resource management capabilities on the TH1520 SoC are + managed by the E902 core. Firmware running on this core communicates with + the kernel through the Always-On protocol, using hardware mailbox as a medium. + Say yes if you need such capabilities. + config TI_SCI_PROTOCOL tristate "TI System Control Interface (TISCI) Message Protocol" depends on TI_MESSAGE_MANAGER + default ARCH_K3 help TI System Control Interface (TISCI) Message Protocol is used to manage compute systems such as ARM, DSP etc with the system controller in @@ -257,6 +267,23 @@ config TURRIS_MOX_RWTM other manufacturing data and also utilize the Entropy Bit Generator for hardware random number generation. +if TURRIS_MOX_RWTM + +config TURRIS_MOX_RWTM_KEYCTL + bool "Turris Mox rWTM ECDSA message signing" + default y + depends on KEYS + depends on ASYMMETRIC_KEY_TYPE + select CZNIC_PLATFORMS + select TURRIS_SIGNING_KEY + help + Say Y here to add support for ECDSA message signing with board private + key (each Turris Mox has an ECDSA private key generated in the secure + coprocessor when manufactured). This functionality is exposed via the + keyctl() syscall. 
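The TURRIS_MOX_RWTM_KEYCTL help text above says the board-key ECDSA signing is reached through the keyctl() syscall. Purely as a hedged userspace sketch (not part of this patch: the key serial is passed on the command line, and the "enc="/"hash=" parameter string is an assumption), signing a digest with such an asymmetric key via libkeyutils could look like:

    /* Hypothetical sketch: sign a digest with a kernel asymmetric key.
     * The key serial comes from argv and the "enc=raw hash=sha256"
     * parameter string is an assumption, not defined by this patch.
     * Build with: gcc sign.c -lkeyutils
     */
    #include <keyutils.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(int argc, char **argv)
    {
            unsigned char digest[32] = { 0 };  /* normally a SHA-256 hash */
            unsigned char sig[256];
            key_serial_t key;
            long len;

            if (argc < 2) {
                    fprintf(stderr, "usage: %s <key-serial>\n", argv[0]);
                    return 1;
            }
            key = (key_serial_t)strtol(argv[1], NULL, 0);

            /* wraps the KEYCTL_PKEY_SIGN operation */
            len = keyctl_pkey_sign(key, "enc=raw hash=sha256",
                                   digest, sizeof(digest), sig, sizeof(sig));
            if (len < 0) {
                    perror("keyctl_pkey_sign");
                    return 1;
            }
            printf("signature: %ld bytes\n", len);
            return 0;
    }
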
+ +endif # TURRIS_MOX_RWTM + source "drivers/firmware/arm_ffa/Kconfig" source "drivers/firmware/broadcom/Kconfig" source "drivers/firmware/cirrus/Kconfig" @@ -267,6 +294,7 @@ source "drivers/firmware/meson/Kconfig" source "drivers/firmware/microchip/Kconfig" source "drivers/firmware/psci/Kconfig" source "drivers/firmware/qcom/Kconfig" +source "drivers/firmware/samsung/Kconfig" source "drivers/firmware/smccc/Kconfig" source "drivers/firmware/tegra/Kconfig" source "drivers/firmware/xilinx/Kconfig" diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile index 7a8d486e718f..4ddec2820c96 100644 --- a/drivers/firmware/Makefile +++ b/drivers/firmware/Makefile @@ -18,6 +18,7 @@ obj-$(CONFIG_RASPBERRYPI_FIRMWARE) += raspberrypi.o obj-$(CONFIG_FW_CFG_SYSFS) += qemu_fw_cfg.o obj-$(CONFIG_SYSFB) += sysfb.o obj-$(CONFIG_SYSFB_SIMPLEFB) += sysfb_simplefb.o +obj-$(CONFIG_TH1520_AON_PROTOCOL) += thead,th1520-aon.o obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o obj-$(CONFIG_TURRIS_MOX_RWTM) += turris-mox-rwtm.o @@ -33,6 +34,7 @@ obj-y += efi/ obj-y += imx/ obj-y += psci/ obj-y += qcom/ +obj-y += samsung/ obj-y += smccc/ obj-y += tegra/ obj-y += xilinx/ diff --git a/drivers/firmware/arm_ffa/Makefile b/drivers/firmware/arm_ffa/Makefile index 9d9f37523200..168990a7e792 100644 --- a/drivers/firmware/arm_ffa/Makefile +++ b/drivers/firmware/arm_ffa/Makefile @@ -2,5 +2,7 @@ ffa-bus-y = bus.o ffa-driver-y = driver.o ffa-transport-$(CONFIG_ARM_FFA_SMCCC) += smccc.o -ffa-module-objs := $(ffa-bus-y) $(ffa-driver-y) $(ffa-transport-y) -obj-$(CONFIG_ARM_FFA_TRANSPORT) = ffa-module.o +ffa-core-objs := $(ffa-bus-y) +ffa-module-objs := $(ffa-driver-y) $(ffa-transport-y) +obj-$(CONFIG_ARM_FFA_TRANSPORT) = ffa-core.o +obj-$(CONFIG_ARM_FFA_TRANSPORT) += ffa-module.o diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c index 2f557e90f2eb..50bfe56c755e 100644 --- a/drivers/firmware/arm_ffa/bus.c +++ b/drivers/firmware/arm_ffa/bus.c @@ -15,11 +15,11 @@ #include "common.h" -#define SCMI_UEVENT_MODALIAS_FMT "arm_ffa:%04x:%pUb" +#define FFA_UEVENT_MODALIAS_FMT "arm_ffa:%04x:%pUb" static DEFINE_IDA(ffa_bus_id); -static int ffa_device_match(struct device *dev, struct device_driver *drv) +static int ffa_device_match(struct device *dev, const struct device_driver *drv) { const struct ffa_device_id *id_table; struct ffa_device *ffa_dev; @@ -30,12 +30,11 @@ static int ffa_device_match(struct device *dev, struct device_driver *drv) while (!uuid_is_null(&id_table->uuid)) { /* * FF-A v1.0 doesn't provide discovery of UUIDs, just the - * partition IDs, so fetch the partitions IDs for this - * id_table UUID and assign the UUID to the device if the - * partition ID matches + * partition IDs, so match it unconditionally here and handle + * it via the installed bus notifier during driver binding. 
*/ if (uuid_is_null(&ffa_dev->uuid)) - ffa_device_match_uuid(ffa_dev, &id_table->uuid); + return 1; if (uuid_equal(&ffa_dev->uuid, &id_table->uuid)) return 1; @@ -50,6 +49,10 @@ static int ffa_device_probe(struct device *dev) struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver); struct ffa_device *ffa_dev = to_ffa_dev(dev); + /* UUID can be still NULL with FF-A v1.0, so just skip probing them */ + if (uuid_is_null(&ffa_dev->uuid)) + return -ENODEV; + return ffa_drv->probe(ffa_dev); } @@ -65,7 +68,7 @@ static int ffa_device_uevent(const struct device *dev, struct kobj_uevent_env *e { const struct ffa_device *ffa_dev = to_ffa_dev(dev); - return add_uevent_var(env, "MODALIAS=" SCMI_UEVENT_MODALIAS_FMT, + return add_uevent_var(env, "MODALIAS=" FFA_UEVENT_MODALIAS_FMT, ffa_dev->vm_id, &ffa_dev->uuid); } @@ -74,7 +77,7 @@ static ssize_t modalias_show(struct device *dev, { struct ffa_device *ffa_dev = to_ffa_dev(dev); - return sysfs_emit(buf, SCMI_UEVENT_MODALIAS_FMT, ffa_dev->vm_id, + return sysfs_emit(buf, FFA_UEVENT_MODALIAS_FMT, ffa_dev->vm_id, &ffa_dev->uuid); } static DEVICE_ATTR_RO(modalias); @@ -157,11 +160,12 @@ static int __ffa_devices_unregister(struct device *dev, void *data) return 0; } -static void ffa_devices_unregister(void) +void ffa_devices_unregister(void) { bus_for_each_dev(&ffa_bus_type, NULL, NULL, __ffa_devices_unregister); } +EXPORT_SYMBOL_GPL(ffa_devices_unregister); bool ffa_device_is_valid(struct ffa_device *ffa_dev) { @@ -184,13 +188,17 @@ bool ffa_device_is_valid(struct ffa_device *ffa_dev) return valid; } -struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id, - const struct ffa_ops *ops) +struct ffa_device * +ffa_device_register(const struct ffa_partition_info *part_info, + const struct ffa_ops *ops) { int id, ret; struct device *dev; struct ffa_device *ffa_dev; + if (!part_info) + return NULL; + id = ida_alloc_min(&ffa_bus_id, 1, GFP_KERNEL); if (id < 0) return NULL; @@ -204,12 +212,14 @@ struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id, dev = &ffa_dev->dev; dev->bus = &ffa_bus_type; dev->release = ffa_release_device; + dev->dma_mask = &dev->coherent_dma_mask; dev_set_name(&ffa_dev->dev, "arm-ffa-%d", id); ffa_dev->id = id; - ffa_dev->vm_id = vm_id; + ffa_dev->vm_id = part_info->id; + ffa_dev->properties = part_info->properties; ffa_dev->ops = ops; - uuid_copy(&ffa_dev->uuid, uuid); + uuid_copy(&ffa_dev->uuid, &part_info->uuid); ret = device_register(&ffa_dev->dev); if (ret) { @@ -232,14 +242,21 @@ void ffa_device_unregister(struct ffa_device *ffa_dev) } EXPORT_SYMBOL_GPL(ffa_device_unregister); -int arm_ffa_bus_init(void) +static int __init arm_ffa_bus_init(void) { return bus_register(&ffa_bus_type); } +subsys_initcall(arm_ffa_bus_init); -void arm_ffa_bus_exit(void) +static void __exit arm_ffa_bus_exit(void) { ffa_devices_unregister(); bus_unregister(&ffa_bus_type); ida_destroy(&ffa_bus_id); } +module_exit(arm_ffa_bus_exit); + +MODULE_ALIAS("ffa-core"); +MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>"); +MODULE_DESCRIPTION("ARM FF-A bus"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/arm_ffa/common.h b/drivers/firmware/arm_ffa/common.h index d6eccf1fd3f6..9c6425a81d0d 100644 --- a/drivers/firmware/arm_ffa/common.h +++ b/drivers/firmware/arm_ffa/common.h @@ -14,8 +14,6 @@ typedef struct arm_smccc_1_2_regs ffa_value_t; typedef void (ffa_fn)(ffa_value_t, ffa_value_t *); -int arm_ffa_bus_init(void); -void arm_ffa_bus_exit(void); bool ffa_device_is_valid(struct ffa_device *ffa_dev); void ffa_device_match_uuid(struct 
ffa_device *ffa_dev, const uuid_t *uuid); diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c index 9bc2e10381af..fe55613a8ea9 100644 --- a/drivers/firmware/arm_ffa/driver.c +++ b/drivers/firmware/arm_ffa/driver.c @@ -26,6 +26,7 @@ #include <linux/arm_ffa.h> #include <linux/bitfield.h> #include <linux/cpuhotplug.h> +#include <linux/delay.h> #include <linux/device.h> #include <linux/hashtable.h> #include <linux/interrupt.h> @@ -43,7 +44,7 @@ #include "common.h" -#define FFA_DRIVER_VERSION FFA_VERSION_1_1 +#define FFA_DRIVER_VERSION FFA_VERSION_1_2 #define FFA_MIN_VERSION FFA_VERSION_1_0 #define SENDER_ID_MASK GENMASK(31, 16) @@ -53,11 +54,8 @@ #define PACK_TARGET_INFO(s, r) \ (FIELD_PREP(SENDER_ID_MASK, (s)) | FIELD_PREP(RECEIVER_ID_MASK, (r))) -/* - * Keeping RX TX buffer size as 4K for now - * 64K may be preferred to keep it min a page in 64K PAGE_SIZE config - */ -#define RXTX_BUFFER_SIZE SZ_4K +#define RXTX_MAP_MIN_BUFSZ_MASK GENMASK(1, 0) +#define RXTX_MAP_MIN_BUFSZ(x) ((x) & RXTX_MAP_MIN_BUFSZ_MASK) #define FFA_MAX_NOTIFICATIONS 64 @@ -75,6 +73,7 @@ static const int ffa_linux_errmap[] = { -EAGAIN, /* FFA_RET_RETRY */ -ECANCELED, /* FFA_RET_ABORTED */ -ENODATA, /* FFA_RET_NO_DATA */ + -EAGAIN, /* FFA_RET_NOT_READY */ }; static inline int ffa_to_linux_errno(int errno) @@ -97,22 +96,24 @@ struct ffa_drv_info { struct mutex tx_lock; /* lock to protect Tx buffer */ void *rx_buffer; void *tx_buffer; + size_t rxtx_bufsz; bool mem_ops_native; + bool msg_direct_req2_supp; bool bitmap_created; bool notif_enabled; unsigned int sched_recv_irq; + unsigned int notif_pend_irq; unsigned int cpuhp_state; struct ffa_pcpu_irq __percpu *irq_pcpu; struct workqueue_struct *notif_pcpu_wq; struct work_struct notif_pcpu_work; - struct work_struct irq_work; + struct work_struct sched_recv_irq_work; struct xarray partition_info; DECLARE_HASHTABLE(notifier_hash, ilog2(FFA_MAX_NOTIFICATIONS)); struct mutex notify_lock; /* lock to protect notifier hashtable */ }; static struct ffa_drv_info *drv_info; -static void ffa_partitions_cleanup(void); /* * The driver must be able to support all the versions from the earliest @@ -143,11 +144,19 @@ static int ffa_version_check(u32 *version) .a0 = FFA_VERSION, .a1 = FFA_DRIVER_VERSION, }, &ver); - if (ver.a0 == FFA_RET_NOT_SUPPORTED) { + if ((s32)ver.a0 == FFA_RET_NOT_SUPPORTED) { pr_info("FFA_VERSION returned not supported\n"); return -EOPNOTSUPP; } + if (FFA_MAJOR_VERSION(ver.a0) > FFA_MAJOR_VERSION(FFA_DRIVER_VERSION)) { + pr_err("Incompatible v%d.%d! Latest supported v%d.%d\n", + FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0), + FFA_MAJOR_VERSION(FFA_DRIVER_VERSION), + FFA_MINOR_VERSION(FFA_DRIVER_VERSION)); + return -EINVAL; + } + if (ver.a0 < FFA_MIN_VERSION) { pr_err("Incompatible v%d.%d! 
Earliest supported v%d.%d\n", FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0), @@ -210,6 +219,32 @@ static int ffa_rxtx_unmap(u16 vm_id) return 0; } +static int ffa_features(u32 func_feat_id, u32 input_props, + u32 *if_props_1, u32 *if_props_2) +{ + ffa_value_t id; + + if (!ARM_SMCCC_IS_FAST_CALL(func_feat_id) && input_props) { + pr_err("%s: Invalid Parameters: %x, %x", __func__, + func_feat_id, input_props); + return ffa_to_linux_errno(FFA_RET_INVALID_PARAMETERS); + } + + invoke_ffa_fn((ffa_value_t){ + .a0 = FFA_FEATURES, .a1 = func_feat_id, .a2 = input_props, + }, &id); + + if (id.a0 == FFA_ERROR) + return ffa_to_linux_errno((int)id.a2); + + if (if_props_1) + *if_props_1 = id.a2; + if (if_props_2) + *if_props_2 = id.a3; + + return 0; +} + #define PARTITION_INFO_GET_RETURN_COUNT_ONLY BIT(0) /* buffer must be sizeof(struct ffa_partition_info) * num_partitions */ @@ -248,28 +283,126 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3, } if (buffer && count <= num_partitions) - for (idx = 0; idx < count; idx++) - memcpy(buffer + idx, drv_info->rx_buffer + idx * sz, - buf_sz); + for (idx = 0; idx < count; idx++) { + struct ffa_partition_info_le { + __le16 id; + __le16 exec_ctxt; + __le32 properties; + uuid_t uuid; + } *rx_buf = drv_info->rx_buffer + idx * sz; + struct ffa_partition_info *buf = buffer + idx; + + buf->id = le16_to_cpu(rx_buf->id); + buf->exec_ctxt = le16_to_cpu(rx_buf->exec_ctxt); + buf->properties = le32_to_cpu(rx_buf->properties); + if (buf_sz > 8) + import_uuid(&buf->uuid, (u8 *)&rx_buf->uuid); + } - ffa_rx_release(); + if (!(flags & PARTITION_INFO_GET_RETURN_COUNT_ONLY)) + ffa_rx_release(); mutex_unlock(&drv_info->rx_lock); return count; } +#define LAST_INDEX_MASK GENMASK(15, 0) +#define CURRENT_INDEX_MASK GENMASK(31, 16) +#define UUID_INFO_TAG_MASK GENMASK(47, 32) +#define PARTITION_INFO_SZ_MASK GENMASK(63, 48) +#define PARTITION_COUNT(x) ((u16)(FIELD_GET(LAST_INDEX_MASK, (x))) + 1) +#define CURRENT_INDEX(x) ((u16)(FIELD_GET(CURRENT_INDEX_MASK, (x)))) +#define UUID_INFO_TAG(x) ((u16)(FIELD_GET(UUID_INFO_TAG_MASK, (x)))) +#define PARTITION_INFO_SZ(x) ((u16)(FIELD_GET(PARTITION_INFO_SZ_MASK, (x)))) +#define PART_INFO_ID_MASK GENMASK(15, 0) +#define PART_INFO_EXEC_CXT_MASK GENMASK(31, 16) +#define PART_INFO_PROPS_MASK GENMASK(63, 32) +#define PART_INFO_ID(x) ((u16)(FIELD_GET(PART_INFO_ID_MASK, (x)))) +#define PART_INFO_EXEC_CXT(x) ((u16)(FIELD_GET(PART_INFO_EXEC_CXT_MASK, (x)))) +#define PART_INFO_PROPERTIES(x) ((u32)(FIELD_GET(PART_INFO_PROPS_MASK, (x)))) +static int +__ffa_partition_info_get_regs(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3, + struct ffa_partition_info *buffer, int num_parts) +{ + u16 buf_sz, start_idx, cur_idx, count = 0, prev_idx = 0, tag = 0; + struct ffa_partition_info *buf = buffer; + ffa_value_t partition_info; + + do { + __le64 *regs; + int idx; + + start_idx = prev_idx ? 
prev_idx + 1 : 0; + + invoke_ffa_fn((ffa_value_t){ + .a0 = FFA_PARTITION_INFO_GET_REGS, + .a1 = (u64)uuid1 << 32 | uuid0, + .a2 = (u64)uuid3 << 32 | uuid2, + .a3 = start_idx | tag << 16, + }, &partition_info); + + if (partition_info.a0 == FFA_ERROR) + return ffa_to_linux_errno((int)partition_info.a2); + + if (!count) + count = PARTITION_COUNT(partition_info.a2); + if (!buffer || !num_parts) /* count only */ + return count; + + cur_idx = CURRENT_INDEX(partition_info.a2); + tag = UUID_INFO_TAG(partition_info.a2); + buf_sz = PARTITION_INFO_SZ(partition_info.a2); + if (buf_sz > sizeof(*buffer)) + buf_sz = sizeof(*buffer); + + regs = (void *)&partition_info.a3; + for (idx = 0; idx < cur_idx - start_idx + 1; idx++, buf++) { + union { + uuid_t uuid; + u64 regs[2]; + } uuid_regs = { + .regs = { + le64_to_cpu(*(regs + 1)), + le64_to_cpu(*(regs + 2)), + } + }; + u64 val = *(u64 *)regs; + + buf->id = PART_INFO_ID(val); + buf->exec_ctxt = PART_INFO_EXEC_CXT(val); + buf->properties = PART_INFO_PROPERTIES(val); + uuid_copy(&buf->uuid, &uuid_regs.uuid); + regs += 3; + } + prev_idx = cur_idx; + + } while (cur_idx < (count - 1)); + + return count; +} + /* buffer is allocated and caller must free the same if returned count > 0 */ static int ffa_partition_probe(const uuid_t *uuid, struct ffa_partition_info **buffer) { int count; u32 uuid0_4[4]; + bool reg_mode = false; struct ffa_partition_info *pbuf; + if (!ffa_features(FFA_PARTITION_INFO_GET_REGS, 0, NULL, NULL)) + reg_mode = true; + export_uuid((u8 *)uuid0_4, uuid); - count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1], uuid0_4[2], - uuid0_4[3], NULL, 0); + if (reg_mode) + count = __ffa_partition_info_get_regs(uuid0_4[0], uuid0_4[1], + uuid0_4[2], uuid0_4[3], + NULL, 0); + else + count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1], + uuid0_4[2], uuid0_4[3], + NULL, 0); if (count <= 0) return count; @@ -277,8 +410,14 @@ ffa_partition_probe(const uuid_t *uuid, struct ffa_partition_info **buffer) if (!pbuf) return -ENOMEM; - count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1], uuid0_4[2], - uuid0_4[3], pbuf, count); + if (reg_mode) + count = __ffa_partition_info_get_regs(uuid0_4[0], uuid0_4[1], + uuid0_4[2], uuid0_4[3], + pbuf, count); + else + count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1], + uuid0_4[2], uuid0_4[3], + pbuf, count); if (count <= 0) kfree(pbuf); else @@ -304,6 +443,18 @@ static int ffa_id_get(u16 *vm_id) return 0; } +static inline void ffa_msg_send_wait_for_completion(ffa_value_t *ret) +{ + while (ret->a0 == FFA_INTERRUPT || ret->a0 == FFA_YIELD) { + if (ret->a0 == FFA_YIELD) + fsleep(1000); + + invoke_ffa_fn((ffa_value_t){ + .a0 = FFA_RUN, .a1 = ret->a1, + }, ret); + } +} + static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit, struct ffa_send_direct_data *data) { @@ -324,10 +475,7 @@ static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit, .a6 = data->data3, .a7 = data->data4, }, &ret); - while (ret.a0 == FFA_INTERRUPT) - invoke_ffa_fn((ffa_value_t){ - .a0 = FFA_RUN, .a1 = ret.a1, - }, &ret); + ffa_msg_send_wait_for_completion(&ret); if (ret.a0 == FFA_ERROR) return ffa_to_linux_errno((int)ret.a2); @@ -344,6 +492,70 @@ static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit, return -EINVAL; } +static int ffa_msg_send2(struct ffa_device *dev, u16 src_id, void *buf, size_t sz) +{ + u32 src_dst_ids = PACK_TARGET_INFO(src_id, dev->vm_id); + struct ffa_indirect_msg_hdr *msg; + ffa_value_t ret; + int retval = 0; + + if (sz > (drv_info->rxtx_bufsz - sizeof(*msg))) + 
return -ERANGE; + + mutex_lock(&drv_info->tx_lock); + + msg = drv_info->tx_buffer; + msg->flags = 0; + msg->res0 = 0; + msg->offset = sizeof(*msg); + msg->send_recv_id = src_dst_ids; + msg->size = sz; + uuid_copy(&msg->uuid, &dev->uuid); + memcpy((u8 *)msg + msg->offset, buf, sz); + + /* flags = 0, sender VMID = 0 works for both physical/virtual NS */ + invoke_ffa_fn((ffa_value_t){ + .a0 = FFA_MSG_SEND2, .a1 = 0, .a2 = 0 + }, &ret); + + if (ret.a0 == FFA_ERROR) + retval = ffa_to_linux_errno((int)ret.a2); + + mutex_unlock(&drv_info->tx_lock); + return retval; +} + +static int ffa_msg_send_direct_req2(u16 src_id, u16 dst_id, const uuid_t *uuid, + struct ffa_send_direct_data2 *data) +{ + u32 src_dst_ids = PACK_TARGET_INFO(src_id, dst_id); + union { + uuid_t uuid; + __le64 regs[2]; + } uuid_regs = { .uuid = *uuid }; + ffa_value_t ret, args = { + .a0 = FFA_MSG_SEND_DIRECT_REQ2, + .a1 = src_dst_ids, + .a2 = le64_to_cpu(uuid_regs.regs[0]), + .a3 = le64_to_cpu(uuid_regs.regs[1]), + }; + memcpy((void *)&args + offsetof(ffa_value_t, a4), data, sizeof(*data)); + + invoke_ffa_fn(args, &ret); + + ffa_msg_send_wait_for_completion(&ret); + + if (ret.a0 == FFA_ERROR) + return ffa_to_linux_errno((int)ret.a2); + + if (ret.a0 == FFA_MSG_SEND_DIRECT_RESP2) { + memcpy(data, (void *)&ret + offsetof(ffa_value_t, a4), sizeof(*data)); + return 0; + } + + return -EINVAL; +} + static int ffa_mem_first_frag(u32 func_id, phys_addr_t buf, u32 buf_sz, u32 frag_len, u32 len, u64 *handle) { @@ -528,9 +740,10 @@ static int ffa_memory_ops(u32 func_id, struct ffa_mem_ops_args *args) { int ret; void *buffer; + size_t rxtx_bufsz = drv_info->rxtx_bufsz; if (!args->use_txbuf) { - buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL); + buffer = alloc_pages_exact(rxtx_bufsz, GFP_KERNEL); if (!buffer) return -ENOMEM; } else { @@ -538,12 +751,12 @@ static int ffa_memory_ops(u32 func_id, struct ffa_mem_ops_args *args) mutex_lock(&drv_info->tx_lock); } - ret = ffa_setup_and_transmit(func_id, buffer, RXTX_BUFFER_SIZE, args); + ret = ffa_setup_and_transmit(func_id, buffer, rxtx_bufsz, args); if (args->use_txbuf) mutex_unlock(&drv_info->tx_lock); else - free_pages_exact(buffer, RXTX_BUFFER_SIZE); + free_pages_exact(buffer, rxtx_bufsz); return ret < 0 ? 
ret : 0; } @@ -564,32 +777,6 @@ static int ffa_memory_reclaim(u64 g_handle, u32 flags) return 0; } -static int ffa_features(u32 func_feat_id, u32 input_props, - u32 *if_props_1, u32 *if_props_2) -{ - ffa_value_t id; - - if (!ARM_SMCCC_IS_FAST_CALL(func_feat_id) && input_props) { - pr_err("%s: Invalid Parameters: %x, %x", __func__, - func_feat_id, input_props); - return ffa_to_linux_errno(FFA_RET_INVALID_PARAMETERS); - } - - invoke_ffa_fn((ffa_value_t){ - .a0 = FFA_FEATURES, .a1 = func_feat_id, .a2 = input_props, - }, &id); - - if (id.a0 == FFA_ERROR) - return ffa_to_linux_errno((int)id.a2); - - if (if_props_1) - *if_props_1 = id.a2; - if (if_props_2) - *if_props_2 = id.a3; - - return 0; -} - static int ffa_notification_bitmap_create(void) { ffa_value_t ret; @@ -621,6 +808,13 @@ static int ffa_notification_bitmap_destroy(void) return 0; } +enum notify_type { + SECURE_PARTITION, + NON_SECURE_VM, + SPM_FRAMEWORK, + NS_HYP_FRAMEWORK, +}; + #define NOTIFICATION_LOW_MASK GENMASK(31, 0) #define NOTIFICATION_HIGH_MASK GENMASK(63, 32) #define NOTIFICATION_BITMAP_HIGH(x) \ @@ -644,10 +838,22 @@ static int ffa_notification_bitmap_destroy(void) #define MAX_IDS_32 10 #define PER_VCPU_NOTIFICATION_FLAG BIT(0) -#define SECURE_PARTITION_BITMAP BIT(0) -#define NON_SECURE_VM_BITMAP BIT(1) -#define SPM_FRAMEWORK_BITMAP BIT(2) -#define NS_HYP_FRAMEWORK_BITMAP BIT(3) +#define SECURE_PARTITION_BITMAP_ENABLE BIT(SECURE_PARTITION) +#define NON_SECURE_VM_BITMAP_ENABLE BIT(NON_SECURE_VM) +#define SPM_FRAMEWORK_BITMAP_ENABLE BIT(SPM_FRAMEWORK) +#define NS_HYP_FRAMEWORK_BITMAP_ENABLE BIT(NS_HYP_FRAMEWORK) +#define FFA_BITMAP_SECURE_ENABLE_MASK \ + (SECURE_PARTITION_BITMAP_ENABLE | SPM_FRAMEWORK_BITMAP_ENABLE) +#define FFA_BITMAP_NS_ENABLE_MASK \ + (NON_SECURE_VM_BITMAP_ENABLE | NS_HYP_FRAMEWORK_BITMAP_ENABLE) +#define FFA_BITMAP_ALL_ENABLE_MASK \ + (FFA_BITMAP_SECURE_ENABLE_MASK | FFA_BITMAP_NS_ENABLE_MASK) + +#define FFA_SECURE_PARTITION_ID_FLAG BIT(15) + +#define SPM_FRAMEWORK_BITMAP(x) NOTIFICATION_BITMAP_LOW(x) +#define NS_HYP_FRAMEWORK_BITMAP(x) NOTIFICATION_BITMAP_HIGH(x) +#define FRAMEWORK_NOTIFY_RX_BUFFER_FULL BIT(0) static int ffa_notification_bind_common(u16 dst_id, u64 bitmap, u32 flags, bool is_bind) @@ -713,9 +919,15 @@ static int ffa_notification_get(u32 flags, struct ffa_notify_bitmaps *notify) else if (ret.a0 != FFA_SUCCESS) return -EINVAL; /* Something else went wrong. 
*/ - notify->sp_map = PACK_NOTIFICATION_BITMAP(ret.a2, ret.a3); - notify->vm_map = PACK_NOTIFICATION_BITMAP(ret.a4, ret.a5); - notify->arch_map = PACK_NOTIFICATION_BITMAP(ret.a6, ret.a7); + if (flags & SECURE_PARTITION_BITMAP_ENABLE) + notify->sp_map = PACK_NOTIFICATION_BITMAP(ret.a2, ret.a3); + if (flags & NON_SECURE_VM_BITMAP_ENABLE) + notify->vm_map = PACK_NOTIFICATION_BITMAP(ret.a4, ret.a5); + if (flags & SPM_FRAMEWORK_BITMAP_ENABLE) + notify->arch_map = SPM_FRAMEWORK_BITMAP(ret.a6); + if (flags & NS_HYP_FRAMEWORK_BITMAP_ENABLE) + notify->arch_map = PACK_NOTIFICATION_BITMAP(notify->arch_map, + ret.a7); return 0; } @@ -724,27 +936,32 @@ struct ffa_dev_part_info { ffa_sched_recv_cb callback; void *cb_data; rwlock_t rw_lock; + struct ffa_device *dev; + struct list_head node; }; static void __do_sched_recv_cb(u16 part_id, u16 vcpu, bool is_per_vcpu) { - struct ffa_dev_part_info *partition; + struct ffa_dev_part_info *partition = NULL, *tmp; ffa_sched_recv_cb callback; + struct list_head *phead; void *cb_data; - partition = xa_load(&drv_info->partition_info, part_id); - if (!partition) { + phead = xa_load(&drv_info->partition_info, part_id); + if (!phead) { pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id); return; } - read_lock(&partition->rw_lock); - callback = partition->callback; - cb_data = partition->cb_data; - read_unlock(&partition->rw_lock); + list_for_each_entry_safe(partition, tmp, phead, node) { + read_lock(&partition->rw_lock); + callback = partition->callback; + cb_data = partition->cb_data; + read_unlock(&partition->rw_lock); - if (callback) - callback(vcpu, is_per_vcpu, cb_data); + if (callback) + callback(vcpu, is_per_vcpu, cb_data); + } } static void ffa_notification_info_get(void) @@ -760,7 +977,7 @@ static void ffa_notification_info_get(void) }, &ret); if (ret.a0 != FFA_FN_NATIVE(SUCCESS) && ret.a0 != FFA_SUCCESS) { - if (ret.a2 != FFA_RET_NO_DATA) + if ((s32)ret.a2 != FFA_RET_NO_DATA) pr_err("Notification Info fetch failed: 0x%lx (0x%lx)", ret.a0, ret.a2); return; @@ -796,7 +1013,7 @@ static void ffa_notification_info_get(void) } /* Per vCPU Notification */ - for (idx = 0; idx < ids_count[list]; idx++) { + for (idx = 1; idx < ids_count[list]; idx++) { if (ids_processed >= max_ids - 1) break; @@ -825,11 +1042,15 @@ static int ffa_run(struct ffa_device *dev, u16 vcpu) return 0; } -static void ffa_set_up_mem_ops_native_flag(void) +static void ffa_drvinfo_flags_init(void) { if (!ffa_features(FFA_FN_NATIVE(MEM_LEND), 0, NULL, NULL) || !ffa_features(FFA_FN_NATIVE(MEM_SHARE), 0, NULL, NULL)) drv_info->mem_ops_native = true; + + if (!ffa_features(FFA_MSG_SEND_DIRECT_REQ2, 0, NULL, NULL) || + !ffa_features(FFA_MSG_SEND_DIRECT_RESP2, 0, NULL, NULL)) + drv_info->msg_direct_req2_supp = true; } static u32 ffa_api_version_get(void) @@ -870,6 +1091,21 @@ static int ffa_sync_send_receive(struct ffa_device *dev, dev->mode_32bit, data); } +static int ffa_indirect_msg_send(struct ffa_device *dev, void *buf, size_t sz) +{ + return ffa_msg_send2(dev, drv_info->vm_id, buf, sz); +} + +static int ffa_sync_send_receive2(struct ffa_device *dev, + struct ffa_send_direct_data2 *data) +{ + if (!drv_info->msg_direct_req2_supp) + return -EOPNOTSUPP; + + return ffa_msg_send_direct_req2(drv_info->vm_id, dev->vm_id, + &dev->uuid, data); +} + static int ffa_memory_share(struct ffa_mem_ops_args *args) { if (drv_info->mem_ops_native) @@ -893,35 +1129,39 @@ static int ffa_memory_lend(struct ffa_mem_ops_args *args) return ffa_memory_ops(FFA_MEM_LEND, args); } -#define FFA_SECURE_PARTITION_ID_FLAG 
BIT(15) - #define ffa_notifications_disabled() (!drv_info->notif_enabled) -enum notify_type { - NON_SECURE_VM, - SECURE_PARTITION, - FRAMEWORK, -}; - struct notifier_cb_info { struct hlist_node hnode; + struct ffa_device *dev; + ffa_fwk_notifier_cb fwk_cb; ffa_notifier_cb cb; void *cb_data; - enum notify_type type; }; -static int ffa_sched_recv_cb_update(u16 part_id, ffa_sched_recv_cb callback, - void *cb_data, bool is_registration) +static int +ffa_sched_recv_cb_update(struct ffa_device *dev, ffa_sched_recv_cb callback, + void *cb_data, bool is_registration) { - struct ffa_dev_part_info *partition; + struct ffa_dev_part_info *partition = NULL, *tmp; + struct list_head *phead; bool cb_valid; if (ffa_notifications_disabled()) return -EOPNOTSUPP; - partition = xa_load(&drv_info->partition_info, part_id); + phead = xa_load(&drv_info->partition_info, dev->vm_id); + if (!phead) { + pr_err("%s: Invalid partition ID 0x%x\n", __func__, dev->vm_id); + return -EINVAL; + } + + list_for_each_entry_safe(partition, tmp, phead, node) + if (partition->dev == dev) + break; + if (!partition) { - pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id); + pr_err("%s: No such partition ID 0x%x\n", __func__, dev->vm_id); return -EINVAL; } @@ -943,12 +1183,12 @@ static int ffa_sched_recv_cb_update(u16 part_id, ffa_sched_recv_cb callback, static int ffa_sched_recv_cb_register(struct ffa_device *dev, ffa_sched_recv_cb cb, void *cb_data) { - return ffa_sched_recv_cb_update(dev->vm_id, cb, cb_data, true); + return ffa_sched_recv_cb_update(dev, cb, cb_data, true); } static int ffa_sched_recv_cb_unregister(struct ffa_device *dev) { - return ffa_sched_recv_cb_update(dev->vm_id, NULL, NULL, false); + return ffa_sched_recv_cb_update(dev, NULL, NULL, false); } static int ffa_notification_bind(u16 dst_id, u64 bitmap, u32 flags) @@ -961,27 +1201,69 @@ static int ffa_notification_unbind(u16 dst_id, u64 bitmap) return ffa_notification_bind_common(dst_id, bitmap, 0, false); } -/* Should be called while the notify_lock is taken */ +static enum notify_type ffa_notify_type_get(u16 vm_id) +{ + if (vm_id & FFA_SECURE_PARTITION_ID_FLAG) + return SECURE_PARTITION; + else + return NON_SECURE_VM; +} + +/* notifier_hnode_get* should be called with notify_lock held */ static struct notifier_cb_info * -notifier_hash_node_get(u16 notify_id, enum notify_type type) +notifier_hnode_get_by_vmid(u16 notify_id, int vmid) { struct notifier_cb_info *node; hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id) - if (type == node->type) + if (node->fwk_cb && vmid == node->dev->vm_id) + return node; + + return NULL; +} + +static struct notifier_cb_info * +notifier_hnode_get_by_vmid_uuid(u16 notify_id, int vmid, const uuid_t *uuid) +{ + struct notifier_cb_info *node; + + if (uuid_is_null(uuid)) + return notifier_hnode_get_by_vmid(notify_id, vmid); + + hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id) + if (node->fwk_cb && vmid == node->dev->vm_id && + uuid_equal(&node->dev->uuid, uuid)) + return node; + + return NULL; +} + +static struct notifier_cb_info * +notifier_hnode_get_by_type(u16 notify_id, enum notify_type type) +{ + struct notifier_cb_info *node; + + hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id) + if (node->cb && type == ffa_notify_type_get(node->dev->vm_id)) return node; return NULL; } static int -update_notifier_cb(int notify_id, enum notify_type type, ffa_notifier_cb cb, - void *cb_data, bool is_registration) +update_notifier_cb(struct ffa_device *dev, int notify_id, 
void *cb, + void *cb_data, bool is_registration, bool is_framework) { struct notifier_cb_info *cb_info = NULL; + enum notify_type type = ffa_notify_type_get(dev->vm_id); bool cb_found; - cb_info = notifier_hash_node_get(notify_id, type); + if (is_framework) + cb_info = notifier_hnode_get_by_vmid_uuid(notify_id, dev->vm_id, + &dev->uuid); + else + cb_info = notifier_hnode_get_by_type(notify_id, type); + cb_found = !!cb_info; if (!(is_registration ^ cb_found)) @@ -992,9 +1274,12 @@ update_notifier_cb(int notify_id, enum notify_type type, ffa_notifier_cb cb, if (!cb_info) return -ENOMEM; - cb_info->type = type; - cb_info->cb = cb; + cb_info->dev = dev; cb_info->cb_data = cb_data; + if (is_framework) + cb_info->fwk_cb = cb; + else + cb_info->cb = cb; hash_add(drv_info->notifier_hash, &cb_info->hnode, notify_id); } else { @@ -1004,18 +1289,10 @@ update_notifier_cb(int notify_id, enum notify_type type, ffa_notifier_cb cb, return 0; } -static enum notify_type ffa_notify_type_get(u16 vm_id) -{ - if (vm_id & FFA_SECURE_PARTITION_ID_FLAG) - return SECURE_PARTITION; - else - return NON_SECURE_VM; -} - -static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id) +static int __ffa_notify_relinquish(struct ffa_device *dev, int notify_id, + bool is_framework) { int rc; - enum notify_type type = ffa_notify_type_get(dev->vm_id); if (ffa_notifications_disabled()) return -EOPNOTSUPP; @@ -1025,26 +1302,38 @@ static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id) mutex_lock(&drv_info->notify_lock); - rc = update_notifier_cb(notify_id, type, NULL, NULL, false); + rc = update_notifier_cb(dev, notify_id, NULL, NULL, false, + is_framework); if (rc) { pr_err("Could not unregister notification callback\n"); mutex_unlock(&drv_info->notify_lock); return rc; } - rc = ffa_notification_unbind(dev->vm_id, BIT(notify_id)); + if (!is_framework) + rc = ffa_notification_unbind(dev->vm_id, BIT(notify_id)); mutex_unlock(&drv_info->notify_lock); return rc; } -static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu, - ffa_notifier_cb cb, void *cb_data, int notify_id) +static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id) +{ + return __ffa_notify_relinquish(dev, notify_id, false); +} + +static int ffa_fwk_notify_relinquish(struct ffa_device *dev, int notify_id) +{ + return __ffa_notify_relinquish(dev, notify_id, true); +} + +static int __ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu, + void *cb, void *cb_data, + int notify_id, bool is_framework) { int rc; u32 flags = 0; - enum notify_type type = ffa_notify_type_get(dev->vm_id); if (ffa_notifications_disabled()) return -EOPNOTSUPP; @@ -1054,26 +1343,44 @@ static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu, mutex_lock(&drv_info->notify_lock); - if (is_per_vcpu) - flags = PER_VCPU_NOTIFICATION_FLAG; + if (!is_framework) { + if (is_per_vcpu) + flags = PER_VCPU_NOTIFICATION_FLAG; - rc = ffa_notification_bind(dev->vm_id, BIT(notify_id), flags); - if (rc) { - mutex_unlock(&drv_info->notify_lock); - return rc; + rc = ffa_notification_bind(dev->vm_id, BIT(notify_id), flags); + if (rc) { + mutex_unlock(&drv_info->notify_lock); + return rc; + } } - rc = update_notifier_cb(notify_id, type, cb, cb_data, true); + rc = update_notifier_cb(dev, notify_id, cb, cb_data, true, + is_framework); if (rc) { pr_err("Failed to register callback for %d - %d\n", notify_id, rc); - ffa_notification_unbind(dev->vm_id, BIT(notify_id)); + if (!is_framework) + ffa_notification_unbind(dev->vm_id, BIT(notify_id)); } 
mutex_unlock(&drv_info->notify_lock); return rc; } +static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu, + ffa_notifier_cb cb, void *cb_data, int notify_id) +{ + return __ffa_notify_request(dev, is_per_vcpu, cb, cb_data, notify_id, + false); +} + +static int +ffa_fwk_notify_request(struct ffa_device *dev, ffa_fwk_notifier_cb cb, + void *cb_data, int notify_id) +{ + return __ffa_notify_request(dev, false, cb, cb_data, notify_id, true); +} + static int ffa_notify_send(struct ffa_device *dev, int notify_id, bool is_per_vcpu, u16 vcpu) { @@ -1100,7 +1407,7 @@ static void handle_notif_callbacks(u64 bitmap, enum notify_type type) continue; mutex_lock(&drv_info->notify_lock); - cb_info = notifier_hash_node_get(notify_id, type); + cb_info = notifier_hnode_get_by_type(notify_id, type); mutex_unlock(&drv_info->notify_lock); if (cb_info && cb_info->cb) @@ -1108,21 +1415,68 @@ static void handle_notif_callbacks(u64 bitmap, enum notify_type type) } } -static void notif_pcpu_irq_work_fn(struct work_struct *work) +static void handle_fwk_notif_callbacks(u32 bitmap) +{ + void *buf; + uuid_t uuid; + int notify_id = 0, target; + struct ffa_indirect_msg_hdr *msg; + struct notifier_cb_info *cb_info = NULL; + + /* Only one framework notification defined and supported for now */ + if (!(bitmap & FRAMEWORK_NOTIFY_RX_BUFFER_FULL)) + return; + + mutex_lock(&drv_info->rx_lock); + + msg = drv_info->rx_buffer; + buf = kmemdup((void *)msg + msg->offset, msg->size, GFP_KERNEL); + if (!buf) { + mutex_unlock(&drv_info->rx_lock); + return; + } + + target = SENDER_ID(msg->send_recv_id); + if (msg->offset >= sizeof(*msg)) + uuid_copy(&uuid, &msg->uuid); + else + uuid_copy(&uuid, &uuid_null); + + mutex_unlock(&drv_info->rx_lock); + + ffa_rx_release(); + + mutex_lock(&drv_info->notify_lock); + cb_info = notifier_hnode_get_by_vmid_uuid(notify_id, target, &uuid); + mutex_unlock(&drv_info->notify_lock); + + if (cb_info && cb_info->fwk_cb) + cb_info->fwk_cb(notify_id, cb_info->cb_data, buf); + kfree(buf); +} + +static void notif_get_and_handle(void *cb_data) { int rc; - struct ffa_notify_bitmaps bitmaps; + u32 flags; + struct ffa_drv_info *info = cb_data; + struct ffa_notify_bitmaps bitmaps = { 0 }; + + if (info->vm_id == 0) /* Non secure physical instance */ + flags = FFA_BITMAP_SECURE_ENABLE_MASK; + else + flags = FFA_BITMAP_ALL_ENABLE_MASK; - rc = ffa_notification_get(SECURE_PARTITION_BITMAP | - SPM_FRAMEWORK_BITMAP, &bitmaps); + rc = ffa_notification_get(flags, &bitmaps); if (rc) { pr_err("Failed to retrieve notifications with %d!\n", rc); return; } + handle_fwk_notif_callbacks(SPM_FRAMEWORK_BITMAP(bitmaps.arch_map)); + handle_fwk_notif_callbacks(NS_HYP_FRAMEWORK_BITMAP(bitmaps.arch_map)); handle_notif_callbacks(bitmaps.vm_map, NON_SECURE_VM); handle_notif_callbacks(bitmaps.sp_map, SECURE_PARTITION); - handle_notif_callbacks(bitmaps.arch_map, FRAMEWORK); } static void @@ -1131,10 +1485,17 @@ ffa_self_notif_handle(u16 vcpu, bool is_per_vcpu, void *cb_data) struct ffa_drv_info *info = cb_data; if (!is_per_vcpu) - notif_pcpu_irq_work_fn(&info->notif_pcpu_work); + notif_get_and_handle(info); else - queue_work_on(vcpu, info->notif_pcpu_wq, - &info->notif_pcpu_work); + smp_call_function_single(vcpu, notif_get_and_handle, info, 0); +} + +static void notif_pcpu_irq_work_fn(struct work_struct *work) +{ + struct ffa_drv_info *info = container_of(work, struct ffa_drv_info, + notif_pcpu_work); + + ffa_self_notif_handle(smp_processor_id(), true, info); } static const struct ffa_info_ops ffa_drv_info_ops = { @@ -1145,6 
+1506,8 @@ static const struct ffa_info_ops ffa_drv_info_ops = { static const struct ffa_msg_ops ffa_drv_msg_ops = { .mode_32bit_set = ffa_mode_32bit_set, .sync_send_receive = ffa_sync_send_receive, + .indirect_send = ffa_indirect_msg_send, + .sync_send_receive2 = ffa_sync_send_receive2, }; static const struct ffa_mem_ops ffa_drv_mem_ops = { @@ -1162,6 +1525,8 @@ static const struct ffa_notifier_ops ffa_drv_notifier_ops = { .sched_recv_cb_unregister = ffa_sched_recv_cb_unregister, .notify_request = ffa_notify_request, .notify_relinquish = ffa_notify_relinquish, + .fwk_notify_request = ffa_fwk_notify_request, + .fwk_notify_relinquish = ffa_fwk_notify_relinquish, .notify_send = ffa_notify_send, }; @@ -1178,14 +1543,6 @@ void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid) int count, idx; struct ffa_partition_info *pbuf, *tpbuf; - /* - * FF-A v1.1 provides UUID for each partition as part of the discovery - * API, the discovered UUID must be populated in the device's UUID and - * there is no need to copy the same from the driver table. - */ - if (drv_info->version > FFA_VERSION_1_0) - return; - count = ffa_partition_probe(uuid, &pbuf); if (count <= 0) return; @@ -1196,14 +1553,147 @@ void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid) kfree(pbuf); } +static int +ffa_bus_notifier(struct notifier_block *nb, unsigned long action, void *data) +{ + struct device *dev = data; + struct ffa_device *fdev = to_ffa_dev(dev); + + if (action == BUS_NOTIFY_BIND_DRIVER) { + struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver); + const struct ffa_device_id *id_table = ffa_drv->id_table; + + /* + * FF-A v1.1 provides UUID for each partition as part of the + * discovery API, the discovered UUID must be populated in the + * device's UUID and there is no need to workaround by copying + * the same from the driver table. 
+ */ + if (uuid_is_null(&fdev->uuid)) + ffa_device_match_uuid(fdev, &id_table->uuid); + + return NOTIFY_OK; + } + + return NOTIFY_DONE; +} + +static struct notifier_block ffa_bus_nb = { + .notifier_call = ffa_bus_notifier, +}; + +static int ffa_xa_add_partition_info(struct ffa_device *dev) +{ + struct ffa_dev_part_info *info; + struct list_head *head, *phead; + int ret = -ENOMEM; + + phead = xa_load(&drv_info->partition_info, dev->vm_id); + if (phead) { + head = phead; + list_for_each_entry(info, head, node) { + if (info->dev == dev) { + pr_err("%s: duplicate dev %p part ID 0x%x\n", + __func__, dev, dev->vm_id); + return -EEXIST; + } + } + } + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return ret; + + rwlock_init(&info->rw_lock); + info->dev = dev; + + if (!phead) { + phead = kzalloc(sizeof(*phead), GFP_KERNEL); + if (!phead) + goto free_out; + + INIT_LIST_HEAD(phead); + + ret = xa_insert(&drv_info->partition_info, dev->vm_id, phead, + GFP_KERNEL); + if (ret) { + pr_err("%s: failed to save part ID 0x%x Ret:%d\n", + __func__, dev->vm_id, ret); + goto free_out; + } + } + list_add(&info->node, phead); + return 0; + +free_out: + kfree(phead); + kfree(info); + return ret; +} + +static int ffa_setup_host_partition(int vm_id) +{ + struct ffa_partition_info buf = { 0 }; + struct ffa_device *ffa_dev; + int ret; + + buf.id = vm_id; + ffa_dev = ffa_device_register(&buf, &ffa_drv_ops); + if (!ffa_dev) { + pr_err("%s: failed to register host partition ID 0x%x\n", + __func__, vm_id); + return -EINVAL; + } + + ret = ffa_xa_add_partition_info(ffa_dev); + if (ret) + return ret; + + if (ffa_notifications_disabled()) + return 0; + + ret = ffa_sched_recv_cb_update(ffa_dev, ffa_self_notif_handle, + drv_info, true); + if (ret) + pr_info("Failed to register driver sched callback %d\n", ret); + + return ret; +} + +static void ffa_partitions_cleanup(void) +{ + struct list_head *phead; + unsigned long idx; + + /* Clean up/free all registered devices */ + ffa_devices_unregister(); + + xa_for_each(&drv_info->partition_info, idx, phead) { + struct ffa_dev_part_info *info, *tmp; + + xa_erase(&drv_info->partition_info, idx); + list_for_each_entry_safe(info, tmp, phead, node) { + list_del(&info->node); + kfree(info); + } + kfree(phead); + } + + xa_destroy(&drv_info->partition_info); +} + static int ffa_setup_partitions(void) { int count, idx, ret; - uuid_t uuid; struct ffa_device *ffa_dev; - struct ffa_dev_part_info *info; struct ffa_partition_info *pbuf, *tpbuf; + if (drv_info->version == FFA_VERSION_1_0) { + ret = bus_register_notifier(&ffa_bus_type, &ffa_bus_nb); + if (ret) + pr_err("Failed to register FF-A bus notifiers\n"); + } + count = ffa_partition_probe(&uuid_null, &pbuf); if (count <= 0) { pr_info("%s: No partitions found, error %d\n", __func__, count); @@ -1212,15 +1702,13 @@ static int ffa_setup_partitions(void) xa_init(&drv_info->partition_info); for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++) { - import_uuid(&uuid, (u8 *)tpbuf->uuid); - /* Note that if the UUID will be uuid_null, that will require - * ffa_device_match() to find the UUID of this partition id + * ffa_bus_notifier() to find the UUID of this partition id * with help of ffa_device_match_uuid(). FF-A v1.1 and above * provides UUID here for each partition as part of the * discovery API and the same is passed. 
*/ - ffa_dev = ffa_device_register(&uuid, tpbuf->id, &ffa_drv_ops); + ffa_dev = ffa_device_register(tpbuf, &ffa_drv_ops); if (!ffa_dev) { pr_err("%s: failed to register partition ID 0x%x\n", __func__, tpbuf->id); @@ -1231,72 +1719,52 @@ static int ffa_setup_partitions(void) !(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC)) ffa_mode_32bit_set(ffa_dev); - info = kzalloc(sizeof(*info), GFP_KERNEL); - if (!info) { + if (ffa_xa_add_partition_info(ffa_dev)) { ffa_device_unregister(ffa_dev); continue; } - rwlock_init(&info->rw_lock); - ret = xa_insert(&drv_info->partition_info, tpbuf->id, - info, GFP_KERNEL); - if (ret) { - pr_err("%s: failed to save partition ID 0x%x - ret:%d\n", - __func__, tpbuf->id, ret); - ffa_device_unregister(ffa_dev); - kfree(info); - } } kfree(pbuf); - /* Allocate for the host */ - info = kzalloc(sizeof(*info), GFP_KERNEL); - if (!info) { - pr_err("%s: failed to alloc Host partition ID 0x%x. Abort.\n", - __func__, drv_info->vm_id); - /* Already registered devices are freed on bus_exit */ - ffa_partitions_cleanup(); - return -ENOMEM; - } + /* + * Check if the host is already added as part of partition info + * No multiple UUID possible for the host, so just checking if + * there is an entry will suffice + */ + if (xa_load(&drv_info->partition_info, drv_info->vm_id)) + return 0; - rwlock_init(&info->rw_lock); - ret = xa_insert(&drv_info->partition_info, drv_info->vm_id, - info, GFP_KERNEL); - if (ret) { - pr_err("%s: failed to save Host partition ID 0x%x - ret:%d. Abort.\n", - __func__, drv_info->vm_id, ret); - kfree(info); - /* Already registered devices are freed on bus_exit */ + /* Allocate for the host */ + ret = ffa_setup_host_partition(drv_info->vm_id); + if (ret) ffa_partitions_cleanup(); - } return ret; } -static void ffa_partitions_cleanup(void) -{ - struct ffa_dev_part_info *info; - unsigned long idx; - - xa_for_each(&drv_info->partition_info, idx, info) { - xa_erase(&drv_info->partition_info, idx); - kfree(info); - } - - xa_destroy(&drv_info->partition_info); -} - /* FFA FEATURE IDs */ #define FFA_FEAT_NOTIFICATION_PENDING_INT (1) #define FFA_FEAT_SCHEDULE_RECEIVER_INT (2) #define FFA_FEAT_MANAGED_EXIT_INT (3) -static irqreturn_t irq_handler(int irq, void *irq_data) +static irqreturn_t ffa_sched_recv_irq_handler(int irq, void *irq_data) { struct ffa_pcpu_irq *pcpu = irq_data; struct ffa_drv_info *info = pcpu->info; - queue_work(info->notif_pcpu_wq, &info->irq_work); + queue_work(info->notif_pcpu_wq, &info->sched_recv_irq_work); + + return IRQ_HANDLED; +} + +static irqreturn_t notif_pend_irq_handler(int irq, void *irq_data) +{ + struct ffa_pcpu_irq *pcpu = irq_data; + struct ffa_drv_info *info = pcpu->info; + + queue_work_on(smp_processor_id(), info->notif_pcpu_wq, + &info->notif_pcpu_work); return IRQ_HANDLED; } @@ -1306,15 +1774,23 @@ static void ffa_sched_recv_irq_work_fn(struct work_struct *work) ffa_notification_info_get(); } -static int ffa_sched_recv_irq_map(void) +static int ffa_irq_map(u32 id) { - int ret, irq, sr_intid; + char *err_str; + int ret, irq, intid; + + if (id == FFA_FEAT_NOTIFICATION_PENDING_INT) + err_str = "Notification Pending Interrupt"; + else if (id == FFA_FEAT_SCHEDULE_RECEIVER_INT) + err_str = "Schedule Receiver Interrupt"; + else + err_str = "Unknown ID"; - /* The returned sr_intid is assumed to be SGI donated to NS world */ - ret = ffa_features(FFA_FEAT_SCHEDULE_RECEIVER_INT, 0, &sr_intid, NULL); + /* The returned intid is assumed to be SGI donated to NS world */ + ret = ffa_features(id, 0, &intid, NULL); if (ret < 0) { if (ret != 
-EOPNOTSUPP) - pr_err("Failed to retrieve scheduler Rx interrupt\n"); + pr_err("Failed to retrieve FF-A %s %u\n", err_str, id); return ret; } @@ -1329,12 +1805,12 @@ static int ffa_sched_recv_irq_map(void) oirq.np = gic; oirq.args_count = 1; - oirq.args[0] = sr_intid; + oirq.args[0] = intid; irq = irq_create_of_mapping(&oirq); of_node_put(gic); #ifdef CONFIG_ACPI } else { - irq = acpi_register_gsi(NULL, sr_intid, ACPI_EDGE_SENSITIVE, + irq = acpi_register_gsi(NULL, intid, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_HIGH); #endif } @@ -1347,23 +1823,28 @@ static int ffa_sched_recv_irq_map(void) return irq; } -static void ffa_sched_recv_irq_unmap(void) +static void ffa_irq_unmap(unsigned int irq) { - if (drv_info->sched_recv_irq) { - irq_dispose_mapping(drv_info->sched_recv_irq); - drv_info->sched_recv_irq = 0; - } + if (!irq) + return; + irq_dispose_mapping(irq); } static int ffa_cpuhp_pcpu_irq_enable(unsigned int cpu) { - enable_percpu_irq(drv_info->sched_recv_irq, IRQ_TYPE_NONE); + if (drv_info->sched_recv_irq) + enable_percpu_irq(drv_info->sched_recv_irq, IRQ_TYPE_NONE); + if (drv_info->notif_pend_irq) + enable_percpu_irq(drv_info->notif_pend_irq, IRQ_TYPE_NONE); return 0; } static int ffa_cpuhp_pcpu_irq_disable(unsigned int cpu) { - disable_percpu_irq(drv_info->sched_recv_irq); + if (drv_info->sched_recv_irq) + disable_percpu_irq(drv_info->sched_recv_irq); + if (drv_info->notif_pend_irq) + disable_percpu_irq(drv_info->notif_pend_irq); return 0; } @@ -1382,13 +1863,16 @@ static void ffa_uninit_pcpu_irq(void) if (drv_info->sched_recv_irq) free_percpu_irq(drv_info->sched_recv_irq, drv_info->irq_pcpu); + if (drv_info->notif_pend_irq) + free_percpu_irq(drv_info->notif_pend_irq, drv_info->irq_pcpu); + if (drv_info->irq_pcpu) { free_percpu(drv_info->irq_pcpu); drv_info->irq_pcpu = NULL; } } -static int ffa_init_pcpu_irq(unsigned int irq) +static int ffa_init_pcpu_irq(void) { struct ffa_pcpu_irq __percpu *irq_pcpu; int ret, cpu; @@ -1402,13 +1886,31 @@ static int ffa_init_pcpu_irq(unsigned int irq) drv_info->irq_pcpu = irq_pcpu; - ret = request_percpu_irq(irq, irq_handler, "ARM-FFA", irq_pcpu); - if (ret) { - pr_err("Error registering notification IRQ %d: %d\n", irq, ret); - return ret; + if (drv_info->sched_recv_irq) { + ret = request_percpu_irq(drv_info->sched_recv_irq, + ffa_sched_recv_irq_handler, + "ARM-FFA-SRI", irq_pcpu); + if (ret) { + pr_err("Error registering percpu SRI nIRQ %d : %d\n", + drv_info->sched_recv_irq, ret); + drv_info->sched_recv_irq = 0; + return ret; + } + } + + if (drv_info->notif_pend_irq) { + ret = request_percpu_irq(drv_info->notif_pend_irq, + notif_pend_irq_handler, + "ARM-FFA-NPI", irq_pcpu); + if (ret) { + pr_err("Error registering percpu NPI nIRQ %d : %d\n", + drv_info->notif_pend_irq, ret); + drv_info->notif_pend_irq = 0; + return ret; + } } - INIT_WORK(&drv_info->irq_work, ffa_sched_recv_irq_work_fn); + INIT_WORK(&drv_info->sched_recv_irq_work, ffa_sched_recv_irq_work_fn); INIT_WORK(&drv_info->notif_pcpu_work, notif_pcpu_irq_work_fn); drv_info->notif_pcpu_wq = create_workqueue("ffa_pcpu_irq_notification"); if (!drv_info->notif_pcpu_wq) @@ -1428,7 +1930,10 @@ static int ffa_init_pcpu_irq(unsigned int irq) static void ffa_notifications_cleanup(void) { ffa_uninit_pcpu_irq(); - ffa_sched_recv_irq_unmap(); + ffa_irq_unmap(drv_info->sched_recv_irq); + drv_info->sched_recv_irq = 0; + ffa_irq_unmap(drv_info->notif_pend_irq); + drv_info->notif_pend_irq = 0; if (drv_info->bitmap_created) { ffa_notification_bitmap_destroy(); @@ -1439,30 +1944,31 @@ static void 
ffa_notifications_cleanup(void) static void ffa_notifications_setup(void) { - int ret, irq; + int ret; ret = ffa_features(FFA_NOTIFICATION_BITMAP_CREATE, 0, NULL, NULL); - if (ret) { - pr_info("Notifications not supported, continuing with it ..\n"); - return; - } + if (!ret) { + ret = ffa_notification_bitmap_create(); + if (ret) { + pr_err("Notification bitmap create error %d\n", ret); + return; + } - ret = ffa_notification_bitmap_create(); - if (ret) { - pr_info("Notification bitmap create error %d\n", ret); - return; + drv_info->bitmap_created = true; } - drv_info->bitmap_created = true; - irq = ffa_sched_recv_irq_map(); - if (irq <= 0) { - ret = irq; - goto cleanup; - } + ret = ffa_irq_map(FFA_FEAT_SCHEDULE_RECEIVER_INT); + if (ret > 0) + drv_info->sched_recv_irq = ret; + + ret = ffa_irq_map(FFA_FEAT_NOTIFICATION_PENDING_INT); + if (ret > 0) + drv_info->notif_pend_irq = ret; - drv_info->sched_recv_irq = irq; + if (!drv_info->sched_recv_irq && !drv_info->notif_pend_irq) + goto cleanup; - ret = ffa_init_pcpu_irq(irq); + ret = ffa_init_pcpu_irq(); if (ret) goto cleanup; @@ -1479,20 +1985,16 @@ cleanup: static int __init ffa_init(void) { int ret; + u32 buf_sz; + size_t rxtx_bufsz = SZ_4K; ret = ffa_transport_init(&invoke_ffa_fn); if (ret) return ret; - ret = arm_ffa_bus_init(); - if (ret) - return ret; - drv_info = kzalloc(sizeof(*drv_info), GFP_KERNEL); - if (!drv_info) { - ret = -ENOMEM; - goto ffa_bus_exit; - } + if (!drv_info) + return -ENOMEM; ret = ffa_version_check(&drv_info->version); if (ret) @@ -1504,13 +2006,24 @@ static int __init ffa_init(void) goto free_drv_info; } - drv_info->rx_buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL); + ret = ffa_features(FFA_FN_NATIVE(RXTX_MAP), 0, &buf_sz, NULL); + if (!ret) { + if (RXTX_MAP_MIN_BUFSZ(buf_sz) == 1) + rxtx_bufsz = SZ_64K; + else if (RXTX_MAP_MIN_BUFSZ(buf_sz) == 2) + rxtx_bufsz = SZ_16K; + else + rxtx_bufsz = SZ_4K; + } + + drv_info->rxtx_bufsz = rxtx_bufsz; + drv_info->rx_buffer = alloc_pages_exact(rxtx_bufsz, GFP_KERNEL); if (!drv_info->rx_buffer) { ret = -ENOMEM; goto free_pages; } - drv_info->tx_buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL); + drv_info->tx_buffer = alloc_pages_exact(rxtx_bufsz, GFP_KERNEL); if (!drv_info->tx_buffer) { ret = -ENOMEM; goto free_pages; @@ -1518,7 +2031,7 @@ static int __init ffa_init(void) ret = ffa_rxtx_map(virt_to_phys(drv_info->tx_buffer), virt_to_phys(drv_info->rx_buffer), - RXTX_BUFFER_SIZE / FFA_PAGE_SIZE); + rxtx_bufsz / FFA_PAGE_SIZE); if (ret) { pr_err("failed to register FFA RxTx buffers\n"); goto free_pages; @@ -1527,46 +2040,34 @@ static int __init ffa_init(void) mutex_init(&drv_info->rx_lock); mutex_init(&drv_info->tx_lock); - ffa_set_up_mem_ops_native_flag(); + ffa_drvinfo_flags_init(); ffa_notifications_setup(); ret = ffa_setup_partitions(); - if (ret) { - pr_err("failed to setup partitions\n"); - goto cleanup_notifs; - } - - ret = ffa_sched_recv_cb_update(drv_info->vm_id, ffa_self_notif_handle, - drv_info, true); - if (ret) - pr_info("Failed to register driver sched callback %d\n", ret); - - return 0; + if (!ret) + return ret; -cleanup_notifs: + pr_err("failed to setup partitions\n"); ffa_notifications_cleanup(); free_pages: if (drv_info->tx_buffer) - free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE); - free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE); + free_pages_exact(drv_info->tx_buffer, rxtx_bufsz); + free_pages_exact(drv_info->rx_buffer, rxtx_bufsz); free_drv_info: kfree(drv_info); -ffa_bus_exit: - arm_ffa_bus_exit(); return ret; } 
-subsys_initcall(ffa_init); +module_init(ffa_init); static void __exit ffa_exit(void) { ffa_notifications_cleanup(); ffa_partitions_cleanup(); ffa_rxtx_unmap(drv_info->vm_id); - free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE); - free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE); + free_pages_exact(drv_info->tx_buffer, drv_info->rxtx_bufsz); + free_pages_exact(drv_info->rx_buffer, drv_info->rxtx_bufsz); kfree(drv_info); - arm_ffa_bus_exit(); } module_exit(ffa_exit); diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig index aa5842be19b2..e3fb36825978 100644 --- a/drivers/firmware/arm_scmi/Kconfig +++ b/drivers/firmware/arm_scmi/Kconfig @@ -55,116 +55,35 @@ config ARM_SCMI_RAW_MODE_SUPPORT_COEX operate normally, thing which could make an SCMI test suite using the SCMI Raw mode support unreliable. If unsure, say N. -config ARM_SCMI_HAVE_TRANSPORT - bool - help - This declares whether at least one SCMI transport has been configured. - Used to trigger a build bug when trying to build SCMI without any - configured transport. - -config ARM_SCMI_HAVE_SHMEM - bool - help - This declares whether a shared memory based transport for SCMI is - available. - -config ARM_SCMI_HAVE_MSG - bool - help - This declares whether a message passing based transport for SCMI is - available. - -config ARM_SCMI_TRANSPORT_MAILBOX - bool "SCMI transport based on Mailbox" - depends on MAILBOX - select ARM_SCMI_HAVE_TRANSPORT - select ARM_SCMI_HAVE_SHMEM - default y - help - Enable mailbox based transport for SCMI. - - If you want the ARM SCMI PROTOCOL stack to include support for a - transport based on mailboxes, answer Y. - -config ARM_SCMI_TRANSPORT_OPTEE - bool "SCMI transport based on OP-TEE service" - depends on OPTEE=y || OPTEE=ARM_SCMI_PROTOCOL - select ARM_SCMI_HAVE_TRANSPORT - select ARM_SCMI_HAVE_SHMEM - select ARM_SCMI_HAVE_MSG - default y - help - This enables the OP-TEE service based transport for SCMI. - - If you want the ARM SCMI PROTOCOL stack to include support for a - transport based on OP-TEE SCMI service, answer Y. - -config ARM_SCMI_TRANSPORT_SMC - bool "SCMI transport based on SMC" - depends on HAVE_ARM_SMCCC_DISCOVERY - select ARM_SCMI_HAVE_TRANSPORT - select ARM_SCMI_HAVE_SHMEM - default y - help - Enable SMC based transport for SCMI. - - If you want the ARM SCMI PROTOCOL stack to include support for a - transport based on SMC, answer Y. - -config ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE - bool "Enable atomic mode support for SCMI SMC transport" - depends on ARM_SCMI_TRANSPORT_SMC - help - Enable support of atomic operation for SCMI SMC based transport. - - If you want the SCMI SMC based transport to operate in atomic - mode, avoiding any kind of sleeping behaviour for selected - transactions on the TX path, answer Y. - Enabling atomic mode operations allows any SCMI driver using this - transport to optionally ask for atomic SCMI transactions and operate - in atomic context too, at the price of using a number of busy-waiting - primitives all over instead. If unsure say N. - -config ARM_SCMI_TRANSPORT_VIRTIO - bool "SCMI transport based on VirtIO" - depends on VIRTIO=y || VIRTIO=ARM_SCMI_PROTOCOL - select ARM_SCMI_HAVE_TRANSPORT - select ARM_SCMI_HAVE_MSG +config ARM_SCMI_DEBUG_COUNTERS + bool "Enable SCMI communication debug metrics tracking" + select ARM_SCMI_NEED_DEBUGFS + depends on DEBUG_FS + default n help - This enables the virtio based transport for SCMI. + Enables tracking of some key communication metrics for debug + purposes. 
It may track metrics like how many messages were sent + or received, were there any failures, what kind of failures, ..etc. - If you want the ARM SCMI PROTOCOL stack to include support for a - transport based on VirtIO, answer Y. + Enable this option to create a new debugfs directory which contains + such useful debug counters. This can be helpful for debugging and + SCMI monitoring. -config ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE - bool "SCMI VirtIO transport Version 1 compliance" - depends on ARM_SCMI_TRANSPORT_VIRTIO +config ARM_SCMI_QUIRKS + bool "Enable SCMI Quirks framework" + depends on JUMP_LABEL || COMPILE_TEST default y help - This enforces strict compliance with VirtIO Version 1 specification. - - If you want the ARM SCMI VirtIO transport layer to refuse to work - with Legacy VirtIO backends and instead support only VirtIO Version 1 - devices (or above), answer Y. - - If you want instead to support also old Legacy VirtIO backends (like - the ones implemented by kvmtool) and let the core Kernel VirtIO layer - take care of the needed conversions, say N. - -config ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE - bool "Enable atomic mode for SCMI VirtIO transport" - depends on ARM_SCMI_TRANSPORT_VIRTIO - help - Enable support of atomic operation for SCMI VirtIO based transport. + Enables support for SCMI Quirks framework to workaround SCMI platform + firmware bugs on system already deployed in the wild. - If you want the SCMI VirtIO based transport to operate in atomic - mode, avoiding any kind of sleeping behaviour for selected - transactions on the TX path, answer Y. + The framework allows the definition of platform-specific code quirks + that will be associated and enabled only on the desired platforms + depending on the SCMI firmware advertised versions and/or machine + compatibles. - Enabling atomic mode operations allows any SCMI driver using this - transport to optionally ask for atomic SCMI transactions and operate - in atomic context too, at the price of using a number of busy-waiting - primitives all over instead. If unsure say N. 
+source "drivers/firmware/arm_scmi/transports/Kconfig" +source "drivers/firmware/arm_scmi/vendors/imx/Kconfig" endif #ARM_SCMI_PROTOCOL diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile index a7bc4796519c..780cd62b2f78 100644 --- a/drivers/firmware/arm_scmi/Makefile +++ b/drivers/firmware/arm_scmi/Makefile @@ -3,24 +3,18 @@ scmi-bus-y = bus.o scmi-core-objs := $(scmi-bus-y) scmi-driver-y = driver.o notify.o +scmi-driver-$(CONFIG_ARM_SCMI_QUIRKS) += quirks.o scmi-driver-$(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT) += raw_mode.o scmi-transport-$(CONFIG_ARM_SCMI_HAVE_SHMEM) = shmem.o -scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_MAILBOX) += mailbox.o -scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += smc.o scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o -scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO) += virtio.o -scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_OPTEE) += optee.o -scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o powercap.o +scmi-protocols-y := base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o powercap.o +scmi-protocols-y += pinctrl.o scmi-module-objs := $(scmi-driver-y) $(scmi-protocols-y) $(scmi-transport-y) +obj-$(CONFIG_ARM_SCMI_PROTOCOL) += transports/ +obj-$(CONFIG_ARM_SCMI_PROTOCOL) += vendors/imx/ + obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-core.o obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-module.o obj-$(CONFIG_ARM_SCMI_POWER_CONTROL) += scmi_power_control.o - -ifeq ($(CONFIG_THUMB2_KERNEL)$(CONFIG_CC_IS_CLANG),yy) -# The use of R7 in the SMCCC conflicts with the compiler's use of R7 as a frame -# pointer in Thumb2 mode, which is forcibly enabled by Clang when profiling -# hooks are inserted via the -pg switch. -CFLAGS_REMOVE_smc.o += $(CC_FLAGS_FTRACE) -endif diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c index 97254de35ab0..86b376c50a13 100644 --- a/drivers/firmware/arm_scmi/base.c +++ b/drivers/firmware/arm_scmi/base.c @@ -14,7 +14,7 @@ #include "notify.h" /* Updated only after ALL the mandatory features for that version are merged */ -#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x20000 +#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x20001 #define SCMI_BASE_NUM_SOURCES 1 #define SCMI_BASE_MAX_CMD_ERR_COUNT 1024 @@ -42,7 +42,6 @@ struct scmi_msg_resp_base_discover_agent { u8 name[SCMI_SHORT_NAME_MAX_SIZE]; }; - struct scmi_msg_base_error_notify { __le32 event_control; #define BASE_TP_NOTIFY_ALL BIT(0) @@ -105,7 +104,6 @@ scmi_base_vendor_id_get(const struct scmi_protocol_handle *ph, bool sub_vendor) struct scmi_xfer *t; struct scmi_revision_info *rev = ph->get_priv(ph); - if (sub_vendor) { cmd = BASE_DISCOVER_SUB_VENDOR; vendor_id = rev->sub_vendor_id; @@ -386,7 +384,7 @@ static int scmi_base_protocol_init(const struct scmi_protocol_handle *ph) if (ret) return ret; - rev->major_ver = PROTOCOL_REV_MAJOR(version), + rev->major_ver = PROTOCOL_REV_MAJOR(version); rev->minor_ver = PROTOCOL_REV_MINOR(version); ph->set_priv(ph, rev, version); diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c index 77c78be6e79c..1adef0389475 100644 --- a/drivers/firmware/arm_scmi/bus.c +++ b/drivers/firmware/arm_scmi/bus.c @@ -17,6 +17,8 @@ #include "common.h" +#define SCMI_UEVENT_MODALIAS_FMT "%s:%02x:%s" + BLOCKING_NOTIFIER_HEAD(scmi_requested_devices_nh); EXPORT_SYMBOL_GPL(scmi_requested_devices_nh); @@ -42,7 +44,7 @@ static atomic_t scmi_syspower_registered = ATOMIC_INIT(0); * This helper let an SCMI driver request specific devices identified by the * @id_table 
to be created for each active SCMI instance. * - * The requested device name MUST NOT be already existent for any protocol; + * The requested device name MUST NOT be already existent for this protocol; * at first the freshly requested @id_table is annotated in the IDR table * @scmi_requested_devices and then the requested device is advertised to any * registered party via the @scmi_requested_devices_nh notification chain. @@ -52,7 +54,6 @@ static atomic_t scmi_syspower_registered = ATOMIC_INIT(0); static int scmi_protocol_device_request(const struct scmi_device_id *id_table) { int ret = 0; - unsigned int id = 0; struct list_head *head, *phead = NULL; struct scmi_requested_dev *rdev; @@ -67,19 +68,13 @@ static int scmi_protocol_device_request(const struct scmi_device_id *id_table) } /* - * Search for the matching protocol rdev list and then search - * of any existent equally named device...fails if any duplicate found. + * Find the matching protocol rdev list and then search of any + * existent equally named device...fails if any duplicate found. */ mutex_lock(&scmi_requested_devices_mtx); - idr_for_each_entry(&scmi_requested_devices, head, id) { - if (!phead) { - /* A list found registered in the IDR is never empty */ - rdev = list_first_entry(head, struct scmi_requested_dev, - node); - if (rdev->id_table->protocol_id == - id_table->protocol_id) - phead = head; - } + phead = idr_find(&scmi_requested_devices, id_table->protocol_id); + if (phead) { + head = phead; list_for_each_entry(rdev, head, node) { if (!strcmp(rdev->id_table->name, id_table->name)) { pr_err("Ignoring duplicate request [%d] %s\n", @@ -206,60 +201,59 @@ scmi_protocol_table_unregister(const struct scmi_device_id *id_table) scmi_protocol_device_unrequest(entry); } -static const struct scmi_device_id * -scmi_dev_match_id(struct scmi_device *scmi_dev, struct scmi_driver *scmi_drv) +static int scmi_dev_match_by_id_table(struct scmi_device *scmi_dev, + const struct scmi_device_id *id_table) { - const struct scmi_device_id *id = scmi_drv->id_table; - - if (!id) - return NULL; - - for (; id->protocol_id; id++) - if (id->protocol_id == scmi_dev->protocol_id) { - if (!id->name) - return id; - else if (!strcmp(id->name, scmi_dev->name)) - return id; - } + if (!id_table || !id_table->name) + return 0; + + /* Always skip transport devices from matching */ + for (; id_table->protocol_id && id_table->name; id_table++) + if (id_table->protocol_id == scmi_dev->protocol_id && + strncmp(scmi_dev->name, "__scmi_transport_device", 23) && + !strcmp(id_table->name, scmi_dev->name)) + return 1; + return 0; +} - return NULL; +static int scmi_dev_match_id(struct scmi_device *scmi_dev, + const struct scmi_driver *scmi_drv) +{ + return scmi_dev_match_by_id_table(scmi_dev, scmi_drv->id_table); } -static int scmi_dev_match(struct device *dev, struct device_driver *drv) +static int scmi_dev_match(struct device *dev, const struct device_driver *drv) { - struct scmi_driver *scmi_drv = to_scmi_driver(drv); + const struct scmi_driver *scmi_drv = to_scmi_driver(drv); struct scmi_device *scmi_dev = to_scmi_dev(dev); - const struct scmi_device_id *id; - id = scmi_dev_match_id(scmi_dev, scmi_drv); - if (id) - return 1; - - return 0; + return scmi_dev_match_id(scmi_dev, scmi_drv); } -static int scmi_match_by_id_table(struct device *dev, void *data) +static int scmi_match_by_id_table(struct device *dev, const void *data) { - struct scmi_device *sdev = to_scmi_dev(dev); - struct scmi_device_id *id_table = data; + struct scmi_device *scmi_dev = to_scmi_dev(dev); + 
const struct scmi_device_id *id_table = data; - return sdev->protocol_id == id_table->protocol_id && - (id_table->name && !strcmp(sdev->name, id_table->name)); + return scmi_dev_match_by_id_table(scmi_dev, id_table); } static struct scmi_device *scmi_child_dev_find(struct device *parent, int prot_id, const char *name) { - struct scmi_device_id id_table; + struct scmi_device_id id_table[2] = { 0 }; struct device *dev; - id_table.protocol_id = prot_id; - id_table.name = name; + id_table[0].protocol_id = prot_id; + id_table[0].name = name; dev = device_find_child(parent, &id_table, scmi_match_by_id_table); if (!dev) return NULL; + /* Drop the refcnt bumped implicitly by device_find_child */ + put_device(dev); + return to_scmi_dev(dev); } @@ -283,11 +277,59 @@ static void scmi_dev_remove(struct device *dev) scmi_drv->remove(scmi_dev); } +static int scmi_device_uevent(const struct device *dev, struct kobj_uevent_env *env) +{ + const struct scmi_device *scmi_dev = to_scmi_dev(dev); + + return add_uevent_var(env, "MODALIAS=" SCMI_UEVENT_MODALIAS_FMT, + dev_name(&scmi_dev->dev), scmi_dev->protocol_id, + scmi_dev->name); +} + +static ssize_t modalias_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scmi_device *scmi_dev = to_scmi_dev(dev); + + return sysfs_emit(buf, SCMI_UEVENT_MODALIAS_FMT, + dev_name(&scmi_dev->dev), scmi_dev->protocol_id, + scmi_dev->name); +} +static DEVICE_ATTR_RO(modalias); + +static ssize_t protocol_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scmi_device *scmi_dev = to_scmi_dev(dev); + + return sprintf(buf, "0x%02x\n", scmi_dev->protocol_id); +} +static DEVICE_ATTR_RO(protocol_id); + +static ssize_t name_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scmi_device *scmi_dev = to_scmi_dev(dev); + + return sprintf(buf, "%s\n", scmi_dev->name); +} +static DEVICE_ATTR_RO(name); + +static struct attribute *scmi_device_attributes_attrs[] = { + &dev_attr_protocol_id.attr, + &dev_attr_name.attr, + &dev_attr_modalias.attr, + NULL, +}; +ATTRIBUTE_GROUPS(scmi_device_attributes); + const struct bus_type scmi_bus_type = { .name = "scmi_protocol", .match = scmi_dev_match, .probe = scmi_dev_probe, .remove = scmi_dev_remove, + .uevent = scmi_device_uevent, + .dev_groups = scmi_device_attributes_groups, }; EXPORT_SYMBOL_GPL(scmi_bus_type); @@ -325,7 +367,10 @@ EXPORT_SYMBOL_GPL(scmi_driver_unregister); static void scmi_device_release(struct device *dev) { - kfree(to_scmi_dev(dev)); + struct scmi_device *scmi_dev = to_scmi_dev(dev); + + kfree_const(scmi_dev->name); + kfree(scmi_dev); } static void __scmi_device_destroy(struct scmi_device *scmi_dev) @@ -338,7 +383,6 @@ static void __scmi_device_destroy(struct scmi_device *scmi_dev) if (scmi_dev->protocol_id == SCMI_PROTOCOL_SYSTEM) atomic_set(&scmi_syspower_registered, 0); - kfree_const(scmi_dev->name); ida_free(&scmi_bus_id, scmi_dev->id); device_unregister(&scmi_dev->dev); } @@ -410,12 +454,25 @@ __scmi_device_create(struct device_node *np, struct device *parent, return scmi_dev; put_dev: - kfree_const(scmi_dev->name); put_device(&scmi_dev->dev); ida_free(&scmi_bus_id, id); return NULL; } +static struct scmi_device * +_scmi_device_create(struct device_node *np, struct device *parent, + int protocol, const char *name) +{ + struct scmi_device *sdev; + + sdev = __scmi_device_create(np, parent, protocol, name); + if (!sdev) + pr_err("(%s) Failed to create device for protocol 0x%x (%s)\n", + of_node_full_name(parent->of_node), protocol, name); 
+ + return sdev; +} + /** * scmi_device_create - A method to create one or more SCMI devices * @@ -448,7 +505,7 @@ struct scmi_device *scmi_device_create(struct device_node *np, struct scmi_device *scmi_dev = NULL; if (name) - return __scmi_device_create(np, parent, protocol, name); + return _scmi_device_create(np, parent, protocol, name); mutex_lock(&scmi_requested_devices_mtx); phead = idr_find(&scmi_requested_devices, protocol); @@ -462,18 +519,13 @@ struct scmi_device *scmi_device_create(struct device_node *np, list_for_each_entry(rdev, phead, node) { struct scmi_device *sdev; - sdev = __scmi_device_create(np, parent, - rdev->id_table->protocol_id, - rdev->id_table->name); - /* Report errors and carry on... */ + sdev = _scmi_device_create(np, parent, + rdev->id_table->protocol_id, + rdev->id_table->name); if (sdev) scmi_dev = sdev; - else - pr_err("(%s) Failed to create device for protocol 0x%x (%s)\n", - of_node_full_name(parent->of_node), - rdev->id_table->protocol_id, - rdev->id_table->name); } + mutex_unlock(&scmi_requested_devices_mtx); return scmi_dev; diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c index 134019297d08..afa7981efe82 100644 --- a/drivers/firmware/arm_scmi/clock.c +++ b/drivers/firmware/arm_scmi/clock.c @@ -11,6 +11,7 @@ #include "protocols.h" #include "notify.h" +#include "quirks.h" /* Updated only after ALL the mandatory features for that version are merged */ #define SCMI_PROTOCOL_SUPPORTED_VERSION 0x30000 @@ -365,6 +366,7 @@ static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph, ret = ph->xops->do_xfer(ph, t); if (!ret) { u32 latency = 0; + attributes = le32_to_cpu(attr->attributes); strscpy(clk->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE); /* clock_enable_latency field is present only since SCMI v3.1 */ @@ -428,6 +430,23 @@ static void iter_clk_describe_prepare_message(void *message, msg->rate_index = cpu_to_le32(desc_index); } +#define QUIRK_OUT_OF_SPEC_TRIPLET \ + ({ \ + /* \ + * A known quirk: a triplet is returned but num_returned != 3 \ + * Check for a safe payload size and fix. \ + */ \ + if (st->num_returned != 3 && st->num_remaining == 0 && \ + st->rx_len == sizeof(*r) + sizeof(__le32) * 2 * 3) { \ + st->num_returned = 3; \ + st->num_remaining = 0; \ + } else { \ + dev_err(p->dev, \ + "Cannot fix out-of-spec reply !\n"); \ + return -EPROTO; \ + } \ + }) + static int iter_clk_describe_update_state(struct scmi_iterator_state *st, const void *response, void *priv) @@ -449,19 +468,8 @@ iter_clk_describe_update_state(struct scmi_iterator_state *st, p->clk->name, st->num_returned, st->num_remaining, st->rx_len); - /* - * A known quirk: a triplet is returned but num_returned != 3 - * Check for a safe payload size and fix. - */ - if (st->num_returned != 3 && st->num_remaining == 0 && - st->rx_len == sizeof(*r) + sizeof(__le32) * 2 * 3) { - st->num_returned = 3; - st->num_remaining = 0; - } else { - dev_err(p->dev, - "Cannot fix out-of-spec reply !\n"); - return -EPROTO; - } + SCMI_QUIRK(clock_rates_triplet_out_of_spec, + QUIRK_OUT_OF_SPEC_TRIPLET); } return 0; diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index 6affbfdd1dec..dab758c5fdea 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -4,7 +4,7 @@ * driver common header file containing some definitions, structures * and function prototypes used in all the different SCMI protocols. * - * Copyright (C) 2018-2022 ARM Ltd. + * Copyright (C) 2018-2024 ARM Ltd. 
*/ #ifndef _SCMI_COMMON_H #define _SCMI_COMMON_H @@ -22,7 +22,7 @@ #include <linux/spinlock.h> #include <linux/types.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include "protocols.h" #include "notify.h" @@ -31,6 +31,8 @@ #define SCMI_MAX_RESPONSE_TIMEOUT (2 * MSEC_PER_SEC) +#define SCMI_SHMEM_MAX_PAYLOAD_SIZE 104 + enum scmi_error_codes { SCMI_SUCCESS = 0, /* Success */ SCMI_ERR_SUPPORT = -1, /* Not supported */ @@ -163,7 +165,9 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id); * used to initialize this channel * @dev: Reference to device in the SCMI hierarchy corresponding to this * channel + * @is_p2a: A flag to identify a channel as P2A (RX) * @rx_timeout_ms: The configured RX timeout in milliseconds. + * @max_msg_size: Maximum size of message payload. * @handle: Pointer to SCMI entity handle * @no_completion_irq: Flag to indicate that this channel has no completion * interrupt mechanism for synchronous commands. @@ -174,7 +178,9 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id); struct scmi_chan_info { int id; struct device *dev; + bool is_p2a; unsigned int rx_timeout_ms; + unsigned int max_msg_size; struct scmi_handle *handle; bool no_completion_irq; void *transport_info; @@ -183,7 +189,6 @@ struct scmi_chan_info { /** * struct scmi_transport_ops - Structure representing a SCMI transport ops * - * @link_supplier: Optional callback to add link to a supplier device * @chan_available: Callback to check if channel is available or not * @chan_setup: Callback to allocate and setup a channel * @chan_free: Callback to free a channel @@ -198,7 +203,6 @@ struct scmi_chan_info { * @poll_done: Callback to poll transfer status */ struct scmi_transport_ops { - int (*link_supplier)(struct device *dev); bool (*chan_available)(struct device_node *of_node, int idx); int (*chan_setup)(struct scmi_chan_info *cinfo, struct device *dev, bool tx); @@ -219,18 +223,18 @@ struct scmi_transport_ops { /** * struct scmi_desc - Description of SoC integration * - * @transport_init: An optional function that a transport can provide to - * initialize some transport-specific setup during SCMI core - * initialization, so ahead of SCMI core probing. - * @transport_exit: An optional function that a transport can provide to - * de-initialize some transport-specific setup during SCMI core - * de-initialization, so after SCMI core removal. * @ops: Pointer to the transport specific ops structure * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds) * @max_msg: Maximum number of messages for a channel type (tx or rx) that can * be pending simultaneously in the system. May be overridden by the * get_max_msg op. - * @max_msg_size: Maximum size of data per message that can be handled. + * @max_msg_size: Maximum size of data payload per message that can be handled. + * @atomic_threshold: Optional system wide DT-configured threshold, expressed + * in microseconds, for atomic operations. + * Only SCMI synchronous commands reported by the platform + * to have an execution latency lesser-equal to the threshold + * should be considered for atomic mode operation: such + * decision is finally left up to the SCMI drivers. * @force_polling: Flag to force this whole transport to use SCMI core polling * mechanism instead of completion interrupts even if available. * @sync_cmds_completed_on_ret: Flag to indicate that the transport assures @@ -245,12 +249,11 @@ struct scmi_transport_ops { * when requested. 
*/ struct scmi_desc { - int (*transport_init)(void); - void (*transport_exit)(void); const struct scmi_transport_ops *ops; int max_rx_timeout_ms; int max_msg; int max_msg_size; + unsigned int atomic_threshold; const bool force_polling; const bool sync_cmds_completed_on_ret; const bool atomic_enabled; @@ -286,35 +289,101 @@ int scmi_xfer_raw_inflight_register(const struct scmi_handle *handle, int scmi_xfer_raw_wait_for_message_response(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer, unsigned int timeout_ms); -#ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX -extern const struct scmi_desc scmi_mailbox_desc; -#endif -#ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC -extern const struct scmi_desc scmi_smc_desc; -#endif -#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO -extern const struct scmi_desc scmi_virtio_desc; -#endif -#ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE -extern const struct scmi_desc scmi_optee_desc; -#endif - -void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv); + +enum debug_counters { + SENT_OK, + SENT_FAIL, + SENT_FAIL_POLLING_UNSUPPORTED, + SENT_FAIL_CHANNEL_NOT_FOUND, + RESPONSE_OK, + NOTIFICATION_OK, + DELAYED_RESPONSE_OK, + XFERS_RESPONSE_TIMEOUT, + XFERS_RESPONSE_POLLED_TIMEOUT, + RESPONSE_POLLED_OK, + ERR_MSG_UNEXPECTED, + ERR_MSG_INVALID, + ERR_MSG_NOMEM, + ERR_PROTOCOL, + SCMI_DEBUG_COUNTERS_LAST +}; + +static inline void scmi_inc_count(atomic_t *arr, int stat) +{ + if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) + atomic_inc(&arr[stat]); +} + +enum scmi_bad_msg { + MSG_UNEXPECTED = -1, + MSG_INVALID = -2, + MSG_UNKNOWN = -3, + MSG_NOMEM = -4, + MSG_MBOX_SPURIOUS = -5, +}; + +/* Used for compactness and signature validation of the function pointers being + * passed. + */ +typedef void (*shmem_copy_toio_t)(void __iomem *to, const void *from, + size_t count); +typedef void (*shmem_copy_fromio_t)(void *to, const void __iomem *from, + size_t count); + +/** + * struct scmi_shmem_io_ops - I/O operations to read from/write to + * Shared Memory + * + * @toio: Copy data to the shared memory area + * @fromio: Copy data from the shared memory area + */ +struct scmi_shmem_io_ops { + shmem_copy_fromio_t fromio; + shmem_copy_toio_t toio; +}; /* shmem related declarations */ struct scmi_shared_mem; -void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem, - struct scmi_xfer *xfer, struct scmi_chan_info *cinfo); -u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem); -void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem, +/** + * struct scmi_shared_mem_operations - Transport core operations for + * Shared Memory + * + * @tx_prepare: Prepare the @xfer message for transmission on the chosen @shmem + * @read_header: Read header of the message currently hold in @shmem + * @fetch_response: Copy the message response from @shmem into @xfer + * @fetch_notification: Copy the message notification from @shmem into @xfer + * @clear_channel: Clear the @shmem channel busy flag + * @poll_done: Check if poll has completed for @xfer on @shmem + * @channel_free: Check if @shmem channel is marked as free + * @channel_intr_enabled: Check is @shmem channel has requested a completion irq + * @setup_iomap: Setup IO shared memory for channel @cinfo + */ +struct scmi_shared_mem_operations { + void (*tx_prepare)(struct scmi_shared_mem __iomem *shmem, + struct scmi_xfer *xfer, + struct scmi_chan_info *cinfo, + shmem_copy_toio_t toio); + u32 (*read_header)(struct scmi_shared_mem __iomem *shmem); + + void (*fetch_response)(struct scmi_shared_mem __iomem *shmem, + struct scmi_xfer *xfer, + 
shmem_copy_fromio_t fromio); + void (*fetch_notification)(struct scmi_shared_mem __iomem *shmem, + size_t max_len, struct scmi_xfer *xfer, + shmem_copy_fromio_t fromio); + void (*clear_channel)(struct scmi_shared_mem __iomem *shmem); + bool (*poll_done)(struct scmi_shared_mem __iomem *shmem, struct scmi_xfer *xfer); -void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem, - size_t max_len, struct scmi_xfer *xfer); -void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem); -bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem, - struct scmi_xfer *xfer); -bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem); + bool (*channel_free)(struct scmi_shared_mem __iomem *shmem); + bool (*channel_intr_enabled)(struct scmi_shared_mem __iomem *shmem); + void __iomem *(*setup_iomap)(struct scmi_chan_info *cinfo, + struct device *dev, + bool tx, struct resource *res, + struct scmi_shmem_io_ops **ops); +}; + +const struct scmi_shared_mem_operations *scmi_shared_mem_operations_get(void); /* declarations for message passing transports */ struct scmi_msg_payld; @@ -322,14 +391,109 @@ struct scmi_msg_payld; /* Maximum overhead of message w.r.t. struct scmi_desc.max_msg_size */ #define SCMI_MSG_MAX_PROT_OVERHEAD (2 * sizeof(__le32)) -size_t msg_response_size(struct scmi_xfer *xfer); -size_t msg_command_size(struct scmi_xfer *xfer); -void msg_tx_prepare(struct scmi_msg_payld *msg, struct scmi_xfer *xfer); -u32 msg_read_header(struct scmi_msg_payld *msg); -void msg_fetch_response(struct scmi_msg_payld *msg, size_t len, - struct scmi_xfer *xfer); -void msg_fetch_notification(struct scmi_msg_payld *msg, size_t len, - size_t max_len, struct scmi_xfer *xfer); +/** + * struct scmi_message_operations - Transport core operations for Message + * + * @response_size: Get calculated response size for @xfer + * @command_size: Get calculated command size for @xfer + * @tx_prepare: Prepare the @xfer message for transmission on the provided @msg + * @read_header: Read header of the message currently hold in @msg + * @fetch_response: Copy the message response from @msg into @xfer + * @fetch_notification: Copy the message notification from @msg into @xfer + */ +struct scmi_message_operations { + size_t (*response_size)(struct scmi_xfer *xfer); + size_t (*command_size)(struct scmi_xfer *xfer); + void (*tx_prepare)(struct scmi_msg_payld *msg, struct scmi_xfer *xfer); + u32 (*read_header)(struct scmi_msg_payld *msg); + void (*fetch_response)(struct scmi_msg_payld *msg, size_t len, + struct scmi_xfer *xfer); + void (*fetch_notification)(struct scmi_msg_payld *msg, size_t len, + size_t max_len, struct scmi_xfer *xfer); +}; + +const struct scmi_message_operations *scmi_message_operations_get(void); + +/** + * struct scmi_transport_core_operations - Transpoert core operations + * + * @bad_message_trace: An helper to report a malformed/unexpected message + * @rx_callback: Callback to report received messages + * @shmem: Datagram operations for shared memory based transports + * @msg: Datagram operations for message based transports + */ +struct scmi_transport_core_operations { + void (*bad_message_trace)(struct scmi_chan_info *cinfo, + u32 msg_hdr, enum scmi_bad_msg err); + void (*rx_callback)(struct scmi_chan_info *cinfo, u32 msg_hdr, + void *priv); + const struct scmi_shared_mem_operations *shmem; + const struct scmi_message_operations *msg; +}; + +/** + * struct scmi_transport - A structure representing a configured transport + * + * @supplier: Device representing the transport and acting as a 
supplier for + * the core SCMI stack + * @desc: Transport descriptor + * @core_ops: A pointer to a pointer used by the core SCMI stack to make the + * core transport operations accessible to the transports. + */ +struct scmi_transport { + struct device *supplier; + struct scmi_desc desc; + struct scmi_transport_core_operations **core_ops; +}; + +#define DEFINE_SCMI_TRANSPORT_DRIVER(__tag, __drv, __desc, __match, __core_ops)\ +static void __tag##_dev_free(void *data) \ +{ \ + struct platform_device *spdev = data; \ + \ + platform_device_unregister(spdev); \ +} \ + \ +static int __tag##_probe(struct platform_device *pdev) \ +{ \ + struct device *dev = &pdev->dev; \ + struct platform_device *spdev; \ + struct scmi_transport strans; \ + int ret; \ + \ + spdev = platform_device_alloc("arm-scmi", PLATFORM_DEVID_AUTO); \ + if (!spdev) \ + return -ENOMEM; \ + \ + device_set_of_node_from_dev(&spdev->dev, dev); \ + \ + strans.supplier = dev; \ + memcpy(&strans.desc, &(__desc), sizeof(strans.desc)); \ + strans.core_ops = &(__core_ops); \ + \ + ret = platform_device_add_data(spdev, &strans, sizeof(strans)); \ + if (ret) \ + goto err; \ + \ + spdev->dev.parent = dev; \ + ret = platform_device_add(spdev); \ + if (ret) \ + goto err; \ + \ + return devm_add_action_or_reset(dev, __tag##_dev_free, spdev); \ + \ +err: \ + platform_device_put(spdev); \ + return ret; \ +} \ + \ +static struct platform_driver __drv = { \ + .driver = { \ + .name = #__tag "_transport", \ + .of_match_table = __match, \ + }, \ + .probe = __tag##_probe, \ +} void scmi_notification_instance_data_set(const struct scmi_handle *handle, void *priv); diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 2709598f3008..395fe9289035 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -11,7 +11,7 @@ * various power domain DVFS including the core/cluster, certain system * clocks configuration, thermal sensors and many others. * - * Copyright (C) 2018-2021 ARM Ltd. + * Copyright (C) 2018-2025 ARM Ltd. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -24,6 +24,7 @@ #include <linux/io.h> #include <linux/io-64-nonatomic-hi-lo.h> #include <linux/kernel.h> +#include <linux/kmod.h> #include <linux/ktime.h> #include <linux/hashtable.h> #include <linux/list.h> @@ -33,19 +34,22 @@ #include <linux/processor.h> #include <linux/refcount.h> #include <linux/slab.h> +#include <linux/xarray.h> #include "common.h" #include "notify.h" +#include "quirks.h" #include "raw_mode.h" #define CREATE_TRACE_POINTS #include <trace/events/scmi.h> +#define SCMI_VENDOR_MODULE_ALIAS_FMT "scmi-protocol-0x%02x-%s" + static DEFINE_IDA(scmi_id); -static DEFINE_IDR(scmi_protocols); -static DEFINE_SPINLOCK(protocol_lock); +static DEFINE_XARRAY(scmi_protocols); /* List of all SCMI devices active in system */ static LIST_HEAD(scmi_list); @@ -117,12 +121,14 @@ struct scmi_protocol_instance { * @name: Name of this SCMI instance * @type: Type of this SCMI instance * @is_atomic: Flag to state if the transport of this instance is atomic + * @counters: An array of atomic_c's used for tracking statistics (if enabled) */ struct scmi_debug_info { struct dentry *top_dentry; const char *name; const char *type; bool is_atomic; + atomic_t counters[SCMI_DEBUG_COUNTERS_LAST]; }; /** @@ -147,12 +153,6 @@ struct scmi_debug_info { * base protocol * @active_protocols: IDR storing device_nodes for protocols actually defined * in the DT and confirmed as implemented by fw. 
- * @atomic_threshold: Optional system wide DT-configured threshold, expressed - * in microseconds, for atomic operations. - * Only SCMI synchronous commands reported by the platform - * to have an execution latency lesser-equal to the threshold - * should be considered for atomic mode operation: such - * decision is finally left up to the SCMI drivers. * @notify_priv: Pointer to private data structure specific to notifications. * @node: List head * @users: Number of users of this instance @@ -178,7 +178,6 @@ struct scmi_info { struct mutex protocols_mtx; u8 *protocols_imp; struct idr active_protocols; - unsigned int atomic_threshold; void *notify_priv; struct list_head node; int users; @@ -194,11 +193,140 @@ struct scmi_info { #define bus_nb_to_scmi_info(nb) container_of(nb, struct scmi_info, bus_nb) #define req_nb_to_scmi_info(nb) container_of(nb, struct scmi_info, dev_req_nb) -static const struct scmi_protocol *scmi_protocol_get(int protocol_id) +static void scmi_rx_callback(struct scmi_chan_info *cinfo, + u32 msg_hdr, void *priv); +static void scmi_bad_message_trace(struct scmi_chan_info *cinfo, + u32 msg_hdr, enum scmi_bad_msg err); + +static struct scmi_transport_core_operations scmi_trans_core_ops = { + .bad_message_trace = scmi_bad_message_trace, + .rx_callback = scmi_rx_callback, +}; + +static unsigned long +scmi_vendor_protocol_signature(unsigned int protocol_id, char *vendor_id, + char *sub_vendor_id, u32 impl_ver) +{ + char *signature, *p; + unsigned long hash = 0; + + /* vendor_id/sub_vendor_id guaranteed <= SCMI_SHORT_NAME_MAX_SIZE */ + signature = kasprintf(GFP_KERNEL, "%02X|%s|%s|0x%08X", protocol_id, + vendor_id ?: "", sub_vendor_id ?: "", impl_ver); + if (!signature) + return 0; + + p = signature; + while (*p) + hash = partial_name_hash(tolower(*p++), hash); + hash = end_name_hash(hash); + + kfree(signature); + + return hash; +} + +static unsigned long +scmi_protocol_key_calculate(int protocol_id, char *vendor_id, + char *sub_vendor_id, u32 impl_ver) +{ + if (protocol_id < SCMI_PROTOCOL_VENDOR_BASE) + return protocol_id; + else + return scmi_vendor_protocol_signature(protocol_id, vendor_id, + sub_vendor_id, impl_ver); +} + +static const struct scmi_protocol * +__scmi_vendor_protocol_lookup(int protocol_id, char *vendor_id, + char *sub_vendor_id, u32 impl_ver) +{ + unsigned long key; + struct scmi_protocol *proto = NULL; + + key = scmi_protocol_key_calculate(protocol_id, vendor_id, + sub_vendor_id, impl_ver); + if (key) + proto = xa_load(&scmi_protocols, key); + + return proto; +} + +static const struct scmi_protocol * +scmi_vendor_protocol_lookup(int protocol_id, char *vendor_id, + char *sub_vendor_id, u32 impl_ver) +{ + const struct scmi_protocol *proto = NULL; + + /* Searching for closest match ...*/ + proto = __scmi_vendor_protocol_lookup(protocol_id, vendor_id, + sub_vendor_id, impl_ver); + if (proto) + return proto; + + /* Any match just on vendor/sub_vendor ? */ + if (impl_ver) { + proto = __scmi_vendor_protocol_lookup(protocol_id, vendor_id, + sub_vendor_id, 0); + if (proto) + return proto; + } + + /* Any match just on the vendor ? 
*/ + if (sub_vendor_id) + proto = __scmi_vendor_protocol_lookup(protocol_id, vendor_id, + NULL, 0); + return proto; +} + +static const struct scmi_protocol * +scmi_vendor_protocol_get(int protocol_id, struct scmi_revision_info *version) { const struct scmi_protocol *proto; - proto = idr_find(&scmi_protocols, protocol_id); + proto = scmi_vendor_protocol_lookup(protocol_id, version->vendor_id, + version->sub_vendor_id, + version->impl_ver); + if (!proto) { + int ret; + + pr_debug("Looking for '" SCMI_VENDOR_MODULE_ALIAS_FMT "'\n", + protocol_id, version->vendor_id); + + /* Note that vendor_id is mandatory for vendor protocols */ + ret = request_module(SCMI_VENDOR_MODULE_ALIAS_FMT, + protocol_id, version->vendor_id); + if (ret) { + pr_warn("Problem loading module for protocol 0x%x\n", + protocol_id); + return NULL; + } + + /* Lookup again, once modules loaded */ + proto = scmi_vendor_protocol_lookup(protocol_id, + version->vendor_id, + version->sub_vendor_id, + version->impl_ver); + } + + if (proto) + pr_info("Loaded SCMI Vendor Protocol 0x%x - %s %s %X\n", + protocol_id, proto->vendor_id ?: "", + proto->sub_vendor_id ?: "", proto->impl_ver); + + return proto; +} + +static const struct scmi_protocol * +scmi_protocol_get(int protocol_id, struct scmi_revision_info *version) +{ + const struct scmi_protocol *proto = NULL; + + if (protocol_id < SCMI_PROTOCOL_VENDOR_BASE) + proto = xa_load(&scmi_protocols, protocol_id); + else + proto = scmi_vendor_protocol_get(protocol_id, version); + if (!proto || !try_module_get(proto->owner)) { pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id); return NULL; @@ -209,18 +337,38 @@ static const struct scmi_protocol *scmi_protocol_get(int protocol_id) return proto; } -static void scmi_protocol_put(int protocol_id) +static void scmi_protocol_put(const struct scmi_protocol *proto) { - const struct scmi_protocol *proto; - - proto = idr_find(&scmi_protocols, protocol_id); if (proto) module_put(proto->owner); } +static int scmi_vendor_protocol_check(const struct scmi_protocol *proto) +{ + if (!proto->vendor_id) { + pr_err("missing vendor_id for protocol 0x%x\n", proto->id); + return -EINVAL; + } + + if (strlen(proto->vendor_id) >= SCMI_SHORT_NAME_MAX_SIZE) { + pr_err("malformed vendor_id for protocol 0x%x\n", proto->id); + return -EINVAL; + } + + if (proto->sub_vendor_id && + strlen(proto->sub_vendor_id) >= SCMI_SHORT_NAME_MAX_SIZE) { + pr_err("malformed sub_vendor_id for protocol 0x%x\n", + proto->id); + return -EINVAL; + } + + return 0; +} + int scmi_protocol_register(const struct scmi_protocol *proto) { int ret; + unsigned long key; if (!proto) { pr_err("invalid protocol\n"); @@ -232,17 +380,30 @@ int scmi_protocol_register(const struct scmi_protocol *proto) return -EINVAL; } - spin_lock(&protocol_lock); - ret = idr_alloc(&scmi_protocols, (void *)proto, - proto->id, proto->id + 1, GFP_ATOMIC); - spin_unlock(&protocol_lock); - if (ret != proto->id) { - pr_err("unable to allocate SCMI idr slot for 0x%x - err %d\n", + if (proto->id >= SCMI_PROTOCOL_VENDOR_BASE && + scmi_vendor_protocol_check(proto)) + return -EINVAL; + + /* + * Calculate a protocol key to register this protocol with the core; + * key value 0 is considered invalid. 
+ */ + key = scmi_protocol_key_calculate(proto->id, proto->vendor_id, + proto->sub_vendor_id, + proto->impl_ver); + if (!key) + return -EINVAL; + + ret = xa_insert(&scmi_protocols, key, (void *)proto, GFP_KERNEL); + if (ret) { + pr_err("unable to allocate SCMI protocol slot for 0x%x - err %d\n", proto->id, ret); return ret; } - pr_debug("Registered SCMI Protocol 0x%x\n", proto->id); + pr_debug("Registered SCMI Protocol 0x%x - %s %s 0x%08X\n", + proto->id, proto->vendor_id, proto->sub_vendor_id, + proto->impl_ver); return 0; } @@ -250,9 +411,15 @@ EXPORT_SYMBOL_GPL(scmi_protocol_register); void scmi_protocol_unregister(const struct scmi_protocol *proto) { - spin_lock(&protocol_lock); - idr_remove(&scmi_protocols, proto->id); - spin_unlock(&protocol_lock); + unsigned long key; + + key = scmi_protocol_key_calculate(proto->id, proto->vendor_id, + proto->sub_vendor_id, + proto->impl_ver); + if (!key) + return; + + xa_erase(&scmi_protocols, key); pr_debug("Unregistered SCMI Protocol 0x%x\n", proto->id); } @@ -273,14 +440,8 @@ static void scmi_create_protocol_devices(struct device_node *np, struct scmi_info *info, int prot_id, const char *name) { - struct scmi_device *sdev; - mutex_lock(&info->devreq_mtx); - sdev = scmi_device_create(np, info->dev, prot_id, name); - if (name && !sdev) - dev_err(info->dev, - "failed to create device for protocol 0x%X (%s)\n", - prot_id, name); + scmi_device_create(np, info->dev, prot_id, name); mutex_unlock(&info->devreq_mtx); } @@ -697,6 +858,45 @@ scmi_xfer_lookup_unlocked(struct scmi_xfers_info *minfo, u16 xfer_id) } /** + * scmi_bad_message_trace - A helper to trace weird messages + * + * @cinfo: A reference to the channel descriptor on which the message was + * received + * @msg_hdr: Message header to track + * @err: A specific error code used as a status value in traces. + * + * This helper can be used to trace any kind of weird, incomplete, unexpected, + * timed-out message that arrives and as such, can be traced only referring to + * the header content, since the payload is missing/unreliable. 
+ */ +static void scmi_bad_message_trace(struct scmi_chan_info *cinfo, u32 msg_hdr, + enum scmi_bad_msg err) +{ + char *tag; + struct scmi_info *info = handle_to_scmi_info(cinfo->handle); + + switch (MSG_XTRACT_TYPE(msg_hdr)) { + case MSG_TYPE_COMMAND: + tag = "!RESP"; + break; + case MSG_TYPE_DELAYED_RESP: + tag = "!DLYD"; + break; + case MSG_TYPE_NOTIFICATION: + tag = "!NOTI"; + break; + default: + tag = "!UNKN"; + break; + } + + trace_scmi_msg_dump(info->id, cinfo->id, + MSG_XTRACT_PROT_ID(msg_hdr), + MSG_XTRACT_ID(msg_hdr), tag, + MSG_XTRACT_TOKEN(msg_hdr), err, NULL, 0); +} + +/** * scmi_msg_response_validate - Validate message type against state of related * xfer * @@ -822,6 +1022,10 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr) "Message for %d type %d is not expected!\n", xfer_id, msg_type); spin_unlock_irqrestore(&minfo->xfer_lock, flags); + + scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNEXPECTED); + scmi_inc_count(info->dbg->counters, ERR_MSG_UNEXPECTED); + return xfer; } refcount_inc(&xfer->users); @@ -846,6 +1050,10 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr) dev_err(cinfo->dev, "Invalid message type:%d for %d - HDR:0x%X state:%d\n", msg_type, xfer_id, msg_hdr, xfer->state); + + scmi_bad_message_trace(cinfo, msg_hdr, MSG_INVALID); + scmi_inc_count(info->dbg->counters, ERR_MSG_INVALID); + /* On error the refcount incremented above has to be dropped */ __scmi_xfer_put(minfo, xfer); xfer = ERR_PTR(-EINVAL); @@ -864,6 +1072,11 @@ static inline void scmi_xfer_command_release(struct scmi_info *info, static inline void scmi_clear_channel(struct scmi_info *info, struct scmi_chan_info *cinfo) { + if (!cinfo->is_p2a) { + dev_warn(cinfo->dev, "Invalid clear on A2P channel !\n"); + return; + } + if (info->desc->ops->clear_channel) info->desc->ops->clear_channel(cinfo); } @@ -882,6 +1095,10 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo, if (IS_ERR(xfer)) { dev_err(dev, "failed to get free message slot (%ld)\n", PTR_ERR(xfer)); + + scmi_bad_message_trace(cinfo, msg_hdr, MSG_NOMEM); + scmi_inc_count(info->dbg->counters, ERR_MSG_NOMEM); + scmi_clear_channel(info, cinfo); return; } @@ -896,6 +1113,7 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo, trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id, xfer->hdr.id, "NOTI", xfer->hdr.seq, xfer->hdr.status, xfer->rx.buf, xfer->rx.len); + scmi_inc_count(info->dbg->counters, NOTIFICATION_OK); scmi_notify(cinfo->handle, xfer->hdr.protocol_id, xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts); @@ -955,8 +1173,10 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo, if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) { scmi_clear_channel(info, cinfo); complete(xfer->async_done); + scmi_inc_count(info->dbg->counters, DELAYED_RESPONSE_OK); } else { complete(&xfer->done); + scmi_inc_count(info->dbg->counters, RESPONSE_OK); } if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) { @@ -965,7 +1185,8 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo, * RX path since it will be already queued at the end of the TX * poll loop. */ - if (!xfer->hdr.poll_completion) + if (!xfer->hdr.poll_completion || + xfer->hdr.type == MSG_TYPE_DELAYED_RESP) scmi_raw_message_report(info->raw, xfer, SCMI_RAW_REPLY_QUEUE, cinfo->id); @@ -987,7 +1208,8 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo, * NOTE: This function will be invoked in IRQ context, hence should be * as optimal as possible. 
*/ -void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv) +static void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, + void *priv) { u8 msg_type = MSG_XTRACT_TYPE(msg_hdr); @@ -1001,6 +1223,7 @@ void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv) break; default: WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type); + scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNKNOWN); break; } } @@ -1021,7 +1244,8 @@ static void xfer_put(const struct scmi_protocol_handle *ph, } static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo, - struct scmi_xfer *xfer, ktime_t stop) + struct scmi_xfer *xfer, ktime_t stop, + bool *ooo) { struct scmi_info *info = handle_to_scmi_info(cinfo->handle); @@ -1030,7 +1254,7 @@ static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo, * in case of out-of-order receptions of delayed responses */ return info->desc->ops->poll_done(cinfo, xfer) || - try_wait_for_completion(&xfer->done) || + (*ooo = try_wait_for_completion(&xfer->done)) || ktime_after(ktime_get(), stop); } @@ -1039,6 +1263,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc, struct scmi_xfer *xfer, unsigned int timeout_ms) { int ret = 0; + struct scmi_info *info = handle_to_scmi_info(cinfo->handle); if (xfer->hdr.poll_completion) { /* @@ -1046,26 +1271,27 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc, * itself to support synchronous commands replies. */ if (!desc->sync_cmds_completed_on_ret) { + bool ooo = false; + /* * Poll on xfer using transport provided .poll_done(); * assumes no completion interrupt was available. */ ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms); - spin_until_cond(scmi_xfer_done_no_timeout(cinfo, - xfer, stop)); - if (ktime_after(ktime_get(), stop)) { + spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, + stop, &ooo)); + if (!ooo && !info->desc->ops->poll_done(cinfo, xfer)) { dev_err(dev, "timed out in resp(caller: %pS) - polling\n", (void *)_RET_IP_); ret = -ETIMEDOUT; + scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_POLLED_TIMEOUT); } } if (!ret) { unsigned long flags; - struct scmi_info *info = - handle_to_scmi_info(cinfo->handle); /* * Do not fetch_response if an out-of-order delayed @@ -1085,11 +1311,9 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc, "RESP" : "resp", xfer->hdr.seq, xfer->hdr.status, xfer->rx.buf, xfer->rx.len); + scmi_inc_count(info->dbg->counters, RESPONSE_POLLED_OK); if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) { - struct scmi_info *info = - handle_to_scmi_info(cinfo->handle); - scmi_raw_message_report(info->raw, xfer, SCMI_RAW_REPLY_QUEUE, cinfo->id); @@ -1102,6 +1326,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc, dev_err(dev, "timed out in resp(caller: %pS)\n", (void *)_RET_IP_); ret = -ETIMEDOUT; + scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_TIMEOUT); } } @@ -1185,13 +1410,15 @@ static int do_xfer(const struct scmi_protocol_handle *ph, !is_transport_polling_capable(info->desc)) { dev_warn_once(dev, "Polling mode is not supported by transport.\n"); + scmi_inc_count(info->dbg->counters, SENT_FAIL_POLLING_UNSUPPORTED); return -EINVAL; } cinfo = idr_find(&info->tx_idr, pi->proto->id); - if (unlikely(!cinfo)) + if (unlikely(!cinfo)) { + scmi_inc_count(info->dbg->counters, SENT_FAIL_CHANNEL_NOT_FOUND); return -EINVAL; - + } /* True ONLY if also supported by transport. 
*/ if (is_polling_enabled(cinfo, info->desc)) xfer->hdr.poll_completion = true; @@ -1223,16 +1450,20 @@ static int do_xfer(const struct scmi_protocol_handle *ph, ret = info->desc->ops->send_message(cinfo, xfer); if (ret < 0) { dev_dbg(dev, "Failed to send message %d\n", ret); + scmi_inc_count(info->dbg->counters, SENT_FAIL); return ret; } trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id, xfer->hdr.id, "CMND", xfer->hdr.seq, xfer->hdr.status, xfer->tx.buf, xfer->tx.len); + scmi_inc_count(info->dbg->counters, SENT_OK); ret = scmi_wait_for_message_response(cinfo, xfer); - if (!ret && xfer->hdr.status) + if (!ret && xfer->hdr.status) { ret = scmi_to_linux_errno(xfer->hdr.status); + scmi_inc_count(info->dbg->counters, ERR_PROTOCOL); + } if (info->desc->ops->mark_txdone) info->desc->ops->mark_txdone(cinfo, ret, xfer); @@ -1489,6 +1720,53 @@ out: } /** + * scmi_common_get_max_msg_size - Get maximum message size + * @ph: A protocol handle reference. + * + * Return: Maximum message size for the current protocol. + */ +static int scmi_common_get_max_msg_size(const struct scmi_protocol_handle *ph) +{ + const struct scmi_protocol_instance *pi = ph_to_pi(ph); + struct scmi_info *info = handle_to_scmi_info(pi->handle); + + return info->desc->max_msg_size; +} + +/** + * scmi_protocol_msg_check - Check protocol message attributes + * + * @ph: A reference to the protocol handle. + * @message_id: The ID of the message to check. + * @attributes: A parameter to optionally return the retrieved message + * attributes, in case of Success. + * + * An helper to check protocol message attributes for a specific protocol + * and message pair. + * + * Return: 0 on SUCCESS + */ +static int scmi_protocol_msg_check(const struct scmi_protocol_handle *ph, + u32 message_id, u32 *attributes) +{ + int ret; + struct scmi_xfer *t; + + ret = xfer_get_init(ph, PROTOCOL_MESSAGE_ATTRIBUTES, + sizeof(__le32), 0, &t); + if (ret) + return ret; + + put_unaligned_le32(message_id, t->tx.buf); + ret = do_xfer(ph, t); + if (!ret && attributes) + *attributes = get_unaligned_le32(t->rx.buf); + xfer_put(ph, t); + + return ret; +} + +/** * struct scmi_iterator - Iterator descriptor * @msg: A reference to the message TX buffer; filled by @prepare_message with * a proper custom command payload for each multi-part command request. 
@@ -1620,6 +1898,13 @@ struct scmi_msg_resp_desc_fc { __le32 db_preserve_hmask; }; +#define QUIRK_PERF_FC_FORCE \ + ({ \ + if (pi->proto->id == SCMI_PROTOCOL_PERF && \ + message_id == 0x8 /* PERF_LEVEL_GET */) \ + attributes |= BIT(0); \ + }) + static void scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph, u8 describe_id, u32 message_id, u32 valid_size, @@ -1629,6 +1914,7 @@ scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph, int ret; u32 flags; u64 phys_addr; + u32 attributes; u8 size; void __iomem *addr; struct scmi_xfer *t; @@ -1637,6 +1923,16 @@ scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph, struct scmi_msg_resp_desc_fc *resp; const struct scmi_protocol_instance *pi = ph_to_pi(ph); + /* Check if the MSG_ID supports fastchannel */ + ret = scmi_protocol_msg_check(ph, message_id, &attributes); + SCMI_QUIRK(perf_level_get_fc_force, QUIRK_PERF_FC_FORCE); + if (ret || !MSG_SUPPORTS_FASTCHANNEL(attributes)) { + dev_dbg(ph->dev, + "Skip FC init for 0x%02X/%d domain:%d - ret:%d\n", + pi->proto->id, message_id, domain, ret); + return; + } + if (!p_addr) { ret = -EINVAL; goto err_out; @@ -1751,54 +2047,12 @@ static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db) else if (db->width == 4) SCMI_PROTO_FC_RING_DB(32); else /* db->width == 8 */ -#ifdef CONFIG_64BIT SCMI_PROTO_FC_RING_DB(64); -#else - { - u64 val = 0; - - if (db->mask) - val = ioread64_hi_lo(db->addr) & db->mask; - iowrite64_hi_lo(db->set | val, db->addr); - } -#endif -} - -/** - * scmi_protocol_msg_check - Check protocol message attributes - * - * @ph: A reference to the protocol handle. - * @message_id: The ID of the message to check. - * @attributes: A parameter to optionally return the retrieved message - * attributes, in case of Success. - * - * An helper to check protocol message attributes for a specific protocol - * and message pair. 
- * - * Return: 0 on SUCCESS - */ -static int scmi_protocol_msg_check(const struct scmi_protocol_handle *ph, - u32 message_id, u32 *attributes) -{ - int ret; - struct scmi_xfer *t; - - ret = xfer_get_init(ph, PROTOCOL_MESSAGE_ATTRIBUTES, - sizeof(__le32), 0, &t); - if (ret) - return ret; - - put_unaligned_le32(message_id, t->tx.buf); - ret = do_xfer(ph, t); - if (!ret && attributes) - *attributes = get_unaligned_le32(t->rx.buf); - xfer_put(ph, t); - - return ret; } static const struct scmi_proto_helpers_ops helpers_ops = { .extended_name_get = scmi_common_extended_name_get, + .get_max_msg_size = scmi_common_get_max_msg_size, .iter_response_init = scmi_iterator_init, .iter_response_run = scmi_iterator_run, .protocol_msg_check = scmi_protocol_msg_check, @@ -1891,7 +2145,7 @@ scmi_alloc_init_protocol_instance(struct scmi_info *info, /* Protocol specific devres group */ gid = devres_open_group(handle->dev, NULL, GFP_KERNEL); if (!gid) { - scmi_protocol_put(proto->id); + scmi_protocol_put(proto); goto out; } @@ -1955,7 +2209,7 @@ scmi_alloc_init_protocol_instance(struct scmi_info *info, clean: /* Take care to put the protocol module's owner before releasing all */ - scmi_protocol_put(proto->id); + scmi_protocol_put(proto); devres_release_group(handle->dev, gid); out: return ERR_PTR(ret); @@ -1989,7 +2243,7 @@ scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id) const struct scmi_protocol *proto; /* Fails if protocol not registered on bus */ - proto = scmi_protocol_get(protocol_id); + proto = scmi_protocol_get(protocol_id, &info->version); if (proto) pi = scmi_alloc_init_protocol_instance(info, proto); else @@ -2044,7 +2298,7 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id) idr_remove(&info->protocols, protocol_id); - scmi_protocol_put(protocol_id); + scmi_protocol_put(pi->proto); devres_release_group(handle->dev, gid); dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n", @@ -2227,7 +2481,7 @@ static bool scmi_is_transport_atomic(const struct scmi_handle *handle, ret = info->desc->atomic_enabled && is_transport_polling_capable(info->desc); if (ret && atomic_threshold) - *atomic_threshold = info->atomic_threshold; + *atomic_threshold = info->desc->atomic_threshold; return ret; } @@ -2425,7 +2679,9 @@ static int scmi_chan_setup(struct scmi_info *info, struct device_node *of_node, if (!cinfo) return -ENOMEM; + cinfo->is_p2a = !tx; cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms; + cinfo->max_msg_size = info->desc->max_msg_size; /* Create a unique name for this transport device */ snprintf(name, 32, "__scmi_transport_device_%s_%02X", @@ -2491,6 +2747,10 @@ scmi_txrx_setup(struct scmi_info *info, struct device_node *of_node, ret = 0; } + if (ret) + dev_err(info->dev, + "failed to setup channel for protocol:0x%X\n", prot_id); + return ret; } @@ -2515,14 +2775,14 @@ scmi_txrx_setup(struct scmi_info *info, struct device_node *of_node, static int scmi_channels_setup(struct scmi_info *info) { int ret; - struct device_node *child, *top_np = info->dev->of_node; + struct device_node *top_np = info->dev->of_node; /* Initialize a common generic channel at first */ ret = scmi_txrx_setup(info, top_np, SCMI_PROTOCOL_BASE); if (ret) return ret; - for_each_available_child_of_node(top_np, child) { + for_each_available_child_of_node_scoped(top_np, child) { u32 prot_id; if (of_property_read_u32(child, "reg", &prot_id)) @@ -2533,10 +2793,8 @@ static int scmi_channels_setup(struct scmi_info *info) "Out of range protocol %d\n", prot_id); ret = 
scmi_txrx_setup(info, child, prot_id); - if (ret) { - of_node_put(child); + if (ret) return ret; - } } return 0; @@ -2584,9 +2842,8 @@ static int scmi_bus_notifier(struct notifier_block *nb, struct scmi_info *info = bus_nb_to_scmi_info(nb); struct scmi_device *sdev = to_scmi_dev(data); - /* Skip transport devices and devices of different SCMI instances */ - if (!strncmp(sdev->name, "__scmi_transport_device", 23) || - sdev->dev.parent != info->dev) + /* Skip devices of different SCMI instances */ + if (sdev->dev.parent != info->dev) return NOTIFY_DONE; switch (action) { @@ -2640,6 +2897,55 @@ static int scmi_device_request_notifier(struct notifier_block *nb, return NOTIFY_OK; } +static const char * const dbg_counter_strs[] = { + "sent_ok", + "sent_fail", + "sent_fail_polling_unsupported", + "sent_fail_channel_not_found", + "response_ok", + "notification_ok", + "delayed_response_ok", + "xfers_response_timeout", + "xfers_response_polled_timeout", + "response_polled_ok", + "err_msg_unexpected", + "err_msg_invalid", + "err_msg_nomem", + "err_protocol", +}; + +static ssize_t reset_all_on_write(struct file *filp, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct scmi_debug_info *dbg = filp->private_data; + + for (int i = 0; i < SCMI_DEBUG_COUNTERS_LAST; i++) + atomic_set(&dbg->counters[i], 0); + + return count; +} + +static const struct file_operations fops_reset_counts = { + .owner = THIS_MODULE, + .open = simple_open, + .write = reset_all_on_write, +}; + +static void scmi_debugfs_counters_setup(struct scmi_debug_info *dbg, + struct dentry *trans) +{ + struct dentry *counters; + int idx; + + counters = debugfs_create_dir("counters", trans); + + for (idx = 0; idx < SCMI_DEBUG_COUNTERS_LAST; idx++) + debugfs_create_atomic_t(dbg_counter_strs[idx], 0600, counters, + &dbg->counters[idx]); + + debugfs_create_file("reset", 0200, counters, dbg, &fops_reset_counts); +} + static void scmi_debugfs_common_cleanup(void *d) { struct scmi_debug_info *dbg = d; @@ -2688,7 +2994,7 @@ static struct scmi_debug_info *scmi_debugfs_common_setup(struct scmi_info *info) (char **)&dbg->name); debugfs_create_u32("atomic_threshold_us", 0400, top_dentry, - &info->atomic_threshold); + (u32 *)&info->desc->atomic_threshold); debugfs_create_str("type", 0400, trans, (char **)&dbg->type); @@ -2706,13 +3012,14 @@ static struct scmi_debug_info *scmi_debugfs_common_setup(struct scmi_info *info) debugfs_create_u32("rx_max_msg", 0400, trans, (u32 *)&info->rx_minfo.max_msg); + if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) + scmi_debugfs_counters_setup(dbg, trans); + dbg->top_dentry = top_dentry; if (devm_add_action_or_reset(info->dev, - scmi_debugfs_common_cleanup, dbg)) { - scmi_debugfs_common_cleanup(dbg); + scmi_debugfs_common_cleanup, dbg)) return NULL; - } return dbg; } @@ -2757,9 +3064,72 @@ static int scmi_debugfs_raw_mode_setup(struct scmi_info *info) return ret; } +static const struct scmi_desc *scmi_transport_setup(struct device *dev) +{ + struct scmi_transport *trans; + int ret; + + trans = dev_get_platdata(dev); + if (!trans || !trans->supplier || !trans->core_ops) + return NULL; + + if (!device_link_add(dev, trans->supplier, DL_FLAG_AUTOREMOVE_CONSUMER)) { + dev_err(dev, + "Adding link to supplier transport device failed\n"); + return NULL; + } + + /* Provide core transport ops */ + *trans->core_ops = &scmi_trans_core_ops; + + dev_info(dev, "Using %s\n", dev_driver_string(trans->supplier)); + + ret = of_property_read_u32(dev->of_node, "arm,max-rx-timeout-ms", + &trans->desc.max_rx_timeout_ms); + if (ret 
&& ret != -EINVAL) + dev_err(dev, "Malformed arm,max-rx-timeout-ms DT property.\n"); + + ret = of_property_read_u32(dev->of_node, "arm,max-msg-size", + &trans->desc.max_msg_size); + if (ret && ret != -EINVAL) + dev_err(dev, "Malformed arm,max-msg-size DT property.\n"); + + ret = of_property_read_u32(dev->of_node, "arm,max-msg", + &trans->desc.max_msg); + if (ret && ret != -EINVAL) + dev_err(dev, "Malformed arm,max-msg DT property.\n"); + + dev_info(dev, + "SCMI max-rx-timeout: %dms / max-msg-size: %dbytes / max-msg: %d\n", + trans->desc.max_rx_timeout_ms, trans->desc.max_msg_size, + trans->desc.max_msg); + + /* System wide atomic threshold for atomic ops .. if any */ + if (!of_property_read_u32(dev->of_node, "atomic-threshold-us", + &trans->desc.atomic_threshold)) + dev_info(dev, + "SCMI System wide atomic threshold set to %u us\n", + trans->desc.atomic_threshold); + + return &trans->desc; +} + +static void scmi_enable_matching_quirks(struct scmi_info *info) +{ + struct scmi_revision_info *rev = &info->version; + + dev_dbg(info->dev, "Looking for quirks matching: %s/%s/0x%08X\n", + rev->vendor_id, rev->sub_vendor_id, rev->impl_ver); + + /* Enable applicable quirks */ + scmi_quirks_enable(info->dev, rev->vendor_id, + rev->sub_vendor_id, rev->impl_ver); +} + static int scmi_probe(struct platform_device *pdev) { int ret; + char *err_str = "probe failure\n"; struct scmi_handle *handle; const struct scmi_desc *desc; struct scmi_info *info; @@ -2767,9 +3137,12 @@ static int scmi_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *child, *np = dev->of_node; - desc = of_device_get_match_data(dev); - if (!desc) - return -EINVAL; + desc = scmi_transport_setup(dev); + if (!desc) { + err_str = "transport invalid\n"; + ret = -EINVAL; + goto out_err; + } info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); if (!info) @@ -2799,38 +3172,33 @@ static int scmi_probe(struct platform_device *pdev) handle->devm_protocol_acquire = scmi_devm_protocol_acquire; handle->devm_protocol_get = scmi_devm_protocol_get; handle->devm_protocol_put = scmi_devm_protocol_put; - - /* System wide atomic threshold for atomic ops .. 
if any */ - if (!of_property_read_u32(np, "atomic-threshold-us", - &info->atomic_threshold)) - dev_info(dev, - "SCMI System wide atomic threshold set to %d us\n", - info->atomic_threshold); handle->is_transport_atomic = scmi_is_transport_atomic; - if (desc->ops->link_supplier) { - ret = desc->ops->link_supplier(dev); - if (ret) - goto clear_ida; - } - /* Setup all channels described in the DT at first */ ret = scmi_channels_setup(info); - if (ret) + if (ret) { + err_str = "failed to setup channels\n"; goto clear_ida; + } ret = bus_register_notifier(&scmi_bus_type, &info->bus_nb); - if (ret) + if (ret) { + err_str = "failed to register bus notifier\n"; goto clear_txrx_setup; + } ret = blocking_notifier_chain_register(&scmi_requested_devices_nh, &info->dev_req_nb); - if (ret) + if (ret) { + err_str = "failed to register device notifier\n"; goto clear_bus_notifier; + } ret = scmi_xfer_info_init(info); - if (ret) + if (ret) { + err_str = "failed to init xfers pool\n"; goto clear_dev_req_notifier; + } if (scmi_top_dentry) { info->dbg = scmi_debugfs_common_setup(info); @@ -2867,9 +3235,11 @@ static int scmi_probe(struct platform_device *pdev) */ ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE); if (ret) { - dev_err(dev, "unable to communicate with SCMI\n"); - if (coex) + err_str = "unable to communicate with SCMI\n"; + if (coex) { + dev_err(dev, "%s", err_str); return 0; + } goto notification_exit; } @@ -2877,6 +3247,8 @@ static int scmi_probe(struct platform_device *pdev) list_add_tail(&info->node, &scmi_list); mutex_unlock(&scmi_list_mutex); + scmi_enable_matching_quirks(info); + for_each_available_child_of_node(np, child) { u32 prot_id; @@ -2923,7 +3295,9 @@ clear_txrx_setup: scmi_cleanup_txrx_channels(info); clear_ida: ida_free(&scmi_id, info->id); - return ret; + +out_err: + return dev_err_probe(dev, ret, "%s", err_str); } static void scmi_remove(struct platform_device *pdev) @@ -3008,86 +3382,16 @@ static struct attribute *versions_attrs[] = { }; ATTRIBUTE_GROUPS(versions); -/* Each compatible listed below must have descriptor associated with it */ -static const struct of_device_id scmi_of_match[] = { -#ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX - { .compatible = "arm,scmi", .data = &scmi_mailbox_desc }, -#endif -#ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE - { .compatible = "linaro,scmi-optee", .data = &scmi_optee_desc }, -#endif -#ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC - { .compatible = "arm,scmi-smc", .data = &scmi_smc_desc}, - { .compatible = "arm,scmi-smc-param", .data = &scmi_smc_desc}, - { .compatible = "qcom,scmi-smc", .data = &scmi_smc_desc}, -#endif -#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO - { .compatible = "arm,scmi-virtio", .data = &scmi_virtio_desc}, -#endif - { /* Sentinel */ }, -}; - -MODULE_DEVICE_TABLE(of, scmi_of_match); - static struct platform_driver scmi_driver = { .driver = { .name = "arm-scmi", .suppress_bind_attrs = true, - .of_match_table = scmi_of_match, .dev_groups = versions_groups, }, .probe = scmi_probe, - .remove_new = scmi_remove, + .remove = scmi_remove, }; -/** - * __scmi_transports_setup - Common helper to call transport-specific - * .init/.exit code if provided. - * - * @init: A flag to distinguish between init and exit. - * - * Note that, if provided, we invoke .init/.exit functions for all the - * transports currently compiled in. - * - * Return: 0 on Success. 
- */ -static inline int __scmi_transports_setup(bool init) -{ - int ret = 0; - const struct of_device_id *trans; - - for (trans = scmi_of_match; trans->data; trans++) { - const struct scmi_desc *tdesc = trans->data; - - if ((init && !tdesc->transport_init) || - (!init && !tdesc->transport_exit)) - continue; - - if (init) - ret = tdesc->transport_init(); - else - tdesc->transport_exit(); - - if (ret) { - pr_err("SCMI transport %s FAILED initialization!\n", - trans->compatible); - break; - } - } - - return ret; -} - -static int __init scmi_transports_init(void) -{ - return __scmi_transports_setup(true); -} - -static void __exit scmi_transports_exit(void) -{ - __scmi_transports_setup(false); -} - static struct dentry *scmi_debugfs_init(void) { struct dentry *d; @@ -3103,16 +3407,17 @@ static struct dentry *scmi_debugfs_init(void) static int __init scmi_driver_init(void) { - int ret; + scmi_quirks_initialize(); /* Bail out if no SCMI transport was configured */ if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT))) return -EINVAL; - /* Initialize any compiled-in transport which provided an init/exit */ - ret = scmi_transports_init(); - if (ret) - return ret; + if (IS_ENABLED(CONFIG_ARM_SCMI_HAVE_SHMEM)) + scmi_trans_core_ops.shmem = scmi_shared_mem_operations_get(); + + if (IS_ENABLED(CONFIG_ARM_SCMI_HAVE_MSG)) + scmi_trans_core_ops.msg = scmi_message_operations_get(); if (IS_ENABLED(CONFIG_ARM_SCMI_NEED_DEBUGFS)) scmi_top_dentry = scmi_debugfs_init(); @@ -3127,6 +3432,7 @@ static int __init scmi_driver_init(void) scmi_voltage_register(); scmi_system_register(); scmi_powercap_register(); + scmi_pinctrl_register(); return platform_driver_register(&scmi_driver); } @@ -3144,8 +3450,7 @@ static void __exit scmi_driver_exit(void) scmi_voltage_unregister(); scmi_system_unregister(); scmi_powercap_unregister(); - - scmi_transports_exit(); + scmi_pinctrl_unregister(); platform_driver_unregister(&scmi_driver); diff --git a/drivers/firmware/arm_scmi/msg.c b/drivers/firmware/arm_scmi/msg.c index d33a704e5814..2cc74e6bbd72 100644 --- a/drivers/firmware/arm_scmi/msg.c +++ b/drivers/firmware/arm_scmi/msg.c @@ -4,7 +4,7 @@ * * Derived from shm.c. * - * Copyright (C) 2019-2021 ARM Ltd. + * Copyright (C) 2019-2024 ARM Ltd. * Copyright (C) 2020-2021 OpenSynergy GmbH */ @@ -30,7 +30,7 @@ struct scmi_msg_payld { * * Return: transport SDU size. */ -size_t msg_command_size(struct scmi_xfer *xfer) +static size_t msg_command_size(struct scmi_xfer *xfer) { return sizeof(struct scmi_msg_payld) + xfer->tx.len; } @@ -42,7 +42,7 @@ size_t msg_command_size(struct scmi_xfer *xfer) * * Return: transport SDU size. 
*/ -size_t msg_response_size(struct scmi_xfer *xfer) +static size_t msg_response_size(struct scmi_xfer *xfer) { return sizeof(struct scmi_msg_payld) + sizeof(__le32) + xfer->rx.len; } @@ -53,7 +53,7 @@ size_t msg_response_size(struct scmi_xfer *xfer) * @msg: transport SDU for command * @xfer: message which is being sent */ -void msg_tx_prepare(struct scmi_msg_payld *msg, struct scmi_xfer *xfer) +static void msg_tx_prepare(struct scmi_msg_payld *msg, struct scmi_xfer *xfer) { msg->msg_header = cpu_to_le32(pack_scmi_header(&xfer->hdr)); if (xfer->tx.buf) @@ -67,7 +67,7 @@ void msg_tx_prepare(struct scmi_msg_payld *msg, struct scmi_xfer *xfer) * * Return: SCMI header */ -u32 msg_read_header(struct scmi_msg_payld *msg) +static u32 msg_read_header(struct scmi_msg_payld *msg) { return le32_to_cpu(msg->msg_header); } @@ -79,8 +79,8 @@ u32 msg_read_header(struct scmi_msg_payld *msg) * @len: transport SDU size * @xfer: message being responded to */ -void msg_fetch_response(struct scmi_msg_payld *msg, size_t len, - struct scmi_xfer *xfer) +static void msg_fetch_response(struct scmi_msg_payld *msg, + size_t len, struct scmi_xfer *xfer) { size_t prefix_len = sizeof(*msg) + sizeof(msg->msg_payload[0]); @@ -100,8 +100,8 @@ void msg_fetch_response(struct scmi_msg_payld *msg, size_t len, * @max_len: maximum SCMI payload size to fetch * @xfer: notification message */ -void msg_fetch_notification(struct scmi_msg_payld *msg, size_t len, - size_t max_len, struct scmi_xfer *xfer) +static void msg_fetch_notification(struct scmi_msg_payld *msg, size_t len, + size_t max_len, struct scmi_xfer *xfer) { xfer->rx.len = min_t(size_t, max_len, len >= sizeof(*msg) ? len - sizeof(*msg) : 0); @@ -109,3 +109,17 @@ void msg_fetch_notification(struct scmi_msg_payld *msg, size_t len, /* Take a copy to the rx buffer.. */ memcpy(xfer->rx.buf, msg->msg_payload, xfer->rx.len); } + +static const struct scmi_message_operations scmi_msg_ops = { + .tx_prepare = msg_tx_prepare, + .command_size = msg_command_size, + .response_size = msg_response_size, + .read_header = msg_read_header, + .fetch_response = msg_fetch_response, + .fetch_notification = msg_fetch_notification, +}; + +const struct scmi_message_operations *scmi_message_operations_get(void) +{ + return &scmi_msg_ops; +} diff --git a/drivers/firmware/arm_scmi/notify.c b/drivers/firmware/arm_scmi/notify.c index 27c52531194d..e160ecb22948 100644 --- a/drivers/firmware/arm_scmi/notify.c +++ b/drivers/firmware/arm_scmi/notify.c @@ -1513,17 +1513,12 @@ static int scmi_devm_notifier_register(struct scmi_device *sdev, static int scmi_devm_notifier_match(struct device *dev, void *res, void *data) { struct scmi_notifier_devres *dres = res; - struct scmi_notifier_devres *xres = data; + struct notifier_block *nb = data; - if (WARN_ON(!dres || !xres)) + if (WARN_ON(!dres || !nb)) return 0; - return dres->proto_id == xres->proto_id && - dres->evt_id == xres->evt_id && - dres->nb == xres->nb && - ((!dres->src_id && !xres->src_id) || - (dres->src_id && xres->src_id && - dres->__src_id == xres->__src_id)); + return dres->nb == nb; } /** @@ -1531,10 +1526,6 @@ static int scmi_devm_notifier_match(struct device *dev, void *res, void *data) * notifier_block for an event * @sdev: A reference to an scmi_device whose embedded struct device is to * be used for devres accounting. 
- * @proto_id: Protocol ID - * @evt_id: Event ID - * @src_id: Source ID, when NULL register for events coming form ALL possible - * sources * @nb: A standard notifier block to register for the specified event * * Generic devres managed helper to explicitly un-register a notifier_block @@ -1544,25 +1535,12 @@ static int scmi_devm_notifier_match(struct device *dev, void *res, void *data) * Return: 0 on Success */ static int scmi_devm_notifier_unregister(struct scmi_device *sdev, - u8 proto_id, u8 evt_id, - const u32 *src_id, struct notifier_block *nb) { int ret; - struct scmi_notifier_devres dres; - - dres.handle = sdev->handle; - dres.proto_id = proto_id; - dres.evt_id = evt_id; - if (src_id) { - dres.__src_id = *src_id; - dres.src_id = &dres.__src_id; - } else { - dres.src_id = NULL; - } ret = devres_release(&sdev->dev, scmi_devm_release_notifier, - scmi_devm_notifier_match, &dres); + scmi_devm_notifier_match, nb); WARN_ON(ret); diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c index 345fff167b52..c7e5a34b254b 100644 --- a/drivers/firmware/arm_scmi/perf.c +++ b/drivers/firmware/arm_scmi/perf.c @@ -310,7 +310,7 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph, } if (!dom_info->mult_factor) dev_warn(ph->dev, - "Wrong sustained perf/frequency(domain %d)\n", + "Wrong sustained perf/frequency(domain %d)\n", dom_info->id); strscpy(dom_info->info.name, attr->name, @@ -373,7 +373,7 @@ static int iter_perf_levels_update_state(struct scmi_iterator_state *st, return 0; } -static inline void +static inline int process_response_opp(struct device *dev, struct perf_dom_info *dom, struct scmi_opp *opp, unsigned int loop_idx, const struct scmi_msg_resp_perf_describe_levels *r) @@ -386,12 +386,16 @@ process_response_opp(struct device *dev, struct perf_dom_info *dom, le16_to_cpu(r->opp[loop_idx].transition_latency_us); ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL); - if (ret) - dev_warn(dev, "Failed to add opps_by_lvl at %d - ret:%d\n", - opp->perf, ret); + if (ret) { + dev_info(dev, FW_BUG "Failed to add opps_by_lvl at %d for %s - ret:%d\n", + opp->perf, dom->info.name, ret); + return ret; + } + + return 0; } -static inline void +static inline int process_response_opp_v4(struct device *dev, struct perf_dom_info *dom, struct scmi_opp *opp, unsigned int loop_idx, const struct scmi_msg_resp_perf_describe_levels_v4 *r) @@ -404,9 +408,11 @@ process_response_opp_v4(struct device *dev, struct perf_dom_info *dom, le16_to_cpu(r->opp[loop_idx].transition_latency_us); ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL); - if (ret) - dev_warn(dev, "Failed to add opps_by_lvl at %d - ret:%d\n", - opp->perf, ret); + if (ret) { + dev_info(dev, FW_BUG "Failed to add opps_by_lvl at %d for %s - ret:%d\n", + opp->perf, dom->info.name, ret); + return ret; + } /* Note that PERF v4 reports always five 32-bit words */ opp->indicative_freq = le32_to_cpu(r->opp[loop_idx].indicative_freq); @@ -415,13 +421,21 @@ process_response_opp_v4(struct device *dev, struct perf_dom_info *dom, ret = xa_insert(&dom->opps_by_idx, opp->level_index, opp, GFP_KERNEL); - if (ret) + if (ret) { dev_warn(dev, - "Failed to add opps_by_idx at %d - ret:%d\n", - opp->level_index, ret); + "Failed to add opps_by_idx at %d for %s - ret:%d\n", + opp->level_index, dom->info.name, ret); + + /* Cleanup by_lvl too */ + xa_erase(&dom->opps_by_lvl, opp->perf); + + return ret; + } hash_add(dom->opps_by_freq, &opp->hash, opp->indicative_freq); } + + return 0; } static int @@ -429,16 
+443,22 @@ iter_perf_levels_process_response(const struct scmi_protocol_handle *ph, const void *response, struct scmi_iterator_state *st, void *priv) { + int ret; struct scmi_opp *opp; struct scmi_perf_ipriv *p = priv; - opp = &p->perf_dom->opp[st->desc_index + st->loop_idx]; + opp = &p->perf_dom->opp[p->perf_dom->opp_count]; if (PROTOCOL_REV_MAJOR(p->version) <= 0x3) - process_response_opp(ph->dev, p->perf_dom, opp, st->loop_idx, - response); + ret = process_response_opp(ph->dev, p->perf_dom, opp, + st->loop_idx, response); else - process_response_opp_v4(ph->dev, p->perf_dom, opp, st->loop_idx, - response); + ret = process_response_opp_v4(ph->dev, p->perf_dom, opp, + st->loop_idx, response); + + /* Skip BAD duplicates received from firmware */ + if (ret) + return ret == -EBUSY ? 0 : ret; + p->perf_dom->opp_count++; dev_dbg(ph->dev, "Level %d Power %d Latency %dus Ifreq %d Index %d\n", @@ -879,7 +899,8 @@ static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle *ph, ret = dev_pm_opp_add_dynamic(dev, &data); if (ret) { - dev_warn(dev, "failed to add opp %luHz\n", freq); + dev_warn(dev, "[%d][%s]: Failed to add OPP[%d] %lu\n", + domain, dom->info.name, idx, freq); dev_pm_opp_remove_all_dynamic(dev); return ret; } diff --git a/drivers/firmware/arm_scmi/pinctrl.c b/drivers/firmware/arm_scmi/pinctrl.c new file mode 100644 index 000000000000..3855c98caf06 --- /dev/null +++ b/drivers/firmware/arm_scmi/pinctrl.c @@ -0,0 +1,917 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface (SCMI) Pinctrl Protocol + * + * Copyright (C) 2024 EPAM + * Copyright 2024 NXP + */ + +#include <asm/byteorder.h> +#include <linux/bits.h> +#include <linux/bitfield.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/scmi_protocol.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/types.h> + +#include "common.h" +#include "protocols.h" + +/* Updated only after ALL the mandatory features for that version are merged */ +#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x10000 + +#define GET_GROUPS_NR(x) le32_get_bits((x), GENMASK(31, 16)) +#define GET_PINS_NR(x) le32_get_bits((x), GENMASK(15, 0)) +#define GET_FUNCTIONS_NR(x) le32_get_bits((x), GENMASK(15, 0)) + +#define EXT_NAME_FLAG(x) le32_get_bits((x), BIT(31)) +#define NUM_ELEMS(x) le32_get_bits((x), GENMASK(15, 0)) + +#define REMAINING(x) le32_get_bits((x), GENMASK(31, 16)) +#define RETURNED(x) le32_get_bits((x), GENMASK(11, 0)) + +#define CONFIG_FLAG_MASK GENMASK(19, 18) +#define SELECTOR_MASK GENMASK(17, 16) +#define SKIP_CONFIGS_MASK GENMASK(15, 8) +#define CONFIG_TYPE_MASK GENMASK(7, 0) + +enum scmi_pinctrl_protocol_cmd { + PINCTRL_ATTRIBUTES = 0x3, + PINCTRL_LIST_ASSOCIATIONS = 0x4, + PINCTRL_SETTINGS_GET = 0x5, + PINCTRL_SETTINGS_CONFIGURE = 0x6, + PINCTRL_REQUEST = 0x7, + PINCTRL_RELEASE = 0x8, + PINCTRL_NAME_GET = 0x9, + PINCTRL_SET_PERMISSIONS = 0xa, +}; + +struct scmi_msg_settings_conf { + __le32 identifier; + __le32 function_id; + __le32 attributes; + __le32 configs[]; +}; + +struct scmi_msg_settings_get { + __le32 identifier; + __le32 attributes; +}; + +struct scmi_resp_settings_get { + __le32 function_selected; + __le32 num_configs; + __le32 configs[]; +}; + +struct scmi_msg_pinctrl_protocol_attributes { + __le32 attributes_low; + __le32 attributes_high; +}; + +struct scmi_msg_pinctrl_attributes { + __le32 identifier; + __le32 flags; +}; + +struct scmi_resp_pinctrl_attributes { + __le32 attributes; + u8 name[SCMI_SHORT_NAME_MAX_SIZE]; +}; + +struct scmi_msg_pinctrl_list_assoc 
{ + __le32 identifier; + __le32 flags; + __le32 index; +}; + +struct scmi_resp_pinctrl_list_assoc { + __le32 flags; + __le16 array[]; +}; + +struct scmi_msg_request { + __le32 identifier; + __le32 flags; +}; + +struct scmi_group_info { + char name[SCMI_MAX_STR_SIZE]; + bool present; + u32 *group_pins; + u32 nr_pins; +}; + +struct scmi_function_info { + char name[SCMI_MAX_STR_SIZE]; + bool present; + u32 *groups; + u32 nr_groups; +}; + +struct scmi_pin_info { + char name[SCMI_MAX_STR_SIZE]; + bool present; +}; + +struct scmi_pinctrl_info { + u32 version; + int nr_groups; + int nr_functions; + int nr_pins; + struct scmi_group_info *groups; + struct scmi_function_info *functions; + struct scmi_pin_info *pins; +}; + +static int scmi_pinctrl_attributes_get(const struct scmi_protocol_handle *ph, + struct scmi_pinctrl_info *pi) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_pinctrl_protocol_attributes *attr; + + ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0, sizeof(*attr), &t); + if (ret) + return ret; + + attr = t->rx.buf; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + pi->nr_functions = GET_FUNCTIONS_NR(attr->attributes_high); + pi->nr_groups = GET_GROUPS_NR(attr->attributes_low); + pi->nr_pins = GET_PINS_NR(attr->attributes_low); + if (pi->nr_pins == 0) { + dev_warn(ph->dev, "returned zero pins\n"); + ret = -EINVAL; + } + } + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int scmi_pinctrl_count_get(const struct scmi_protocol_handle *ph, + enum scmi_pinctrl_selector_type type) +{ + struct scmi_pinctrl_info *pi = ph->get_priv(ph); + + switch (type) { + case PIN_TYPE: + return pi->nr_pins; + case GROUP_TYPE: + return pi->nr_groups; + case FUNCTION_TYPE: + return pi->nr_functions; + default: + return -EINVAL; + } +} + +static int scmi_pinctrl_validate_id(const struct scmi_protocol_handle *ph, + u32 selector, + enum scmi_pinctrl_selector_type type) +{ + int value; + + value = scmi_pinctrl_count_get(ph, type); + if (value < 0) + return value; + + if (selector >= value || value == 0) + return -EINVAL; + + return 0; +} + +static int scmi_pinctrl_attributes(const struct scmi_protocol_handle *ph, + enum scmi_pinctrl_selector_type type, + u32 selector, char *name, + u32 *n_elems) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_pinctrl_attributes *tx; + struct scmi_resp_pinctrl_attributes *rx; + bool ext_name_flag; + + if (!name) + return -EINVAL; + + ret = scmi_pinctrl_validate_id(ph, selector, type); + if (ret) + return ret; + + ret = ph->xops->xfer_get_init(ph, PINCTRL_ATTRIBUTES, sizeof(*tx), + sizeof(*rx), &t); + if (ret) + return ret; + + tx = t->tx.buf; + rx = t->rx.buf; + tx->identifier = cpu_to_le32(selector); + tx->flags = cpu_to_le32(type); + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + if (n_elems) + *n_elems = NUM_ELEMS(rx->attributes); + + strscpy(name, rx->name, SCMI_SHORT_NAME_MAX_SIZE); + + ext_name_flag = !!EXT_NAME_FLAG(rx->attributes); + } + + ph->xops->xfer_put(ph, t); + + if (ret) + return ret; + /* + * If supported overwrite short name with the extended one; + * on error just carry on and use already provided short name. 
+ */ + if (ext_name_flag) + ret = ph->hops->extended_name_get(ph, PINCTRL_NAME_GET, + selector, (u32 *)&type, name, + SCMI_MAX_STR_SIZE); + return ret; +} + +struct scmi_pinctrl_ipriv { + u32 selector; + enum scmi_pinctrl_selector_type type; + u32 *array; +}; + +static void iter_pinctrl_assoc_prepare_message(void *message, + u32 desc_index, + const void *priv) +{ + struct scmi_msg_pinctrl_list_assoc *msg = message; + const struct scmi_pinctrl_ipriv *p = priv; + + msg->identifier = cpu_to_le32(p->selector); + msg->flags = cpu_to_le32(p->type); + msg->index = cpu_to_le32(desc_index); +} + +static int iter_pinctrl_assoc_update_state(struct scmi_iterator_state *st, + const void *response, void *priv) +{ + const struct scmi_resp_pinctrl_list_assoc *r = response; + + st->num_returned = RETURNED(r->flags); + st->num_remaining = REMAINING(r->flags); + + return 0; +} + +static int +iter_pinctrl_assoc_process_response(const struct scmi_protocol_handle *ph, + const void *response, + struct scmi_iterator_state *st, void *priv) +{ + const struct scmi_resp_pinctrl_list_assoc *r = response; + struct scmi_pinctrl_ipriv *p = priv; + + p->array[st->desc_index + st->loop_idx] = + le16_to_cpu(r->array[st->loop_idx]); + + return 0; +} + +static int scmi_pinctrl_list_associations(const struct scmi_protocol_handle *ph, + u32 selector, + enum scmi_pinctrl_selector_type type, + u16 size, u32 *array) +{ + int ret; + void *iter; + struct scmi_iterator_ops ops = { + .prepare_message = iter_pinctrl_assoc_prepare_message, + .update_state = iter_pinctrl_assoc_update_state, + .process_response = iter_pinctrl_assoc_process_response, + }; + struct scmi_pinctrl_ipriv ipriv = { + .selector = selector, + .type = type, + .array = array, + }; + + if (!array || !size || type == PIN_TYPE) + return -EINVAL; + + ret = scmi_pinctrl_validate_id(ph, selector, type); + if (ret) + return ret; + + iter = ph->hops->iter_response_init(ph, &ops, size, + PINCTRL_LIST_ASSOCIATIONS, + sizeof(struct scmi_msg_pinctrl_list_assoc), + &ipriv); + if (IS_ERR(iter)) + return PTR_ERR(iter); + + return ph->hops->iter_response_run(iter); +} + +struct scmi_settings_get_ipriv { + u32 selector; + enum scmi_pinctrl_selector_type type; + bool get_all; + unsigned int *nr_configs; + enum scmi_pinctrl_conf_type *config_types; + u32 *config_values; +}; + +static void +iter_pinctrl_settings_get_prepare_message(void *message, u32 desc_index, + const void *priv) +{ + struct scmi_msg_settings_get *msg = message; + const struct scmi_settings_get_ipriv *p = priv; + u32 attributes; + + attributes = FIELD_PREP(SELECTOR_MASK, p->type); + + if (p->get_all) { + attributes |= FIELD_PREP(CONFIG_FLAG_MASK, 1) | + FIELD_PREP(SKIP_CONFIGS_MASK, desc_index); + } else { + attributes |= FIELD_PREP(CONFIG_TYPE_MASK, p->config_types[0]); + } + + msg->attributes = cpu_to_le32(attributes); + msg->identifier = cpu_to_le32(p->selector); +} + +static int +iter_pinctrl_settings_get_update_state(struct scmi_iterator_state *st, + const void *response, void *priv) +{ + const struct scmi_resp_settings_get *r = response; + struct scmi_settings_get_ipriv *p = priv; + + if (p->get_all) { + st->num_returned = le32_get_bits(r->num_configs, GENMASK(7, 0)); + st->num_remaining = le32_get_bits(r->num_configs, GENMASK(31, 24)); + } else { + st->num_returned = 1; + st->num_remaining = 0; + } + + return 0; +} + +static int +iter_pinctrl_settings_get_process_response(const struct scmi_protocol_handle *ph, + const void *response, + struct scmi_iterator_state *st, + void *priv) +{ + const struct 
scmi_resp_settings_get *r = response; + struct scmi_settings_get_ipriv *p = priv; + u32 type = le32_get_bits(r->configs[st->loop_idx * 2], GENMASK(7, 0)); + u32 val = le32_to_cpu(r->configs[st->loop_idx * 2 + 1]); + + if (p->get_all) { + p->config_types[st->desc_index + st->loop_idx] = type; + } else { + if (p->config_types[0] != type) + return -EINVAL; + } + + p->config_values[st->desc_index + st->loop_idx] = val; + ++*p->nr_configs; + + return 0; +} + +static int +scmi_pinctrl_settings_get(const struct scmi_protocol_handle *ph, u32 selector, + enum scmi_pinctrl_selector_type type, + unsigned int *nr_configs, + enum scmi_pinctrl_conf_type *config_types, + u32 *config_values) +{ + int ret; + void *iter; + unsigned int max_configs = *nr_configs; + struct scmi_iterator_ops ops = { + .prepare_message = iter_pinctrl_settings_get_prepare_message, + .update_state = iter_pinctrl_settings_get_update_state, + .process_response = iter_pinctrl_settings_get_process_response, + }; + struct scmi_settings_get_ipriv ipriv = { + .selector = selector, + .type = type, + .get_all = (max_configs > 1), + .nr_configs = nr_configs, + .config_types = config_types, + .config_values = config_values, + }; + + if (!config_types || !config_values || type == FUNCTION_TYPE) + return -EINVAL; + + ret = scmi_pinctrl_validate_id(ph, selector, type); + if (ret) + return ret; + + /* Prepare to count returned configs */ + *nr_configs = 0; + iter = ph->hops->iter_response_init(ph, &ops, max_configs, + PINCTRL_SETTINGS_GET, + sizeof(struct scmi_msg_settings_get), + &ipriv); + if (IS_ERR(iter)) + return PTR_ERR(iter); + + return ph->hops->iter_response_run(iter); +} + +static int scmi_pinctrl_settings_get_one(const struct scmi_protocol_handle *ph, + u32 selector, + enum scmi_pinctrl_selector_type type, + enum scmi_pinctrl_conf_type config_type, + u32 *config_value) +{ + unsigned int nr_configs = 1; + + return scmi_pinctrl_settings_get(ph, selector, type, &nr_configs, + &config_type, config_value); +} + +static int scmi_pinctrl_settings_get_all(const struct scmi_protocol_handle *ph, + u32 selector, + enum scmi_pinctrl_selector_type type, + unsigned int *nr_configs, + enum scmi_pinctrl_conf_type *config_types, + u32 *config_values) +{ + if (!nr_configs || *nr_configs == 0) + return -EINVAL; + + return scmi_pinctrl_settings_get(ph, selector, type, nr_configs, + config_types, config_values); +} + +static int +scmi_pinctrl_settings_conf(const struct scmi_protocol_handle *ph, + u32 selector, + enum scmi_pinctrl_selector_type type, + u32 nr_configs, + enum scmi_pinctrl_conf_type *config_type, + u32 *config_value) +{ + struct scmi_xfer *t; + struct scmi_msg_settings_conf *tx; + u32 attributes; + int ret, i; + u32 configs_in_chunk, conf_num = 0; + u32 chunk; + int max_msg_size = ph->hops->get_max_msg_size(ph); + + if (!config_type || !config_value || type == FUNCTION_TYPE) + return -EINVAL; + + ret = scmi_pinctrl_validate_id(ph, selector, type); + if (ret) + return ret; + + configs_in_chunk = (max_msg_size - sizeof(*tx)) / (sizeof(__le32) * 2); + while (conf_num < nr_configs) { + chunk = (nr_configs - conf_num > configs_in_chunk) ? 
+ configs_in_chunk : nr_configs - conf_num; + + ret = ph->xops->xfer_get_init(ph, PINCTRL_SETTINGS_CONFIGURE, + sizeof(*tx) + + chunk * 2 * sizeof(__le32), 0, &t); + if (ret) + break; + + tx = t->tx.buf; + tx->identifier = cpu_to_le32(selector); + tx->function_id = cpu_to_le32(0xFFFFFFFF); + attributes = FIELD_PREP(GENMASK(1, 0), type) | + FIELD_PREP(GENMASK(9, 2), chunk); + tx->attributes = cpu_to_le32(attributes); + + for (i = 0; i < chunk; i++) { + tx->configs[i * 2] = + cpu_to_le32(config_type[conf_num + i]); + tx->configs[i * 2 + 1] = + cpu_to_le32(config_value[conf_num + i]); + } + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + + if (ret) + break; + + conf_num += chunk; + } + + return ret; +} + +static int scmi_pinctrl_function_select(const struct scmi_protocol_handle *ph, + u32 group, + enum scmi_pinctrl_selector_type type, + u32 function_id) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_settings_conf *tx; + u32 attributes; + + ret = scmi_pinctrl_validate_id(ph, group, type); + if (ret) + return ret; + + ret = ph->xops->xfer_get_init(ph, PINCTRL_SETTINGS_CONFIGURE, + sizeof(*tx), 0, &t); + if (ret) + return ret; + + tx = t->tx.buf; + tx->identifier = cpu_to_le32(group); + tx->function_id = cpu_to_le32(function_id); + attributes = FIELD_PREP(GENMASK(1, 0), type) | BIT(10); + tx->attributes = cpu_to_le32(attributes); + + ret = ph->xops->do_xfer(ph, t); + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_pinctrl_request_free(const struct scmi_protocol_handle *ph, + u32 identifier, + enum scmi_pinctrl_selector_type type, + enum scmi_pinctrl_protocol_cmd cmd) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_request *tx; + + if (type == FUNCTION_TYPE) + return -EINVAL; + + if (cmd != PINCTRL_REQUEST && cmd != PINCTRL_RELEASE) + return -EINVAL; + + ret = scmi_pinctrl_validate_id(ph, identifier, type); + if (ret) + return ret; + + ret = ph->xops->xfer_get_init(ph, cmd, sizeof(*tx), 0, &t); + if (ret) + return ret; + + tx = t->tx.buf; + tx->identifier = cpu_to_le32(identifier); + tx->flags = cpu_to_le32(type); + + ret = ph->xops->do_xfer(ph, t); + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_pinctrl_pin_request(const struct scmi_protocol_handle *ph, + u32 pin) +{ + return scmi_pinctrl_request_free(ph, pin, PIN_TYPE, PINCTRL_REQUEST); +} + +static int scmi_pinctrl_pin_free(const struct scmi_protocol_handle *ph, u32 pin) +{ + return scmi_pinctrl_request_free(ph, pin, PIN_TYPE, PINCTRL_RELEASE); +} + +static int scmi_pinctrl_get_group_info(const struct scmi_protocol_handle *ph, + u32 selector, + struct scmi_group_info *group) +{ + int ret; + + ret = scmi_pinctrl_attributes(ph, GROUP_TYPE, selector, group->name, + &group->nr_pins); + if (ret) + return ret; + + if (!group->nr_pins) { + dev_err(ph->dev, "Group %d has 0 elements", selector); + return -ENODATA; + } + + group->group_pins = kmalloc_array(group->nr_pins, + sizeof(*group->group_pins), + GFP_KERNEL); + if (!group->group_pins) + return -ENOMEM; + + ret = scmi_pinctrl_list_associations(ph, selector, GROUP_TYPE, + group->nr_pins, group->group_pins); + if (ret) { + kfree(group->group_pins); + return ret; + } + + group->present = true; + return 0; +} + +static int scmi_pinctrl_get_group_name(const struct scmi_protocol_handle *ph, + u32 selector, const char **name) +{ + struct scmi_pinctrl_info *pi = ph->get_priv(ph); + + if (!name) + return -EINVAL; + + if (selector >= pi->nr_groups || pi->nr_groups == 0) + return -EINVAL; + + if (!pi->groups[selector].present) { + int ret; 
+ + ret = scmi_pinctrl_get_group_info(ph, selector, + &pi->groups[selector]); + if (ret) + return ret; + } + + *name = pi->groups[selector].name; + + return 0; +} + +static int scmi_pinctrl_group_pins_get(const struct scmi_protocol_handle *ph, + u32 selector, const u32 **pins, + u32 *nr_pins) +{ + struct scmi_pinctrl_info *pi = ph->get_priv(ph); + + if (!pins || !nr_pins) + return -EINVAL; + + if (selector >= pi->nr_groups || pi->nr_groups == 0) + return -EINVAL; + + if (!pi->groups[selector].present) { + int ret; + + ret = scmi_pinctrl_get_group_info(ph, selector, + &pi->groups[selector]); + if (ret) + return ret; + } + + *pins = pi->groups[selector].group_pins; + *nr_pins = pi->groups[selector].nr_pins; + + return 0; +} + +static int scmi_pinctrl_get_function_info(const struct scmi_protocol_handle *ph, + u32 selector, + struct scmi_function_info *func) +{ + int ret; + + ret = scmi_pinctrl_attributes(ph, FUNCTION_TYPE, selector, func->name, + &func->nr_groups); + if (ret) + return ret; + + if (!func->nr_groups) { + dev_err(ph->dev, "Function %d has 0 elements", selector); + return -ENODATA; + } + + func->groups = kmalloc_array(func->nr_groups, sizeof(*func->groups), + GFP_KERNEL); + if (!func->groups) + return -ENOMEM; + + ret = scmi_pinctrl_list_associations(ph, selector, FUNCTION_TYPE, + func->nr_groups, func->groups); + if (ret) { + kfree(func->groups); + return ret; + } + + func->present = true; + return 0; +} + +static int scmi_pinctrl_get_function_name(const struct scmi_protocol_handle *ph, + u32 selector, const char **name) +{ + struct scmi_pinctrl_info *pi = ph->get_priv(ph); + + if (!name) + return -EINVAL; + + if (selector >= pi->nr_functions || pi->nr_functions == 0) + return -EINVAL; + + if (!pi->functions[selector].present) { + int ret; + + ret = scmi_pinctrl_get_function_info(ph, selector, + &pi->functions[selector]); + if (ret) + return ret; + } + + *name = pi->functions[selector].name; + return 0; +} + +static int +scmi_pinctrl_function_groups_get(const struct scmi_protocol_handle *ph, + u32 selector, u32 *nr_groups, + const u32 **groups) +{ + struct scmi_pinctrl_info *pi = ph->get_priv(ph); + + if (!groups || !nr_groups) + return -EINVAL; + + if (selector >= pi->nr_functions || pi->nr_functions == 0) + return -EINVAL; + + if (!pi->functions[selector].present) { + int ret; + + ret = scmi_pinctrl_get_function_info(ph, selector, + &pi->functions[selector]); + if (ret) + return ret; + } + + *groups = pi->functions[selector].groups; + *nr_groups = pi->functions[selector].nr_groups; + + return 0; +} + +static int scmi_pinctrl_mux_set(const struct scmi_protocol_handle *ph, + u32 selector, u32 group) +{ + return scmi_pinctrl_function_select(ph, group, GROUP_TYPE, selector); +} + +static int scmi_pinctrl_get_pin_info(const struct scmi_protocol_handle *ph, + u32 selector, struct scmi_pin_info *pin) +{ + int ret; + + if (!pin) + return -EINVAL; + + ret = scmi_pinctrl_attributes(ph, PIN_TYPE, selector, pin->name, NULL); + if (ret) + return ret; + + pin->present = true; + return 0; +} + +static int scmi_pinctrl_get_pin_name(const struct scmi_protocol_handle *ph, + u32 selector, const char **name) +{ + struct scmi_pinctrl_info *pi = ph->get_priv(ph); + + if (!name) + return -EINVAL; + + if (selector >= pi->nr_pins) + return -EINVAL; + + if (!pi->pins[selector].present) { + int ret; + + ret = scmi_pinctrl_get_pin_info(ph, selector, &pi->pins[selector]); + if (ret) + return ret; + } + + *name = pi->pins[selector].name; + + return 0; +} + +static int scmi_pinctrl_name_get(const struct 
scmi_protocol_handle *ph, + u32 selector, + enum scmi_pinctrl_selector_type type, + const char **name) +{ + switch (type) { + case PIN_TYPE: + return scmi_pinctrl_get_pin_name(ph, selector, name); + case GROUP_TYPE: + return scmi_pinctrl_get_group_name(ph, selector, name); + case FUNCTION_TYPE: + return scmi_pinctrl_get_function_name(ph, selector, name); + default: + return -EINVAL; + } +} + +static const struct scmi_pinctrl_proto_ops pinctrl_proto_ops = { + .count_get = scmi_pinctrl_count_get, + .name_get = scmi_pinctrl_name_get, + .group_pins_get = scmi_pinctrl_group_pins_get, + .function_groups_get = scmi_pinctrl_function_groups_get, + .mux_set = scmi_pinctrl_mux_set, + .settings_get_one = scmi_pinctrl_settings_get_one, + .settings_get_all = scmi_pinctrl_settings_get_all, + .settings_conf = scmi_pinctrl_settings_conf, + .pin_request = scmi_pinctrl_pin_request, + .pin_free = scmi_pinctrl_pin_free, +}; + +static int scmi_pinctrl_protocol_init(const struct scmi_protocol_handle *ph) +{ + int ret; + u32 version; + struct scmi_pinctrl_info *pinfo; + + ret = ph->xops->version_get(ph, &version); + if (ret) + return ret; + + dev_dbg(ph->dev, "Pinctrl Version %d.%d\n", + PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); + + pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL); + if (!pinfo) + return -ENOMEM; + + ret = scmi_pinctrl_attributes_get(ph, pinfo); + if (ret) + return ret; + + pinfo->pins = devm_kcalloc(ph->dev, pinfo->nr_pins, + sizeof(*pinfo->pins), GFP_KERNEL); + if (!pinfo->pins) + return -ENOMEM; + + pinfo->groups = devm_kcalloc(ph->dev, pinfo->nr_groups, + sizeof(*pinfo->groups), GFP_KERNEL); + if (!pinfo->groups) + return -ENOMEM; + + pinfo->functions = devm_kcalloc(ph->dev, pinfo->nr_functions, + sizeof(*pinfo->functions), GFP_KERNEL); + if (!pinfo->functions) + return -ENOMEM; + + pinfo->version = version; + + return ph->set_priv(ph, pinfo, version); +} + +static int scmi_pinctrl_protocol_deinit(const struct scmi_protocol_handle *ph) +{ + int i; + struct scmi_pinctrl_info *pi = ph->get_priv(ph); + + /* Free groups_pins allocated in scmi_pinctrl_get_group_info */ + for (i = 0; i < pi->nr_groups; i++) { + if (pi->groups[i].present) { + kfree(pi->groups[i].group_pins); + pi->groups[i].present = false; + } + } + + /* Free groups allocated in scmi_pinctrl_get_function_info */ + for (i = 0; i < pi->nr_functions; i++) { + if (pi->functions[i].present) { + kfree(pi->functions[i].groups); + pi->functions[i].present = false; + } + } + + return 0; +} + +static const struct scmi_protocol scmi_pinctrl = { + .id = SCMI_PROTOCOL_PINCTRL, + .owner = THIS_MODULE, + .instance_init = &scmi_pinctrl_protocol_init, + .instance_deinit = &scmi_pinctrl_protocol_deinit, + .ops = &pinctrl_proto_ops, + .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION, +}; + +DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(pinctrl, scmi_pinctrl) diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c index 49666bd1d8ac..59aa16444c64 100644 --- a/drivers/firmware/arm_scmi/power.c +++ b/drivers/firmware/arm_scmi/power.c @@ -14,7 +14,7 @@ #include "notify.h" /* Updated only after ALL the mandatory features for that version are merged */ -#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x30000 +#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x30001 enum scmi_power_protocol_cmd { POWER_DOMAIN_ATTRIBUTES = 0x3, diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h index 317d3fb32676..d62c4469d1fd 100644 --- a/drivers/firmware/arm_scmi/protocols.h +++ 
b/drivers/firmware/arm_scmi/protocols.h @@ -22,13 +22,17 @@ #include <linux/spinlock.h> #include <linux/types.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #define PROTOCOL_REV_MINOR_MASK GENMASK(15, 0) #define PROTOCOL_REV_MAJOR_MASK GENMASK(31, 16) #define PROTOCOL_REV_MAJOR(x) ((u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x)))) #define PROTOCOL_REV_MINOR(x) ((u16)(FIELD_GET(PROTOCOL_REV_MINOR_MASK, (x)))) +#define SCMI_PROTOCOL_VENDOR_BASE 0x80 + +#define MSG_SUPPORTS_FASTCHANNEL(x) ((x) & BIT(0)) + enum scmi_common_cmd { PROTOCOL_VERSION = 0x0, PROTOCOL_ATTRIBUTES = 0x1, @@ -258,6 +262,7 @@ struct scmi_fc_info { * @fastchannel_init: A common helper used to initialize FC descriptors by * gathering FC descriptions from the SCMI platform server. * @fastchannel_db_ring: A common helper to ring a FC doorbell. + * @get_max_msg_size: A common helper to get the maximum message size. */ struct scmi_proto_helpers_ops { int (*extended_name_get)(const struct scmi_protocol_handle *ph, @@ -277,6 +282,7 @@ struct scmi_proto_helpers_ops { struct scmi_fc_db_info **p_db, u32 *rate_limit); void (*fastchannel_db_ring)(struct scmi_fc_db_info *db); + int (*get_max_msg_size)(const struct scmi_protocol_handle *ph); }; /** @@ -323,6 +329,16 @@ typedef int (*scmi_prot_init_ph_fn_t)(const struct scmi_protocol_handle *); * protocol by the agent. Each protocol implementation * in the agent is supposed to downgrade to match the * protocol version supported by the platform. + * @vendor_id: A firmware vendor string for vendor protocols matching. + * Ignored when @id identifies a standard protocol, cannot be NULL + * otherwise. + * @sub_vendor_id: A firmware sub_vendor string for vendor protocols matching. + * Ignored if NULL or when @id identifies a standard protocol. + * @impl_ver: A firmware implementation version for vendor protocols matching. + * Ignored if zero or if @id identifies a standard protocol. + * + * Note that vendor protocols matching at load time is performed by attempting + * the closest match first against the tuple (vendor, sub_vendor, impl_ver) */ struct scmi_protocol { const u8 id; @@ -332,6 +348,9 @@ struct scmi_protocol { const void *ops; const struct scmi_protocol_events *events; unsigned int supported_version; + char *vendor_id; + char *sub_vendor_id; + u32 impl_ver; }; #define DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(name, proto) \ @@ -353,6 +372,7 @@ void __exit scmi_##name##_unregister(void) \ DECLARE_SCMI_REGISTER_UNREGISTER(base); DECLARE_SCMI_REGISTER_UNREGISTER(clock); DECLARE_SCMI_REGISTER_UNREGISTER(perf); +DECLARE_SCMI_REGISTER_UNREGISTER(pinctrl); DECLARE_SCMI_REGISTER_UNREGISTER(power); DECLARE_SCMI_REGISTER_UNREGISTER(reset); DECLARE_SCMI_REGISTER_UNREGISTER(sensors); diff --git a/drivers/firmware/arm_scmi/quirks.c b/drivers/firmware/arm_scmi/quirks.c new file mode 100644 index 000000000000..03960aca3610 --- /dev/null +++ b/drivers/firmware/arm_scmi/quirks.c @@ -0,0 +1,322 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface (SCMI) Message Protocol Quirks + * + * Copyright (C) 2025 ARM Ltd. + */ + +/** + * DOC: Theory of operation + * + * A framework to define SCMI quirks and their activation conditions based on + * existing static_keys kernel facilities. + * + * Quirks are named and their activation conditions defined using the macro + * DEFINE_SCMI_QUIRK() in this file. + * + * After a quirk is defined, a corresponding entry must also be added to the + * global @scmi_quirks_table in this file using __DECLARE_SCMI_QUIRK_ENTRY(). 
+ * + * Additionally, a corresponding quirk declaration must also be added to the + * quirk.h file using DECLARE_SCMI_QUIRK(). + * + * The needed quirk code-snippet itself will be defined local to the SCMI code + * that it is meant to fix and will be associated with the previously defined quirk + * and related activation conditions using the macro SCMI_QUIRK(). + * + * At runtime, during the SCMI stack probe sequence, once the SCMI Server has + * advertised the running platform Vendor, SubVendor and Implementation Version + * data, all the defined quirks matching the activation conditions will be + * enabled. + * + * Example + * + * quirk.c + * ------- + * DEFINE_SCMI_QUIRK(fix_me, "vendor", "subvend", "0x12000-0x30000", + * "someone,plat_A", "another,plat_b", "vend,sku"); + * + * static struct scmi_quirk *scmi_quirks_table[] = { + * ... + * __DECLARE_SCMI_QUIRK_ENTRY(fix_me), + * NULL + * }; + * + * quirk.h + * ------- + * DECLARE_SCMI_QUIRK(fix_me); + * + * <somewhere_in_the_scmi_stack.c> + * ------------------------------ + * + * #define QUIRK_CODE_SNIPPET_FIX_ME() \ + * ({ \ + * if (p->condition) \ + * a_ptr->calculated_val = 123; \ + * }) + * + * + * int some_function_to_fix(int param, struct something *p) + * { + * struct some_struct *a_ptr; + * + * a_ptr = some_load_func(p); + * SCMI_QUIRK(fix_me, QUIRK_CODE_SNIPPET_FIX_ME); + * some_more_func(a_ptr); + * ... + * + * return 0; + * } + * + */ + +#include <linux/ctype.h> +#include <linux/device.h> +#include <linux/export.h> +#include <linux/hashtable.h> +#include <linux/kstrtox.h> +#include <linux/of.h> +#include <linux/slab.h> +#include <linux/static_key.h> +#include <linux/string.h> +#include <linux/stringhash.h> +#include <linux/types.h> + +#include "quirks.h" + +#define SCMI_QUIRKS_HT_SZ 4 + +struct scmi_quirk { + bool enabled; + const char *name; + char *vendor; + char *sub_vendor_id; + char *impl_ver_range; + u32 start_range; + u32 end_range; + struct static_key_false *key; + struct hlist_node hash; + unsigned int hkey; + const char *const compats[]; +}; + +#define __DEFINE_SCMI_QUIRK_ENTRY(_qn, _ven, _sub, _impl, ...) \ + static struct scmi_quirk scmi_quirk_entry_ ## _qn = { \ + .name = __stringify(quirk_ ## _qn), \ + .vendor = _ven, \ + .sub_vendor_id = _sub, \ + .impl_ver_range = _impl, \ + .key = &(scmi_quirk_ ## _qn), \ + .compats = { __VA_ARGS__ __VA_OPT__(,) NULL }, \ + } + +#define __DECLARE_SCMI_QUIRK_ENTRY(_qn) (&(scmi_quirk_entry_ ## _qn)) + +/* + * Define a quirk by name and provide the matching tokens where: + * + * _qn: A string which will be used to build the quirk and the global + * static_key names. + * _ven : SCMI Vendor ID string match, NULL means any. + * _sub : SCMI SubVendor ID string match, NULL means any. + * _impl : SCMI Implementation Version string match, NULL means any. + * This string can be used to express version ranges which will be + * interpreted as follows: + * + * NULL [0, 0xFFFFFFFF] + * "X" [X, X] + * "X-" [X, 0xFFFFFFFF] + * "-X" [0, X] + * "X-Y" [X, Y] + * + * with X <= Y and <v> in [X, Y] meaning X <= <v> <= Y + * + * ... : An optional variadic macro argument used to provide a comma-separated + * list of compatible string matches; when no variadic argument is + * provided, ANY compatible will match this quirk. + * + * This also implicitly defines a properly named global static-key that + * will be used to dynamically enable the quirk at initialization time. 
+ * + * Note that it is possible to associate multiple quirks to the same + * matching pattern, if your firmware quality is really astounding :P + * + * Example: + * + * Compatibles list NOT provided, so ANY compatible will match: + * + * DEFINE_SCMI_QUIRK(my_new_issue, "Vend", "SVend", "0x12000-0x30000"); + * + * + * A few compatibles provided to match against: + * + * DEFINE_SCMI_QUIRK(my_new_issue, "Vend", "SVend", "0x12000-0x30000", + * "xvend,plat_a", "xvend,plat_b", "xvend,sku_name"); + */ +#define DEFINE_SCMI_QUIRK(_qn, _ven, _sub, _impl, ...) \ + DEFINE_STATIC_KEY_FALSE(scmi_quirk_ ## _qn); \ + __DEFINE_SCMI_QUIRK_ENTRY(_qn, _ven, _sub, _impl, ##__VA_ARGS__) + +/* + * Same as DEFINE_SCMI_QUIRK but EXPORTED: this is meant to address quirks + * that possibly reside in code that is included in loadable kernel modules + * that needs to be able to access the global static keys at runtime to + * determine if enabled or not. (see SCMI_QUIRK to understand usage) + */ +#define DEFINE_SCMI_QUIRK_EXPORTED(_qn, _ven, _sub, _impl, ...) \ + DEFINE_STATIC_KEY_FALSE(scmi_quirk_ ## _qn); \ + EXPORT_SYMBOL_GPL(scmi_quirk_ ## _qn); \ + __DEFINE_SCMI_QUIRK_ENTRY(_qn, _ven, _sub, _impl, ##__VA_ARGS__) + +/* Global Quirks Definitions */ +DEFINE_SCMI_QUIRK(clock_rates_triplet_out_of_spec, NULL, NULL, NULL); +DEFINE_SCMI_QUIRK(perf_level_get_fc_force, "Qualcomm", NULL, "0x20000-"); + +/* + * Quirks Pointers Array + * + * This is filled at compile-time with the list of pointers to all the currently + * defined quirks descriptors. + */ +static struct scmi_quirk *scmi_quirks_table[] = { + __DECLARE_SCMI_QUIRK_ENTRY(clock_rates_triplet_out_of_spec), + __DECLARE_SCMI_QUIRK_ENTRY(perf_level_get_fc_force), + NULL +}; + +/* + * Quirks HashTable + * + * A run-time populated hashtable containing all the defined quirks descriptors + * hashed by matching pattern. + */ +static DEFINE_READ_MOSTLY_HASHTABLE(scmi_quirks_ht, SCMI_QUIRKS_HT_SZ); + +static unsigned int scmi_quirk_signature(const char *vend, const char *sub_vend) +{ + char *signature, *p; + unsigned int hash32; + unsigned long hash = 0; + + /* vendor_id/sub_vendor_id guaranteed <= SCMI_SHORT_NAME_MAX_SIZE */ + signature = kasprintf(GFP_KERNEL, "|%s|%s|", vend ?: "", sub_vend ?: ""); + if (!signature) + return 0; + + pr_debug("SCMI Quirk Signature >>>%s<<<\n", signature); + + p = signature; + while (*p) + hash = partial_name_hash(tolower(*p++), hash); + hash32 = end_name_hash(hash); + + kfree(signature); + + return hash32; +} + +static int scmi_quirk_range_parse(struct scmi_quirk *quirk) +{ + const char *last, *first = quirk->impl_ver_range; + size_t len; + char *sep; + int ret; + + quirk->start_range = 0; + quirk->end_range = 0xFFFFFFFF; + len = quirk->impl_ver_range ? 
strlen(quirk->impl_ver_range) : 0; + if (!len) + return 0; + + last = first + len - 1; + sep = strchr(quirk->impl_ver_range, '-'); + if (sep) + *sep = '\0'; + + if (sep == first) /* -X */ + ret = kstrtouint(first + 1, 0, &quirk->end_range); + else /* X OR X- OR X-y */ + ret = kstrtouint(first, 0, &quirk->start_range); + if (ret) + return ret; + + if (!sep) + quirk->end_range = quirk->start_range; + else if (sep != last) /* x-Y */ + ret = kstrtouint(sep + 1, 0, &quirk->end_range); + + if (quirk->start_range > quirk->end_range) + return -EINVAL; + + return ret; +} + +void scmi_quirks_initialize(void) +{ + struct scmi_quirk *quirk; + int i; + + for (i = 0, quirk = scmi_quirks_table[0]; quirk; + i++, quirk = scmi_quirks_table[i]) { + int ret; + + ret = scmi_quirk_range_parse(quirk); + if (ret) { + pr_err("SCMI skip QUIRK [%s] - BAD RANGE - |%s|\n", + quirk->name, quirk->impl_ver_range); + continue; + } + quirk->hkey = scmi_quirk_signature(quirk->vendor, + quirk->sub_vendor_id); + + hash_add(scmi_quirks_ht, &quirk->hash, quirk->hkey); + + pr_debug("Registered SCMI QUIRK [%s] -- %p - Key [0x%08X] - %s/%s/[0x%08X-0x%08X]\n", + quirk->name, quirk, quirk->hkey, + quirk->vendor, quirk->sub_vendor_id, + quirk->start_range, quirk->end_range); + } + + pr_debug("SCMI Quirks initialized\n"); +} + +void scmi_quirks_enable(struct device *dev, const char *vend, + const char *subv, const u32 impl) +{ + for (int i = 3; i >= 0; i--) { + struct scmi_quirk *quirk; + unsigned int hkey; + + hkey = scmi_quirk_signature(i > 1 ? vend : NULL, + i > 2 ? subv : NULL); + + /* + * Note that there could be multiple matches so we + * will enable multiple quirk part of a hash collision + * domain...BUT we cannot assume that ALL quirks on the + * same collision domain are a full match. + */ + hash_for_each_possible(scmi_quirks_ht, quirk, hash, hkey) { + if (quirk->enabled || quirk->hkey != hkey || + impl < quirk->start_range || + impl > quirk->end_range) + continue; + + if (quirk->compats[0] && + !of_machine_compatible_match(quirk->compats)) + continue; + + dev_info(dev, "Enabling SCMI Quirk [%s]\n", + quirk->name); + + dev_dbg(dev, + "Quirk matched on: %s/%s/%s/[0x%08X-0x%08X]\n", + quirk->compats[0], quirk->vendor, + quirk->sub_vendor_id, + quirk->start_range, quirk->end_range); + + static_branch_enable(quirk->key); + quirk->enabled = true; + } + } +} diff --git a/drivers/firmware/arm_scmi/quirks.h b/drivers/firmware/arm_scmi/quirks.h new file mode 100644 index 000000000000..a71fde85a527 --- /dev/null +++ b/drivers/firmware/arm_scmi/quirks.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * System Control and Management Interface (SCMI) Message Protocol Quirks + * + * Copyright (C) 2025 ARM Ltd. + */ +#ifndef _SCMI_QUIRKS_H +#define _SCMI_QUIRKS_H + +#include <linux/static_key.h> +#include <linux/types.h> + +#ifdef CONFIG_ARM_SCMI_QUIRKS + +#define DECLARE_SCMI_QUIRK(_qn) \ + DECLARE_STATIC_KEY_FALSE(scmi_quirk_ ## _qn) + +/* + * A helper to associate the actual code snippet to use as a quirk + * named as _qn. 
+ */ +#define SCMI_QUIRK(_qn, _blk) \ + do { \ + if (static_branch_unlikely(&(scmi_quirk_ ## _qn))) \ + (_blk); \ + } while (0) + +void scmi_quirks_initialize(void); +void scmi_quirks_enable(struct device *dev, const char *vend, + const char *subv, const u32 impl); + +#else + +#define DECLARE_SCMI_QUIRK(_qn) +/* Force quirks compilation even when SCMI Quirks are disabled */ +#define SCMI_QUIRK(_qn, _blk) \ + do { \ + if (0) \ + (_blk); \ + } while (0) + +static inline void scmi_quirks_initialize(void) { } +static inline void scmi_quirks_enable(struct device *dev, const char *vend, + const char *sub_vend, const u32 impl) { } + +#endif /* CONFIG_ARM_SCMI_QUIRKS */ + +/* Quirk declarations */ +DECLARE_SCMI_QUIRK(clock_rates_triplet_out_of_spec); +DECLARE_SCMI_QUIRK(perf_level_get_fc_force); + +#endif /* _SCMI_QUIRKS_H */ diff --git a/drivers/firmware/arm_scmi/raw_mode.c b/drivers/firmware/arm_scmi/raw_mode.c index 130d13e9cd6b..3d543b1d8947 100644 --- a/drivers/firmware/arm_scmi/raw_mode.c +++ b/drivers/firmware/arm_scmi/raw_mode.c @@ -671,11 +671,13 @@ static int scmi_do_xfer_raw_start(struct scmi_raw_mode_info *raw, * @len: Length of the message in @buf. * @chan_id: The channel ID to use. * @async: A flag stating if an asynchronous command is required. + * @poll: A flag stating if a polling transmission is required. * * Return: 0 on Success */ static int scmi_raw_message_send(struct scmi_raw_mode_info *raw, - void *buf, size_t len, u8 chan_id, bool async) + void *buf, size_t len, u8 chan_id, + bool async, bool poll) { int ret; struct scmi_xfer *xfer; @@ -684,6 +686,16 @@ static int scmi_raw_message_send(struct scmi_raw_mode_info *raw, if (ret) return ret; + if (poll) { + if (is_transport_polling_capable(raw->desc)) { + xfer->hdr.poll_completion = true; + } else { + dev_err(raw->handle->dev, + "Failed to send RAW message - Polling NOT supported\n"); + return -EINVAL; + } + } + ret = scmi_do_xfer_raw_start(raw, xfer, chan_id, async); if (ret) scmi_xfer_raw_put(raw->handle, xfer); @@ -801,7 +813,7 @@ static ssize_t scmi_dbg_raw_mode_common_read(struct file *filp, static ssize_t scmi_dbg_raw_mode_common_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos, - bool async) + bool async, bool poll) { int ret; struct scmi_dbg_raw_data *rd = filp->private_data; @@ -831,7 +843,7 @@ static ssize_t scmi_dbg_raw_mode_common_write(struct file *filp, } ret = scmi_raw_message_send(rd->raw, rd->tx.buf, rd->tx_size, - rd->chan_id, async); + rd->chan_id, async, poll); /* Reset ppos for next message ... 
*/ rd->tx_size = 0; @@ -875,7 +887,8 @@ static ssize_t scmi_dbg_raw_mode_message_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos) { - return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, false); + return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, + false, false); } static __poll_t scmi_dbg_raw_mode_message_poll(struct file *filp, @@ -886,10 +899,8 @@ static __poll_t scmi_dbg_raw_mode_message_poll(struct file *filp, static int scmi_dbg_raw_mode_open(struct inode *inode, struct file *filp) { - u8 id; struct scmi_raw_mode_info *raw; struct scmi_dbg_raw_data *rd; - const char *id_str = filp->f_path.dentry->d_parent->d_name.name; if (!inode->i_private) return -ENODEV; @@ -915,8 +926,8 @@ static int scmi_dbg_raw_mode_open(struct inode *inode, struct file *filp) } /* Grab channel ID from debugfs entry naming if any */ - if (!kstrtou8(id_str, 16, &id)) - rd->chan_id = id; + /* not set - reassigning the 0 we already had after kzalloc() */ + rd->chan_id = debugfs_get_aux_num(filp); rd->raw = raw; filp->private_data = rd; @@ -950,7 +961,6 @@ static const struct file_operations scmi_dbg_raw_mode_reset_fops = { .open = scmi_dbg_raw_mode_open, .release = scmi_dbg_raw_mode_release, .write = scmi_dbg_raw_mode_reset_write, - .llseek = no_llseek, .owner = THIS_MODULE, }; @@ -960,7 +970,6 @@ static const struct file_operations scmi_dbg_raw_mode_message_fops = { .open = scmi_dbg_raw_mode_open, .release = scmi_dbg_raw_mode_release, .read = scmi_dbg_raw_mode_message_read, .write = scmi_dbg_raw_mode_message_write, .poll = scmi_dbg_raw_mode_message_poll, - .llseek = no_llseek, .owner = THIS_MODULE, }; @@ -968,7 +977,8 @@ static ssize_t scmi_dbg_raw_mode_message_async_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos) { - return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, true); + return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, + true, false); } static const struct file_operations scmi_dbg_raw_mode_message_async_fops = { @@ -977,7 +987,40 @@ static const struct file_operations scmi_dbg_raw_mode_message_async_fops = { .open = scmi_dbg_raw_mode_open, .release = scmi_dbg_raw_mode_release, .read = scmi_dbg_raw_mode_message_read, .write = scmi_dbg_raw_mode_message_async_write, .poll = scmi_dbg_raw_mode_message_poll, - .llseek = no_llseek, + .owner = THIS_MODULE, +}; + +static ssize_t scmi_dbg_raw_mode_message_poll_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *ppos) +{ + return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, + false, true); +} + +static const struct file_operations scmi_dbg_raw_mode_message_poll_fops = { + .open = scmi_dbg_raw_mode_open, + .release = scmi_dbg_raw_mode_release, + .read = scmi_dbg_raw_mode_message_read, + .write = scmi_dbg_raw_mode_message_poll_write, + .poll = scmi_dbg_raw_mode_message_poll, + .owner = THIS_MODULE, +}; + +static ssize_t scmi_dbg_raw_mode_message_poll_async_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *ppos) +{ + return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, + true, true); +} + +static const struct file_operations scmi_dbg_raw_mode_message_poll_async_fops = { + .open = scmi_dbg_raw_mode_open, + .release = scmi_dbg_raw_mode_release, + .read = scmi_dbg_raw_mode_message_read, + .write = scmi_dbg_raw_mode_message_poll_async_write, + .poll = scmi_dbg_raw_mode_message_poll, .owner = THIS_MODULE, }; @@ -1001,7 +1044,6 @@ static const struct file_operations scmi_dbg_raw_mode_notification_fops = { .open = scmi_dbg_raw_mode_open, .release = scmi_dbg_raw_mode_release, .read = scmi_test_dbg_raw_mode_notif_read, .poll = scmi_test_dbg_raw_mode_notif_poll, - .llseek = no_llseek, 
.owner = THIS_MODULE, }; @@ -1025,7 +1067,6 @@ static const struct file_operations scmi_dbg_raw_mode_errors_fops = { .release = scmi_dbg_raw_mode_release, .read = scmi_test_dbg_raw_mode_errors_read, .poll = scmi_test_dbg_raw_mode_errors_poll, - .llseek = no_llseek, .owner = THIS_MODULE, }; @@ -1206,6 +1247,12 @@ void *scmi_raw_mode_init(const struct scmi_handle *handle, debugfs_create_file("message_async", 0600, raw->dentry, raw, &scmi_dbg_raw_mode_message_async_fops); + debugfs_create_file("message_poll", 0600, raw->dentry, raw, + &scmi_dbg_raw_mode_message_poll_fops); + + debugfs_create_file("message_poll_async", 0600, raw->dentry, raw, + &scmi_dbg_raw_mode_message_poll_async_fops); + debugfs_create_file("notification", 0400, raw->dentry, raw, &scmi_dbg_raw_mode_notification_fops); @@ -1230,11 +1277,21 @@ void *scmi_raw_mode_init(const struct scmi_handle *handle, snprintf(cdir, 8, "0x%02X", channels[i]); chd = debugfs_create_dir(cdir, top_chans); - debugfs_create_file("message", 0600, chd, raw, + debugfs_create_file_aux_num("message", 0600, chd, + raw, channels[i], &scmi_dbg_raw_mode_message_fops); - debugfs_create_file("message_async", 0600, chd, raw, + debugfs_create_file_aux_num("message_async", 0600, chd, + raw, channels[i], &scmi_dbg_raw_mode_message_async_fops); + + debugfs_create_file_aux_num("message_poll", 0600, chd, + raw, channels[i], + &scmi_dbg_raw_mode_message_poll_fops); + + debugfs_create_file_aux_num("message_poll_async", 0600, + chd, raw, channels[i], + &scmi_dbg_raw_mode_message_poll_async_fops); } } diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c index 1b318316535e..0aa82b96f41b 100644 --- a/drivers/firmware/arm_scmi/reset.c +++ b/drivers/firmware/arm_scmi/reset.c @@ -14,7 +14,7 @@ #include "notify.h" /* Updated only after ALL the mandatory features for that version are merged */ -#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x30000 +#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x30001 enum scmi_reset_protocol_cmd { RESET_DOMAIN_ATTRIBUTES = 0x3, diff --git a/drivers/firmware/arm_scmi/scmi_power_control.c b/drivers/firmware/arm_scmi/scmi_power_control.c index 6eb7d2a4b6b1..21f467a92942 100644 --- a/drivers/firmware/arm_scmi/scmi_power_control.c +++ b/drivers/firmware/arm_scmi/scmi_power_control.c @@ -50,6 +50,7 @@ #include <linux/reboot.h> #include <linux/scmi_protocol.h> #include <linux/slab.h> +#include <linux/suspend.h> #include <linux/time64.h> #include <linux/timer.h> #include <linux/types.h> @@ -78,6 +79,7 @@ enum scmi_syspower_state { * @reboot_nb: A notifier_block optionally used to track reboot progress * @forceful_work: A worker used to trigger a forceful transition once a * graceful has timed out. 
+ * @suspend_work: A worker used to trigger system suspend */ struct scmi_syspower_conf { struct device *dev; @@ -90,6 +92,7 @@ struct scmi_syspower_conf { struct notifier_block reboot_nb; struct delayed_work forceful_work; + struct work_struct suspend_work; }; #define userspace_nb_to_sconf(x) \ @@ -249,6 +252,9 @@ static void scmi_request_graceful_transition(struct scmi_syspower_conf *sc, case SCMI_SYSTEM_WARMRESET: orderly_reboot(); break; + case SCMI_SYSTEM_SUSPEND: + schedule_work(&sc->suspend_work); + break; default: break; } @@ -277,7 +283,8 @@ static int scmi_userspace_notifier(struct notifier_block *nb, struct scmi_system_power_state_notifier_report *er = data; struct scmi_syspower_conf *sc = userspace_nb_to_sconf(nb); - if (er->system_state >= SCMI_SYSTEM_POWERUP) { + if (er->system_state >= SCMI_SYSTEM_MAX || + er->system_state == SCMI_SYSTEM_POWERUP) { dev_err(sc->dev, "Ignoring unsupported system_state: 0x%X\n", er->system_state); return NOTIFY_DONE; @@ -315,6 +322,16 @@ static int scmi_userspace_notifier(struct notifier_block *nb, return NOTIFY_OK; } +static void scmi_suspend_work_func(struct work_struct *work) +{ + struct scmi_syspower_conf *sc = + container_of(work, struct scmi_syspower_conf, suspend_work); + + pm_suspend(PM_SUSPEND_MEM); + + sc->state = SCMI_SYSPOWER_IDLE; +} + static int scmi_syspower_probe(struct scmi_device *sdev) { int ret; @@ -338,6 +355,8 @@ static int scmi_syspower_probe(struct scmi_device *sdev) sc->userspace_nb.notifier_call = &scmi_userspace_notifier; sc->dev = &sdev->dev; + INIT_WORK(&sc->suspend_work, scmi_suspend_work_func); + return handle->notify_ops->devm_event_notifier_register(sdev, SCMI_PROTOCOL_SYSTEM, SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER, diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c index 7fc5535ca34c..791efd0f82d7 100644 --- a/drivers/firmware/arm_scmi/sensors.c +++ b/drivers/firmware/arm_scmi/sensors.c @@ -15,7 +15,7 @@ #include "notify.h" /* Updated only after ALL the mandatory features for that version are merged */ -#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x30000 +#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x30001 #define SCMI_MAX_NUM_SENSOR_AXIS 63 #define SCMIv2_SENSOR_PROTOCOL 0x10000 diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c index 8bf495bcad09..11c347bff766 100644 --- a/drivers/firmware/arm_scmi/shmem.c +++ b/drivers/firmware/arm_scmi/shmem.c @@ -2,11 +2,13 @@ /* * For transport using shared mem structure. * - * Copyright (C) 2019 ARM Ltd. + * Copyright (C) 2019-2024 ARM Ltd. 
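A note on the SCMI_SYSTEM_SUSPEND handling added above: pm_suspend(PM_SUSPEND_MEM) only returns once the system has resumed, so the driver defers it to a dedicated work item rather than blocking the notification path. A minimal sketch of that deferred-suspend pattern, with placeholder names rather than the driver's own:

	#include <linux/suspend.h>
	#include <linux/workqueue.h>

	static struct work_struct suspend_work;

	static void suspend_work_fn(struct work_struct *work)
	{
		/* Blocks here for the whole suspend/resume cycle */
		pm_suspend(PM_SUSPEND_MEM);

		/* Back from resume: the real driver resets its state machine here */
	}

	/* Called from the (non-blocking) notification path */
	static void request_system_suspend(void)
	{
		schedule_work(&suspend_work);
	}

	static void suspend_example_setup(void)
	{
		INIT_WORK(&suspend_work, suspend_work_fn);
	}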
*/ #include <linux/ktime.h> #include <linux/io.h> +#include <linux/of.h> +#include <linux/of_address.h> #include <linux/processor.h> #include <linux/types.h> @@ -14,6 +16,8 @@ #include "common.h" +#define SCMI_SHMEM_LAYOUT_OVERHEAD 24 + /* * SCMI specification requires all parameters, message headers, return * arguments or any protocol data to be expressed in little endian @@ -32,8 +36,59 @@ struct scmi_shared_mem { u8 msg_payload[]; }; -void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem, - struct scmi_xfer *xfer, struct scmi_chan_info *cinfo) +static inline void shmem_memcpy_fromio32(void *to, + const void __iomem *from, + size_t count) +{ + WARN_ON(!IS_ALIGNED((unsigned long)from, 4) || + !IS_ALIGNED((unsigned long)to, 4) || + count % 4); + + __ioread32_copy(to, from, count / 4); +} + +static inline void shmem_memcpy_toio32(void __iomem *to, + const void *from, + size_t count) +{ + WARN_ON(!IS_ALIGNED((unsigned long)to, 4) || + !IS_ALIGNED((unsigned long)from, 4) || + count % 4); + + __iowrite32_copy(to, from, count / 4); +} + +static struct scmi_shmem_io_ops shmem_io_ops32 = { + .fromio = shmem_memcpy_fromio32, + .toio = shmem_memcpy_toio32, +}; + +/* Wrappers are needed for proper memcpy_{from,to}_io expansion by the + * pre-processor. + */ +static inline void shmem_memcpy_fromio(void *to, + const void __iomem *from, + size_t count) +{ + memcpy_fromio(to, from, count); +} + +static inline void shmem_memcpy_toio(void __iomem *to, + const void *from, + size_t count) +{ + memcpy_toio(to, from, count); +} + +static struct scmi_shmem_io_ops shmem_io_ops_default = { + .fromio = shmem_memcpy_fromio, + .toio = shmem_memcpy_toio, +}; + +static void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem, + struct scmi_xfer *xfer, + struct scmi_chan_info *cinfo, + shmem_copy_toio_t copy_toio) { ktime_t stop; @@ -70,16 +125,17 @@ void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem, iowrite32(sizeof(shmem->msg_header) + xfer->tx.len, &shmem->length); iowrite32(pack_scmi_header(&xfer->hdr), &shmem->msg_header); if (xfer->tx.buf) - memcpy_toio(shmem->msg_payload, xfer->tx.buf, xfer->tx.len); + copy_toio(shmem->msg_payload, xfer->tx.buf, xfer->tx.len); } -u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem) +static u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem) { return ioread32(&shmem->msg_header); } -void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem, - struct scmi_xfer *xfer) +static void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem, + struct scmi_xfer *xfer, + shmem_copy_fromio_t copy_fromio) { size_t len = ioread32(&shmem->length); @@ -88,11 +144,12 @@ void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem, xfer->rx.len = min_t(size_t, xfer->rx.len, len > 8 ? len - 8 : 0); /* Take a copy to the rx buffer.. */ - memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len); + copy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len); } -void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem, - size_t max_len, struct scmi_xfer *xfer) +static void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem, + size_t max_len, struct scmi_xfer *xfer, + shmem_copy_fromio_t copy_fromio) { size_t len = ioread32(&shmem->length); @@ -100,16 +157,16 @@ void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem, xfer->rx.len = min_t(size_t, max_len, len > 4 ? len - 4 : 0); /* Take a copy to the rx buffer.. 
*/ - memcpy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len); + copy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len); } -void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem) +static void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem) { iowrite32(SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE, &shmem->channel_status); } -bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem, - struct scmi_xfer *xfer) +static bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem, + struct scmi_xfer *xfer) { u16 xfer_id; @@ -123,8 +180,86 @@ bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem, SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE); } -bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem) +static bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem) { return (ioread32(&shmem->channel_status) & SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE); } + +static bool shmem_channel_intr_enabled(struct scmi_shared_mem __iomem *shmem) +{ + return ioread32(&shmem->flags) & SCMI_SHMEM_FLAG_INTR_ENABLED; +} + +static void __iomem *shmem_setup_iomap(struct scmi_chan_info *cinfo, + struct device *dev, bool tx, + struct resource *res, + struct scmi_shmem_io_ops **ops) +{ + struct device_node *shmem __free(device_node); + const char *desc = tx ? "Tx" : "Rx"; + int ret, idx = tx ? 0 : 1; + struct device *cdev = cinfo->dev; + struct resource lres = {}; + resource_size_t size; + void __iomem *addr; + u32 reg_io_width; + + shmem = of_parse_phandle(cdev->of_node, "shmem", idx); + if (!shmem) + return IOMEM_ERR_PTR(-ENODEV); + + if (!of_device_is_compatible(shmem, "arm,scmi-shmem")) + return IOMEM_ERR_PTR(-ENXIO); + + /* Use a local on-stack as a working area when not provided */ + if (!res) + res = &lres; + + ret = of_address_to_resource(shmem, 0, res); + if (ret) { + dev_err(cdev, "failed to get SCMI %s shared memory\n", desc); + return IOMEM_ERR_PTR(ret); + } + + size = resource_size(res); + if (cinfo->max_msg_size + SCMI_SHMEM_LAYOUT_OVERHEAD > size) { + dev_err(dev, "misconfigured SCMI shared memory\n"); + return IOMEM_ERR_PTR(-ENOSPC); + } + + addr = devm_ioremap(dev, res->start, size); + if (!addr) { + dev_err(dev, "failed to ioremap SCMI %s shared memory\n", desc); + return IOMEM_ERR_PTR(-EADDRNOTAVAIL); + } + + of_property_read_u32(shmem, "reg-io-width", ®_io_width); + switch (reg_io_width) { + case 4: + *ops = &shmem_io_ops32; + break; + default: + *ops = &shmem_io_ops_default; + break; + } + + return addr; +} + +static const struct scmi_shared_mem_operations scmi_shmem_ops = { + .tx_prepare = shmem_tx_prepare, + .read_header = shmem_read_header, + .fetch_response = shmem_fetch_response, + .fetch_notification = shmem_fetch_notification, + .clear_channel = shmem_clear_channel, + .poll_done = shmem_poll_done, + .channel_free = shmem_channel_free, + .channel_intr_enabled = shmem_channel_intr_enabled, + .setup_iomap = shmem_setup_iomap, +}; + +const struct scmi_shared_mem_operations *scmi_shared_mem_operations_get(void) +{ + return &scmi_shmem_ops; +} diff --git a/drivers/firmware/arm_scmi/system.c b/drivers/firmware/arm_scmi/system.c index b6358c155f7f..ec3d355d1772 100644 --- a/drivers/firmware/arm_scmi/system.c +++ b/drivers/firmware/arm_scmi/system.c @@ -14,7 +14,7 @@ #include "notify.h" /* Updated only after ALL the mandatory features for that version are merged */ -#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x20000 +#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x20001 #define SCMI_SYSTEM_NUM_SOURCES 1 diff --git a/drivers/firmware/arm_scmi/transports/Kconfig 
b/drivers/firmware/arm_scmi/transports/Kconfig new file mode 100644 index 000000000000..57eccf316e26 --- /dev/null +++ b/drivers/firmware/arm_scmi/transports/Kconfig @@ -0,0 +1,123 @@ +# SPDX-License-Identifier: GPL-2.0-only +menu "SCMI Transport Drivers" + +config ARM_SCMI_HAVE_TRANSPORT + bool + help + This declares whether at least one SCMI transport has been configured. + Used to trigger a build bug when trying to build SCMI without any + configured transport. + +config ARM_SCMI_HAVE_SHMEM + bool + help + This declares whether a shared memory based transport for SCMI is + available. + +config ARM_SCMI_HAVE_MSG + bool + help + This declares whether a message passing based transport for SCMI is + available. + +config ARM_SCMI_TRANSPORT_MAILBOX + tristate "SCMI transport based on Mailbox" + depends on MAILBOX + select ARM_SCMI_HAVE_TRANSPORT + select ARM_SCMI_HAVE_SHMEM + default y + help + Enable mailbox based transport for SCMI. + + If you want the ARM SCMI PROTOCOL stack to include support for a + transport based on mailboxes, answer Y. + This driver can also be built as a module. If so, the module + will be called scmi_transport_mailbox. + +config ARM_SCMI_TRANSPORT_SMC + tristate "SCMI transport based on SMC" + depends on HAVE_ARM_SMCCC_DISCOVERY + select ARM_SCMI_HAVE_TRANSPORT + select ARM_SCMI_HAVE_SHMEM + default y + help + Enable SMC based transport for SCMI. + + If you want the ARM SCMI PROTOCOL stack to include support for a + transport based on SMC, answer Y. + This driver can also be built as a module. If so, the module + will be called scmi_transport_smc. + +config ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE + bool "Enable atomic mode support for SCMI SMC transport" + depends on ARM_SCMI_TRANSPORT_SMC + help + Enable support of atomic operation for SCMI SMC based transport. + + If you want the SCMI SMC based transport to operate in atomic + mode, avoiding any kind of sleeping behaviour for selected + transactions on the TX path, answer Y. + Enabling atomic mode operations allows any SCMI driver using this + transport to optionally ask for atomic SCMI transactions and operate + in atomic context too, at the price of using a number of busy-waiting + primitives all over instead. If unsure say N. + +config ARM_SCMI_TRANSPORT_OPTEE + tristate "SCMI transport based on OP-TEE service" + depends on OPTEE + select ARM_SCMI_HAVE_TRANSPORT + select ARM_SCMI_HAVE_SHMEM + select ARM_SCMI_HAVE_MSG + default y + help + This enables the OP-TEE service based transport for SCMI. + + If you want the ARM SCMI PROTOCOL stack to include support for a + transport based on OP-TEE SCMI service, answer Y. + This driver can also be built as a module. If so, the module + will be called scmi_transport_optee. + +config ARM_SCMI_TRANSPORT_VIRTIO + tristate "SCMI transport based on VirtIO" + depends on VIRTIO + select ARM_SCMI_HAVE_TRANSPORT + select ARM_SCMI_HAVE_MSG + help + This enables the virtio based transport for SCMI. + + If you want the ARM SCMI PROTOCOL stack to include support for a + transport based on VirtIO, answer Y. + This driver can also be built as a module. If so, the module + will be called scmi_transport_virtio. + +config ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE + bool "SCMI VirtIO transport Version 1 compliance" + depends on ARM_SCMI_TRANSPORT_VIRTIO + default y + help + This enforces strict compliance with VirtIO Version 1 specification. 
+ + If you want the ARM SCMI VirtIO transport layer to refuse to work + with Legacy VirtIO backends and instead support only VirtIO Version 1 + devices (or above), answer Y. + + If you want instead to support also old Legacy VirtIO backends (like + the ones implemented by kvmtool) and let the core Kernel VirtIO layer + take care of the needed conversions, say N. + +config ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE + bool "Enable atomic mode for SCMI VirtIO transport" + depends on ARM_SCMI_TRANSPORT_VIRTIO + help + Enable support of atomic operation for SCMI VirtIO based transport. + + If you want the SCMI VirtIO based transport to operate in atomic + mode, avoiding any kind of sleeping behaviour for selected + transactions on the TX path, answer Y. + + Enabling atomic mode operations allows any SCMI driver using this + transport to optionally ask for atomic SCMI transactions and operate + in atomic context too, at the price of using a number of busy-waiting + primitives all over instead. If unsure say N. + +endmenu diff --git a/drivers/firmware/arm_scmi/transports/Makefile b/drivers/firmware/arm_scmi/transports/Makefile new file mode 100644 index 000000000000..3ba3d3bee151 --- /dev/null +++ b/drivers/firmware/arm_scmi/transports/Makefile @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Keep before scmi_transport_mailbox.o to allow precedence +# while matching the compatible. +scmi_transport_smc-objs := smc.o +obj-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += scmi_transport_smc.o +scmi_transport_mailbox-objs := mailbox.o +obj-$(CONFIG_ARM_SCMI_TRANSPORT_MAILBOX) += scmi_transport_mailbox.o +scmi_transport_optee-objs := optee.o +obj-$(CONFIG_ARM_SCMI_TRANSPORT_OPTEE) += scmi_transport_optee.o +scmi_transport_virtio-objs := virtio.o +obj-$(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO) += scmi_transport_virtio.o + +ifeq ($(CONFIG_THUMB2_KERNEL)$(CONFIG_CC_IS_CLANG),yy) +# The use of R7 in the SMCCC conflicts with the compiler's use of R7 as a frame +# pointer in Thumb2 mode, which is forcibly enabled by Clang when profiling +# hooks are inserted via the -pg switch. +CFLAGS_REMOVE_smc.o += $(CC_FLAGS_FTRACE) +endif diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/transports/mailbox.c index b8d470417e8f..bd041c99b92b 100644 --- a/drivers/firmware/arm_scmi/mailbox.c +++ b/drivers/firmware/arm_scmi/transports/mailbox.c @@ -3,7 +3,7 @@ * System Control and Management Interface (SCMI) Message Mailbox Transport * driver. * - * Copyright (C) 2019 ARM Ltd. + * Copyright (C) 2019-2024 ARM Ltd. 
*/ #include <linux/err.h> @@ -11,9 +11,10 @@ #include <linux/mailbox_client.h> #include <linux/of.h> #include <linux/of_address.h> +#include <linux/platform_device.h> #include <linux/slab.h> -#include "common.h" +#include "../common.h" /** * struct scmi_mailbox - Structure representing a SCMI mailbox transport @@ -21,24 +22,33 @@ * @cl: Mailbox Client * @chan: Transmit/Receive mailbox uni/bi-directional channel * @chan_receiver: Optional Receiver mailbox unidirectional channel + * @chan_platform_receiver: Optional Platform Receiver mailbox unidirectional channel * @cinfo: SCMI channel info * @shmem: Transmit/Receive shared memory area + * @chan_lock: Lock that prevents multiple xfers from being queued + * @io_ops: Transport specific I/O operations */ struct scmi_mailbox { struct mbox_client cl; struct mbox_chan *chan; struct mbox_chan *chan_receiver; + struct mbox_chan *chan_platform_receiver; struct scmi_chan_info *cinfo; struct scmi_shared_mem __iomem *shmem; + struct mutex chan_lock; + struct scmi_shmem_io_ops *io_ops; }; #define client_to_scmi_mailbox(c) container_of(c, struct scmi_mailbox, cl) +static struct scmi_transport_core_operations *core; + static void tx_prepare(struct mbox_client *cl, void *m) { struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl); - shmem_tx_prepare(smbox->shmem, m, smbox->cinfo); + core->shmem->tx_prepare(smbox->shmem, m, smbox->cinfo, + smbox->io_ops->toio); } static void rx_callback(struct mbox_client *cl, void *m) @@ -54,12 +64,17 @@ static void rx_callback(struct mbox_client *cl, void *m) * a previous timed-out reply which arrived late could be wrongly * associated with the next pending transaction. */ - if (cl->knows_txdone && !shmem_channel_free(smbox->shmem)) { + if (cl->knows_txdone && + !core->shmem->channel_free(smbox->shmem)) { dev_warn(smbox->cinfo->dev, "Ignoring spurious A2P IRQ !\n"); + core->bad_message_trace(smbox->cinfo, + core->shmem->read_header(smbox->shmem), + MSG_MBOX_SPURIOUS); return; } - scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem), NULL); + core->rx_callback(smbox->cinfo, + core->shmem->read_header(smbox->shmem), NULL); } static bool mailbox_chan_available(struct device_node *of_node, int idx) @@ -88,6 +103,8 @@ static bool mailbox_chan_available(struct device_node *of_node, int idx) * for replies on the a2p channel. Set as zero if not present. * @p2a_chan: A reference to the optional p2a channel. * Set as zero if not present. + * @p2a_rx_chan: A reference to the optional p2a completion channel. + * Set as zero if not present. 
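For reference, the new optional channel extends the devicetree layout to up to four 'mboxes' entries with two 'shmem' areas: index 0 stays the A2P transmit channel, index 1 the dedicated A2P reply channel, index 2 the P2A notification channel, and the new index 3 is the P2A completion-interrupt channel that ends up in chan_platform_receiver, matching the case 4 mapping in mailbox_chan_validate() below.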
* * At first, validate the transport configuration as described in terms of * 'mboxes' and 'shmem', then determin which mailbox channel indexes are @@ -95,8 +112,8 @@ static bool mailbox_chan_available(struct device_node *of_node, int idx) * * Return: 0 on Success or error */ -static int mailbox_chan_validate(struct device *cdev, - int *a2p_rx_chan, int *p2a_chan) +static int mailbox_chan_validate(struct device *cdev, int *a2p_rx_chan, + int *p2a_chan, int *p2a_rx_chan) { int num_mb, num_sh, ret = 0; struct device_node *np = cdev->of_node; @@ -106,8 +123,9 @@ static int mailbox_chan_validate(struct device *cdev, dev_dbg(cdev, "Found %d mboxes and %d shmems !\n", num_mb, num_sh); /* Bail out if mboxes and shmem descriptors are inconsistent */ - if (num_mb <= 0 || num_sh <= 0 || num_sh > 2 || num_mb > 3 || - (num_mb == 1 && num_sh != 1) || (num_mb == 3 && num_sh != 2)) { + if (num_mb <= 0 || num_sh <= 0 || num_sh > 2 || num_mb > 4 || + (num_mb == 1 && num_sh != 1) || (num_mb == 3 && num_sh != 2) || + (num_mb == 4 && num_sh != 2)) { dev_warn(cdev, "Invalid channel descriptor for '%s' - mbs:%d shm:%d\n", of_node_full_name(np), num_mb, num_sh); @@ -116,18 +134,16 @@ static int mailbox_chan_validate(struct device *cdev, /* Bail out if provided shmem descriptors do not refer distinct areas */ if (num_sh > 1) { - struct device_node *np_tx, *np_rx; + struct device_node *np_tx __free(device_node) = + of_parse_phandle(np, "shmem", 0); + struct device_node *np_rx __free(device_node) = + of_parse_phandle(np, "shmem", 1); - np_tx = of_parse_phandle(np, "shmem", 0); - np_rx = of_parse_phandle(np, "shmem", 1); if (!np_tx || !np_rx || np_tx == np_rx) { dev_warn(cdev, "Invalid shmem descriptor for '%s'\n", of_node_full_name(np)); ret = -EINVAL; } - - of_node_put(np_tx); - of_node_put(np_rx); } /* Calculate channels IDs to use depending on mboxes/shmem layout */ @@ -136,6 +152,7 @@ static int mailbox_chan_validate(struct device *cdev, case 1: *a2p_rx_chan = 0; *p2a_chan = 0; + *p2a_rx_chan = 0; break; case 2: if (num_sh == 2) { @@ -145,10 +162,17 @@ static int mailbox_chan_validate(struct device *cdev, *a2p_rx_chan = 1; *p2a_chan = 0; } + *p2a_rx_chan = 0; break; case 3: *a2p_rx_chan = 1; *p2a_chan = 2; + *p2a_rx_chan = 0; + break; + case 4: + *a2p_rx_chan = 1; + *p2a_chan = 2; + *p2a_rx_chan = 3; break; } } @@ -162,13 +186,10 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, const char *desc = tx ? "Tx" : "Rx"; struct device *cdev = cinfo->dev; struct scmi_mailbox *smbox; - struct device_node *shmem; - int ret, a2p_rx_chan, p2a_chan, idx = tx ? 
0 : 1; + int ret, a2p_rx_chan, p2a_chan, p2a_rx_chan; struct mbox_client *cl; - resource_size_t size; - struct resource res; - ret = mailbox_chan_validate(cdev, &a2p_rx_chan, &p2a_chan); + ret = mailbox_chan_validate(cdev, &a2p_rx_chan, &p2a_chan, &p2a_rx_chan); if (ret) return ret; @@ -179,25 +200,10 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, if (!smbox) return -ENOMEM; - shmem = of_parse_phandle(cdev->of_node, "shmem", idx); - if (!of_device_is_compatible(shmem, "arm,scmi-shmem")) { - of_node_put(shmem); - return -ENXIO; - } - - ret = of_address_to_resource(shmem, 0, &res); - of_node_put(shmem); - if (ret) { - dev_err(cdev, "failed to get SCMI %s shared memory\n", desc); - return ret; - } - - size = resource_size(&res); - smbox->shmem = devm_ioremap(dev, res.start, size); - if (!smbox->shmem) { - dev_err(dev, "failed to ioremap SCMI %s shared memory\n", desc); - return -EADDRNOTAVAIL; - } + smbox->shmem = core->shmem->setup_iomap(cinfo, dev, tx, NULL, + &smbox->io_ops); + if (IS_ERR(smbox->shmem)) + return PTR_ERR(smbox->shmem); cl = &smbox->cl; cl->dev = cdev; @@ -226,8 +232,19 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, } } + if (!tx && p2a_rx_chan) { + smbox->chan_platform_receiver = mbox_request_channel(cl, p2a_rx_chan); + if (IS_ERR(smbox->chan_platform_receiver)) { + ret = PTR_ERR(smbox->chan_platform_receiver); + if (ret != -EPROBE_DEFER) + dev_err(cdev, "failed to request SCMI P2A Receiver mailbox\n"); + return ret; + } + } + cinfo->transport_info = smbox; smbox->cinfo = cinfo; + mutex_init(&smbox->chan_lock); return 0; } @@ -240,9 +257,11 @@ static int mailbox_chan_free(int id, void *p, void *data) if (smbox && !IS_ERR(smbox->chan)) { mbox_free_channel(smbox->chan); mbox_free_channel(smbox->chan_receiver); + mbox_free_channel(smbox->chan_platform_receiver); cinfo->transport_info = NULL; smbox->chan = NULL; smbox->chan_receiver = NULL; + smbox->chan_platform_receiver = NULL; smbox->cinfo = NULL; } @@ -255,13 +274,23 @@ static int mailbox_send_message(struct scmi_chan_info *cinfo, struct scmi_mailbox *smbox = cinfo->transport_info; int ret; - ret = mbox_send_message(smbox->chan, xfer); + /* + * The mailbox layer has its own queue. However the mailbox queue + * confuses the per message SCMI timeouts since the clock starts when + * the message is submitted into the mailbox queue. So when multiple + * messages are queued up the clock starts on all messages instead of + * only the one inflight. + */ + mutex_lock(&smbox->chan_lock); - /* mbox_send_message returns non-negative value on success, so reset */ - if (ret > 0) - ret = 0; + ret = mbox_send_message(smbox->chan, xfer); + /* mbox_send_message returns non-negative value on success */ + if (ret < 0) { + mutex_unlock(&smbox->chan_lock); + return ret; + } - return ret; + return 0; } static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret, @@ -269,13 +298,10 @@ static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret, { struct scmi_mailbox *smbox = cinfo->transport_info; - /* - * NOTE: we might prefer not to need the mailbox ticker to manage the - * transfer queueing since the protocol layer queues things by itself. - * Unfortunately, we have to kick the mailbox framework after we have - * received our message. 
- */ mbox_client_txdone(smbox->chan, ret); + + /* Release channel */ + mutex_unlock(&smbox->chan_lock); } static void mailbox_fetch_response(struct scmi_chan_info *cinfo, @@ -283,7 +309,7 @@ static void mailbox_fetch_response(struct scmi_chan_info *cinfo, { struct scmi_mailbox *smbox = cinfo->transport_info; - shmem_fetch_response(smbox->shmem, xfer); + core->shmem->fetch_response(smbox->shmem, xfer, smbox->io_ops->fromio); } static void mailbox_fetch_notification(struct scmi_chan_info *cinfo, @@ -291,14 +317,34 @@ static void mailbox_fetch_notification(struct scmi_chan_info *cinfo, { struct scmi_mailbox *smbox = cinfo->transport_info; - shmem_fetch_notification(smbox->shmem, max_len, xfer); + core->shmem->fetch_notification(smbox->shmem, max_len, xfer, + smbox->io_ops->fromio); } static void mailbox_clear_channel(struct scmi_chan_info *cinfo) { struct scmi_mailbox *smbox = cinfo->transport_info; + struct mbox_chan *intr_chan; + int ret; + + core->shmem->clear_channel(smbox->shmem); + + if (!core->shmem->channel_intr_enabled(smbox->shmem)) + return; + + if (smbox->chan_platform_receiver) + intr_chan = smbox->chan_platform_receiver; + else if (smbox->chan) + intr_chan = smbox->chan; + else + return; - shmem_clear_channel(smbox->shmem); + ret = mbox_send_message(intr_chan, NULL); + /* mbox_send_message returns non-negative value on success, so reset */ + if (ret > 0) + ret = 0; + + mbox_client_txdone(intr_chan, ret); } static bool @@ -306,7 +352,7 @@ mailbox_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer) { struct scmi_mailbox *smbox = cinfo->transport_info; - return shmem_poll_done(smbox->shmem, xfer); + return core->shmem->poll_done(smbox->shmem, xfer); } static const struct scmi_transport_ops scmi_mailbox_ops = { @@ -321,9 +367,23 @@ static const struct scmi_transport_ops scmi_mailbox_ops = { .poll_done = mailbox_poll_done, }; -const struct scmi_desc scmi_mailbox_desc = { +static struct scmi_desc scmi_mailbox_desc = { .ops = &scmi_mailbox_ops, .max_rx_timeout_ms = 30, /* We may increase this if required */ .max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */ - .max_msg_size = 128, + .max_msg_size = SCMI_SHMEM_MAX_PAYLOAD_SIZE, +}; + +static const struct of_device_id scmi_of_match[] = { + { .compatible = "arm,scmi" }, + { /* Sentinel */ }, }; +MODULE_DEVICE_TABLE(of, scmi_of_match); + +DEFINE_SCMI_TRANSPORT_DRIVER(scmi_mailbox, scmi_mailbox_driver, + scmi_mailbox_desc, scmi_of_match, core); +module_platform_driver(scmi_mailbox_driver); + +MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>"); +MODULE_DESCRIPTION("SCMI Mailbox Transport driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/transports/optee.c index 4e7944b91e38..3949a877e17d 100644 --- a/drivers/firmware/arm_scmi/optee.c +++ b/drivers/firmware/arm_scmi/transports/optee.c @@ -9,14 +9,13 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/mutex.h> +#include <linux/platform_device.h> #include <linux/slab.h> #include <linux/tee_drv.h> #include <linux/uuid.h> #include <uapi/linux/tee.h> -#include "common.h" - -#define SCMI_OPTEE_MAX_MSG_SIZE 128 +#include "../common.h" enum scmi_optee_pta_cmd { /* @@ -113,6 +112,7 @@ enum scmi_optee_pta_cmd { * @req.shmem: Virtual base address of the shared memory * @req.msg: Shared memory protocol handle for SCMI request and * synchronous response + * @io_ops: Transport specific I/O operations * @tee_shm: TEE shared memory handle @req or NULL if using IOMEM shmem * @link: Reference in agent's channel list */ @@ 
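The chan_lock introduced above is held from mailbox_send_message() until mailbox_mark_txdone(), so only one xfer sits inside the mailbox framework at a time and the SCMI per-message timeout starts when a message is actually in flight rather than when it is merely queued. A compressed sketch of that locking pattern; the structure and the mailbox hand-off are stand-ins, not the driver's real types:

	#include <linux/mutex.h>

	struct chan_state {
		struct mutex chan_lock;		/* one message in flight at a time */
	};

	/* Stand-in for mbox_send_message(); assume it queues the transfer */
	static int hand_off_to_mailbox(struct chan_state *c, void *xfer)
	{
		return 0;
	}

	static int send_message(struct chan_state *c, void *xfer)
	{
		int ret;

		mutex_lock(&c->chan_lock);

		ret = hand_off_to_mailbox(c, xfer);
		if (ret < 0) {
			mutex_unlock(&c->chan_lock);	/* undo on failure */
			return ret;
		}

		return 0;				/* lock held until txdone */
	}

	static void mark_txdone(struct chan_state *c)
	{
		mutex_unlock(&c->chan_lock);		/* next sender may proceed */
	}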
-127,6 +127,7 @@ struct scmi_optee_channel { struct scmi_shared_mem __iomem *shmem; struct scmi_msg_payld *msg; } req; + struct scmi_shmem_io_ops *io_ops; struct tee_shm *tee_shm; struct list_head link; }; @@ -148,12 +149,11 @@ struct scmi_optee_agent { struct list_head channel_list; }; +static struct scmi_transport_core_operations *core; + /* There can be only 1 SCMI service in OP-TEE we connect to */ static struct scmi_optee_agent *scmi_optee_private; -/* Forward reference to scmi_optee transport initialization */ -static int scmi_optee_init(void); - /* Open a session toward SCMI OP-TEE service with REE_KERNEL identity */ static int open_session(struct scmi_optee_agent *agent, u32 *tee_session) { @@ -297,7 +297,7 @@ static int invoke_process_msg_channel(struct scmi_optee_channel *channel, size_t param[2].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT; param[2].u.memref.shm = channel->tee_shm; - param[2].u.memref.size = SCMI_OPTEE_MAX_MSG_SIZE; + param[2].u.memref.size = SCMI_SHMEM_MAX_PAYLOAD_SIZE; ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param); if (ret < 0 || arg.ret) { @@ -312,24 +312,6 @@ static int invoke_process_msg_channel(struct scmi_optee_channel *channel, size_t return 0; } -static int scmi_optee_link_supplier(struct device *dev) -{ - if (!scmi_optee_private) { - if (scmi_optee_init()) - dev_dbg(dev, "Optee bus not yet ready\n"); - - /* Wait for optee bus */ - return -EPROBE_DEFER; - } - - if (!device_link_add(dev, scmi_optee_private->dev, DL_FLAG_AUTOREMOVE_CONSUMER)) { - dev_err(dev, "Adding link to supplier optee device failed\n"); - return -ECANCELED; - } - - return 0; -} - static bool scmi_optee_chan_available(struct device_node *of_node, int idx) { u32 channel_id; @@ -343,12 +325,12 @@ static void scmi_optee_clear_channel(struct scmi_chan_info *cinfo) struct scmi_optee_channel *channel = cinfo->transport_info; if (!channel->tee_shm) - shmem_clear_channel(channel->req.shmem); + core->shmem->clear_channel(channel->req.shmem); } static int setup_dynamic_shmem(struct device *dev, struct scmi_optee_channel *channel) { - const size_t msg_size = SCMI_OPTEE_MAX_MSG_SIZE; + const size_t msg_size = SCMI_SHMEM_MAX_PAYLOAD_SIZE; void *shbuf; channel->tee_shm = tee_shm_alloc_kernel_buf(scmi_optee_private->tee_ctx, msg_size); @@ -368,38 +350,12 @@ static int setup_dynamic_shmem(struct device *dev, struct scmi_optee_channel *ch static int setup_static_shmem(struct device *dev, struct scmi_chan_info *cinfo, struct scmi_optee_channel *channel) { - struct device_node *np; - resource_size_t size; - struct resource res; - int ret; - - np = of_parse_phandle(cinfo->dev->of_node, "shmem", 0); - if (!of_device_is_compatible(np, "arm,scmi-shmem")) { - ret = -ENXIO; - goto out; - } - - ret = of_address_to_resource(np, 0, &res); - if (ret) { - dev_err(dev, "Failed to get SCMI Tx shared memory\n"); - goto out; - } + channel->req.shmem = core->shmem->setup_iomap(cinfo, dev, true, NULL, + &channel->io_ops); + if (IS_ERR(channel->req.shmem)) + return PTR_ERR(channel->req.shmem); - size = resource_size(&res); - - channel->req.shmem = devm_ioremap(dev, res.start, size); - if (!channel->req.shmem) { - dev_err(dev, "Failed to ioremap SCMI Tx shared memory\n"); - ret = -EADDRNOTAVAIL; - goto out; - } - - ret = 0; - -out: - of_node_put(np); - - return ret; + return 0; } static int setup_shmem(struct device *dev, struct scmi_chan_info *cinfo, @@ -473,6 +429,13 @@ static int scmi_optee_chan_free(int id, void *p, void *data) struct scmi_chan_info *cinfo = p; struct scmi_optee_channel 
*channel = cinfo->transport_info; + /* + * Different protocols might share the same chan info, so a previous + * call might have already freed the structure. + */ + if (!channel) + return 0; + mutex_lock(&scmi_optee_private->mu); list_del(&channel->link); mutex_unlock(&scmi_optee_private->mu); @@ -499,10 +462,12 @@ static int scmi_optee_send_message(struct scmi_chan_info *cinfo, mutex_lock(&channel->mu); if (channel->tee_shm) { - msg_tx_prepare(channel->req.msg, xfer); - ret = invoke_process_msg_channel(channel, msg_command_size(xfer)); + core->msg->tx_prepare(channel->req.msg, xfer); + ret = invoke_process_msg_channel(channel, + core->msg->command_size(xfer)); } else { - shmem_tx_prepare(channel->req.shmem, xfer, cinfo); + core->shmem->tx_prepare(channel->req.shmem, xfer, cinfo, + channel->io_ops->toio); ret = invoke_process_smt_channel(channel); } @@ -518,9 +483,11 @@ static void scmi_optee_fetch_response(struct scmi_chan_info *cinfo, struct scmi_optee_channel *channel = cinfo->transport_info; if (channel->tee_shm) - msg_fetch_response(channel->req.msg, channel->rx_len, xfer); + core->msg->fetch_response(channel->req.msg, + channel->rx_len, xfer); else - shmem_fetch_response(channel->req.shmem, xfer); + core->shmem->fetch_response(channel->req.shmem, xfer, + channel->io_ops->fromio); } static void scmi_optee_mark_txdone(struct scmi_chan_info *cinfo, int ret, @@ -532,7 +499,6 @@ static void scmi_optee_mark_txdone(struct scmi_chan_info *cinfo, int ret, } static struct scmi_transport_ops scmi_optee_ops = { - .link_supplier = scmi_optee_link_supplier, .chan_available = scmi_optee_chan_available, .chan_setup = scmi_optee_chan_setup, .chan_free = scmi_optee_chan_free, @@ -547,6 +513,22 @@ static int scmi_optee_ctx_match(struct tee_ioctl_version_data *ver, const void * return ver->impl_id == TEE_IMPL_ID_OPTEE; } +static struct scmi_desc scmi_optee_desc = { + .ops = &scmi_optee_ops, + .max_rx_timeout_ms = 30, + .max_msg = 20, + .max_msg_size = SCMI_SHMEM_MAX_PAYLOAD_SIZE, + .sync_cmds_completed_on_ret = true, +}; + +static const struct of_device_id scmi_of_match[] = { + { .compatible = "linaro,scmi-optee" }, + { /* Sentinel */ }, +}; + +DEFINE_SCMI_TRANSPORT_DRIVER(scmi_optee, scmi_optee_driver, scmi_optee_desc, + scmi_of_match, core); + static int scmi_optee_service_probe(struct device *dev) { struct scmi_optee_agent *agent; @@ -582,6 +564,12 @@ static int scmi_optee_service_probe(struct device *dev) smp_mb(); scmi_optee_private = agent; + ret = platform_driver_register(&scmi_optee_driver); + if (ret) { + scmi_optee_private = NULL; + goto err; + } + return 0; err: @@ -597,6 +585,8 @@ static int scmi_optee_service_remove(struct device *dev) if (!scmi_optee_private) return -EINVAL; + platform_driver_unregister(&scmi_optee_driver); + if (!list_empty(&scmi_optee_private->channel_list)) return -EBUSY; @@ -618,7 +608,7 @@ static const struct tee_client_device_id scmi_optee_service_id[] = { MODULE_DEVICE_TABLE(tee, scmi_optee_service_id); -static struct tee_client_driver scmi_optee_driver = { +static struct tee_client_driver scmi_optee_service_driver = { .id_table = scmi_optee_service_id, .driver = { .name = "scmi-optee", @@ -628,22 +618,18 @@ static struct tee_client_driver scmi_optee_driver = { }, }; -static int scmi_optee_init(void) +static int __init scmi_transport_optee_init(void) { - return driver_register(&scmi_optee_driver.driver); + return driver_register(&scmi_optee_service_driver.driver); } +module_init(scmi_transport_optee_init); -static void scmi_optee_exit(void) +static void __exit 
scmi_transport_optee_exit(void) { - if (scmi_optee_private) - driver_unregister(&scmi_optee_driver.driver); + driver_unregister(&scmi_optee_service_driver.driver); } +module_exit(scmi_transport_optee_exit); -const struct scmi_desc scmi_optee_desc = { - .transport_exit = scmi_optee_exit, - .ops = &scmi_optee_ops, - .max_rx_timeout_ms = 30, - .max_msg = 20, - .max_msg_size = SCMI_OPTEE_MAX_MSG_SIZE, - .sync_cmds_completed_on_ret = true, -}; +MODULE_AUTHOR("Etienne Carriere <etienne.carriere@foss.st.com>"); +MODULE_DESCRIPTION("SCMI OPTEE Transport driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/transports/smc.c index 39936e1dd30e..21abb571e4f2 100644 --- a/drivers/firmware/arm_scmi/smc.c +++ b/drivers/firmware/arm_scmi/transports/smc.c @@ -16,10 +16,11 @@ #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/limits.h> +#include <linux/platform_device.h> #include <linux/processor.h> #include <linux/slab.h> -#include "common.h" +#include "../common.h" /* * The shmem address is split into 4K page and offset. @@ -44,6 +45,7 @@ * @irq: An optional IRQ for completion * @cinfo: SCMI channel info * @shmem: Transmit/Receive shared memory area + * @io_ops: Transport specific I/O operations * @shmem_lock: Lock to protect access to Tx/Rx shared memory area. * Used when NOT operating in atomic mode. * @inflight: Atomic flag to protect access to Tx/Rx shared memory area. @@ -59,6 +61,7 @@ struct scmi_smc { int irq; struct scmi_chan_info *cinfo; struct scmi_shared_mem __iomem *shmem; + struct scmi_shmem_io_ops *io_ops; /* Protect access to shmem area */ struct mutex shmem_lock; #define INFLIGHT_NONE MSG_TOKEN_MAX @@ -69,23 +72,25 @@ struct scmi_smc { unsigned long cap_id; }; +static struct scmi_transport_core_operations *core; + static irqreturn_t smc_msg_done_isr(int irq, void *data) { struct scmi_smc *scmi_info = data; - scmi_rx_callback(scmi_info->cinfo, - shmem_read_header(scmi_info->shmem), NULL); + core->rx_callback(scmi_info->cinfo, + core->shmem->read_header(scmi_info->shmem), NULL); return IRQ_HANDLED; } static bool smc_chan_available(struct device_node *of_node, int idx) { - struct device_node *np = of_parse_phandle(of_node, "shmem", 0); + struct device_node *np __free(device_node) = + of_parse_phandle(of_node, "shmem", 0); if (!np) return false; - of_node_put(np); return true; } @@ -130,9 +135,7 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, struct device *cdev = cinfo->dev; unsigned long cap_id = ULONG_MAX; struct scmi_smc *scmi_info; - resource_size_t size; - struct resource res; - struct device_node *np; + struct resource res = {}; u32 func_id; int ret; @@ -143,31 +146,17 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, if (!scmi_info) return -ENOMEM; - np = of_parse_phandle(cdev->of_node, "shmem", 0); - if (!of_device_is_compatible(np, "arm,scmi-shmem")) { - of_node_put(np); - return -ENXIO; - } - - ret = of_address_to_resource(np, 0, &res); - of_node_put(np); - if (ret) { - dev_err(cdev, "failed to get SCMI Tx shared memory\n"); - return ret; - } - - size = resource_size(&res); - scmi_info->shmem = devm_ioremap(dev, res.start, size); - if (!scmi_info->shmem) { - dev_err(dev, "failed to ioremap SCMI Tx shared memory\n"); - return -EADDRNOTAVAIL; - } + scmi_info->shmem = core->shmem->setup_iomap(cinfo, dev, tx, &res, + &scmi_info->io_ops); + if (IS_ERR(scmi_info->shmem)) + return PTR_ERR(scmi_info->shmem); ret = of_property_read_u32(dev->of_node, 
"arm,smc-id", &func_id); if (ret < 0) return ret; if (of_device_is_compatible(dev->of_node, "qcom,scmi-smc")) { + resource_size_t size = resource_size(&res); void __iomem *ptr = (void __iomem *)scmi_info->shmem + size - 8; /* The capability-id is kept in last 8 bytes of shmem. * +-------+ <-- 0 @@ -243,7 +232,8 @@ static int smc_send_message(struct scmi_chan_info *cinfo, */ smc_channel_lock_acquire(scmi_info, xfer); - shmem_tx_prepare(scmi_info->shmem, xfer, cinfo); + core->shmem->tx_prepare(scmi_info->shmem, xfer, cinfo, + scmi_info->io_ops->toio); if (scmi_info->cap_id != ULONG_MAX) arm_smccc_1_1_invoke(scmi_info->func_id, scmi_info->cap_id, 0, @@ -267,7 +257,8 @@ static void smc_fetch_response(struct scmi_chan_info *cinfo, { struct scmi_smc *scmi_info = cinfo->transport_info; - shmem_fetch_response(scmi_info->shmem, xfer); + core->shmem->fetch_response(scmi_info->shmem, xfer, + scmi_info->io_ops->fromio); } static void smc_mark_txdone(struct scmi_chan_info *cinfo, int ret, @@ -287,11 +278,11 @@ static const struct scmi_transport_ops scmi_smc_ops = { .fetch_response = smc_fetch_response, }; -const struct scmi_desc scmi_smc_desc = { +static struct scmi_desc scmi_smc_desc = { .ops = &scmi_smc_ops, .max_rx_timeout_ms = 30, .max_msg = 20, - .max_msg_size = 128, + .max_msg_size = SCMI_SHMEM_MAX_PAYLOAD_SIZE, /* * Setting .sync_cmds_atomic_replies to true for SMC assumes that, * once the SMC instruction has completed successfully, the issued @@ -303,3 +294,20 @@ const struct scmi_desc scmi_smc_desc = { .sync_cmds_completed_on_ret = true, .atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE), }; + +static const struct of_device_id scmi_of_match[] = { + { .compatible = "arm,scmi-smc" }, + { .compatible = "arm,scmi-smc-param" }, + { .compatible = "qcom,scmi-smc" }, + { /* Sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, scmi_of_match); + +DEFINE_SCMI_TRANSPORT_DRIVER(scmi_smc, scmi_smc_driver, scmi_smc_desc, + scmi_of_match, core); +module_platform_driver(scmi_smc_driver); + +MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>"); +MODULE_AUTHOR("Nikunj Kela <quic_nkela@quicinc.com>"); +MODULE_DESCRIPTION("SCMI SMC Transport driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/transports/virtio.c index d68c01cb7aa0..cb934db9b2b4 100644 --- a/drivers/firmware/arm_scmi/virtio.c +++ b/drivers/firmware/arm_scmi/transports/virtio.c @@ -4,7 +4,7 @@ * (SCMI). * * Copyright (C) 2020-2022 OpenSynergy. - * Copyright (C) 2021-2022 ARM Ltd. + * Copyright (C) 2021-2024 ARM Ltd. */ /** @@ -19,6 +19,7 @@ #include <linux/completion.h> #include <linux/errno.h> +#include <linux/platform_device.h> #include <linux/refcount.h> #include <linux/slab.h> #include <linux/virtio.h> @@ -27,12 +28,12 @@ #include <uapi/linux/virtio_ids.h> #include <uapi/linux/virtio_scmi.h> -#include "common.h" +#include "../common.h" #define VIRTIO_MAX_RX_TIMEOUT_MS 60000 #define VIRTIO_SCMI_MAX_MSG_SIZE 128 /* Value may be increased. 
*/ -#define VIRTIO_SCMI_MAX_PDU_SIZE \ - (VIRTIO_SCMI_MAX_MSG_SIZE + SCMI_MSG_MAX_PROT_OVERHEAD) +#define VIRTIO_SCMI_MAX_PDU_SIZE(ci) \ + ((ci)->max_msg_size + SCMI_MSG_MAX_PROT_OVERHEAD) #define DESCRIPTORS_PER_TX_MSG 2 /** @@ -89,6 +90,7 @@ enum poll_states { * @input: SDU used for (delayed) responses and notifications * @list: List which scmi_vio_msg may be part of * @rx_len: Input SDU size in bytes, once input has been received + * @max_len: Maximumm allowed SDU size in bytes * @poll_idx: Last used index registered for polling purposes if this message * transaction reply was configured for polling. * @poll_status: Polling state for this message. @@ -101,6 +103,7 @@ struct scmi_vio_msg { struct scmi_msg_payld *input; struct list_head list; unsigned int rx_len; + unsigned int max_len; unsigned int poll_idx; enum poll_states poll_status; /* Lock to protect access to poll_status */ @@ -108,6 +111,8 @@ struct scmi_vio_msg { refcount_t users; }; +static struct scmi_transport_core_operations *core; + /* Only one SCMI VirtIO device can possibly exist */ static struct virtio_device *scmi_vdev; @@ -231,7 +236,7 @@ static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch, unsigned long flags; struct device *dev = &vioch->vqueue->vdev->dev; - sg_init_one(&sg_in, msg->input, VIRTIO_SCMI_MAX_PDU_SIZE); + sg_init_one(&sg_in, msg->input, msg->max_len); spin_lock_irqsave(&vioch->lock, flags); @@ -294,8 +299,9 @@ static void scmi_vio_complete_cb(struct virtqueue *vqueue) if (msg) { msg->rx_len = length; - scmi_rx_callback(vioch->cinfo, - msg_read_header(msg->input), msg); + core->rx_callback(vioch->cinfo, + core->msg->read_header(msg->input), + msg); scmi_finalize_message(vioch, msg); } @@ -339,8 +345,9 @@ static void scmi_vio_deferred_tx_worker(struct work_struct *work) * is no more processed elsewhere so no poll_lock needed. 
*/ if (msg->poll_status == VIO_MSG_NOT_POLLED) - scmi_rx_callback(vioch->cinfo, - msg_read_header(msg->input), msg); + core->rx_callback(vioch->cinfo, + core->msg->read_header(msg->input), + msg); /* Free the processed message once done */ scmi_vio_msg_release(vioch, msg); @@ -354,11 +361,9 @@ static void scmi_vio_deferred_tx_worker(struct work_struct *work) scmi_vio_channel_release(vioch); } -static const char *const scmi_vio_vqueue_names[] = { "tx", "rx" }; - -static vq_callback_t *scmi_vio_complete_callbacks[] = { - scmi_vio_complete_cb, - scmi_vio_complete_cb +static struct virtqueue_info scmi_vio_vqs_info[] = { + { "tx", scmi_vio_complete_cb }, + { "rx", scmi_vio_complete_cb }, }; static unsigned int virtio_get_max_msg(struct scmi_chan_info *base_cinfo) @@ -368,23 +373,6 @@ static unsigned int virtio_get_max_msg(struct scmi_chan_info *base_cinfo) return vioch->max_msg; } -static int virtio_link_supplier(struct device *dev) -{ - if (!scmi_vdev) { - dev_notice(dev, - "Deferring probe after not finding a bound scmi-virtio device\n"); - return -EPROBE_DEFER; - } - - if (!device_link_add(dev, &scmi_vdev->dev, - DL_FLAG_AUTOREMOVE_CONSUMER)) { - dev_err(dev, "Adding link to supplier virtio device failed\n"); - return -ECANCELED; - } - - return 0; -} - static bool virtio_chan_available(struct device_node *of_node, int idx) { struct scmi_vio_channel *channels, *vioch = NULL; @@ -453,9 +441,9 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, if (!msg) return -ENOMEM; + msg->max_len = VIRTIO_SCMI_MAX_PDU_SIZE(cinfo); if (tx) { - msg->request = devm_kzalloc(dev, - VIRTIO_SCMI_MAX_PDU_SIZE, + msg->request = devm_kzalloc(dev, msg->max_len, GFP_KERNEL); if (!msg->request) return -ENOMEM; @@ -463,8 +451,7 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, refcount_set(&msg->users, 1); } - msg->input = devm_kzalloc(dev, VIRTIO_SCMI_MAX_PDU_SIZE, - GFP_KERNEL); + msg->input = devm_kzalloc(dev, msg->max_len, GFP_KERNEL); if (!msg->input) return -ENOMEM; @@ -512,10 +499,10 @@ static int virtio_send_message(struct scmi_chan_info *cinfo, return -EBUSY; } - msg_tx_prepare(msg->request, xfer); + core->msg->tx_prepare(msg->request, xfer); - sg_init_one(&sg_out, msg->request, msg_command_size(xfer)); - sg_init_one(&sg_in, msg->input, msg_response_size(xfer)); + sg_init_one(&sg_out, msg->request, core->msg->command_size(xfer)); + sg_init_one(&sg_in, msg->input, core->msg->response_size(xfer)); spin_lock_irqsave(&vioch->lock, flags); @@ -562,7 +549,7 @@ static void virtio_fetch_response(struct scmi_chan_info *cinfo, struct scmi_vio_msg *msg = xfer->priv; if (msg) - msg_fetch_response(msg->input, msg->rx_len, xfer); + core->msg->fetch_response(msg->input, msg->rx_len, xfer); } static void virtio_fetch_notification(struct scmi_chan_info *cinfo, @@ -571,7 +558,8 @@ static void virtio_fetch_notification(struct scmi_chan_info *cinfo, struct scmi_vio_msg *msg = xfer->priv; if (msg) - msg_fetch_notification(msg->input, msg->rx_len, max_len, xfer); + core->msg->fetch_notification(msg->input, msg->rx_len, + max_len, xfer); } /** @@ -671,7 +659,7 @@ static void virtio_mark_txdone(struct scmi_chan_info *cinfo, int ret, * the message we are polling for could be alternatively delivered via usual * IRQs callbacks on another core which happened to have IRQs enabled while we * are actively polling for it here: in such a case it will be handled as such - * by scmi_rx_callback() and the polling loop in the SCMI Core TX path will be + * by rx_callback() and the 
polling loop in the SCMI Core TX path will be * transparently terminated anyway. * * Return: True once polling has successfully completed. @@ -792,7 +780,6 @@ static bool virtio_poll_done(struct scmi_chan_info *cinfo, } static const struct scmi_transport_ops scmi_virtio_ops = { - .link_supplier = virtio_link_supplier, .chan_available = virtio_chan_available, .chan_setup = virtio_chan_setup, .chan_free = virtio_chan_free, @@ -804,6 +791,23 @@ static const struct scmi_transport_ops scmi_virtio_ops = { .poll_done = virtio_poll_done, }; +static struct scmi_desc scmi_virtio_desc = { + .ops = &scmi_virtio_ops, + /* for non-realtime virtio devices */ + .max_rx_timeout_ms = VIRTIO_MAX_RX_TIMEOUT_MS, + .max_msg = 0, /* overridden by virtio_get_max_msg() */ + .max_msg_size = VIRTIO_SCMI_MAX_MSG_SIZE, + .atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE), +}; + +static const struct of_device_id scmi_of_match[] = { + { .compatible = "arm,scmi-virtio" }, + { /* Sentinel */ }, +}; + +DEFINE_SCMI_TRANSPORT_DRIVER(scmi_virtio, scmi_virtio_driver, scmi_virtio_desc, + scmi_of_match, core); + static int scmi_vio_probe(struct virtio_device *vdev) { struct device *dev = &vdev->dev; @@ -831,8 +835,7 @@ static int scmi_vio_probe(struct virtio_device *vdev) if (have_vq_rx) channels[VIRTIO_SCMI_VQ_RX].is_rx = true; - ret = virtio_find_vqs(vdev, vq_cnt, vqs, scmi_vio_complete_callbacks, - scmi_vio_vqueue_names, NULL); + ret = virtio_find_vqs(vdev, vq_cnt, vqs, scmi_vio_vqs_info, NULL); if (ret) { dev_err(dev, "Failed to get %d virtqueue(s)\n", vq_cnt); return ret; @@ -864,14 +867,27 @@ static int scmi_vio_probe(struct virtio_device *vdev) } vdev->priv = channels; + /* Ensure initialized scmi_vdev is visible */ smp_store_mb(scmi_vdev, vdev); + ret = platform_driver_register(&scmi_virtio_driver); + if (ret) { + vdev->priv = NULL; + vdev->config->del_vqs(vdev); + /* Ensure NULLified scmi_vdev is visible */ + smp_store_mb(scmi_vdev, NULL); + + return ret; + } + return 0; } static void scmi_vio_remove(struct virtio_device *vdev) { + platform_driver_unregister(&scmi_virtio_driver); + /* * Once we get here, virtio_chan_free() will have already been called by * the SCMI core for any existing channel and, as a consequence, all the @@ -905,10 +921,10 @@ static const struct virtio_device_id id_table[] = { { VIRTIO_ID_SCMI, VIRTIO_DEV_ANY_ID }, { 0 } }; +MODULE_DEVICE_TABLE(virtio, id_table); static struct virtio_driver virtio_scmi_driver = { .driver.name = "scmi-virtio", - .driver.owner = THIS_MODULE, .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .id_table = id_table, @@ -917,23 +933,10 @@ static struct virtio_driver virtio_scmi_driver = { .validate = scmi_vio_validate, }; -static int __init virtio_scmi_init(void) -{ - return register_virtio_driver(&virtio_scmi_driver); -} - -static void virtio_scmi_exit(void) -{ - unregister_virtio_driver(&virtio_scmi_driver); -} +module_virtio_driver(virtio_scmi_driver); -const struct scmi_desc scmi_virtio_desc = { - .transport_init = virtio_scmi_init, - .transport_exit = virtio_scmi_exit, - .ops = &scmi_virtio_ops, - /* for non-realtime virtio devices */ - .max_rx_timeout_ms = VIRTIO_MAX_RX_TIMEOUT_MS, - .max_msg = 0, /* overridden by virtio_get_max_msg() */ - .max_msg_size = VIRTIO_SCMI_MAX_MSG_SIZE, - .atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE), -}; +MODULE_AUTHOR("Igor Skalkin <igor.skalkin@opensynergy.com>"); +MODULE_AUTHOR("Peter Hilber <peter.hilber@opensynergy.com>"); +MODULE_AUTHOR("Cristian Marussi 
<cristian.marussi@arm.com>"); +MODULE_DESCRIPTION("SCMI VirtIO Transport driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/arm_scmi/vendors/imx/Kconfig b/drivers/firmware/arm_scmi/vendors/imx/Kconfig new file mode 100644 index 000000000000..c34c8c837441 --- /dev/null +++ b/drivers/firmware/arm_scmi/vendors/imx/Kconfig @@ -0,0 +1,50 @@ +# SPDX-License-Identifier: GPL-2.0-only +menu "ARM SCMI NXP i.MX Vendor Protocols" + +config IMX_SCMI_BBM_EXT + tristate "i.MX SCMI BBM EXTENSION" + depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF) + default y if ARCH_MXC + help + This enables i.MX System BBM control logic which supports RTC + and BUTTON. + + To compile this driver as a module, choose M here: the + module will be called imx-sm-bbm. + +config IMX_SCMI_CPU_EXT + tristate "i.MX SCMI CPU EXTENSION" + depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF) + depends on IMX_SCMI_CPU_DRV + default y if ARCH_MXC + help + This enables i.MX System CPU Protocol to manage cpu + start, stop and etc. + + To compile this driver as a module, choose M here: the + module will be called imx-sm-cpu. + +config IMX_SCMI_LMM_EXT + tristate "i.MX SCMI LMM EXTENSION" + depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF) + depends on IMX_SCMI_LMM_DRV + default y if ARCH_MXC + help + This enables i.MX System Logical Machine Protocol to + manage Logical Machines boot, shutdown and etc. + + To compile this driver as a module, choose M here: the + module will be called imx-sm-lmm. + +config IMX_SCMI_MISC_EXT + tristate "i.MX SCMI MISC EXTENSION" + depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF) + depends on IMX_SCMI_MISC_DRV + default y if ARCH_MXC + help + This enables i.MX System MISC control logic such as gpio expander + wakeup + + To compile this driver as a module, choose M here: the + module will be called imx-sm-misc. 
+endmenu diff --git a/drivers/firmware/arm_scmi/vendors/imx/Makefile b/drivers/firmware/arm_scmi/vendors/imx/Makefile new file mode 100644 index 000000000000..e3a5ea46345c --- /dev/null +++ b/drivers/firmware/arm_scmi/vendors/imx/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_IMX_SCMI_BBM_EXT) += imx-sm-bbm.o +obj-$(CONFIG_IMX_SCMI_CPU_EXT) += imx-sm-cpu.o +obj-$(CONFIG_IMX_SCMI_LMM_EXT) += imx-sm-lmm.o +obj-$(CONFIG_IMX_SCMI_MISC_EXT) += imx-sm-misc.o diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-bbm.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-bbm.c new file mode 100644 index 000000000000..aa176c1a5eef --- /dev/null +++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-bbm.c @@ -0,0 +1,384 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface (SCMI) NXP BBM Protocol + * + * Copyright 2024 NXP + */ + +#define pr_fmt(fmt) "SCMI Notifications BBM - " fmt + +#include <linux/bits.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/scmi_protocol.h> +#include <linux/scmi_imx_protocol.h> + +#include "../../protocols.h" +#include "../../notify.h" + +#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x10000 + +enum scmi_imx_bbm_protocol_cmd { + IMX_BBM_GPR_SET = 0x3, + IMX_BBM_GPR_GET = 0x4, + IMX_BBM_RTC_ATTRIBUTES = 0x5, + IMX_BBM_RTC_TIME_SET = 0x6, + IMX_BBM_RTC_TIME_GET = 0x7, + IMX_BBM_RTC_ALARM_SET = 0x8, + IMX_BBM_BUTTON_GET = 0x9, + IMX_BBM_RTC_NOTIFY = 0xA, + IMX_BBM_BUTTON_NOTIFY = 0xB, +}; + +#define GET_RTCS_NR(x) le32_get_bits((x), GENMASK(23, 16)) +#define GET_GPRS_NR(x) le32_get_bits((x), GENMASK(15, 0)) + +#define SCMI_IMX_BBM_NOTIFY_RTC_UPDATED BIT(2) +#define SCMI_IMX_BBM_NOTIFY_RTC_ROLLOVER BIT(1) +#define SCMI_IMX_BBM_NOTIFY_RTC_ALARM BIT(0) + +#define SCMI_IMX_BBM_RTC_ALARM_ENABLE_FLAG BIT(0) + +#define SCMI_IMX_BBM_NOTIFY_RTC_FLAG \ + (SCMI_IMX_BBM_NOTIFY_RTC_UPDATED | SCMI_IMX_BBM_NOTIFY_RTC_ROLLOVER | \ + SCMI_IMX_BBM_NOTIFY_RTC_ALARM) + +#define SCMI_IMX_BBM_EVENT_RTC_MASK GENMASK(31, 24) + +struct scmi_imx_bbm_info { + u32 version; + int nr_rtc; + int nr_gpr; +}; + +struct scmi_msg_imx_bbm_protocol_attributes { + __le32 attributes; +}; + +struct scmi_imx_bbm_set_time { + __le32 id; + __le32 flags; + __le32 value_low; + __le32 value_high; +}; + +struct scmi_imx_bbm_get_time { + __le32 id; + __le32 flags; +}; + +struct scmi_imx_bbm_alarm_time { + __le32 id; + __le32 flags; + __le32 value_low; + __le32 value_high; +}; + +struct scmi_msg_imx_bbm_rtc_notify { + __le32 rtc_id; + __le32 flags; +}; + +struct scmi_msg_imx_bbm_button_notify { + __le32 flags; +}; + +struct scmi_imx_bbm_notify_payld { + __le32 flags; +}; + +static int scmi_imx_bbm_attributes_get(const struct scmi_protocol_handle *ph, + struct scmi_imx_bbm_info *pi) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_imx_bbm_protocol_attributes *attr; + + ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0, sizeof(*attr), &t); + if (ret) + return ret; + + attr = t->rx.buf; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + pi->nr_rtc = GET_RTCS_NR(attr->attributes); + pi->nr_gpr = GET_GPRS_NR(attr->attributes); + } + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_imx_bbm_notify(const struct scmi_protocol_handle *ph, + u32 src_id, int message_id, bool enable) +{ + int ret; + struct scmi_xfer *t; + + if (message_id == IMX_BBM_RTC_NOTIFY) { + struct scmi_msg_imx_bbm_rtc_notify *rtc_notify; + + ret = ph->xops->xfer_get_init(ph, message_id, + 
sizeof(*rtc_notify), 0, &t); + if (ret) + return ret; + + rtc_notify = t->tx.buf; + rtc_notify->rtc_id = cpu_to_le32(0); + rtc_notify->flags = + cpu_to_le32(enable ? SCMI_IMX_BBM_NOTIFY_RTC_FLAG : 0); + } else if (message_id == IMX_BBM_BUTTON_NOTIFY) { + struct scmi_msg_imx_bbm_button_notify *button_notify; + + ret = ph->xops->xfer_get_init(ph, message_id, + sizeof(*button_notify), 0, &t); + if (ret) + return ret; + + button_notify = t->tx.buf; + button_notify->flags = cpu_to_le32(enable ? 1 : 0); + } else { + return -EINVAL; + } + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + return ret; +} + +static enum scmi_imx_bbm_protocol_cmd evt_2_cmd[] = { + IMX_BBM_RTC_NOTIFY, + IMX_BBM_BUTTON_NOTIFY +}; + +static int scmi_imx_bbm_set_notify_enabled(const struct scmi_protocol_handle *ph, + u8 evt_id, u32 src_id, bool enable) +{ + int ret, cmd_id; + + if (evt_id >= ARRAY_SIZE(evt_2_cmd)) + return -EINVAL; + + cmd_id = evt_2_cmd[evt_id]; + ret = scmi_imx_bbm_notify(ph, src_id, cmd_id, enable); + if (ret) + pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n", + evt_id, src_id, ret); + + return ret; +} + +static void *scmi_imx_bbm_fill_custom_report(const struct scmi_protocol_handle *ph, + u8 evt_id, ktime_t timestamp, + const void *payld, size_t payld_sz, + void *report, u32 *src_id) +{ + const struct scmi_imx_bbm_notify_payld *p = payld; + struct scmi_imx_bbm_notif_report *r = report; + + if (sizeof(*p) != payld_sz) + return NULL; + + if (evt_id == SCMI_EVENT_IMX_BBM_RTC) { + r->is_rtc = true; + r->is_button = false; + r->timestamp = timestamp; + r->rtc_id = le32_get_bits(p->flags, SCMI_IMX_BBM_EVENT_RTC_MASK); + r->rtc_evt = le32_get_bits(p->flags, SCMI_IMX_BBM_NOTIFY_RTC_FLAG); + dev_dbg(ph->dev, "RTC: %d evt: %x\n", r->rtc_id, r->rtc_evt); + *src_id = r->rtc_evt; + } else if (evt_id == SCMI_EVENT_IMX_BBM_BUTTON) { + r->is_rtc = false; + r->is_button = true; + r->timestamp = timestamp; + dev_dbg(ph->dev, "BBM Button\n"); + *src_id = 0; + } else { + WARN_ON_ONCE(1); + return NULL; + } + + return r; +} + +static const struct scmi_event scmi_imx_bbm_events[] = { + { + .id = SCMI_EVENT_IMX_BBM_RTC, + .max_payld_sz = sizeof(struct scmi_imx_bbm_notify_payld), + .max_report_sz = sizeof(struct scmi_imx_bbm_notif_report), + }, + { + .id = SCMI_EVENT_IMX_BBM_BUTTON, + .max_payld_sz = sizeof(struct scmi_imx_bbm_notify_payld), + .max_report_sz = sizeof(struct scmi_imx_bbm_notif_report), + }, +}; + +static const struct scmi_event_ops scmi_imx_bbm_event_ops = { + .set_notify_enabled = scmi_imx_bbm_set_notify_enabled, + .fill_custom_report = scmi_imx_bbm_fill_custom_report, +}; + +static const struct scmi_protocol_events scmi_imx_bbm_protocol_events = { + .queue_sz = SCMI_PROTO_QUEUE_SZ, + .ops = &scmi_imx_bbm_event_ops, + .evts = scmi_imx_bbm_events, + .num_events = ARRAY_SIZE(scmi_imx_bbm_events), + .num_sources = 1, +}; + +static int scmi_imx_bbm_rtc_time_set(const struct scmi_protocol_handle *ph, + u32 rtc_id, u64 sec) +{ + struct scmi_imx_bbm_info *pi = ph->get_priv(ph); + struct scmi_imx_bbm_set_time *cfg; + struct scmi_xfer *t; + int ret; + + if (rtc_id >= pi->nr_rtc) + return -EINVAL; + + ret = ph->xops->xfer_get_init(ph, IMX_BBM_RTC_TIME_SET, sizeof(*cfg), 0, &t); + if (ret) + return ret; + + cfg = t->tx.buf; + cfg->id = cpu_to_le32(rtc_id); + cfg->flags = 0; + cfg->value_low = cpu_to_le32(lower_32_bits(sec)); + cfg->value_high = cpu_to_le32(upper_32_bits(sec)); + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int 
scmi_imx_bbm_rtc_time_get(const struct scmi_protocol_handle *ph, + u32 rtc_id, u64 *value) +{ + struct scmi_imx_bbm_info *pi = ph->get_priv(ph); + struct scmi_imx_bbm_get_time *cfg; + struct scmi_xfer *t; + int ret; + + if (rtc_id >= pi->nr_rtc) + return -EINVAL; + + ret = ph->xops->xfer_get_init(ph, IMX_BBM_RTC_TIME_GET, sizeof(*cfg), + sizeof(u64), &t); + if (ret) + return ret; + + cfg = t->tx.buf; + cfg->id = cpu_to_le32(rtc_id); + cfg->flags = 0; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) + *value = get_unaligned_le64(t->rx.buf); + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_imx_bbm_rtc_alarm_set(const struct scmi_protocol_handle *ph, + u32 rtc_id, bool enable, u64 sec) +{ + struct scmi_imx_bbm_info *pi = ph->get_priv(ph); + struct scmi_imx_bbm_alarm_time *cfg; + struct scmi_xfer *t; + int ret; + + if (rtc_id >= pi->nr_rtc) + return -EINVAL; + + ret = ph->xops->xfer_get_init(ph, IMX_BBM_RTC_ALARM_SET, sizeof(*cfg), 0, &t); + if (ret) + return ret; + + cfg = t->tx.buf; + cfg->id = cpu_to_le32(rtc_id); + cfg->flags = enable ? + cpu_to_le32(SCMI_IMX_BBM_RTC_ALARM_ENABLE_FLAG) : 0; + cfg->value_low = cpu_to_le32(lower_32_bits(sec)); + cfg->value_high = cpu_to_le32(upper_32_bits(sec)); + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_imx_bbm_button_get(const struct scmi_protocol_handle *ph, u32 *state) +{ + struct scmi_xfer *t; + int ret; + + ret = ph->xops->xfer_get_init(ph, IMX_BBM_BUTTON_GET, 0, sizeof(u32), &t); + if (ret) + return ret; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) + *state = get_unaligned_le32(t->rx.buf); + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static const struct scmi_imx_bbm_proto_ops scmi_imx_bbm_proto_ops = { + .rtc_time_get = scmi_imx_bbm_rtc_time_get, + .rtc_time_set = scmi_imx_bbm_rtc_time_set, + .rtc_alarm_set = scmi_imx_bbm_rtc_alarm_set, + .button_get = scmi_imx_bbm_button_get, +}; + +static int scmi_imx_bbm_protocol_init(const struct scmi_protocol_handle *ph) +{ + u32 version; + int ret; + struct scmi_imx_bbm_info *binfo; + + ret = ph->xops->version_get(ph, &version); + if (ret) + return ret; + + dev_info(ph->dev, "NXP SM BBM Version %d.%d\n", + PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); + + binfo = devm_kzalloc(ph->dev, sizeof(*binfo), GFP_KERNEL); + if (!binfo) + return -ENOMEM; + + ret = scmi_imx_bbm_attributes_get(ph, binfo); + if (ret) + return ret; + + return ph->set_priv(ph, binfo, version); +} + +static const struct scmi_protocol scmi_imx_bbm = { + .id = SCMI_PROTOCOL_IMX_BBM, + .owner = THIS_MODULE, + .instance_init = &scmi_imx_bbm_protocol_init, + .ops = &scmi_imx_bbm_proto_ops, + .events = &scmi_imx_bbm_protocol_events, + .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION, + .vendor_id = SCMI_IMX_VENDOR, + .sub_vendor_id = SCMI_IMX_SUBVENDOR, +}; +module_scmi_protocol(scmi_imx_bbm); + +MODULE_ALIAS("scmi-protocol-" __stringify(SCMI_PROTOCOL_IMX_BBM) "-" SCMI_IMX_VENDOR); +MODULE_DESCRIPTION("i.MX SCMI BBM driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-cpu.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-cpu.c new file mode 100644 index 000000000000..66f47f5371e5 --- /dev/null +++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-cpu.c @@ -0,0 +1,276 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System control and Management Interface (SCMI) NXP CPU Protocol + * + * Copyright 2025 NXP + */ + +#include <linux/bits.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> 
+#include <linux/platform_device.h> +#include <linux/scmi_protocol.h> +#include <linux/scmi_imx_protocol.h> + +#include "../../protocols.h" +#include "../../notify.h" + +#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x10000 + +enum scmi_imx_cpu_protocol_cmd { + SCMI_IMX_CPU_ATTRIBUTES = 0x3, + SCMI_IMX_CPU_START = 0x4, + SCMI_IMX_CPU_STOP = 0x5, + SCMI_IMX_CPU_RESET_VECTOR_SET = 0x6, + SCMI_IMX_CPU_INFO_GET = 0xC, +}; + +struct scmi_imx_cpu_info { + u32 nr_cpu; +}; + +#define SCMI_IMX_CPU_NR_CPU_MASK GENMASK(15, 0) +struct scmi_msg_imx_cpu_protocol_attributes { + __le32 attributes; +}; + +struct scmi_msg_imx_cpu_attributes_out { + __le32 attributes; +#define CPU_MAX_NAME 16 + u8 name[CPU_MAX_NAME]; +}; + +struct scmi_imx_cpu_reset_vector_set_in { + __le32 cpuid; +#define CPU_VEC_FLAGS_RESUME BIT(31) +#define CPU_VEC_FLAGS_START BIT(30) +#define CPU_VEC_FLAGS_BOOT BIT(29) + __le32 flags; + __le32 resetvectorlow; + __le32 resetvectorhigh; +}; + +struct scmi_imx_cpu_info_get_out { +#define CPU_RUN_MODE_START 0 +#define CPU_RUN_MODE_HOLD 1 +#define CPU_RUN_MODE_STOP 2 +#define CPU_RUN_MODE_SLEEP 3 + __le32 runmode; + __le32 sleepmode; + __le32 resetvectorlow; + __le32 resetvectorhigh; +}; + +static int scmi_imx_cpu_validate_cpuid(const struct scmi_protocol_handle *ph, + u32 cpuid) +{ + struct scmi_imx_cpu_info *info = ph->get_priv(ph); + + if (cpuid >= info->nr_cpu) + return -EINVAL; + + return 0; +} + +static int scmi_imx_cpu_start(const struct scmi_protocol_handle *ph, + u32 cpuid, bool start) +{ + struct scmi_xfer *t; + u8 msg_id; + int ret; + + ret = scmi_imx_cpu_validate_cpuid(ph, cpuid); + if (ret) + return ret; + + if (start) + msg_id = SCMI_IMX_CPU_START; + else + msg_id = SCMI_IMX_CPU_STOP; + + ret = ph->xops->xfer_get_init(ph, msg_id, sizeof(u32), 0, &t); + if (ret) + return ret; + + put_unaligned_le32(cpuid, t->tx.buf); + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_imx_cpu_reset_vector_set(const struct scmi_protocol_handle *ph, + u32 cpuid, u64 vector, bool start, + bool boot, bool resume) +{ + struct scmi_imx_cpu_reset_vector_set_in *in; + struct scmi_xfer *t; + int ret; + + ret = scmi_imx_cpu_validate_cpuid(ph, cpuid); + if (ret) + return ret; + + ret = ph->xops->xfer_get_init(ph, SCMI_IMX_CPU_RESET_VECTOR_SET, sizeof(*in), + 0, &t); + if (ret) + return ret; + + in = t->tx.buf; + in->cpuid = cpu_to_le32(cpuid); + in->flags = cpu_to_le32(0); + if (start) + in->flags |= le32_encode_bits(1, CPU_VEC_FLAGS_START); + if (boot) + in->flags |= le32_encode_bits(1, CPU_VEC_FLAGS_BOOT); + if (resume) + in->flags |= le32_encode_bits(1, CPU_VEC_FLAGS_RESUME); + in->resetvectorlow = cpu_to_le32(lower_32_bits(vector)); + in->resetvectorhigh = cpu_to_le32(upper_32_bits(vector)); + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_imx_cpu_started(const struct scmi_protocol_handle *ph, u32 cpuid, + bool *started) +{ + struct scmi_imx_cpu_info_get_out *out; + struct scmi_xfer *t; + u32 mode; + int ret; + + if (!started) + return -EINVAL; + + *started = false; + ret = scmi_imx_cpu_validate_cpuid(ph, cpuid); + if (ret) + return ret; + + ret = ph->xops->xfer_get_init(ph, SCMI_IMX_CPU_INFO_GET, sizeof(u32), + 0, &t); + if (ret) + return ret; + + put_unaligned_le32(cpuid, t->tx.buf); + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + out = t->rx.buf; + mode = le32_to_cpu(out->runmode); + if (mode == CPU_RUN_MODE_START || mode == CPU_RUN_MODE_SLEEP) + *started = true; + } + + ph->xops->xfer_put(ph, t); + + 
return ret; +} + +static const struct scmi_imx_cpu_proto_ops scmi_imx_cpu_proto_ops = { + .cpu_reset_vector_set = scmi_imx_cpu_reset_vector_set, + .cpu_start = scmi_imx_cpu_start, + .cpu_started = scmi_imx_cpu_started, +}; + +static int scmi_imx_cpu_protocol_attributes_get(const struct scmi_protocol_handle *ph, + struct scmi_imx_cpu_info *info) +{ + struct scmi_msg_imx_cpu_protocol_attributes *attr; + struct scmi_xfer *t; + int ret; + + ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0, + sizeof(*attr), &t); + if (ret) + return ret; + + attr = t->rx.buf; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + info->nr_cpu = le32_get_bits(attr->attributes, SCMI_IMX_CPU_NR_CPU_MASK); + dev_info(ph->dev, "i.MX SM CPU: %d cpus\n", + info->nr_cpu); + } + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_imx_cpu_attributes_get(const struct scmi_protocol_handle *ph, + u32 cpuid) +{ + struct scmi_msg_imx_cpu_attributes_out *out; + char name[SCMI_SHORT_NAME_MAX_SIZE] = {'\0'}; + struct scmi_xfer *t; + int ret; + + ret = ph->xops->xfer_get_init(ph, SCMI_IMX_CPU_ATTRIBUTES, sizeof(u32), 0, &t); + if (ret) + return ret; + + put_unaligned_le32(cpuid, t->tx.buf); + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + out = t->rx.buf; + strscpy(name, out->name, SCMI_SHORT_NAME_MAX_SIZE); + dev_info(ph->dev, "i.MX CPU: name: %s\n", name); + } else { + dev_err(ph->dev, "i.MX cpu: Failed to get info of cpu(%u)\n", cpuid); + } + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_imx_cpu_protocol_init(const struct scmi_protocol_handle *ph) +{ + struct scmi_imx_cpu_info *info; + u32 version; + int ret, i; + + ret = ph->xops->version_get(ph, &version); + if (ret) + return ret; + + dev_info(ph->dev, "NXP SM CPU Protocol Version %d.%d\n", + PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); + + info = devm_kzalloc(ph->dev, sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + ret = scmi_imx_cpu_protocol_attributes_get(ph, info); + if (ret) + return ret; + + for (i = 0; i < info->nr_cpu; i++) { + ret = scmi_imx_cpu_attributes_get(ph, i); + if (ret) + return ret; + } + + return ph->set_priv(ph, info, version); +} + +static const struct scmi_protocol scmi_imx_cpu = { + .id = SCMI_PROTOCOL_IMX_CPU, + .owner = THIS_MODULE, + .instance_init = &scmi_imx_cpu_protocol_init, + .ops = &scmi_imx_cpu_proto_ops, + .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION, + .vendor_id = SCMI_IMX_VENDOR, + .sub_vendor_id = SCMI_IMX_SUBVENDOR, +}; +module_scmi_protocol(scmi_imx_cpu); + +MODULE_ALIAS("scmi-protocol-" __stringify(SCMI_PROTOCOL_IMX_CPU) "-" SCMI_IMX_VENDOR); +MODULE_DESCRIPTION("i.MX SCMI CPU driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-lmm.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-lmm.c new file mode 100644 index 000000000000..b519c67fe920 --- /dev/null +++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-lmm.c @@ -0,0 +1,263 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System control and Management Interface (SCMI) NXP LMM Protocol + * + * Copyright 2025 NXP + */ + +#include <linux/bits.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/scmi_protocol.h> +#include <linux/scmi_imx_protocol.h> + +#include "../../protocols.h" +#include "../../notify.h" + +#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x10000 + +enum scmi_imx_lmm_protocol_cmd { + SCMI_IMX_LMM_ATTRIBUTES = 0x3, + SCMI_IMX_LMM_BOOT = 0x4, + SCMI_IMX_LMM_RESET = 0x5, + 
SCMI_IMX_LMM_SHUTDOWN = 0x6, + SCMI_IMX_LMM_WAKE = 0x7, + SCMI_IMX_LMM_SUSPEND = 0x8, + SCMI_IMX_LMM_NOTIFY = 0x9, + SCMI_IMX_LMM_RESET_REASON = 0xA, + SCMI_IMX_LMM_POWER_ON = 0xB, + SCMI_IMX_LMM_RESET_VECTOR_SET = 0xC, +}; + +struct scmi_imx_lmm_priv { + u32 nr_lmm; +}; + +#define SCMI_IMX_LMM_NR_LM_MASK GENMASK(5, 0) +#define SCMI_IMX_LMM_NR_MAX 16 +struct scmi_msg_imx_lmm_protocol_attributes { + __le32 attributes; +}; + +struct scmi_msg_imx_lmm_attributes_out { + __le32 lmid; + __le32 attributes; + __le32 state; + __le32 errstatus; + u8 name[LMM_MAX_NAME]; +}; + +struct scmi_imx_lmm_reset_vector_set_in { + __le32 lmid; + __le32 cpuid; + __le32 flags; /* reserved for future extension */ + __le32 resetvectorlow; + __le32 resetvectorhigh; +}; + +struct scmi_imx_lmm_shutdown_in { + __le32 lmid; +#define SCMI_IMX_LMM_SHUTDOWN_GRACEFUL BIT(0) + __le32 flags; +}; + +static int scmi_imx_lmm_validate_lmid(const struct scmi_protocol_handle *ph, u32 lmid) +{ + struct scmi_imx_lmm_priv *priv = ph->get_priv(ph); + + if (lmid >= priv->nr_lmm) + return -EINVAL; + + return 0; +} + +static int scmi_imx_lmm_attributes(const struct scmi_protocol_handle *ph, + u32 lmid, struct scmi_imx_lmm_info *info) +{ + struct scmi_msg_imx_lmm_attributes_out *out; + struct scmi_xfer *t; + int ret; + + ret = ph->xops->xfer_get_init(ph, SCMI_IMX_LMM_ATTRIBUTES, sizeof(u32), 0, &t); + if (ret) + return ret; + + put_unaligned_le32(lmid, t->tx.buf); + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + out = t->rx.buf; + info->lmid = le32_to_cpu(out->lmid); + info->state = le32_to_cpu(out->state); + info->errstatus = le32_to_cpu(out->errstatus); + strscpy(info->name, out->name); + dev_dbg(ph->dev, "i.MX LMM: Logical Machine(%d), name: %s\n", + info->lmid, info->name); + } else { + dev_err(ph->dev, "i.MX LMM: Failed to get info of Logical Machine(%u)\n", lmid); + } + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int +scmi_imx_lmm_power_boot(const struct scmi_protocol_handle *ph, u32 lmid, bool boot) +{ + struct scmi_xfer *t; + u8 msg_id; + int ret; + + ret = scmi_imx_lmm_validate_lmid(ph, lmid); + if (ret) + return ret; + + if (boot) + msg_id = SCMI_IMX_LMM_BOOT; + else + msg_id = SCMI_IMX_LMM_POWER_ON; + + ret = ph->xops->xfer_get_init(ph, msg_id, sizeof(u32), 0, &t); + if (ret) + return ret; + + put_unaligned_le32(lmid, t->tx.buf); + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_imx_lmm_reset_vector_set(const struct scmi_protocol_handle *ph, + u32 lmid, u32 cpuid, u32 flags, u64 vector) +{ + struct scmi_imx_lmm_reset_vector_set_in *in; + struct scmi_xfer *t; + int ret; + + ret = ph->xops->xfer_get_init(ph, SCMI_IMX_LMM_RESET_VECTOR_SET, sizeof(*in), + 0, &t); + if (ret) + return ret; + + in = t->tx.buf; + in->lmid = cpu_to_le32(lmid); + in->cpuid = cpu_to_le32(cpuid); + in->flags = cpu_to_le32(0); + in->resetvectorlow = cpu_to_le32(lower_32_bits(vector)); + in->resetvectorhigh = cpu_to_le32(upper_32_bits(vector)); + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_imx_lmm_shutdown(const struct scmi_protocol_handle *ph, u32 lmid, + u32 flags) +{ + struct scmi_imx_lmm_shutdown_in *in; + struct scmi_xfer *t; + int ret; + + ret = scmi_imx_lmm_validate_lmid(ph, lmid); + if (ret) + return ret; + + ret = ph->xops->xfer_get_init(ph, SCMI_IMX_LMM_SHUTDOWN, sizeof(*in), + 0, &t); + if (ret) + return ret; + + in = t->tx.buf; + in->lmid = cpu_to_le32(lmid); + if (flags & SCMI_IMX_LMM_SHUTDOWN_GRACEFUL) + in->flags = 
cpu_to_le32(SCMI_IMX_LMM_SHUTDOWN_GRACEFUL); + else + in->flags = cpu_to_le32(0); + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static const struct scmi_imx_lmm_proto_ops scmi_imx_lmm_proto_ops = { + .lmm_power_boot = scmi_imx_lmm_power_boot, + .lmm_info = scmi_imx_lmm_attributes, + .lmm_reset_vector_set = scmi_imx_lmm_reset_vector_set, + .lmm_shutdown = scmi_imx_lmm_shutdown, +}; + +static int scmi_imx_lmm_protocol_attributes_get(const struct scmi_protocol_handle *ph, + struct scmi_imx_lmm_priv *priv) +{ + struct scmi_msg_imx_lmm_protocol_attributes *attr; + struct scmi_xfer *t; + int ret; + + ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0, + sizeof(*attr), &t); + if (ret) + return ret; + + attr = t->rx.buf; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + priv->nr_lmm = le32_get_bits(attr->attributes, SCMI_IMX_LMM_NR_LM_MASK); + if (priv->nr_lmm > SCMI_IMX_LMM_NR_MAX) { + dev_err(ph->dev, "i.MX LMM: %d:Exceed max supported Logical Machines\n", + priv->nr_lmm); + ret = -EINVAL; + } else { + dev_info(ph->dev, "i.MX LMM: %d Logical Machines\n", priv->nr_lmm); + } + } + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_imx_lmm_protocol_init(const struct scmi_protocol_handle *ph) +{ + struct scmi_imx_lmm_priv *info; + u32 version; + int ret; + + ret = ph->xops->version_get(ph, &version); + if (ret) + return ret; + + dev_info(ph->dev, "NXP SM LMM Version %d.%d\n", + PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); + + info = devm_kzalloc(ph->dev, sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + ret = scmi_imx_lmm_protocol_attributes_get(ph, info); + if (ret) + return ret; + + return ph->set_priv(ph, info, version); +} + +static const struct scmi_protocol scmi_imx_lmm = { + .id = SCMI_PROTOCOL_IMX_LMM, + .owner = THIS_MODULE, + .instance_init = &scmi_imx_lmm_protocol_init, + .ops = &scmi_imx_lmm_proto_ops, + .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION, + .vendor_id = SCMI_IMX_VENDOR, + .sub_vendor_id = SCMI_IMX_SUBVENDOR, +}; +module_scmi_protocol(scmi_imx_lmm); + +MODULE_ALIAS("scmi-protocol-" __stringify(SCMI_PROTOCOL_IMX_LMM) "-" SCMI_IMX_VENDOR); +MODULE_DESCRIPTION("i.MX SCMI LMM driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c new file mode 100644 index 000000000000..a8915d3b4df5 --- /dev/null +++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c @@ -0,0 +1,319 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System control and Management Interface (SCMI) NXP MISC Protocol + * + * Copyright 2024 NXP + */ + +#define pr_fmt(fmt) "SCMI Notifications MISC - " fmt + +#include <linux/bits.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/scmi_protocol.h> +#include <linux/scmi_imx_protocol.h> + +#include "../../protocols.h" +#include "../../notify.h" + +#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x10000 + +#define MAX_MISC_CTRL_SOURCES GENMASK(15, 0) + +enum scmi_imx_misc_protocol_cmd { + SCMI_IMX_MISC_CTRL_SET = 0x3, + SCMI_IMX_MISC_CTRL_GET = 0x4, + SCMI_IMX_MISC_CTRL_NOTIFY = 0x8, +}; + +struct scmi_imx_misc_info { + u32 version; + u32 nr_dev_ctrl; + u32 nr_brd_ctrl; + u32 nr_reason; +}; + +struct scmi_msg_imx_misc_protocol_attributes { + __le32 attributes; +}; + +#define GET_BRD_CTRLS_NR(x) le32_get_bits((x), GENMASK(31, 24)) +#define GET_REASONS_NR(x) le32_get_bits((x), GENMASK(23, 16)) +#define 
GET_DEV_CTRLS_NR(x) le32_get_bits((x), GENMASK(15, 0)) +#define BRD_CTRL_START_ID BIT(15) + +struct scmi_imx_misc_ctrl_set_in { + __le32 id; + __le32 num; + __le32 value[]; +}; + +struct scmi_imx_misc_ctrl_notify_in { + __le32 ctrl_id; + __le32 flags; +}; + +struct scmi_imx_misc_ctrl_notify_payld { + __le32 ctrl_id; + __le32 flags; +}; + +struct scmi_imx_misc_ctrl_get_out { + __le32 num; + __le32 val[]; +}; + +static int scmi_imx_misc_attributes_get(const struct scmi_protocol_handle *ph, + struct scmi_imx_misc_info *mi) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_imx_misc_protocol_attributes *attr; + + ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0, + sizeof(*attr), &t); + if (ret) + return ret; + + attr = t->rx.buf; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + mi->nr_dev_ctrl = GET_DEV_CTRLS_NR(attr->attributes); + mi->nr_brd_ctrl = GET_BRD_CTRLS_NR(attr->attributes); + mi->nr_reason = GET_REASONS_NR(attr->attributes); + dev_info(ph->dev, "i.MX MISC NUM DEV CTRL: %d, NUM BRD CTRL: %d,NUM Reason: %d\n", + mi->nr_dev_ctrl, mi->nr_brd_ctrl, mi->nr_reason); + } + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_imx_misc_ctrl_validate_id(const struct scmi_protocol_handle *ph, + u32 ctrl_id) +{ + struct scmi_imx_misc_info *mi = ph->get_priv(ph); + + /* + * [0, BRD_CTRL_START_ID) is for Dev Ctrl which is SOC related + * [BRD_CTRL_START_ID, 0xffff) is for Board Ctrl which is board related + */ + if (ctrl_id < BRD_CTRL_START_ID && ctrl_id > mi->nr_dev_ctrl) + return -EINVAL; + if (ctrl_id >= BRD_CTRL_START_ID + mi->nr_brd_ctrl) + return -EINVAL; + + return 0; +} + +static int scmi_imx_misc_ctrl_notify(const struct scmi_protocol_handle *ph, + u32 ctrl_id, u32 evt_id, u32 flags) +{ + struct scmi_imx_misc_ctrl_notify_in *in; + struct scmi_xfer *t; + int ret; + + ret = scmi_imx_misc_ctrl_validate_id(ph, ctrl_id); + if (ret) + return ret; + + ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_CTRL_NOTIFY, + sizeof(*in), 0, &t); + if (ret) + return ret; + + in = t->tx.buf; + in->ctrl_id = cpu_to_le32(ctrl_id); + in->flags = cpu_to_le32(flags); + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int +scmi_imx_misc_ctrl_set_notify_enabled(const struct scmi_protocol_handle *ph, + u8 evt_id, u32 src_id, bool enable) +{ + int ret; + + /* misc_ctrl_req_notify is for enablement */ + if (enable) + return 0; + + ret = scmi_imx_misc_ctrl_notify(ph, src_id, evt_id, 0); + if (ret) + dev_err(ph->dev, "FAIL_ENABLED - evt[%X] src[%d] - ret:%d\n", + evt_id, src_id, ret); + + return ret; +} + +static void * +scmi_imx_misc_ctrl_fill_custom_report(const struct scmi_protocol_handle *ph, + u8 evt_id, ktime_t timestamp, + const void *payld, size_t payld_sz, + void *report, u32 *src_id) +{ + const struct scmi_imx_misc_ctrl_notify_payld *p = payld; + struct scmi_imx_misc_ctrl_notify_report *r = report; + + if (sizeof(*p) != payld_sz) + return NULL; + + r->timestamp = timestamp; + r->ctrl_id = le32_to_cpu(p->ctrl_id); + r->flags = le32_to_cpu(p->flags); + if (src_id) + *src_id = r->ctrl_id; + dev_dbg(ph->dev, "%s: ctrl_id: %d flags: %d\n", __func__, + r->ctrl_id, r->flags); + + return r; +} + +static const struct scmi_event_ops scmi_imx_misc_event_ops = { + .set_notify_enabled = scmi_imx_misc_ctrl_set_notify_enabled, + .fill_custom_report = scmi_imx_misc_ctrl_fill_custom_report, +}; + +static const struct scmi_event scmi_imx_misc_events[] = { + { + .id = SCMI_EVENT_IMX_MISC_CONTROL, + .max_payld_sz = sizeof(struct 
scmi_imx_misc_ctrl_notify_payld), + .max_report_sz = sizeof(struct scmi_imx_misc_ctrl_notify_report), + }, +}; + +static struct scmi_protocol_events scmi_imx_misc_protocol_events = { + .queue_sz = SCMI_PROTO_QUEUE_SZ, + .ops = &scmi_imx_misc_event_ops, + .evts = scmi_imx_misc_events, + .num_events = ARRAY_SIZE(scmi_imx_misc_events), + .num_sources = MAX_MISC_CTRL_SOURCES, +}; + +static int scmi_imx_misc_ctrl_get(const struct scmi_protocol_handle *ph, + u32 ctrl_id, u32 *num, u32 *val) +{ + struct scmi_imx_misc_ctrl_get_out *out; + struct scmi_xfer *t; + int ret, i; + int max_msg_size = ph->hops->get_max_msg_size(ph); + int max_num = (max_msg_size - sizeof(*out)) / sizeof(__le32); + + ret = scmi_imx_misc_ctrl_validate_id(ph, ctrl_id); + if (ret) + return ret; + + ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_CTRL_GET, sizeof(u32), + 0, &t); + if (ret) + return ret; + + put_unaligned_le32(ctrl_id, t->tx.buf); + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + out = t->rx.buf; + *num = le32_to_cpu(out->num); + + if (*num >= max_num || + *num * sizeof(__le32) > t->rx.len - sizeof(__le32)) { + ph->xops->xfer_put(ph, t); + return -EINVAL; + } + + for (i = 0; i < *num; i++) + val[i] = le32_to_cpu(out->val[i]); + } + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_imx_misc_ctrl_set(const struct scmi_protocol_handle *ph, + u32 ctrl_id, u32 num, u32 *val) +{ + struct scmi_imx_misc_ctrl_set_in *in; + struct scmi_xfer *t; + int ret, i; + int max_msg_size = ph->hops->get_max_msg_size(ph); + int max_num = (max_msg_size - sizeof(*in)) / sizeof(__le32); + + ret = scmi_imx_misc_ctrl_validate_id(ph, ctrl_id); + if (ret) + return ret; + + if (num > max_num) + return -EINVAL; + + ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_CTRL_SET, + sizeof(*in) + num * sizeof(__le32), 0, &t); + if (ret) + return ret; + + in = t->tx.buf; + in->id = cpu_to_le32(ctrl_id); + in->num = cpu_to_le32(num); + for (i = 0; i < num; i++) + in->value[i] = cpu_to_le32(val[i]); + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static const struct scmi_imx_misc_proto_ops scmi_imx_misc_proto_ops = { + .misc_ctrl_set = scmi_imx_misc_ctrl_set, + .misc_ctrl_get = scmi_imx_misc_ctrl_get, + .misc_ctrl_req_notify = scmi_imx_misc_ctrl_notify, +}; + +static int scmi_imx_misc_protocol_init(const struct scmi_protocol_handle *ph) +{ + struct scmi_imx_misc_info *minfo; + u32 version; + int ret; + + ret = ph->xops->version_get(ph, &version); + if (ret) + return ret; + + dev_info(ph->dev, "NXP SM MISC Version %d.%d\n", + PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); + + minfo = devm_kzalloc(ph->dev, sizeof(*minfo), GFP_KERNEL); + if (!minfo) + return -ENOMEM; + + ret = scmi_imx_misc_attributes_get(ph, minfo); + if (ret) + return ret; + + return ph->set_priv(ph, minfo, version); +} + +static const struct scmi_protocol scmi_imx_misc = { + .id = SCMI_PROTOCOL_IMX_MISC, + .owner = THIS_MODULE, + .instance_init = &scmi_imx_misc_protocol_init, + .ops = &scmi_imx_misc_proto_ops, + .events = &scmi_imx_misc_protocol_events, + .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION, + .vendor_id = SCMI_IMX_VENDOR, + .sub_vendor_id = SCMI_IMX_SUBVENDOR, +}; +module_scmi_protocol(scmi_imx_misc); + +MODULE_ALIAS("scmi-protocol-" __stringify(SCMI_PROTOCOL_IMX_MISC) "-" SCMI_IMX_VENDOR); +MODULE_DESCRIPTION("i.MX SCMI MISC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx95.rst b/drivers/firmware/arm_scmi/vendors/imx/imx95.rst new file mode 100644 index 
000000000000..4e246a78a042 --- /dev/null +++ b/drivers/firmware/arm_scmi/vendors/imx/imx95.rst @@ -0,0 +1,1714 @@ +.. SPDX-License-Identifier: GPL-2.0 +.. include:: <isonum.txt> + +=============================================================================== +i.MX95 System Control and Management Interface(SCMI) Vendor Protocols Extension +=============================================================================== + +:Copyright: |copy| 2024 NXP + +:Author: Peng Fan <peng.fan@nxp.com> + +The System Manager (SM) is a low-level system function which runs on a System +Control Processor (SCP) to support isolation and management of power domains, +clocks, resets, sensors, pins, etc. on complex application processors. It often +runs on a Cortex-M processor and provides an abstraction to many of the +underlying features of the hardware. The primary purpose of the SM is to allow +isolation between software running on different cores in the SoC. It does this +by having exclusive access to critical resources such as those controlling +power, clocks, reset, PMIC, etc. and then providing an RPC interface to those +clients. This allows the SM to provide access control, arbitration, and +aggregation policies for those shared critical resources. + +SM introduces the concept of a Logical Machine (LM), which is analogous to a VM; each has +its own instance of SCMI. All normal SCMI calls only apply to that LM. That +includes boot, shutdown, reset, suspend, wake, etc. Each LM (e.g. A55 and M7) +is completely isolated from the others, and each LM has its own communication +channels talking to the same SCMI server. + +This document covers all the information necessary to understand, maintain, +port, and deploy the SM on supported processors. + +The SM implements an interface compliant with the Arm SCMI Specification +with additional vendor-specific extensions. + +System Control and Management Logical Machine Management Vendor Protocol +======================================================================== + +The SM adds the concept of logical machines (LMs). These are analogous to +VMs and each has its own instance of SCMI. All normal SCMI calls only apply +to the LM running the calling agent. That includes boot, shutdown, reset, +suspend, wake, etc. If a caller makes the SCMI base call to get a list +of agents, it will only get those on that LM. Each LM is completely isolated +from the others. This is mandatory for these to operate independently. + +This protocol is intended to support boot, shutdown, and reset of other logical +machines (LM). It is usually used to allow one LM (e.g. OSPM) to manage +another LM, which is usually an offload or accelerator engine. Notifications +from this protocol can also be used to manage a communication link to another +LM. The LMM protocol provides commands to: + +- Describe the protocol version. +- Discover implementation attributes. +- Discover all the LMs defined in the system. +- Boot a target LM. +- Shut down a target LM (gracefully or forcibly). +- Reset a target LM (gracefully or forcibly). +- Wake a target LM from suspend. +- Suspend a target LM (gracefully). +- Read boot/shutdown/reset information for a target LM. +- Get notifications when a target LM boots or shuts down (e.g. if LM 'X' requested + notification of LM 'Y' booting or shutting down, the SCMI firmware will send a + notification to LM 'X' when LM 'Y' boots or shuts down). + +'Graceful' means asking the LM itself to shut down/reset/etc. (e.g. sending a +notification to Linux, which then reboots or powers itself down).
It is an asynchronous +command: SUCCESS only means the command itself returned successfully, not that +the requested reboot/reset has finished. + +'Forceful' means the SM itself forces the shutdown/reset/etc. of the LM. It is a synchronous +command: SUCCESS means the LM has actually been +shut down/reset/etc. +Commands that have no Graceful/Forceful flag, such as WAKE and SUSPEND, +are graceful. + +Commands: +_________ + +PROTOCOL_VERSION +~~~~~~~~~~~~~~~~ + +message_id: 0x0 +protocol_id: 0x80 +This command is mandatory. + ++---------------+--------------------------------------------------------------+ +|Return values | ++---------------+--------------------------------------------------------------+ +|Name |Description | ++---------------+--------------------------------------------------------------+ +|int32 status | See ARM SCMI Specification for status code definitions. | ++---------------+--------------------------------------------------------------+ +|uint32 version | For this revision of the specification, this value must be | +| | 0x10000. | ++---------------+--------------------------------------------------------------+ + +PROTOCOL_ATTRIBUTES +~~~~~~~~~~~~~~~~~~~ + +message_id: 0x1 +protocol_id: 0x80 +This command is mandatory. + ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status | See ARM SCMI Specification for status code definitions. | ++------------------+-----------------------------------------------------------+ +|uint32 attributes |Protocol attributes: | +| |Bits[31:5] Reserved, must be zero. | +| |Bits[4:0] Number of Logical Machines | +| |Note that due to both hardware limitations and reset reason| +| |field limitations, the max number of LM is 16. The minimum | +| |is 1. | ++------------------+-----------------------------------------------------------+ + +PROTOCOL_MESSAGE_ATTRIBUTES +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +message_id: 0x2 +protocol_id: 0x80 +This command is mandatory. + ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: in case the message is implemented and available | +| |to use. | +| |NOT_FOUND: if the message identified by message_id is | +| |invalid or not implemented | ++------------------+-----------------------------------------------------------+ +|uint32 attributes |Flags that are associated with a specific command in the | +| |protocol. For all commands in this protocol, this | +| |parameter has a value of 0 | ++------------------+-----------------------------------------------------------+ + +LMM_ATTRIBUTES +~~~~~~~~~~~~~~ + +message_id: 0x3 +protocol_id: 0x80 +This command is mandatory.
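As a rough kernel-side illustration (hypothetical client code, not part of this patch), this command is reached through the lmm_info op registered by imx-sm-lmm.c earlier in this diff; a minimal sketch, assuming the protocol handle is obtained the usual SCMI way::

    /* Hypothetical SCMI client; only the ops/struct names come from the patch. */
    #include <linux/err.h>
    #include <linux/scmi_protocol.h>
    #include <linux/scmi_imx_protocol.h>

    static int example_lmm_probe(struct scmi_device *sdev)
    {
            const struct scmi_imx_lmm_proto_ops *ops;
            struct scmi_protocol_handle *ph;
            struct scmi_imx_lmm_info info;
            int ret;

            ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_IMX_LMM, &ph);
            if (IS_ERR(ops))
                    return PTR_ERR(ops);

            /* Issues LMM_ATTRIBUTES (message_id 0x3) for logical machine 1. */
            ret = ops->lmm_info(ph, 1, &info);
            if (ret)
                    return ret;

            dev_info(&sdev->dev, "LM%u (%s) state %u\n",
                     info.lmid, info.name, info.state);
            return 0;
    }
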
+ ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 lmid |ID of the Logical Machine | ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: if valid attributes are returned. | +| |NOT_FOUND: if lmid not points to a valid logical machine. | +| |DENIED: if the agent does not have permission to get info | +| |for the LM specified by lmid. | ++------------------+-----------------------------------------------------------+ +|uint32 lmid |Identifier of the LM whose identification is requested. | +| |This field is: Populated with the lmid of the calling | +| |agent, when the lmid parameter passed via the command is | +| |0xFFFFFFFF. Identical to the lmid field passed via the | +| |calling parameters, in all other cases | ++------------------+-----------------------------------------------------------+ +|uint32 attributes | Bits[31:0] reserved. must be zero | ++------------------+-----------------------------------------------------------+ +|uint32 state | Current state of the LM | ++------------------+-----------------------------------------------------------+ +|uint32 errStatus | Last error status recorded | ++------------------+-----------------------------------------------------------+ +|char name[16] | A NULL terminated ASCII string with the LM name, of up | +| | to 16 bytes | ++------------------+-----------------------------------------------------------+ + +LMM_BOOT +~~~~~~~~ + +message_id: 0x4 +protocol_id: 0x80 +This command is mandatory. + ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 lmid |ID of the Logical Machine | ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: if LM boots successfully started. | +| |NOT_FOUND: if lmid not points to a valid logical machine. | +| |INVALID_PARAMETERS: if lmid is same as the caller. | +| |DENIED: if the agent does not have permission to manage the| +| |the LM specified by lmid. | ++------------------+-----------------------------------------------------------+ + +LMM_RESET +~~~~~~~~~ + +message_id: 0x5 +protocol_id: 0x80 +This command is mandatory. + ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 lmid |ID of the Logical Machine | ++------------------+-----------------------------------------------------------+ +|uint32 flags |Reset flags: | +| |Bits[31:1] Reserved, must be zero. 
| +| |Bit[0] Graceful request: | +| |Set to 1 if the request is a graceful request. | +| |Set to 0 if the request is a forceful request. | ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: The LMM RESET command finished successfully in | +| |graceful reset or LM successfully resets in forceful reset.| +| |NOT_FOUND: if lmid not points to a valid logical machine. | +| |INVALID_PARAMETERS: if lmid is same as the caller. | +| |DENIED: if the agent does not have permission to manage the| +| |the LM specified by lmid. | ++------------------+-----------------------------------------------------------+ + +LMM_SHUTDOWN +~~~~~~~~~~~~ + +message_id: 0x6 +protocol_id: 0x80 +This command is mandatory. + ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 lmid |ID of the Logical Machine | ++------------------+-----------------------------------------------------------+ +|uint32 flags |Reset flags: | +| |Bits[31:1] Reserved, must be zero. | +| |Bit[0] Graceful request: | +| |Set to 1 if the request is a graceful request. | +| |Set to 0 if the request is a forceful request. | ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: The LMM shutdown command finished successfully in | +| |graceful request or LM successfully shutdown in forceful | +| |request. | +| |NOT_FOUND: if lmid not points to a valid logical machine. | +| |INVALID_PARAMETERS: if lmid is same as the caller. | +| |DENIED: if the agent does not have permission to manage the| +| |the LM specified by lmid. | ++------------------+-----------------------------------------------------------+ + +LMM_WAKE +~~~~~~~~ + +message_id: 0x7 +protocol_id: 0x80 +This command is mandatory. + ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 lmid |ID of the Logical Machine | ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: if LM wake command successfully returns. | +| |NOT_FOUND: if lmid not points to a valid logical machine. | +| |INVALID_PARAMETERS: if lmid is same as the caller. | +| |DENIED: if the agent does not have permission to manage the| +| |the LM specified by lmid. | ++------------------+-----------------------------------------------------------+ + +LMM_SUSPEND +~~~~~~~~~~~ + +message_id: 0x8 +protocol_id: 0x80 +This command is mandatory. 
+ ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 lmid |ID of the Logical Machine | ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: if LM suspend command successfully returns. | +| |NOT_FOUND: if lmid not points to a valid logical machine. | +| |INVALID_PARAMETERS: if lmid is same as the caller. | +| |DENIED: if the agent does not have permission to manage the| +| |the LM specified by lmid. | ++------------------+-----------------------------------------------------------+ + +LMM_NOTIFY +~~~~~~~~~~ + +message_id: 0x9 +protocol_id: 0x80 +This command is mandatory. + ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 lmid |ID of the Logical Machine | ++------------------+-----------------------------------------------------------+ +|uint32 flags |Notification flags: | +| |Bits[31:3] Reserved, must be zero. | +| |Bit[3] Wake (resume) notification: | +| |Set to 1 to send notification. | +| |Set to 0 if no notification. | +| |Bit[2] Suspend (sleep) notification: | +| |Set to 1 to send notification. | +| |Set to 0 if no notification. | +| |Bit[1] Shutdown (off) notification: | +| |Set to 1 to send notification. | +| |Set to 0 if no notification. | +| |Bit[0] Boot (on) notification: | +| |Set to 1 to send notification. | +| |Set to 0 if no notification | ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: if the notification state successfully updated. | +| |NOT_FOUND: if lmid not points to a valid logical machine. | +| |INVALID_PARAMETERS: if input attributes flag specifies | +| |unsupported or invalid configurations. | +| |DENIED: if the agent does not have permission to request | +| |the notification. | ++------------------+-----------------------------------------------------------+ + +LMM_RESET_REASON +~~~~~~~~~~~~~~~~ + +message_id: 0xA +protocol_id: 0x80 +This command is mandatory. + +This command is to return the reset reason that caused the last reset, such as +POR, WDOG, JTAG and etc. 
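The bootflags and shutdownflags words returned by this command are bit-packed as described in the table below; a minimal decoding sketch (the macro and function names here are illustrative only, not part of this patch)::

    #include <linux/bitfield.h>
    #include <linux/bits.h>
    #include <linux/printk.h>

    /* Field layout taken from the LMM_RESET_REASON description below. */
    #define LMM_REASON_VALID         BIT(31)
    #define LMM_REASON_ORIGIN_VALID  BIT(28)
    #define LMM_REASON_ORIGIN        GENMASK(27, 24)
    #define LMM_REASON_ERR_VALID     BIT(23)
    #define LMM_REASON_ERR_ID        GENMASK(22, 8)
    #define LMM_REASON_CODE          GENMASK(7, 0)

    static void example_decode_reason(u32 flags)
    {
            if (!(flags & LMM_REASON_VALID))
                    return;

            pr_info("reason %lu (maps to BIT(reason) in SRESR)\n",
                    FIELD_GET(LMM_REASON_CODE, flags));
            if (flags & LMM_REASON_ORIGIN_VALID)
                    pr_info("origin LM %lu\n", FIELD_GET(LMM_REASON_ORIGIN, flags));
            if (flags & LMM_REASON_ERR_VALID)
                    pr_info("error ID (agent) %lu\n",
                            FIELD_GET(LMM_REASON_ERR_ID, flags));
    }
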
+ ++---------------------+--------------------------------------------------------+ +|Parameters | ++---------------------+--------------------------------------------------------+ +|Name |Description | ++---------------------+--------------------------------------------------------+ +|uint32 lmid |ID of the Logical Machine | ++---------------------+--------------------------------------------------------+ +|Return values | ++---------------------+--------------------------------------------------------+ +|Name |Description | ++---------------------+--------------------------------------------------------+ +|int32 status |SUCCESS: if the reset reason of the LM successfully | +| |updated. | +| |NOT_FOUND: if lmid not points to a valid logical machine| +| |DENIED: if the agent does not have permission to request| +| |the reset reason. | ++---------------------+--------------------------------------------------------+ +|uint32 bootflags |Boot reason flags. This parameter has the format: | +| |Bits[31] Valid. | +| |Set to 1 if the entire reason is valid. | +| |Set to 0 if the entire reason is not valid. | +| |Bits[30:29] Reserved, must be zero. | +| |Bit[28] Valid origin: | +| |Set to 1 if the origin field is valid. | +| |Set to 0 if the origin field is not valid. | +| |Bits[27:24] Origin. | +| |Logical Machine(LM) ID that causes the BOOT of this LM | +| |Bit[23] Valid err ID: | +| |Set to 1 if the error ID field is valid. | +| |Set to 0 if the error ID field is not valid. | +| |Bits[22:8] Error ID(Agent ID of the system). | +| |Bit[7:0] Reason(WDOG, POR, FCCU and etc): | +| |See the SRESR register description in the System | +| |Reset Controller (SRC) section in SoC reference mannual | +| |One reason maps to BIT(reason) in SRESR | ++---------------------+--------------------------------------------------------+ +|uint32 shutdownflags |Shutdown reason flags. This parameter has the format: | +| |Bits[31] Valid. | +| |Set to 1 if the entire reason is valid. | +| |Set to 0 if the entire reason is not valid. | +| |Bits[30:29] Number of valid extended info words. | +| |Bit[28] Valid origin: | +| |Set to 1 if the origin field is valid. | +| |Set to 0 if the origin field is not valid. | +| |Bits[27:24] Origin. | +| |Logical Machine(LM) ID that causes the BOOT of this LM | +| |Bit[23] Valid err ID: | +| |Set to 1 if the error ID field is valid. | +| |Set to 0 if the error ID field is not valid. | +| |Bits[22:8] Error ID(Agent ID of the System). | +| |Bit[7:0] Reason | +| |See the SRESR register description in the System | +| |Reset Controller (SRC) section in SoC reference mannual | +| |One reason maps to BIT(reason) in SRESR | ++---------------------+--------------------------------------------------------+ +|uint32 extinfo[3] |Array of extended info words(e.g. fault pc) | ++---------------------+--------------------------------------------------------+ + +LMM_POWER_ON +~~~~~~~~~~~~ + +message_id: 0xB +protocol_id: 0x80 +This command is mandatory. 
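In imx-sm-lmm.c earlier in this diff, LMM_POWER_ON and LMM_BOOT share the single lmm_power_boot op, selected by its boolean argument. A hedged usage sketch (the wrapper name is hypothetical, and power-on followed by boot is only one plausible sequence)::

    /* Illustrative only; ops and ph come from the LMM protocol as shown above. */
    static int example_power_then_boot(const struct scmi_imx_lmm_proto_ops *ops,
                                       struct scmi_protocol_handle *ph, u32 lmid)
    {
            int ret;

            /* boot == false issues LMM_POWER_ON. */
            ret = ops->lmm_power_boot(ph, lmid, false);
            if (ret)
                    return ret;

            /* boot == true issues LMM_BOOT. */
            return ops->lmm_power_boot(ph, lmid, true);
    }
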
+ ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 lmid |ID of the Logical Machine | ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: if LM successfully powers on. | +| |NOT_FOUND: if lmid not points to a valid logical machine. | +| |INVALID_PARAMETERS: if lmid is same as the caller. | +| |DENIED: if the agent does not have permission to manage the| +| |the LM specified by lmid. | ++------------------+-----------------------------------------------------------+ + +LMM_RESET_VECTOR_SET +~~~~~~~~~~~~~~~~~~~~ + +message_id: 0xC +protocol_id: 0x80 +This command is mandatory. + ++-----------------------+------------------------------------------------------+ +|Parameters | ++-----------------------+------------------------------------------------------+ +|Name |Description | ++-----------------------+------------------------------------------------------+ +|uint32 lmid |ID of the Logical Machine | ++-----------------------+------------------------------------------------------+ +|uint32 cpuid |ID of the CPU inside the LM | ++-----------------------+------------------------------------------------------+ +|uint32 flags |Reset vector flags | +| |Bits[31:0] Reserved, must be zero. | ++-----------------------+------------------------------------------------------+ +|uint32 resetVectorLow |Lower vector | ++-----------------------+------------------------------------------------------+ +|uint32 resetVectorHigh |Higher vector | ++-----------------------+------------------------------------------------------+ +|Return values | ++-----------------------+------------------------------------------------------+ +|Name |Description | ++-----------------------+------------------------------------------------------+ +|int32 status |SUCCESS: If reset vector is set successfully. | +| |NOT_FOUND: if lmid not points to a valid logical | +| |machine, or cpuId is not valid. | +| |INVALID_PARAMETERS: if reset vector is invalid. | +| |DENIED: if the agent does not have permission to set | +| |the reset vector for the CPU in the LM. | ++-----------------------+------------------------------------------------------+ + +NEGOTIATE_PROTOCOL_VERSION +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +message_id: 0x10 +protocol_id: 0x80 +This command is mandatory. + ++--------------------+---------------------------------------------------------+ +|Parameters | ++--------------------+---------------------------------------------------------+ +|Name |Description | ++--------------------+---------------------------------------------------------+ +|uint32 version |The negotiated protocol version the agent intends to use | ++--------------------+---------------------------------------------------------+ +|Return values | ++--------------------+---------------------------------------------------------+ +|Name |Description | ++--------------------+---------------------------------------------------------+ +|int32 status |SUCCESS: if the negotiated protocol version is supported | +| |by the platform. 
All commands, responses, and | +| |notifications post successful return of this command must| +| |comply with the negotiated version. | +| |NOT_SUPPORTED: if the protocol version is not supported. | ++--------------------+---------------------------------------------------------+ + +Notifications +_____________ + +LMM_EVENT +~~~~~~~~~ + +message_id: 0x0 +protocol_id: 0x80 + ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 lmid |Identifier for the LM that caused the transition. | ++------------------+-----------------------------------------------------------+ +|uint32 eventlm |Identifier of the LM this event refers to. | ++------------------+-----------------------------------------------------------+ +|uint32 flags |LM events: | +| |Bits[31:3] Reserved, must be zero. | +| |Bit[3] Wake (resume) event: | +| |1 LM has awakened. | +| |0 not a wake event. | +| |Bit[2] Suspend (sleep) event: | +| |1 LM has suspended. | +| |0 not a suspend event. | +| |Bit[1] Shutdown (off) event: | +| |1 LM has shutdown. | +| |0 not a shutdown event. | +| |Bit[0] Boot (on) event: | +| |1 LM has booted. | +| |0 not a boot event. | ++------------------+-----------------------------------------------------------+ + +SCMI_BBM: System Control and Management BBM Vendor Protocol +============================================================== + +This protocol is intended provide access to the battery-backed module. This +contains persistent storage (GPR), an RTC, and the ON/OFF button. The protocol +can also provide access to similar functions implemented via external board +components. The BBM protocol provides functions to: + +- Describe the protocol version. +- Discover implementation attributes. +- Read/write GPR +- Discover the RTCs available in the system. +- Read/write the RTC time in seconds and ticks +- Set an alarm (per LM) in seconds +- Get notifications on RTC update, alarm, or rollover. +- Get notification on ON/OFF button activity. + +For most SoC, there is one on-chip RTC (e.g. in BBNSM) and this is RTC ID 0. +Board code can add additional GPR and RTC. + +GPR are not aggregated. The RTC time is also not aggregated. Setting these +sets for all so normally exclusive access would be granted to one agent for +each. However, RTC alarms are maintained for each LM and the hardware is +programmed with the next nearest alarm time. So only one agent in an LM should +be given access rights to set an RTC alarm. + +Commands: +_________ + +PROTOCOL_VERSION +~~~~~~~~~~~~~~~~ + +message_id: 0x0 +protocol_id: 0x81 + ++---------------+--------------------------------------------------------------+ +|Return values | ++---------------+--------------------------------------------------------------+ +|Name |Description | ++---------------+--------------------------------------------------------------+ +|int32 status | See ARM SCMI Specification for status code definitions. | ++---------------+--------------------------------------------------------------+ +|uint32 version | For this revision of the specification, this value must be | +| | 0x10000. 
| ++---------------+--------------------------------------------------------------+ + +PROTOCOL_ATTRIBUTES +~~~~~~~~~~~~~~~~~~~ + +message_id: 0x1 +protocol_id: 0x81 + ++---------------+--------------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status | See ARM SCMI Specification for status code definitions. | ++------------------+-----------------------------------------------------------+ +|uint32 attributes | Bits[31:8] Number of RTCs. | +| | Bits[15:0] Number of persistent storage (GPR) words. | ++------------------+-----------------------------------------------------------+ + +PROTOCOL_MESSAGE_ATTRIBUTES +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +message_id: 0x2 +protocol_id: 0x81 + ++---------------+--------------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: in case the message is implemented and available | +| |to use. | +| |NOT_FOUND: if the message identified by message_id is | +| |invalid or not implemented | ++------------------+-----------------------------------------------------------+ +|uint32 attributes |Flags that are associated with a specific function in the | +| |protocol. For all functions in this protocol, this | +| |parameter has a value of 0 | ++------------------+-----------------------------------------------------------+ + +BBM_GPR_SET +~~~~~~~~~~~ + +message_id: 0x3 +protocol_id: 0x81 + ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 index |Index of GPR to write | ++------------------+-----------------------------------------------------------+ +|uint32 value |32-bit value to write to the GPR | ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: if the GPR was successfully written. | +| |NOT_FOUND: if the index is not valid. | +| |DENIED: if the agent does not have permission to write | +| |the specified GPR | ++------------------+-----------------------------------------------------------+ + +BBM_GPR_GET +~~~~~~~~~~~ + +message_id: 0x4 +protocol_id: 0x81 + ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 index |Index of GPR to read | ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: if the GPR was successfully read. 
| +| |NOT_FOUND: if the index is not valid. | +| |DENIED: if the agent does not have permission to read | +| |the specified GPR. | ++------------------+-----------------------------------------------------------+ +|uint32 value |32-bit value read from the GPR | ++------------------+-----------------------------------------------------------+ + +BBM_RTC_ATTRIBUTES +~~~~~~~~~~~~~~~~~~ + +message_id: 0x5 +protocol_id: 0x81 + ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 index |Index of RTC | ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: returned the attributes. | +| |NOT_FOUND: Index is invalid. | ++------------------+-----------------------------------------------------------+ +|uint32 attributes |Bit[31:24] Bit width of RTC seconds. | +| |Bit[23:16] Bit width of RTC ticks. | +| |Bits[15:0] RTC ticks per second | ++------------------+-----------------------------------------------------------+ +|uint8 name[16] |Null-terminated ASCII string of up to 16 bytes in length | +| |describing the RTC name | ++------------------+-----------------------------------------------------------+ + +BBM_RTC_TIME_SET +~~~~~~~~~~~~~~~~ + +message_id: 0x6 +protocol_id: 0x81 + ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 index |Index of RTC | ++------------------+-----------------------------------------------------------+ +|uint32 flags |Bits[31:1] Reserved, must be zero. | +| |Bit[0] RTC time format: | +| |Set to 1 if the time is in ticks. | +| |Set to 0 if the time is in seconds | ++------------------+-----------------------------------------------------------+ +|uint32 time[2] |Lower word: Lower 32 bits of the time in seconds/ticks. | +| |Upper word: Upper 32 bits of the time in seconds/ticks. | ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: RTC time was successfully set. | +| |NOT_FOUND: rtcId pertains to a non-existent RTC. | +| |INVALID_PARAMETERS: time is not valid | +| |(beyond the range of the RTC). | +| |DENIED: the agent does not have permission to set the RTC. 
| ++------------------+-----------------------------------------------------------+ + +BBM_RTC_TIME_GET +~~~~~~~~~~~~~~~~ + +message_id: 0x7 +protocol_id: 0x81 + ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 index |Index of RTC | ++------------------+-----------------------------------------------------------+ +|uint32 flags |Bits[31:1] Reserved, must be zero. | +| |Bit[0] RTC time format: | +| |Set to 1 if the time is in ticks. | +| |Set to 0 if the time is in seconds | ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: RTC time was successfully get. | +| |NOT_FOUND: rtcId pertains to a non-existent RTC. | ++------------------+-----------------------------------------------------------+ +|uint32 time[2] |Lower word: Lower 32 bits of the time in seconds/ticks. | +| |Upper word: Upper 32 bits of the time in seconds/ticks. | ++------------------+-----------------------------------------------------------+ + +BBM_RTC_ALARM_SET +~~~~~~~~~~~~~~~~~ + +message_id: 0x8 +protocol_id: 0x81 + ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 index |Index of RTC | ++------------------+-----------------------------------------------------------+ +|uint32 flags |Bits[31:1] Reserved, must be zero. | +| |Bit[0] RTC enable flag: | +| |Set to 1 if the RTC alarm should be enabled. | +| |Set to 0 if the RTC alarm should be disabled | ++------------------+-----------------------------------------------------------+ +|uint32 time[2] |Lower word: Lower 32 bits of the time in seconds. | +| |Upper word: Upper 32 bits of the time in seconds. | ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: RTC time was successfully set. | +| |NOT_FOUND: rtcId pertains to a non-existent RTC. | +| |INVALID_PARAMETERS: time is not valid | +| |(beyond the range of the RTC). | +| |DENIED: the agent does not have permission to set the RTC | +| |alarm | ++------------------+-----------------------------------------------------------+ + +BBM_BUTTON_GET +~~~~~~~~~~~~~~ + +message_id: 0x9 +protocol_id: 0x81 + ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: if the button status was read. | +| |Other value: ARM SCMI Specification status code definitions| ++------------------+-----------------------------------------------------------+ +|uint32 state |State of the ON/OFF button. 
1: ON, 0: OFF | ++------------------+-----------------------------------------------------------+ + +BBM_RTC_NOTIFY +~~~~~~~~~~~~~~ + +message_id: 0xA +protocol_id: 0x81 + ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 index |Index of RTC | ++------------------+-----------------------------------------------------------+ +|uint32 flags |Notification flags | +| |Bits[31:3] Reserved, must be zero. | +| |Bit[2] Update enable: | +| |Set to 1 to send notification. | +| |Set to 0 if no notification. | +| |Bit[1] Rollover enable: | +| |Set to 1 to send notification. | +| |Set to 0 if no notification. | +| |Bit[0] Alarm enable: | +| |Set to 1 to send notification. | +| |Set to 0 if no notification | ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: notification configuration was successfully | +| |updated. | +| |NOT_FOUND: rtcId pertains to a non-existent RTC. | +| |DENIED: the agent does not have permission to request RTC | +| |notifications. | ++------------------+-----------------------------------------------------------+ + +BBM_BUTTON_NOTIFY +~~~~~~~~~~~~~~~~~ + +message_id: 0xB +protocol_id: 0x81 + ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 flags |Notification flags | +| |Bits[31:1] Reserved, must be zero. | +| |Bit[0] Enable button: | +| |Set to 1 to send notification. | +| |Set to 0 if no notification | ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: notification configuration was successfully | +| |updated. | +| |DENIED: the agent does not have permission to request | +| |button notifications. | ++------------------+-----------------------------------------------------------+ + +NEGOTIATE_PROTOCOL_VERSION +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +message_id: 0x10 +protocol_id: 0x81 + ++--------------------+---------------------------------------------------------+ +|Parameters | ++--------------------+---------------------------------------------------------+ +|Name |Description | ++--------------------+---------------------------------------------------------+ +|uint32 version |The negotiated protocol version the agent intends to use | ++--------------------+---------------------------------------------------------+ +|Return values | ++--------------------+---------------------------------------------------------+ +|Name |Description | ++--------------------+---------------------------------------------------------+ +|int32 status |SUCCESS: if the negotiated protocol version is supported | +| |by the platform. 
+| |All commands, responses, and |
+| |notifications post successful return of this command must|
+| |comply with the negotiated version. |
+| |NOT_SUPPORTED: if the protocol version is not supported. |
++--------------------+---------------------------------------------------------+
+
+Notifications
+_____________
+
+BBM_RTC_EVENT
+~~~~~~~~~~~~~
+
+message_id: 0x0
+protocol_id: 0x81
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 flags |RTC events: |
+| |Bits[31:2] Reserved, must be zero. |
+| |Bit[1] RTC rollover notification: |
+| |1 RTC rollover detected. |
+| |0 no RTC rollover detected. |
+| |Bit[0] RTC alarm notification: |
+| |1 RTC alarm generated. |
+| |0 no RTC alarm generated. |
++------------------+-----------------------------------------------------------+
+
+BBM_BUTTON_EVENT
+~~~~~~~~~~~~~~~~
+
+message_id: 0x1
+protocol_id: 0x81
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 flags |Button events: |
+| |Bits[31:1] Reserved, must be zero. |
+| |Bit[0] Button notification: |
+| |1 button change detected. |
+| |0 no button change detected. |
++------------------+-----------------------------------------------------------+
+
+System Control and Management CPU Vendor Protocol
+=================================================
+
+This protocol allows an agent to start or stop a CPU. It is used to manage
+auxiliary CPUs in a target LM (e.g. additional cores in an AP cluster or
+Cortex-M cores).
+Note:
+ - For cores in the AP cluster, PSCI should be used; the PSCI firmware then
+   uses the CPU protocol to handle them. For cores outside the AP cluster,
+   the operating system (e.g. Linux) can use the CPU protocol to control
+   Cortex-M7 cores.
+ - CPU here refers to the core and its auxiliary peripherals (e.g. TCM)
+   inside the i.MX SoC.
+
+There are cases where giving an agent full control of a CPU via the CPU
+protocol is not desired. The LMM protocol is more restrictive, limited to
+boot, shutdown, etc., so an agent may be allowed to boot another logical
+machine without being able to directly alter the state of its CPUs. This is
+also why there is an LMM power-on command, even though the same could have
+been done through the power protocol.
+
+The CPU protocol provides commands to:
+
+- Describe the protocol version.
+- Discover implementation attributes.
+- Discover the CPUs defined in the system.
+- Start a CPU.
+- Stop a CPU.
+- Set the boot and resume addresses for a CPU.
+- Set the sleep mode of a CPU.
+- Configure wake-up sources for a CPU.
+- Configure power domain reactions (LPM mode and retention mask) for a CPU.
+
+The CPU IDs can be found in the CPU section of the SoC DEVICE: SM Device
+Interface. They can also be found in the SoC RM; see the CPU Mode Control
+(CMC) list in the General Power Controller (GPC) section.
+
+CPU settings are not aggregated; setting their state is normally exclusive
+to one client.
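Each command in the tables that follow is carried as a sequence of 32-bit
little-endian words after the standard SCMI message header, which holds the
protocol_id and message_id values listed per command. As a rough sketch only
(the struct and macro names below are hypothetical and not taken from the
kernel's vendor driver), an agent-side encoding of the CPU_START and
CPU_RESET_VECTOR_SET parameters could look like::

    #include <stdint.h>

    #define SCMI_IMX_PROTO_CPU             0x82 /* protocol_id */
    #define SCMI_IMX_CPU_START             0x4  /* message_id */
    #define SCMI_IMX_CPU_RESET_VECTOR_SET  0x6  /* message_id */

    /* CPU_START parameters: a single identifier word. */
    struct scmi_imx_cpu_start_in {
            uint32_t cpuid;
    };

    /* CPU_RESET_VECTOR_SET parameters, in table order. */
    struct scmi_imx_cpu_reset_vector_set_in {
            uint32_t cpuid;
            uint32_t flags;       /* bit 31 resume, bit 30 boot, bit 0 table */
            uint32_t vector_low;  /* low 32 bits of address or table base */
            uint32_t vector_high; /* high 32 bits of address or table base */
    };

The int32 status word in each response is interpreted using the standard ARM
SCMI status codes.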
+
+Commands:
+_________
+
+PROTOCOL_VERSION
+~~~~~~~~~~~~~~~~
+
+message_id: 0x0
+protocol_id: 0x82
+This command is mandatory.
+
++---------------+--------------------------------------------------------------+
+|Return values |
++---------------+--------------------------------------------------------------+
+|Name |Description |
++---------------+--------------------------------------------------------------+
+|int32 status | See ARM SCMI Specification for status code definitions. |
++---------------+--------------------------------------------------------------+
+|uint32 version | For this revision of the specification, this value must be |
+| | 0x10000. |
++---------------+--------------------------------------------------------------+
+
+PROTOCOL_ATTRIBUTES
+~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x1
+protocol_id: 0x82
+This command is mandatory.
+
++---------------+--------------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status | See ARM SCMI Specification for status code definitions. |
++------------------+-----------------------------------------------------------+
+|uint32 attributes |Protocol attributes: |
+| |Bits[31:16] Reserved, must be zero. |
+| |Bits[15:0] Number of CPUs |
++------------------+-----------------------------------------------------------+
+
+PROTOCOL_MESSAGE_ATTRIBUTES
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x2
+protocol_id: 0x82
+This command is mandatory.
+
++---------------+--------------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: in case the message is implemented and available |
+| |to use. |
+| |NOT_FOUND: if the message identified by message_id is |
+| |invalid or not implemented |
++------------------+-----------------------------------------------------------+
+|uint32 attributes |Flags that are associated with a specific command in the |
+| |protocol. For all commands in this protocol, this |
+| |parameter has a value of 0 |
++------------------+-----------------------------------------------------------+
+
+CPU_ATTRIBUTES
+~~~~~~~~~~~~~~
+
+message_id: 0x3
+protocol_id: 0x82
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 cpuid |Identifier for the CPU |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if valid attributes are returned successfully. |
+| |NOT_FOUND: if the cpuid is not valid. |
++------------------+-----------------------------------------------------------+
+|uint32 attributes |Bits[31:0] Reserved, must be zero |
++------------------+-----------------------------------------------------------+
+|char name[16] |NULL terminated ASCII string with CPU name up to 16 bytes |
++------------------+-----------------------------------------------------------+
+
+CPU_START
+~~~~~~~~~
+
+message_id: 0x4
+protocol_id: 0x82
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 cpuid |Identifier for the CPU |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the cpu is started successfully. |
+| |NOT_FOUND: if cpuid is not valid. |
+| |DENIED: the calling agent is not allowed to start this CPU.|
++------------------+-----------------------------------------------------------+
+
+CPU_STOP
+~~~~~~~~
+
+message_id: 0x5
+protocol_id: 0x82
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 cpuid |Identifier for the CPU |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the cpu is stopped successfully. |
+| |NOT_FOUND: if cpuid is not valid. |
+| |DENIED: the calling agent is not allowed to stop this CPU. |
++------------------+-----------------------------------------------------------+
+
+CPU_RESET_VECTOR_SET
+~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x6
+protocol_id: 0x82
+This command is mandatory.
+
++----------------------+-------------------------------------------------------+
+|Parameters |
++----------------------+-------------------------------------------------------+
+|Name |Description |
++----------------------+-------------------------------------------------------+
+|uint32 cpuid |Identifier for the CPU |
++----------------------+-------------------------------------------------------+
+|uint32 flags |Reset vector flags: |
+| |Bit[31] Resume flag. |
+| |Set to 1 to update the reset vector used on resume. |
+| |Bit[30] Boot flag. |
+| |Set to 1 to update the reset vector used for boot. |
+| |Bits[29:1] Reserved, must be zero. |
+| |Bit[0] Table flag. |
+| |Set to 1 if vector is the vector table base address. |
++----------------------+-------------------------------------------------------+
+|uint32 resetVectorLow |Lower vector: |
+| |If bit[0] of flags is 0, the lower 32 bits of the |
+| |physical address where the CPU should execute from on |
+| |reset.
If bit[0] of flags is 1, the lower 32 bits of | +| |the vector table base address | ++----------------------+-------------------------------------------------------+ +|uint32 resetVectorhigh|Upper vector: | +| |If bit[0] of flags is 0, the upper 32 bits of the | +| |physical address where the CPU should execute from on | +| |reset. If bit[0] of flags is 1, the upper 32 bits of | +| |the vector table base address | ++----------------------+-------------------------------------------------------+ +|Return values | ++----------------------+-------------------------------------------------------+ +|Name |Description | ++----------------------+-------------------------------------------------------+ +|int32 status |SUCCESS: if the CPU reset vector is set successfully. | +| |NOT_FOUND: if cpuId does not point to a valid CPU. | +| |INVALID_PARAMETERS: the requested vector type is not | +| |supported by this CPU. | +| |DENIED: the calling agent is not allowed to set the | +| |reset vector of this CPU | ++----------------------+-------------------------------------------------------+ + +CPU_SLEEP_MODE_SET +~~~~~~~~~~~~~~~~~~ + +message_id: 0x7 +protocol_id: 0x82 +This command is mandatory. + ++----------------------+-------------------------------------------------------+ +|Parameters | ++----------------------+-------------------------------------------------------+ +|Name |Description | ++----------------------+-------------------------------------------------------+ +|uint32 cpuid |Identifier for the CPU | ++----------------------+-------------------------------------------------------+ +|uint32 flags |Sleep mode flags: | +| |Bits[31:1] Reserved, must be zero. | +| |Bit[0] IRQ mux: | +| |If set to 1 the wakeup mux source is the GIC, else if 0| +| |then the GPC | ++----------------------+-------------------------------------------------------+ +|uint32 sleepmode |target sleep mode. When CPU runs into WFI, the GPC mode| +| |will be triggered to be in below modes: | +| |RUN: (0) | +| |WAIT: (1) | +| |STOP: (2) | +| |SUSPEND: (3) | ++----------------------+-------------------------------------------------------+ +|Return values | ++----------------------+-------------------------------------------------------+ +|Name |Description | ++----------------------+-------------------------------------------------------+ +|int32 status |SUCCESS: if the CPU sleep mode is set successfully. | +| |NOT_FOUND: if cpuId does not point to a valid CPU. | +| |INVALID_PARAMETERS: the sleepmode or flags is invalid. | +| |DENIED: the calling agent is not allowed to configure | +| |the CPU | ++----------------------+-------------------------------------------------------+ + +CPU_INFO_GET +~~~~~~~~~~~~ + +message_id: 0xC +protocol_id: 0x82 +This command is mandatory. + ++----------------------+-------------------------------------------------------+ +|Parameters | ++----------------------+-------------------------------------------------------+ +|Name |Description | ++----------------------+-------------------------------------------------------+ +|uint32 cpuid |Identifier for the CPU | ++----------------------+-------------------------------------------------------+ +|Return values | ++----------------------+-------------------------------------------------------+ +|Name |Description | ++----------------------+-------------------------------------------------------+ +|int32 status |SUCCESS: if valid attributes are returned successfully.| +| |NOT_FOUND: if the cpuid is not valid. 
++----------------------+-------------------------------------------------------+
+|uint32 runmode |Run mode for the CPU |
+| |RUN(0): CPU started |
+| |HOLD(1): CPU powered up, reset asserted |
+| |STOP(2): CPU held in reset |
+| |SUSPEND(3): CPU in cpuidle state |
++----------------------+-------------------------------------------------------+
+|uint32 sleepmode |Sleep mode for the CPU, see CPU_SLEEP_MODE_SET |
++----------------------+-------------------------------------------------------+
+|uint32 resetvectorlow |Reset vector low 32 bits for the CPU |
++----------------------+-------------------------------------------------------+
+|uint32 resetvectorhigh|Reset vector high 32 bits for the CPU |
++----------------------+-------------------------------------------------------+
+
+NEGOTIATE_PROTOCOL_VERSION
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x10
+protocol_id: 0x82
+This command is mandatory.
+
++--------------------+---------------------------------------------------------+
+|Parameters |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|uint32 version |The negotiated protocol version the agent intends to use |
++--------------------+---------------------------------------------------------+
+|Return values |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|int32 status |SUCCESS: if the negotiated protocol version is supported |
+| |by the platform. All commands, responses, and |
+| |notifications post successful return of this command must|
+| |comply with the negotiated version. |
+| |NOT_SUPPORTED: if the protocol version is not supported. |
++--------------------+---------------------------------------------------------+
+
+SCMI_MISC: System Control and Management MISC Vendor Protocol
+================================================================
+
+Provides miscellaneous functions. This includes controls that are miscellaneous
+settings/actions that must be exposed from the SM to agents. They are device
+specific and are usually defined to access bit fields in various mix block
+control modules, IOMUX_GPR, and other GPRs/CSRs owned by the SM. This protocol
+supports the following functions:
+
+- Describe the protocol version.
+- Discover implementation attributes.
+- Set/Get a control.
+- Initiate an action on a control.
+- Obtain platform (i.e. SM) build information.
+- Obtain ROM passover data.
+- Read boot/shutdown/reset information for the LM or the system.
+
+Commands:
+_________
+
+PROTOCOL_VERSION
+~~~~~~~~~~~~~~~~
+
+message_id: 0x0
+protocol_id: 0x84
+
++---------------+--------------------------------------------------------------+
+|Return values |
++---------------+--------------------------------------------------------------+
+|Name |Description |
++---------------+--------------------------------------------------------------+
+|int32 status | See ARM SCMI Specification for status code definitions. |
++---------------+--------------------------------------------------------------+
+|uint32 version | For this revision of the specification, this value must be |
+| | 0x10000.
| ++---------------+--------------------------------------------------------------+ + +PROTOCOL_ATTRIBUTES +~~~~~~~~~~~~~~~~~~~ + +message_id: 0x1 +protocol_id: 0x84 + ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status | See ARM SCMI Specification for status code definitions. | ++------------------+-----------------------------------------------------------+ +|uint32 attributes |Protocol attributes: | +| |Bits[31:24] Reserved, must be zero. | +| |Bits[23:16] Number of reset reasons. | +| |Bits[15:0] Number of controls | ++------------------+-----------------------------------------------------------+ + +PROTOCOL_MESSAGE_ATTRIBUTES +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +message_id: 0x2 +protocol_id: 0x84 + ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: in case the message is implemented and available | +| |to use. | +| |NOT_FOUND: if the message identified by message_id is | +| |invalid or not implemented | ++------------------+-----------------------------------------------------------+ +|uint32 attributes |Flags that are associated with a specific function in the | +| |protocol. For all functions in this protocol, this | +| |parameter has a value of 0 | ++------------------+-----------------------------------------------------------+ + +MISC_CONTROL_SET +~~~~~~~~~~~~~~~~ + +message_id: 0x3 +protocol_id: 0x84 + ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 index |Index of the control | ++------------------+-----------------------------------------------------------+ +|uint32 num |Size of the value data in words | ++------------------+-----------------------------------------------------------+ +|uint32 val[8] |value data array | ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: if the control was set successfully. | +| |NOT_FOUND: if the index is not valid. 
+| |DENIED: if the agent does not have permission to set the |
+| |control |
++------------------+-----------------------------------------------------------+
+
+MISC_CONTROL_GET
+~~~~~~~~~~~~~~~~
+
+message_id: 0x4
+protocol_id: 0x84
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 index |Index of the control |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the control was read successfully. |
+| |NOT_FOUND: if the index is not valid. |
+| |DENIED: if the agent does not have permission to get the |
+| |control |
++------------------+-----------------------------------------------------------+
+|uint32 num |Size of the return data in words, max 8 |
++------------------+-----------------------------------------------------------+
+|uint32 | |
+|val[0, num - 1] |value data array |
++------------------+-----------------------------------------------------------+
+
+MISC_CONTROL_ACTION
+~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x5
+protocol_id: 0x84
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 index |Index of the control |
++------------------+-----------------------------------------------------------+
+|uint32 action |Action for the control |
++------------------+-----------------------------------------------------------+
+|uint32 numarg |Size of the argument data, max 8 |
++------------------+-----------------------------------------------------------+
+|uint32 | |
+|arg[0, numarg - 1]|Argument data array |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the action was performed successfully. |
+| |NOT_FOUND: if the index is not valid. |
+| |DENIED: if the agent does not have permission to get the |
+| |control |
++------------------+-----------------------------------------------------------+
+|uint32 num |Size of the return data in words, max 8 |
++------------------+-----------------------------------------------------------+
+|uint32 | |
+|val[0, num - 1] |value data array |
++------------------+-----------------------------------------------------------+
+
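The control set/get/action commands above all move small arrays of raw words.
A minimal, hypothetical agent-side view of the MISC_CONTROL_SET parameters and
the MISC_CONTROL_GET return payload (the names here are invented for
illustration; the kernel driver defines its own structures) could be::

    #include <stdint.h>

    #define SCMI_IMX_MISC_MAX_VAL_WORDS  8

    /* MISC_CONTROL_SET parameters: index, word count, then the value words. */
    struct scmi_imx_misc_ctrl_set_in {
            uint32_t index;
            uint32_t num;                              /* 1..8 words used */
            uint32_t val[SCMI_IMX_MISC_MAX_VAL_WORDS];
    };

    /* MISC_CONTROL_GET return payload: status, word count, value words. */
    struct scmi_imx_misc_ctrl_get_out {
            int32_t  status;
            uint32_t num;                              /* words returned */
            uint32_t val[SCMI_IMX_MISC_MAX_VAL_WORDS];
    };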
+MISC_DISCOVER_BUILD_INFO
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+This function is used to obtain the build commit, date, time, and number.
+
+message_id: 0x6
+protocol_id: 0x84
+
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the build info was retrieved successfully. |
+| |NOT_SUPPORTED: if the data is not available. |
++------------------+-----------------------------------------------------------+
+|uint32 buildnum |Build number |
++------------------+-----------------------------------------------------------+
+|uint32 buildcommit|Most significant 32 bits of the git commit hash |
++------------------+-----------------------------------------------------------+
+|uint8 date[16] |Date of build. Null terminated ASCII string of up to 16 |
+| |bytes in length |
++------------------+-----------------------------------------------------------+
+|uint8 time[16] |Time of build. Null terminated ASCII string of up to 16 |
+| |bytes in length |
++------------------+-----------------------------------------------------------+
+
+MISC_ROM_PASSOVER_GET
+~~~~~~~~~~~~~~~~~~~~~
+
+ROM passover data is information exported by the ROM that can be used by other
+software. It includes the boot device, instance, type, mode, etc. This function
+is used to obtain the ROM passover data. The returned block of words is
+structured as defined in the ROM passover section in the SoC RM.
+
+message_id: 0x7
+protocol_id: 0x84
+
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the data was retrieved successfully. |
+| |NOT_SUPPORTED: if the data is not available. |
++------------------+-----------------------------------------------------------+
+|uint32 num |Size of the passover data in words, max 13 |
++------------------+-----------------------------------------------------------+
+|uint32 | |
+|data[0, num - 1] |Passover data array |
++------------------+-----------------------------------------------------------+
+
+MISC_CONTROL_NOTIFY
+~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x8
+protocol_id: 0x84
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 index |Index of control |
++------------------+-----------------------------------------------------------+
+|uint32 flags |Notification flags, varies by control |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: notification configuration was successfully |
+| |updated. |
+| |NOT_FOUND: if the control ID does not exist. |
+| |INVALID_PARAMETERS: if the input attributes flag specifies |
+| |unsupported or invalid configurations. |
+| |DENIED: if the calling agent is not permitted to request |
+| |the notification.
| ++------------------+-----------------------------------------------------------+ + +MISC_RESET_REASON_ATTRIBUTES +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +message_id: 0x9 +protocol_id: 0x84 + ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 reasonid |Identifier for the reason | ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: if valid reason attributes are returned | +| |NOT_FOUND: if reasonId pertains to a non-existent reason. | ++------------------+-----------------------------------------------------------+ +|uint32 attributes |Reason attributes. This parameter has the following | +| |format: Bits[31:0] Reserved, must be zero | +| |Bits[15:0] Number of persistent storage (GPR) words. | ++------------------+-----------------------------------------------------------+ +|uint8 name[16] |Null-terminated ASCII string of up to 16 bytes in length | +| |describing the reason | ++------------------+-----------------------------------------------------------+ + +MISC_RESET_REASON_GET +~~~~~~~~~~~~~~~~~~~~~ + +message_id: 0xA +protocol_id: 0x84 + ++--------------------+---------------------------------------------------------+ +|Parameters | ++--------------------+---------------------------------------------------------+ +|Name |Description | ++--------------------+---------------------------------------------------------+ +|uint32 flags |Reason flags. This parameter has the following format: | +| |Bits[31:1] Reserved, must be zero. | +| |Bit[0] System: | +| |Set to 1 to return the system reason. | +| |Set to 0 to return the LM reason | ++--------------------+---------------------------------------------------------+ +|Return values | ++--------------------+---------------------------------------------------------+ +|Name |Description | ++--------------------+---------------------------------------------------------+ +|int32 status |SUCCESS: reset reason return | ++--------------------+---------------------------------------------------------+ +|uint32 bootflags |Boot reason flags. This parameter has the format: | +| |Bits[31] Valid. | +| |Set to 1 if the entire reason is valid. | +| |Set to 0 if the entire reason is not valid. | +| |Bits[30:29] Reserved, must be zero. | +| |Bit[28] Valid origin: | +| |Set to 1 if the origin field is valid. | +| |Set to 0 if the origin field is not valid. | +| |Bits[27:24] Origin. | +| |Bit[23] Valid err ID: | +| |Set to 1 if the error ID field is valid. | +| |Set to 0 if the error ID field is not valid. | +| |Bits[22:8] Error ID. | +| |Bit[7:0] Reason | ++--------------------+---------------------------------------------------------+ +|uint32 shutdownflags|Shutdown reason flags. This parameter has the format: | +| |Bits[31] Valid. | +| |Set to 1 if the entire reason is valid. | +| |Set to 0 if the entire reason is not valid. | +| |Bits[30:29] Number of valid extended info words. | +| |Bit[28] Valid origin: | +| |Set to 1 if the origin field is valid. | +| |Set to 0 if the origin field is not valid. | +| |Bits[27:24] Origin. 
| +| |Bit[23] Valid err ID: | +| |Set to 1 if the error ID field is valid. | +| |Set to 0 if the error ID field is not valid. | +| |Bits[22:8] Error ID. | +| |Bit[7:0] Reason | ++--------------------+---------------------------------------------------------+ +|uint32 extinfo[8] |Array of extended info words | ++--------------------+---------------------------------------------------------+ + +MISC_SI_INFO_GET +~~~~~~~~~~~~~~~~ + +message_id: 0xB +protocol_id: 0x84 + ++--------------------+---------------------------------------------------------+ +|Return values | ++--------------------+---------------------------------------------------------+ +|Name |Description | ++--------------------+---------------------------------------------------------+ +|int32 status |SUCCESS: silicon info return | ++--------------------+---------------------------------------------------------+ +|uint32 deviceid |Silicon specific device ID | ++--------------------+---------------------------------------------------------+ +|uint32 sirev |Silicon specific revision | ++--------------------+---------------------------------------------------------+ +|uint32 partnum |Silicon specific part number | ++--------------------+---------------------------------------------------------+ +|uint8 siname[16] |Silicon name/revision. Null terminated ASCII string of up| +| |to 16 bytes in length | ++--------------------+---------------------------------------------------------+ + +MISC_CFG_INFO_GET +~~~~~~~~~~~~~~~~~ + +message_id: 0xC +protocol_id: 0x84 + ++--------------------+---------------------------------------------------------+ +|Return values | ++--------------------+---------------------------------------------------------+ +|Name |Description | ++--------------------+---------------------------------------------------------+ +|int32 status |SUCCESS: config name return | +| |NOT_SUPPORTED: name not available | ++--------------------+---------------------------------------------------------+ +|uint32 msel |Mode selector value | ++--------------------+---------------------------------------------------------+ +|uint8 cfgname[16] |config file basename. Null terminated ASCII string of up | +| |to 16 bytes in length | ++--------------------+---------------------------------------------------------+ + +MISC_SYSLOG_GET +~~~~~~~~~~~~~~~ + +message_id: 0xD +protocol_id: 0x84 + ++--------------------+---------------------------------------------------------+ +|Parameters | ++--------------------+---------------------------------------------------------+ +|Name |Description | ++--------------------+---------------------------------------------------------+ +|uint32 flags |Device specific flags that might impact the data returned| +| |or clearing of the data | ++--------------------+---------------------------------------------------------+ +|uint32 logindex |Index to the first log word. Will be the first element in| +| |the return array | ++--------------------+---------------------------------------------------------+ +|Return values | ++--------------------+---------------------------------------------------------+ +|Name |Description | ++--------------------+---------------------------------------------------------+ +|int32 status |SUCCESS: system log return | ++--------------------+---------------------------------------------------------+ +|uint32 numLogflags |Descriptor for the log data returned by this call. | +| |Bits[31:20] Number of remaining log words. | +| |Bits[15:12] Reserved, must be zero. 
| +| |Bits[11:0] Number of log words that are returned by this | +| |call | ++--------------------+---------------------------------------------------------+ +|uint32 syslog[N] |Log data array, N is defined in bits[11:0] of numLogflags| ++--------------------+---------------------------------------------------------+ + +NEGOTIATE_PROTOCOL_VERSION +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +message_id: 0x10 +protocol_id: 0x84 + ++--------------------+---------------------------------------------------------+ +|Parameters | ++--------------------+---------------------------------------------------------+ +|Name |Description | ++--------------------+---------------------------------------------------------+ +|uint32 version |The negotiated protocol version the agent intends to use | ++--------------------+---------------------------------------------------------+ +|Return values | ++--------------------+---------------------------------------------------------+ +|Name |Description | ++--------------------+---------------------------------------------------------+ +|int32 status |SUCCESS: if the negotiated protocol version is supported | +| |by the platform. All commands, responses, and | +| |notifications post successful return of this command must| +| |comply with the negotiated version. | +| |NOT_SUPPORTED: if the protocol version is not supported. | ++--------------------+---------------------------------------------------------+ + +Notifications +_____________ + +MISC_CONTROL_EVENT +~~~~~~~~~~~~~~~~~~ + +message_id: 0x0 +protocol_id: 0x81 + ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 ctrlid |Identifier for the control that caused the event. | ++------------------+-----------------------------------------------------------+ +|uint32 flags |Event flags, varies by control. | ++------------------+-----------------------------------------------------------+ diff --git a/drivers/firmware/arm_scmi/voltage.c b/drivers/firmware/arm_scmi/voltage.c index 2175ffd6cef5..fda6a1573609 100644 --- a/drivers/firmware/arm_scmi/voltage.c +++ b/drivers/firmware/arm_scmi/voltage.c @@ -11,7 +11,7 @@ #include "protocols.h" /* Updated only after ALL the mandatory features for that version are merged */ -#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x20000 +#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x20001 #define VOLTAGE_DOMS_NUM_MASK GENMASK(15, 0) #define REMAINING_LEVELS_MASK GENMASK(31, 16) @@ -229,8 +229,10 @@ static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph, /* Retrieve domain attributes at first ... 
*/ put_unaligned_le32(dom, td->tx.buf); /* Skip domain on comms error */ - if (ph->xops->do_xfer(ph, td)) + if (ph->xops->do_xfer(ph, td)) { + ph->xops->reset_rx_to_maxsz(ph, td); continue; + } v = vinfo->domains + dom; v->id = dom; diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c index 94a6b4e667de..87c323de17b9 100644 --- a/drivers/firmware/arm_scpi.c +++ b/drivers/firmware/arm_scpi.c @@ -630,6 +630,9 @@ static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain) if (ret) return ERR_PTR(ret); + if (!buf.opp_count) + return ERR_PTR(-ENOENT); + info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info) return ERR_PTR(-ENOMEM); @@ -1046,7 +1049,7 @@ static struct platform_driver scpi_driver = { .dev_groups = versions_groups, }, .probe = scpi_probe, - .remove_new = scpi_remove, + .remove = scpi_remove, }; module_platform_driver(scpi_driver); diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c index 285fe7ad490d..71e2a9a89f6a 100644 --- a/drivers/firmware/arm_sdei.c +++ b/drivers/firmware/arm_sdei.c @@ -763,7 +763,7 @@ static int sdei_device_freeze(struct device *dev) int err; /* unregister private events */ - cpuhp_remove_state(sdei_entry_point); + cpuhp_remove_state(sdei_hp_state); err = sdei_unregister_shared(); if (err) @@ -1062,13 +1062,12 @@ static bool __init sdei_present_acpi(void) return true; } -void __init sdei_init(void) +void __init acpi_sdei_init(void) { struct platform_device *pdev; int ret; - ret = platform_driver_register(&sdei_driver); - if (ret || !sdei_present_acpi()) + if (!sdei_present_acpi()) return; pdev = platform_device_register_simple(sdei_driver.driver.name, @@ -1081,6 +1080,12 @@ void __init sdei_init(void) } } +static int __init sdei_init(void) +{ + return platform_driver_register(&sdei_driver); +} +arch_initcall(sdei_init); + int sdei_event_handler(struct pt_regs *regs, struct sdei_registered_event *arg) { diff --git a/drivers/firmware/cirrus/Kconfig b/drivers/firmware/cirrus/Kconfig index 3ccbe14e4b0c..e3c2e38b746d 100644 --- a/drivers/firmware/cirrus/Kconfig +++ b/drivers/firmware/cirrus/Kconfig @@ -3,3 +3,18 @@ config FW_CS_DSP tristate default n + +config FW_CS_DSP_KUNIT_TEST_UTILS + tristate + +config FW_CS_DSP_KUNIT_TEST + tristate "KUnit tests for Cirrus Logic cs_dsp" if !KUNIT_ALL_TESTS + depends on KUNIT && REGMAP && FW_CS_DSP + default KUNIT_ALL_TESTS + select FW_CS_DSP_KUNIT_TEST_UTILS + help + This builds KUnit tests for cs_dsp. + For more information on KUnit and unit tests in general, + please refer to the KUnit documentation in + Documentation/dev-tools/kunit/. + If in doubt, say "N". 
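For reference, a KUnit suite selected by an option such as FW_CS_DSP_KUNIT_TEST
is an ordinary kernel object built around the KUnit API; a minimal sketch (the
test body and names here are illustrative, not the actual cs_dsp test code)
looks like:

    // SPDX-License-Identifier: GPL-2.0
    /* Minimal illustrative KUnit suite, not part of the cs_dsp tests. */
    #include <kunit/test.h>
    #include <linux/module.h>

    static void cs_dsp_style_example_test(struct kunit *test)
    {
            /* A real test would exercise cs_dsp against a mock regmap. */
            KUNIT_EXPECT_EQ(test, 1 + 1, 2);
    }

    static struct kunit_case cs_dsp_style_example_cases[] = {
            KUNIT_CASE(cs_dsp_style_example_test),
            {}
    };

    static struct kunit_suite cs_dsp_style_example_suite = {
            .name = "cs_dsp_style_example",
            .test_cases = cs_dsp_style_example_cases,
    };

    kunit_test_suite(cs_dsp_style_example_suite);

    MODULE_DESCRIPTION("Illustrative KUnit suite sketch");
    MODULE_LICENSE("GPL");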
diff --git a/drivers/firmware/cirrus/Makefile b/drivers/firmware/cirrus/Makefile index b91318ca0ff4..b32dfa869491 100644 --- a/drivers/firmware/cirrus/Makefile +++ b/drivers/firmware/cirrus/Makefile @@ -1,3 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 # obj-$(CONFIG_FW_CS_DSP) += cs_dsp.o + +obj-y += test/ diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c index 9f3d665cfdcf..560724ce21aa 100644 --- a/drivers/firmware/cirrus/cs_dsp.c +++ b/drivers/firmware/cirrus/cs_dsp.c @@ -12,6 +12,7 @@ #include <linux/ctype.h> #include <linux/debugfs.h> #include <linux/delay.h> +#include <linux/minmax.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/seq_file.h> @@ -275,6 +276,12 @@ #define HALO_MPU_VIO_ERR_SRC_MASK 0x00007fff #define HALO_MPU_VIO_ERR_SRC_SHIFT 0 +/* + * Write Sequence + */ +#define WSEQ_OP_MAX_WORDS 3 +#define WSEQ_END_OF_SCRIPT 0xFFFFFF + struct cs_dsp_ops { bool (*validate_version)(struct cs_dsp *dsp, unsigned int version); unsigned int (*parse_sizes)(struct cs_dsp *dsp, @@ -371,7 +378,7 @@ const char *cs_dsp_mem_region_name(unsigned int type) return NULL; } } -EXPORT_SYMBOL_NS_GPL(cs_dsp_mem_region_name, FW_CS_DSP); +EXPORT_SYMBOL_NS_GPL(cs_dsp_mem_region_name, "FW_CS_DSP"); #ifdef CONFIG_DEBUG_FS static void cs_dsp_debugfs_save_wmfwname(struct cs_dsp *dsp, const char *s) @@ -512,7 +519,7 @@ void cs_dsp_init_debugfs(struct cs_dsp *dsp, struct dentry *debugfs_root) dsp->debugfs_root = root; } -EXPORT_SYMBOL_NS_GPL(cs_dsp_init_debugfs, FW_CS_DSP); +EXPORT_SYMBOL_NS_GPL(cs_dsp_init_debugfs, "FW_CS_DSP"); /** * cs_dsp_cleanup_debugfs() - Removes DSP representation from debugfs @@ -524,17 +531,17 @@ void cs_dsp_cleanup_debugfs(struct cs_dsp *dsp) debugfs_remove_recursive(dsp->debugfs_root); dsp->debugfs_root = ERR_PTR(-ENODEV); } -EXPORT_SYMBOL_NS_GPL(cs_dsp_cleanup_debugfs, FW_CS_DSP); +EXPORT_SYMBOL_NS_GPL(cs_dsp_cleanup_debugfs, "FW_CS_DSP"); #else void cs_dsp_init_debugfs(struct cs_dsp *dsp, struct dentry *debugfs_root) { } -EXPORT_SYMBOL_NS_GPL(cs_dsp_init_debugfs, FW_CS_DSP); +EXPORT_SYMBOL_NS_GPL(cs_dsp_init_debugfs, "FW_CS_DSP"); void cs_dsp_cleanup_debugfs(struct cs_dsp *dsp) { } -EXPORT_SYMBOL_NS_GPL(cs_dsp_cleanup_debugfs, FW_CS_DSP); +EXPORT_SYMBOL_NS_GPL(cs_dsp_cleanup_debugfs, "FW_CS_DSP"); static inline void cs_dsp_debugfs_save_wmfwname(struct cs_dsp *dsp, const char *s) @@ -742,7 +749,7 @@ int cs_dsp_coeff_write_acked_control(struct cs_dsp_coeff_ctl *ctl, unsigned int return -ETIMEDOUT; } -EXPORT_SYMBOL_NS_GPL(cs_dsp_coeff_write_acked_control, FW_CS_DSP); +EXPORT_SYMBOL_NS_GPL(cs_dsp_coeff_write_acked_control, "FW_CS_DSP"); static int cs_dsp_coeff_write_ctrl_raw(struct cs_dsp_coeff_ctl *ctl, unsigned int off, const void *buf, size_t len) @@ -796,6 +803,9 @@ int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl, lockdep_assert_held(&ctl->dsp->pwr_lock); + if (ctl->flags && !(ctl->flags & WMFW_CTL_FLAG_WRITEABLE)) + return -EPERM; + if (len + off * sizeof(u32) > ctl->len) return -EINVAL; @@ -817,7 +827,34 @@ int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl, return 1; } -EXPORT_SYMBOL_NS_GPL(cs_dsp_coeff_write_ctrl, FW_CS_DSP); +EXPORT_SYMBOL_NS_GPL(cs_dsp_coeff_write_ctrl, "FW_CS_DSP"); + +/** + * cs_dsp_coeff_lock_and_write_ctrl() - Writes the given buffer to the given coefficient control + * @ctl: pointer to coefficient control + * @off: word offset at which data should be written + * @buf: the buffer to write to the given control + * @len: the length of the buffer in bytes + * + * Same as 
cs_dsp_coeff_write_ctrl() but takes pwr_lock. + * + * Return: A negative number on error, 1 when the control value changed and 0 when it has not. + */ +int cs_dsp_coeff_lock_and_write_ctrl(struct cs_dsp_coeff_ctl *ctl, + unsigned int off, const void *buf, size_t len) +{ + struct cs_dsp *dsp = ctl->dsp; + int ret; + + lockdep_assert_not_held(&dsp->pwr_lock); + + mutex_lock(&dsp->pwr_lock); + ret = cs_dsp_coeff_write_ctrl(ctl, off, buf, len); + mutex_unlock(&dsp->pwr_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(cs_dsp_coeff_lock_and_write_ctrl); static int cs_dsp_coeff_read_ctrl_raw(struct cs_dsp_coeff_ctl *ctl, unsigned int off, void *buf, size_t len) @@ -889,7 +926,34 @@ int cs_dsp_coeff_read_ctrl(struct cs_dsp_coeff_ctl *ctl, return ret; } -EXPORT_SYMBOL_NS_GPL(cs_dsp_coeff_read_ctrl, FW_CS_DSP); +EXPORT_SYMBOL_NS_GPL(cs_dsp_coeff_read_ctrl, "FW_CS_DSP"); + +/** + * cs_dsp_coeff_lock_and_read_ctrl() - Reads the given coefficient control into the given buffer + * @ctl: pointer to coefficient control + * @off: word offset at which data should be read + * @buf: the buffer to store to the given control + * @len: the length of the buffer in bytes + * + * Same as cs_dsp_coeff_read_ctrl() but takes pwr_lock. + * + * Return: Zero for success, a negative number on error. + */ +int cs_dsp_coeff_lock_and_read_ctrl(struct cs_dsp_coeff_ctl *ctl, + unsigned int off, void *buf, size_t len) +{ + struct cs_dsp *dsp = ctl->dsp; + int ret; + + lockdep_assert_not_held(&dsp->pwr_lock); + + mutex_lock(&dsp->pwr_lock); + ret = cs_dsp_coeff_read_ctrl(ctl, off, buf, len); + mutex_unlock(&dsp->pwr_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(cs_dsp_coeff_lock_and_read_ctrl); static int cs_dsp_coeff_init_control_caches(struct cs_dsp *dsp) { @@ -993,7 +1057,7 @@ static int cs_dsp_create_control(struct cs_dsp *dsp, ctl->fw_name = dsp->fw_name; ctl->alg_region = *alg_region; - if (subname && dsp->fw_ver >= 2) { + if (subname && dsp->wmfw_ver >= 2) { ctl->subname_len = subname_len; ctl->subname = kasprintf(GFP_KERNEL, "%.*s", subname_len, subname); if (!ctl->subname) { @@ -1053,9 +1117,16 @@ struct cs_dsp_coeff_parsed_coeff { int len; }; -static int cs_dsp_coeff_parse_string(int bytes, const u8 **pos, const u8 **str) +static int cs_dsp_coeff_parse_string(int bytes, const u8 **pos, unsigned int avail, + const u8 **str) { - int length; + int length, total_field_len; + + /* String fields are at least one __le32 */ + if (sizeof(__le32) > avail) { + *pos = NULL; + return 0; + } switch (bytes) { case 1: @@ -1068,10 +1139,16 @@ static int cs_dsp_coeff_parse_string(int bytes, const u8 **pos, const u8 **str) return 0; } + total_field_len = ((length + bytes) + 3) & ~0x03; + if ((unsigned int)total_field_len > avail) { + *pos = NULL; + return 0; + } + if (str) *str = *pos + bytes; - *pos += ((length + bytes) + 3) & ~0x03; + *pos += total_field_len; return length; } @@ -1096,71 +1173,134 @@ static int cs_dsp_coeff_parse_int(int bytes, const u8 **pos) return val; } -static inline void cs_dsp_coeff_parse_alg(struct cs_dsp *dsp, const u8 **data, - struct cs_dsp_coeff_parsed_alg *blk) +static int cs_dsp_coeff_parse_alg(struct cs_dsp *dsp, + const struct wmfw_region *region, + struct cs_dsp_coeff_parsed_alg *blk) { const struct wmfw_adsp_alg_data *raw; + unsigned int data_len = le32_to_cpu(region->len); + unsigned int pos; + const u8 *tmp; + + raw = (const struct wmfw_adsp_alg_data *)region->data; - switch (dsp->fw_ver) { + switch (dsp->wmfw_ver) { case 0: case 1: - raw = (const struct wmfw_adsp_alg_data *)*data; - *data = raw->data; + 
if (sizeof(*raw) > data_len) + return -EOVERFLOW; blk->id = le32_to_cpu(raw->id); blk->name = raw->name; - blk->name_len = strlen(raw->name); + blk->name_len = strnlen(raw->name, ARRAY_SIZE(raw->name)); blk->ncoeff = le32_to_cpu(raw->ncoeff); + + pos = sizeof(*raw); break; default: - blk->id = cs_dsp_coeff_parse_int(sizeof(raw->id), data); - blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), data, + if (sizeof(raw->id) > data_len) + return -EOVERFLOW; + + tmp = region->data; + blk->id = cs_dsp_coeff_parse_int(sizeof(raw->id), &tmp); + pos = tmp - region->data; + + tmp = ®ion->data[pos]; + blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), &tmp, data_len - pos, &blk->name); - cs_dsp_coeff_parse_string(sizeof(u16), data, NULL); - blk->ncoeff = cs_dsp_coeff_parse_int(sizeof(raw->ncoeff), data); + if (!tmp) + return -EOVERFLOW; + + pos = tmp - region->data; + cs_dsp_coeff_parse_string(sizeof(u16), &tmp, data_len - pos, NULL); + if (!tmp) + return -EOVERFLOW; + + pos = tmp - region->data; + if (sizeof(raw->ncoeff) > (data_len - pos)) + return -EOVERFLOW; + + blk->ncoeff = cs_dsp_coeff_parse_int(sizeof(raw->ncoeff), &tmp); + pos += sizeof(raw->ncoeff); break; } + if ((int)blk->ncoeff < 0) + return -EOVERFLOW; + cs_dsp_dbg(dsp, "Algorithm ID: %#x\n", blk->id); cs_dsp_dbg(dsp, "Algorithm name: %.*s\n", blk->name_len, blk->name); cs_dsp_dbg(dsp, "# of coefficient descriptors: %#x\n", blk->ncoeff); + + return pos; } -static inline void cs_dsp_coeff_parse_coeff(struct cs_dsp *dsp, const u8 **data, - struct cs_dsp_coeff_parsed_coeff *blk) +static int cs_dsp_coeff_parse_coeff(struct cs_dsp *dsp, + const struct wmfw_region *region, + unsigned int pos, + struct cs_dsp_coeff_parsed_coeff *blk) { const struct wmfw_adsp_coeff_data *raw; + unsigned int data_len = le32_to_cpu(region->len); + unsigned int blk_len, blk_end_pos; const u8 *tmp; - int length; - switch (dsp->fw_ver) { + raw = (const struct wmfw_adsp_coeff_data *)®ion->data[pos]; + if (sizeof(raw->hdr) > (data_len - pos)) + return -EOVERFLOW; + + blk_len = le32_to_cpu(raw->hdr.size); + if (blk_len > S32_MAX) + return -EOVERFLOW; + + if (blk_len > (data_len - pos - sizeof(raw->hdr))) + return -EOVERFLOW; + + blk_end_pos = pos + sizeof(raw->hdr) + blk_len; + + blk->offset = le16_to_cpu(raw->hdr.offset); + blk->mem_type = le16_to_cpu(raw->hdr.type); + + switch (dsp->wmfw_ver) { case 0: case 1: - raw = (const struct wmfw_adsp_coeff_data *)*data; - *data = *data + sizeof(raw->hdr) + le32_to_cpu(raw->hdr.size); + if (sizeof(*raw) > (data_len - pos)) + return -EOVERFLOW; - blk->offset = le16_to_cpu(raw->hdr.offset); - blk->mem_type = le16_to_cpu(raw->hdr.type); blk->name = raw->name; - blk->name_len = strlen(raw->name); + blk->name_len = strnlen(raw->name, ARRAY_SIZE(raw->name)); blk->ctl_type = le16_to_cpu(raw->ctl_type); blk->flags = le16_to_cpu(raw->flags); blk->len = le32_to_cpu(raw->len); break; default: - tmp = *data; - blk->offset = cs_dsp_coeff_parse_int(sizeof(raw->hdr.offset), &tmp); - blk->mem_type = cs_dsp_coeff_parse_int(sizeof(raw->hdr.type), &tmp); - length = cs_dsp_coeff_parse_int(sizeof(raw->hdr.size), &tmp); - blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), &tmp, + pos += sizeof(raw->hdr); + tmp = ®ion->data[pos]; + blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), &tmp, data_len - pos, &blk->name); - cs_dsp_coeff_parse_string(sizeof(u8), &tmp, NULL); - cs_dsp_coeff_parse_string(sizeof(u16), &tmp, NULL); + if (!tmp) + return -EOVERFLOW; + + pos = tmp - region->data; + cs_dsp_coeff_parse_string(sizeof(u8), &tmp, data_len - 
pos, NULL); + if (!tmp) + return -EOVERFLOW; + + pos = tmp - region->data; + cs_dsp_coeff_parse_string(sizeof(u16), &tmp, data_len - pos, NULL); + if (!tmp) + return -EOVERFLOW; + + pos = tmp - region->data; + if (sizeof(raw->ctl_type) + sizeof(raw->flags) + sizeof(raw->len) > + (data_len - pos)) + return -EOVERFLOW; + blk->ctl_type = cs_dsp_coeff_parse_int(sizeof(raw->ctl_type), &tmp); + pos += sizeof(raw->ctl_type); blk->flags = cs_dsp_coeff_parse_int(sizeof(raw->flags), &tmp); + pos += sizeof(raw->flags); blk->len = cs_dsp_coeff_parse_int(sizeof(raw->len), &tmp); - - *data = *data + sizeof(raw->hdr) + length; break; } @@ -1170,6 +1310,8 @@ static inline void cs_dsp_coeff_parse_coeff(struct cs_dsp *dsp, const u8 **data, cs_dsp_dbg(dsp, "\tCoefficient flags: %#x\n", blk->flags); cs_dsp_dbg(dsp, "\tALSA control type: %#x\n", blk->ctl_type); cs_dsp_dbg(dsp, "\tALSA control len: %#x\n", blk->len); + + return blk_end_pos; } static int cs_dsp_check_coeff_flags(struct cs_dsp *dsp, @@ -1193,12 +1335,16 @@ static int cs_dsp_parse_coeff(struct cs_dsp *dsp, struct cs_dsp_alg_region alg_region = {}; struct cs_dsp_coeff_parsed_alg alg_blk; struct cs_dsp_coeff_parsed_coeff coeff_blk; - const u8 *data = region->data; - int i, ret; + int i, pos, ret; + + pos = cs_dsp_coeff_parse_alg(dsp, region, &alg_blk); + if (pos < 0) + return pos; - cs_dsp_coeff_parse_alg(dsp, &data, &alg_blk); for (i = 0; i < alg_blk.ncoeff; i++) { - cs_dsp_coeff_parse_coeff(dsp, &data, &coeff_blk); + pos = cs_dsp_coeff_parse_coeff(dsp, region, pos, &coeff_blk); + if (pos < 0) + return pos; switch (coeff_blk.ctl_type) { case WMFW_CTL_TYPE_BYTES: @@ -1267,6 +1413,10 @@ static unsigned int cs_dsp_adsp1_parse_sizes(struct cs_dsp *dsp, const struct wmfw_adsp1_sizes *adsp1_sizes; adsp1_sizes = (void *)&firmware->data[pos]; + if (sizeof(*adsp1_sizes) > firmware->size - pos) { + cs_dsp_err(dsp, "%s: file truncated\n", file); + return 0; + } cs_dsp_dbg(dsp, "%s: %d DM, %d PM, %d ZM\n", file, le32_to_cpu(adsp1_sizes->dm), le32_to_cpu(adsp1_sizes->pm), @@ -1283,6 +1433,10 @@ static unsigned int cs_dsp_adsp2_parse_sizes(struct cs_dsp *dsp, const struct wmfw_adsp2_sizes *adsp2_sizes; adsp2_sizes = (void *)&firmware->data[pos]; + if (sizeof(*adsp2_sizes) > firmware->size - pos) { + cs_dsp_err(dsp, "%s: file truncated\n", file); + return 0; + } cs_dsp_dbg(dsp, "%s: %d XM, %d YM %d PM, %d ZM\n", file, le32_to_cpu(adsp2_sizes->xm), le32_to_cpu(adsp2_sizes->ym), @@ -1322,12 +1476,10 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware, struct regmap *regmap = dsp->regmap; unsigned int pos = 0; const struct wmfw_header *header; - const struct wmfw_adsp1_sizes *adsp1_sizes; const struct wmfw_footer *footer; const struct wmfw_region *region; const struct cs_dsp_region *mem; const char *region_name; - char *text = NULL; struct cs_dsp_buf *buf; unsigned int reg; int regions = 0; @@ -1338,10 +1490,8 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware, ret = -EINVAL; - pos = sizeof(*header) + sizeof(*adsp1_sizes) + sizeof(*footer); - if (pos >= firmware->size) { - cs_dsp_err(dsp, "%s: file too short, %zu bytes\n", - file, firmware->size); + if (sizeof(*header) >= firmware->size) { + ret = -EOVERFLOW; goto out_fw; } @@ -1358,8 +1508,7 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware, goto out_fw; } - cs_dsp_info(dsp, "Firmware version: %d\n", header->ver); - dsp->fw_ver = header->ver; + dsp->wmfw_ver = header->ver; if (header->core != dsp->type) { cs_dsp_err(dsp, "%s: invalid 
core %d != %d\n", @@ -1369,33 +1518,47 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware, pos = sizeof(*header); pos = dsp->ops->parse_sizes(dsp, file, pos, firmware); + if ((pos == 0) || (sizeof(*footer) > firmware->size - pos)) { + ret = -EOVERFLOW; + goto out_fw; + } footer = (void *)&firmware->data[pos]; pos += sizeof(*footer); if (le32_to_cpu(header->len) != pos) { - cs_dsp_err(dsp, "%s: unexpected header length %d\n", - file, le32_to_cpu(header->len)); + ret = -EOVERFLOW; goto out_fw; } - cs_dsp_dbg(dsp, "%s: timestamp %llu\n", file, - le64_to_cpu(footer->timestamp)); + cs_dsp_info(dsp, "%s: format %d timestamp %#llx\n", file, header->ver, + le64_to_cpu(footer->timestamp)); + + while (pos < firmware->size) { + /* Is there enough data for a complete block header? */ + if (sizeof(*region) > firmware->size - pos) { + ret = -EOVERFLOW; + goto out_fw; + } - while (pos < firmware->size && - sizeof(*region) < firmware->size - pos) { region = (void *)&(firmware->data[pos]); + + if (le32_to_cpu(region->len) > firmware->size - pos - sizeof(*region)) { + ret = -EOVERFLOW; + goto out_fw; + } + region_name = "Unknown"; reg = 0; - text = NULL; offset = le32_to_cpu(region->offset) & 0xffffff; type = be32_to_cpu(region->type) & 0xff; switch (type) { + case WMFW_INFO_TEXT: case WMFW_NAME_TEXT: - region_name = "Firmware name"; - text = kzalloc(le32_to_cpu(region->len) + 1, - GFP_KERNEL); + region_name = "Info/Name"; + cs_dsp_info(dsp, "%s: %.*s\n", file, + min(le32_to_cpu(region->len), 100), region->data); break; case WMFW_ALGORITHM_DATA: region_name = "Algorithm"; @@ -1403,11 +1566,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware, if (ret != 0) goto out_fw; break; - case WMFW_INFO_TEXT: - region_name = "Information"; - text = kzalloc(le32_to_cpu(region->len) + 1, - GFP_KERNEL); - break; case WMFW_ABSOLUTE: region_name = "Absolute"; reg = offset; @@ -1441,23 +1599,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware, regions, le32_to_cpu(region->len), offset, region_name); - if (le32_to_cpu(region->len) > - firmware->size - pos - sizeof(*region)) { - cs_dsp_err(dsp, - "%s.%d: %s region len %d bytes exceeds file length %zu\n", - file, regions, region_name, - le32_to_cpu(region->len), firmware->size); - ret = -EINVAL; - goto out_fw; - } - - if (text) { - memcpy(text, region->data, le32_to_cpu(region->len)); - cs_dsp_info(dsp, "%s: %s\n", file, text); - kfree(text); - text = NULL; - } - if (reg) { buf = cs_dsp_buf_alloc(region->data, le32_to_cpu(region->len), @@ -1468,8 +1609,8 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware, goto out_fw; } - ret = regmap_raw_write_async(regmap, reg, buf->buf, - le32_to_cpu(region->len)); + ret = regmap_raw_write(regmap, reg, buf->buf, + le32_to_cpu(region->len)); if (ret != 0) { cs_dsp_err(dsp, "%s.%d: Failed to write %d bytes at %d in %s: %d\n", @@ -1484,22 +1625,18 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware, regions++; } - ret = regmap_async_complete(regmap); - if (ret != 0) { - cs_dsp_err(dsp, "Failed to complete async write: %d\n", ret); - goto out_fw; - } - if (pos > firmware->size) cs_dsp_warn(dsp, "%s.%d: %zu bytes at end of file\n", file, regions, pos - firmware->size); cs_dsp_debugfs_save_wmfwname(dsp, file); + ret = 0; out_fw: - regmap_async_complete(regmap); cs_dsp_buf_free(&buf_list); - kfree(text); + + if (ret == -EOVERFLOW) + cs_dsp_err(dsp, "%s: file content overflows file data\n", file); return ret; } @@ 
-1536,7 +1673,7 @@ struct cs_dsp_coeff_ctl *cs_dsp_get_ctl(struct cs_dsp *dsp, const char *name, in return rslt; } -EXPORT_SYMBOL_NS_GPL(cs_dsp_get_ctl, FW_CS_DSP); +EXPORT_SYMBOL_NS_GPL(cs_dsp_get_ctl, "FW_CS_DSP"); static void cs_dsp_ctl_fixup_base(struct cs_dsp *dsp, const struct cs_dsp_alg_region *alg_region) @@ -1626,7 +1763,7 @@ struct cs_dsp_alg_region *cs_dsp_find_alg_region(struct cs_dsp *dsp, return NULL; } -EXPORT_SYMBOL_NS_GPL(cs_dsp_find_alg_region, FW_CS_DSP); +EXPORT_SYMBOL_NS_GPL(cs_dsp_find_alg_region, "FW_CS_DSP"); static struct cs_dsp_alg_region *cs_dsp_create_region(struct cs_dsp *dsp, int type, __be32 id, @@ -1645,7 +1782,7 @@ static struct cs_dsp_alg_region *cs_dsp_create_region(struct cs_dsp *dsp, list_add_tail(&alg_region->list, &dsp->alg_regions); - if (dsp->fw_ver > 0) + if (dsp->wmfw_ver > 0) cs_dsp_ctl_fixup_base(dsp, alg_region); return alg_region; @@ -1768,7 +1905,7 @@ static int cs_dsp_adsp1_setup_algs(struct cs_dsp *dsp) ret = PTR_ERR(alg_region); goto out; } - if (dsp->fw_ver == 0) { + if (dsp->wmfw_ver == 0) { if (i + 1 < n_algs) { len = be32_to_cpu(adsp1_alg[i + 1].dm); len -= be32_to_cpu(adsp1_alg[i].dm); @@ -1790,7 +1927,7 @@ static int cs_dsp_adsp1_setup_algs(struct cs_dsp *dsp) ret = PTR_ERR(alg_region); goto out; } - if (dsp->fw_ver == 0) { + if (dsp->wmfw_ver == 0) { if (i + 1 < n_algs) { len = be32_to_cpu(adsp1_alg[i + 1].zm); len -= be32_to_cpu(adsp1_alg[i].zm); @@ -1881,7 +2018,7 @@ static int cs_dsp_adsp2_setup_algs(struct cs_dsp *dsp) ret = PTR_ERR(alg_region); goto out; } - if (dsp->fw_ver == 0) { + if (dsp->wmfw_ver == 0) { if (i + 1 < n_algs) { len = be32_to_cpu(adsp2_alg[i + 1].xm); len -= be32_to_cpu(adsp2_alg[i].xm); @@ -1903,7 +2040,7 @@ static int cs_dsp_adsp2_setup_algs(struct cs_dsp *dsp) ret = PTR_ERR(alg_region); goto out; } - if (dsp->fw_ver == 0) { + if (dsp->wmfw_ver == 0) { if (i + 1 < n_algs) { len = be32_to_cpu(adsp2_alg[i + 1].ym); len -= be32_to_cpu(adsp2_alg[i].ym); @@ -1925,7 +2062,7 @@ static int cs_dsp_adsp2_setup_algs(struct cs_dsp *dsp) ret = PTR_ERR(alg_region); goto out; } - if (dsp->fw_ver == 0) { + if (dsp->wmfw_ver == 0) { if (i + 1 < n_algs) { len = be32_to_cpu(adsp2_alg[i + 1].zm); len -= be32_to_cpu(adsp2_alg[i].zm); @@ -2029,7 +2166,6 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware struct cs_dsp_alg_region *alg_region; const char *region_name; int ret, pos, blocks, type, offset, reg, version; - char *text = NULL; struct cs_dsp_buf *buf; if (!firmware) @@ -2068,10 +2204,20 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware pos = le32_to_cpu(hdr->len); blocks = 0; - while (pos < firmware->size && - sizeof(*blk) < firmware->size - pos) { + while (pos < firmware->size) { + /* Is there enough data for a complete block header? 
*/ + if (sizeof(*blk) > firmware->size - pos) { + ret = -EOVERFLOW; + goto out_fw; + } + blk = (void *)(&firmware->data[pos]); + if (le32_to_cpu(blk->len) > firmware->size - pos - sizeof(*blk)) { + ret = -EOVERFLOW; + goto out_fw; + } + type = le16_to_cpu(blk->type); offset = le16_to_cpu(blk->offset); version = le32_to_cpu(blk->ver) >> 8; @@ -2088,7 +2234,8 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware region_name = "Unknown"; switch (type) { case (WMFW_NAME_TEXT << 8): - text = kzalloc(le32_to_cpu(blk->len) + 1, GFP_KERNEL); + cs_dsp_info(dsp, "%s: %.*s\n", dsp->fw_name, + min(le32_to_cpu(blk->len), 100), blk->data); break; case (WMFW_INFO_TEXT << 8): case (WMFW_METADATA << 8): @@ -2160,25 +2307,7 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware break; } - if (text) { - memcpy(text, blk->data, le32_to_cpu(blk->len)); - cs_dsp_info(dsp, "%s: %s\n", dsp->fw_name, text); - kfree(text); - text = NULL; - } - if (reg) { - if (le32_to_cpu(blk->len) > - firmware->size - pos - sizeof(*blk)) { - cs_dsp_err(dsp, - "%s.%d: %s region len %d bytes exceeds file length %zu\n", - file, blocks, region_name, - le32_to_cpu(blk->len), - firmware->size); - ret = -EINVAL; - goto out_fw; - } - buf = cs_dsp_buf_alloc(blk->data, le32_to_cpu(blk->len), &buf_list); @@ -2191,8 +2320,8 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware cs_dsp_dbg(dsp, "%s.%d: Writing %d bytes at %x\n", file, blocks, le32_to_cpu(blk->len), reg); - ret = regmap_raw_write_async(regmap, reg, buf->buf, - le32_to_cpu(blk->len)); + ret = regmap_raw_write(regmap, reg, buf->buf, + le32_to_cpu(blk->len)); if (ret != 0) { cs_dsp_err(dsp, "%s.%d: Failed to write to %x in %s: %d\n", @@ -2204,20 +2333,19 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware blocks++; } - ret = regmap_async_complete(regmap); - if (ret != 0) - cs_dsp_err(dsp, "Failed to complete async write: %d\n", ret); - if (pos > firmware->size) cs_dsp_warn(dsp, "%s.%d: %zu bytes at end of file\n", file, blocks, pos - firmware->size); cs_dsp_debugfs_save_binname(dsp, file); + ret = 0; out_fw: - regmap_async_complete(regmap); cs_dsp_buf_free(&buf_list); - kfree(text); + + if (ret == -EOVERFLOW) + cs_dsp_err(dsp, "%s: file content overflows file data\n", file); + return ret; } @@ -2266,7 +2394,7 @@ int cs_dsp_adsp1_init(struct cs_dsp *dsp) return cs_dsp_common_init(dsp); } -EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp1_init, FW_CS_DSP); +EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp1_init, "FW_CS_DSP"); /** * cs_dsp_adsp1_power_up() - Load and start the named firmware @@ -2280,8 +2408,8 @@ EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp1_init, FW_CS_DSP); * Return: Zero for success, a negative number on error. 
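 *
 * A minimal call sketch (illustrative only; the firmware file names, the
 * request_firmware() step and the "misc" fw_name are assumptions, not
 * taken from this code):
 *
 *	const struct firmware *wmfw, *bin;
 *
 *	if (!request_firmware(&wmfw, "dsp1.wmfw", dsp->dev) &&
 *	    !request_firmware(&bin, "dsp1.bin", dsp->dev))
 *		ret = cs_dsp_adsp1_power_up(dsp, wmfw, "dsp1.wmfw",
 *					    bin, "dsp1.bin", "misc");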
*/ int cs_dsp_adsp1_power_up(struct cs_dsp *dsp, - const struct firmware *wmfw_firmware, char *wmfw_filename, - const struct firmware *coeff_firmware, char *coeff_filename, + const struct firmware *wmfw_firmware, const char *wmfw_filename, + const struct firmware *coeff_firmware, const char *coeff_filename, const char *fw_name) { unsigned int val; @@ -2358,7 +2486,7 @@ err_mutex: mutex_unlock(&dsp->pwr_lock); return ret; } -EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp1_power_up, FW_CS_DSP); +EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp1_power_up, "FW_CS_DSP"); /** * cs_dsp_adsp1_power_down() - Halts the DSP @@ -2390,7 +2518,7 @@ void cs_dsp_adsp1_power_down(struct cs_dsp *dsp) mutex_unlock(&dsp->pwr_lock); } -EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp1_power_down, FW_CS_DSP); +EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp1_power_down, "FW_CS_DSP"); static int cs_dsp_adsp2v2_enable_core(struct cs_dsp *dsp) { @@ -2423,8 +2551,8 @@ static int cs_dsp_adsp2_enable_core(struct cs_dsp *dsp) { int ret; - ret = regmap_update_bits_async(dsp->regmap, dsp->base + ADSP2_CONTROL, - ADSP2_SYS_ENA, ADSP2_SYS_ENA); + ret = regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL, + ADSP2_SYS_ENA, ADSP2_SYS_ENA); if (ret != 0) return ret; @@ -2542,7 +2670,7 @@ int cs_dsp_set_dspclk(struct cs_dsp *dsp, unsigned int freq) return ret; } -EXPORT_SYMBOL_NS_GPL(cs_dsp_set_dspclk, FW_CS_DSP); +EXPORT_SYMBOL_NS_GPL(cs_dsp_set_dspclk, "FW_CS_DSP"); static void cs_dsp_stop_watchdog(struct cs_dsp *dsp) { @@ -2574,8 +2702,8 @@ static void cs_dsp_halo_stop_watchdog(struct cs_dsp *dsp) * Return: Zero for success, a negative number on error. */ int cs_dsp_power_up(struct cs_dsp *dsp, - const struct firmware *wmfw_firmware, char *wmfw_filename, - const struct firmware *coeff_firmware, char *coeff_filename, + const struct firmware *wmfw_firmware, const char *wmfw_filename, + const struct firmware *coeff_firmware, const char *coeff_filename, const char *fw_name) { int ret; @@ -2632,7 +2760,7 @@ err_mutex: return ret; } -EXPORT_SYMBOL_NS_GPL(cs_dsp_power_up, FW_CS_DSP); +EXPORT_SYMBOL_NS_GPL(cs_dsp_power_up, "FW_CS_DSP"); /** * cs_dsp_power_down() - Powers-down the DSP @@ -2666,7 +2794,7 @@ void cs_dsp_power_down(struct cs_dsp *dsp) cs_dsp_dbg(dsp, "Shutdown complete\n"); } -EXPORT_SYMBOL_NS_GPL(cs_dsp_power_down, FW_CS_DSP); +EXPORT_SYMBOL_NS_GPL(cs_dsp_power_down, "FW_CS_DSP"); static int cs_dsp_adsp2_start_core(struct cs_dsp *dsp) { @@ -2752,7 +2880,7 @@ err: return ret; } -EXPORT_SYMBOL_NS_GPL(cs_dsp_run, FW_CS_DSP); +EXPORT_SYMBOL_NS_GPL(cs_dsp_run, "FW_CS_DSP"); /** * cs_dsp_stop() - Stops the firmware @@ -2791,7 +2919,7 @@ void cs_dsp_stop(struct cs_dsp *dsp) cs_dsp_dbg(dsp, "Execution stopped\n"); } -EXPORT_SYMBOL_NS_GPL(cs_dsp_stop, FW_CS_DSP); +EXPORT_SYMBOL_NS_GPL(cs_dsp_stop, "FW_CS_DSP"); static int cs_dsp_halo_start_core(struct cs_dsp *dsp) { @@ -2853,7 +2981,7 @@ int cs_dsp_adsp2_init(struct cs_dsp *dsp) return cs_dsp_common_init(dsp); } -EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp2_init, FW_CS_DSP); +EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp2_init, "FW_CS_DSP"); /** * cs_dsp_halo_init() - Initialise a cs_dsp structure representing a HALO Core DSP @@ -2870,7 +2998,7 @@ int cs_dsp_halo_init(struct cs_dsp *dsp) return cs_dsp_common_init(dsp); } -EXPORT_SYMBOL_NS_GPL(cs_dsp_halo_init, FW_CS_DSP); +EXPORT_SYMBOL_NS_GPL(cs_dsp_halo_init, "FW_CS_DSP"); /** * cs_dsp_remove() - Clean a cs_dsp before deletion @@ -2890,7 +3018,7 @@ void cs_dsp_remove(struct cs_dsp *dsp) cs_dsp_free_ctl_blk(ctl); } } -EXPORT_SYMBOL_NS_GPL(cs_dsp_remove, FW_CS_DSP); +EXPORT_SYMBOL_NS_GPL(cs_dsp_remove, 
"FW_CS_DSP"); /** * cs_dsp_read_raw_data_block() - Reads a block of data from DSP memory @@ -2927,7 +3055,7 @@ int cs_dsp_read_raw_data_block(struct cs_dsp *dsp, int mem_type, unsigned int me return 0; } -EXPORT_SYMBOL_NS_GPL(cs_dsp_read_raw_data_block, FW_CS_DSP); +EXPORT_SYMBOL_NS_GPL(cs_dsp_read_raw_data_block, "FW_CS_DSP"); /** * cs_dsp_read_data_word() - Reads a word from DSP memory @@ -2951,7 +3079,7 @@ int cs_dsp_read_data_word(struct cs_dsp *dsp, int mem_type, unsigned int mem_add return 0; } -EXPORT_SYMBOL_NS_GPL(cs_dsp_read_data_word, FW_CS_DSP); +EXPORT_SYMBOL_NS_GPL(cs_dsp_read_data_word, "FW_CS_DSP"); /** * cs_dsp_write_data_word() - Writes a word to DSP memory @@ -2977,7 +3105,7 @@ int cs_dsp_write_data_word(struct cs_dsp *dsp, int mem_type, unsigned int mem_ad return regmap_raw_write(dsp->regmap, reg, &val, sizeof(val)); } -EXPORT_SYMBOL_NS_GPL(cs_dsp_write_data_word, FW_CS_DSP); +EXPORT_SYMBOL_NS_GPL(cs_dsp_write_data_word, "FW_CS_DSP"); /** * cs_dsp_remove_padding() - Convert unpacked words to packed bytes @@ -3001,7 +3129,7 @@ void cs_dsp_remove_padding(u32 *buf, int nwords) *pack_out++ = (u8)(word >> 16); } } -EXPORT_SYMBOL_NS_GPL(cs_dsp_remove_padding, FW_CS_DSP); +EXPORT_SYMBOL_NS_GPL(cs_dsp_remove_padding, "FW_CS_DSP"); /** * cs_dsp_adsp2_bus_error() - Handle a DSP bus error interrupt @@ -3071,7 +3199,7 @@ void cs_dsp_adsp2_bus_error(struct cs_dsp *dsp) error: mutex_unlock(&dsp->pwr_lock); } -EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp2_bus_error, FW_CS_DSP); +EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp2_bus_error, "FW_CS_DSP"); /** * cs_dsp_halo_bus_error() - Handle a DSP bus error interrupt @@ -3131,7 +3259,7 @@ void cs_dsp_halo_bus_error(struct cs_dsp *dsp) exit_unlock: mutex_unlock(&dsp->pwr_lock); } -EXPORT_SYMBOL_NS_GPL(cs_dsp_halo_bus_error, FW_CS_DSP); +EXPORT_SYMBOL_NS_GPL(cs_dsp_halo_bus_error, "FW_CS_DSP"); /** * cs_dsp_halo_wdt_expire() - Handle DSP watchdog expiry @@ -3151,7 +3279,7 @@ void cs_dsp_halo_wdt_expire(struct cs_dsp *dsp) mutex_unlock(&dsp->pwr_lock); } -EXPORT_SYMBOL_NS_GPL(cs_dsp_halo_wdt_expire, FW_CS_DSP); +EXPORT_SYMBOL_NS_GPL(cs_dsp_halo_wdt_expire, "FW_CS_DSP"); static const struct cs_dsp_ops cs_dsp_adsp1_ops = { .validate_version = cs_dsp_validate_version, @@ -3281,7 +3409,7 @@ int cs_dsp_chunk_write(struct cs_dsp_chunk *ch, int nbits, u32 val) return 0; } -EXPORT_SYMBOL_NS_GPL(cs_dsp_chunk_write, FW_CS_DSP); +EXPORT_SYMBOL_NS_GPL(cs_dsp_chunk_write, "FW_CS_DSP"); /** * cs_dsp_chunk_flush() - Pad remaining data with zero and commit to chunk @@ -3300,7 +3428,7 @@ int cs_dsp_chunk_flush(struct cs_dsp_chunk *ch) return cs_dsp_chunk_write(ch, CS_DSP_DATA_WORD_BITS - ch->cachebits, 0); } -EXPORT_SYMBOL_NS_GPL(cs_dsp_chunk_flush, FW_CS_DSP); +EXPORT_SYMBOL_NS_GPL(cs_dsp_chunk_flush, "FW_CS_DSP"); /** * cs_dsp_chunk_read() - Parse data from a DSP memory chunk @@ -3342,7 +3470,279 @@ int cs_dsp_chunk_read(struct cs_dsp_chunk *ch, int nbits) return result; } -EXPORT_SYMBOL_NS_GPL(cs_dsp_chunk_read, FW_CS_DSP); +EXPORT_SYMBOL_NS_GPL(cs_dsp_chunk_read, "FW_CS_DSP"); + + +struct cs_dsp_wseq_op { + struct list_head list; + u32 address; + u32 data; + u16 offset; + u8 operation; +}; + +static void cs_dsp_wseq_clear(struct cs_dsp *dsp, struct cs_dsp_wseq *wseq) +{ + struct cs_dsp_wseq_op *op, *op_tmp; + + list_for_each_entry_safe(op, op_tmp, &wseq->ops, list) { + list_del(&op->list); + devm_kfree(dsp->dev, op); + } +} + +static int cs_dsp_populate_wseq(struct cs_dsp *dsp, struct cs_dsp_wseq *wseq) +{ + struct cs_dsp_wseq_op *op = NULL; + struct cs_dsp_chunk chunk; + u8 
*words; + int ret; + + if (!wseq->ctl) { + cs_dsp_err(dsp, "No control for write sequence\n"); + return -EINVAL; + } + + words = kzalloc(wseq->ctl->len, GFP_KERNEL); + if (!words) + return -ENOMEM; + + ret = cs_dsp_coeff_read_ctrl(wseq->ctl, 0, words, wseq->ctl->len); + if (ret) { + cs_dsp_err(dsp, "Failed to read %s: %d\n", wseq->ctl->subname, ret); + goto err_free; + } + + INIT_LIST_HEAD(&wseq->ops); + + chunk = cs_dsp_chunk(words, wseq->ctl->len); + + while (!cs_dsp_chunk_end(&chunk)) { + op = devm_kzalloc(dsp->dev, sizeof(*op), GFP_KERNEL); + if (!op) { + ret = -ENOMEM; + goto err_free; + } + + op->offset = cs_dsp_chunk_bytes(&chunk); + op->operation = cs_dsp_chunk_read(&chunk, 8); + + switch (op->operation) { + case CS_DSP_WSEQ_END: + op->data = WSEQ_END_OF_SCRIPT; + break; + case CS_DSP_WSEQ_UNLOCK: + op->data = cs_dsp_chunk_read(&chunk, 16); + break; + case CS_DSP_WSEQ_ADDR8: + op->address = cs_dsp_chunk_read(&chunk, 8); + op->data = cs_dsp_chunk_read(&chunk, 32); + break; + case CS_DSP_WSEQ_H16: + case CS_DSP_WSEQ_L16: + op->address = cs_dsp_chunk_read(&chunk, 24); + op->data = cs_dsp_chunk_read(&chunk, 16); + break; + case CS_DSP_WSEQ_FULL: + op->address = cs_dsp_chunk_read(&chunk, 32); + op->data = cs_dsp_chunk_read(&chunk, 32); + break; + default: + ret = -EINVAL; + cs_dsp_err(dsp, "Unsupported op: %X\n", op->operation); + devm_kfree(dsp->dev, op); + goto err_free; + } + + list_add_tail(&op->list, &wseq->ops); + + if (op->operation == CS_DSP_WSEQ_END) + break; + } + + if (op && op->operation != CS_DSP_WSEQ_END) { + cs_dsp_err(dsp, "%s missing end terminator\n", wseq->ctl->subname); + ret = -ENOENT; + } + +err_free: + kfree(words); + + return ret; +} + +/** + * cs_dsp_wseq_init() - Initialize write sequences contained within the loaded DSP firmware + * @dsp: Pointer to DSP structure + * @wseqs: List of write sequences to initialize + * @num_wseqs: Number of write sequences to initialize + * + * Return: Zero for success, a negative number on error. + */ +int cs_dsp_wseq_init(struct cs_dsp *dsp, struct cs_dsp_wseq *wseqs, unsigned int num_wseqs) +{ + int i, ret; + + lockdep_assert_held(&dsp->pwr_lock); + + for (i = 0; i < num_wseqs; i++) { + ret = cs_dsp_populate_wseq(dsp, &wseqs[i]); + if (ret) { + cs_dsp_wseq_clear(dsp, &wseqs[i]); + return ret; + } + } + + return 0; +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_wseq_init, "FW_CS_DSP"); + +static struct cs_dsp_wseq_op *cs_dsp_wseq_find_op(u32 addr, u8 op_code, + struct list_head *wseq_ops) +{ + struct cs_dsp_wseq_op *op; + + list_for_each_entry(op, wseq_ops, list) { + if (op->operation == op_code && op->address == addr) + return op; + } + + return NULL; +} + +/** + * cs_dsp_wseq_write() - Add or update an entry in a write sequence + * @dsp: Pointer to a DSP structure + * @wseq: Write sequence to write to + * @addr: Address of the register to be written to + * @data: Data to be written + * @op_code: The type of operation of the new entry + * @update: If true, searches for the first entry in the write sequence with + * the same address and op_code, and replaces it. If false, creates a new entry + * at the tail + * + * This function formats register address and value pairs into the format + * required for write sequence entries, and either updates or adds the + * new entry into the write sequence. + * + * If update is set to true and no matching entry is found, it will add a new entry. + * + * Return: Zero for success, a negative number on error. 
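+ *
+ * Illustrative sketch (the register address, value and the choice of
+ * CS_DSP_WSEQ_FULL are arbitrary example values; the sequence is assumed
+ * to have been set up with cs_dsp_wseq_init()):
+ *
+ *	ret = cs_dsp_wseq_write(dsp, &wseq, 0x2b80090, 0x1234,
+ *				CS_DSP_WSEQ_FULL, true);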
+ */ +int cs_dsp_wseq_write(struct cs_dsp *dsp, struct cs_dsp_wseq *wseq, + u32 addr, u32 data, u8 op_code, bool update) +{ + struct cs_dsp_wseq_op *op_end, *op_new = NULL; + u32 words[WSEQ_OP_MAX_WORDS]; + struct cs_dsp_chunk chunk; + int new_op_size, ret; + + if (update) + op_new = cs_dsp_wseq_find_op(addr, op_code, &wseq->ops); + + /* If entry to update is not found, treat it as a new operation */ + if (!op_new) { + op_end = cs_dsp_wseq_find_op(0, CS_DSP_WSEQ_END, &wseq->ops); + if (!op_end) { + cs_dsp_err(dsp, "Missing terminator for %s\n", wseq->ctl->subname); + return -EINVAL; + } + + op_new = devm_kzalloc(dsp->dev, sizeof(*op_new), GFP_KERNEL); + if (!op_new) + return -ENOMEM; + + op_new->operation = op_code; + op_new->address = addr; + op_new->offset = op_end->offset; + update = false; + } + + op_new->data = data; + + chunk = cs_dsp_chunk(words, sizeof(words)); + cs_dsp_chunk_write(&chunk, 8, op_new->operation); + + switch (op_code) { + case CS_DSP_WSEQ_FULL: + cs_dsp_chunk_write(&chunk, 32, op_new->address); + cs_dsp_chunk_write(&chunk, 32, op_new->data); + break; + case CS_DSP_WSEQ_L16: + case CS_DSP_WSEQ_H16: + cs_dsp_chunk_write(&chunk, 24, op_new->address); + cs_dsp_chunk_write(&chunk, 16, op_new->data); + break; + default: + ret = -EINVAL; + cs_dsp_err(dsp, "Operation %X not supported\n", op_code); + goto op_new_free; + } + + new_op_size = cs_dsp_chunk_bytes(&chunk); + + if (!update) { + if (wseq->ctl->len - op_end->offset < new_op_size) { + cs_dsp_err(dsp, "Not enough memory in %s for entry\n", wseq->ctl->subname); + ret = -E2BIG; + goto op_new_free; + } + + op_end->offset += new_op_size; + + ret = cs_dsp_coeff_write_ctrl(wseq->ctl, op_end->offset / sizeof(u32), + &op_end->data, sizeof(u32)); + if (ret) + goto op_new_free; + + list_add_tail(&op_new->list, &op_end->list); + } + + ret = cs_dsp_coeff_write_ctrl(wseq->ctl, op_new->offset / sizeof(u32), + words, new_op_size); + if (ret) + goto op_new_free; + + return 0; + +op_new_free: + devm_kfree(dsp->dev, op_new); + + return ret; +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_wseq_write, "FW_CS_DSP"); + +/** + * cs_dsp_wseq_multi_write() - Add or update multiple entries in a write sequence + * @dsp: Pointer to a DSP structure + * @wseq: Write sequence to write to + * @reg_seq: List of address-data pairs + * @num_regs: Number of address-data pairs + * @op_code: The types of operations of the new entries + * @update: If true, searches for the first entry in the write sequence with + * the same address and op_code, and replaces it. If false, creates a new entry + * at the tail + * + * This function calls cs_dsp_wseq_write() for multiple address-data pairs. + * + * Return: Zero for success, a negative number on error. 
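+ *
+ * Illustrative sketch (the address/value pairs are arbitrary example
+ * data):
+ *
+ *	static const struct reg_sequence patch[] = {
+ *		{ 0x2b80090, 0x0001 },
+ *		{ 0x2b80094, 0x00ff },
+ *	};
+ *
+ *	ret = cs_dsp_wseq_multi_write(dsp, &wseq, patch, ARRAY_SIZE(patch),
+ *				      CS_DSP_WSEQ_FULL, false);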
+ */ +int cs_dsp_wseq_multi_write(struct cs_dsp *dsp, struct cs_dsp_wseq *wseq, + const struct reg_sequence *reg_seq, int num_regs, + u8 op_code, bool update) +{ + int i, ret; + + for (i = 0; i < num_regs; i++) { + ret = cs_dsp_wseq_write(dsp, wseq, reg_seq[i].reg, + reg_seq[i].def, op_code, update); + if (ret) + return ret; + } + + return 0; +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_wseq_multi_write, "FW_CS_DSP"); MODULE_DESCRIPTION("Cirrus Logic DSP Support"); MODULE_AUTHOR("Simon Trimmer <simont@opensource.cirrus.com>"); diff --git a/drivers/firmware/cirrus/test/Makefile b/drivers/firmware/cirrus/test/Makefile new file mode 100644 index 000000000000..7a24a6079ddc --- /dev/null +++ b/drivers/firmware/cirrus/test/Makefile @@ -0,0 +1,23 @@ +# SPDX-License-Identifier: GPL-2.0 +# + +cs_dsp_test_utils-objs := \ + cs_dsp_mock_mem_maps.o \ + cs_dsp_mock_bin.o \ + cs_dsp_mock_regmap.o \ + cs_dsp_mock_utils.o \ + cs_dsp_mock_wmfw.o + +cs_dsp_test-objs := \ + cs_dsp_test_bin.o \ + cs_dsp_test_bin_error.o \ + cs_dsp_test_callbacks.o \ + cs_dsp_test_control_parse.o \ + cs_dsp_test_control_cache.o \ + cs_dsp_test_control_rw.o \ + cs_dsp_test_wmfw.o \ + cs_dsp_test_wmfw_error.o \ + cs_dsp_tests.o + +obj-$(CONFIG_FW_CS_DSP_KUNIT_TEST_UTILS) += cs_dsp_test_utils.o +obj-$(CONFIG_FW_CS_DSP_KUNIT_TEST) += cs_dsp_test.o diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_bin.c b/drivers/firmware/cirrus/test/cs_dsp_mock_bin.c new file mode 100644 index 000000000000..3f8777ee4dc0 --- /dev/null +++ b/drivers/firmware/cirrus/test/cs_dsp_mock_bin.c @@ -0,0 +1,203 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// bin file builder for cs_dsp KUnit tests. +// +// Copyright (C) 2024 Cirrus Logic, Inc. and +// Cirrus Logic International Semiconductor Ltd. + +#include <kunit/resource.h> +#include <kunit/test.h> +#include <linux/firmware/cirrus/cs_dsp.h> +#include <linux/firmware/cirrus/cs_dsp_test_utils.h> +#include <linux/firmware/cirrus/wmfw.h> +#include <linux/firmware.h> +#include <linux/math.h> +#include <linux/overflow.h> +#include <linux/string.h> +#include <linux/vmalloc.h> + +/* Buffer large enough for bin file content */ +#define CS_DSP_MOCK_BIN_BUF_SIZE 32768 + +KUNIT_DEFINE_ACTION_WRAPPER(vfree_action_wrapper, vfree, void *) + +struct cs_dsp_mock_bin_builder { + struct cs_dsp_test *test_priv; + void *buf; + void *write_p; + size_t bytes_used; +}; + +/** + * cs_dsp_mock_bin_get_firmware() - Get struct firmware wrapper for data. + * + * @builder: Pointer to struct cs_dsp_mock_bin_builder. + * + * Return: Pointer to a struct firmware wrapper for the data. + */ +struct firmware *cs_dsp_mock_bin_get_firmware(struct cs_dsp_mock_bin_builder *builder) +{ + struct firmware *fw; + + fw = kunit_kzalloc(builder->test_priv->test, sizeof(*fw), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(builder->test_priv->test, fw); + + fw->data = builder->buf; + fw->size = builder->bytes_used; + + return fw; +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_get_firmware, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +/** + * cs_dsp_mock_bin_add_raw_block() - Add a data block to the bin file. + * + * @builder: Pointer to struct cs_dsp_mock_bin_builder. + * @alg_id: Algorithm ID. + * @alg_ver: Algorithm version. + * @type: Type of the block. + * @offset: Offset. + * @payload_data: Pointer to buffer containing the payload data. + * @payload_len_bytes: Length of payload data in bytes. 
+ */ +void cs_dsp_mock_bin_add_raw_block(struct cs_dsp_mock_bin_builder *builder, + unsigned int alg_id, unsigned int alg_ver, + int type, unsigned int offset, + const void *payload_data, size_t payload_len_bytes) +{ + struct wmfw_coeff_item *item; + size_t bytes_needed = struct_size_t(struct wmfw_coeff_item, data, payload_len_bytes); + + KUNIT_ASSERT_TRUE(builder->test_priv->test, + (builder->write_p + bytes_needed) < + (builder->buf + CS_DSP_MOCK_BIN_BUF_SIZE)); + + item = builder->write_p; + + item->offset = cpu_to_le16(offset); + item->type = cpu_to_le16(type); + item->id = cpu_to_le32(alg_id); + item->ver = cpu_to_le32(alg_ver << 8); + item->len = cpu_to_le32(payload_len_bytes); + + if (payload_len_bytes) + memcpy(item->data, payload_data, payload_len_bytes); + + builder->write_p += bytes_needed; + builder->bytes_used += bytes_needed; +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_add_raw_block, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +static void cs_dsp_mock_bin_add_name_or_info(struct cs_dsp_mock_bin_builder *builder, + const char *info, int type) +{ + size_t info_len = strlen(info); + char *tmp = NULL; + + if (info_len % 4) { + /* Create a padded string with length a multiple of 4 */ + size_t copy_len = info_len; + info_len = round_up(info_len, 4); + tmp = kunit_kzalloc(builder->test_priv->test, info_len, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(builder->test_priv->test, tmp); + memcpy(tmp, info, copy_len); + info = tmp; + } + + cs_dsp_mock_bin_add_raw_block(builder, 0, 0, WMFW_INFO_TEXT, 0, info, info_len); + kunit_kfree(builder->test_priv->test, tmp); +} + +/** + * cs_dsp_mock_bin_add_info() - Add an info block to the bin file. + * + * @builder: Pointer to struct cs_dsp_mock_bin_builder. + * @info: Pointer to info string to be copied into the file. + * + * The string will be padded to a length that is a multiple of 4 bytes. + */ +void cs_dsp_mock_bin_add_info(struct cs_dsp_mock_bin_builder *builder, + const char *info) +{ + cs_dsp_mock_bin_add_name_or_info(builder, info, WMFW_INFO_TEXT); +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_add_info, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +/** + * cs_dsp_mock_bin_add_name() - Add a name block to the bin file. + * + * @builder: Pointer to struct cs_dsp_mock_bin_builder. + * @name: Pointer to name string to be copied into the file. + */ +void cs_dsp_mock_bin_add_name(struct cs_dsp_mock_bin_builder *builder, + const char *name) +{ + cs_dsp_mock_bin_add_name_or_info(builder, name, WMFW_NAME_TEXT); +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_add_name, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +/** + * cs_dsp_mock_bin_add_patch() - Add a patch data block to the bin file. + * + * @builder: Pointer to struct cs_dsp_mock_bin_builder. + * @alg_id: Algorithm ID for the patch. + * @alg_ver: Algorithm version for the patch. + * @mem_region: Memory region for the patch. + * @reg_addr_offset: Offset to start of data in register addresses. + * @payload_data: Pointer to buffer containing the payload data. + * @payload_len_bytes: Length of payload data in bytes. 
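+ *
+ * Illustrative use in a test (a sketch; the algorithm ID/version, memory
+ * region and payload are arbitrary example values):
+ *
+ *	static const u8 payload[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
+ *
+ *	cs_dsp_mock_bin_add_patch(builder, 0xfafa, 0x100, WMFW_ADSP2_YM, 0,
+ *				  payload, sizeof(payload));
+ *	fw = cs_dsp_mock_bin_get_firmware(builder);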
+ */ +void cs_dsp_mock_bin_add_patch(struct cs_dsp_mock_bin_builder *builder, + unsigned int alg_id, unsigned int alg_ver, + int mem_region, unsigned int reg_addr_offset, + const void *payload_data, size_t payload_len_bytes) +{ + /* Payload length must be a multiple of 4 */ + KUNIT_ASSERT_EQ(builder->test_priv->test, payload_len_bytes % 4, 0); + + cs_dsp_mock_bin_add_raw_block(builder, alg_id, alg_ver, + mem_region, reg_addr_offset, + payload_data, payload_len_bytes); +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_add_patch, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +/** + * cs_dsp_mock_bin_init() - Initialize a struct cs_dsp_mock_bin_builder. + * + * @priv: Pointer to struct cs_dsp_test. + * @format_version: Required bin format version. + * @fw_version: Firmware version to put in bin file. + * + * Return: Pointer to created struct cs_dsp_mock_bin_builder. + */ +struct cs_dsp_mock_bin_builder *cs_dsp_mock_bin_init(struct cs_dsp_test *priv, + int format_version, + unsigned int fw_version) +{ + struct cs_dsp_mock_bin_builder *builder; + struct wmfw_coeff_hdr *hdr; + + KUNIT_ASSERT_LE(priv->test, format_version, 0xff); + KUNIT_ASSERT_LE(priv->test, fw_version, 0xffffff); + + builder = kunit_kzalloc(priv->test, sizeof(*builder), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(priv->test, builder); + builder->test_priv = priv; + + builder->buf = vmalloc(CS_DSP_MOCK_BIN_BUF_SIZE); + KUNIT_ASSERT_NOT_NULL(priv->test, builder->buf); + kunit_add_action_or_reset(priv->test, vfree_action_wrapper, builder->buf); + + /* Create header */ + hdr = builder->buf; + memcpy(hdr->magic, "WMDR", sizeof(hdr->magic)); + hdr->len = cpu_to_le32(offsetof(struct wmfw_coeff_hdr, data)); + hdr->ver = cpu_to_le32(fw_version | (format_version << 24)); + hdr->core_ver = cpu_to_le32(((u32)priv->dsp->type << 24) | priv->dsp->rev); + + builder->write_p = hdr->data; + builder->bytes_used = offsetof(struct wmfw_coeff_hdr, data); + + return builder; +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_init, "FW_CS_DSP_KUNIT_TEST_UTILS"); diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c b/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c new file mode 100644 index 000000000000..95946fac5563 --- /dev/null +++ b/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c @@ -0,0 +1,725 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Mock DSP memory maps for cs_dsp KUnit tests. +// +// Copyright (C) 2024 Cirrus Logic, Inc. and +// Cirrus Logic International Semiconductor Ltd. 
+ +#include <kunit/test.h> +#include <linux/firmware/cirrus/cs_dsp.h> +#include <linux/firmware/cirrus/cs_dsp_test_utils.h> +#include <linux/firmware/cirrus/wmfw.h> +#include <linux/math.h> + +const struct cs_dsp_region cs_dsp_mock_halo_dsp1_regions[] = { + { .type = WMFW_HALO_PM_PACKED, .base = 0x3800000 }, + { .type = WMFW_HALO_XM_PACKED, .base = 0x2000000 }, + { .type = WMFW_HALO_YM_PACKED, .base = 0x2C00000 }, + { .type = WMFW_ADSP2_XM, .base = 0x2800000 }, + { .type = WMFW_ADSP2_YM, .base = 0x3400000 }, +}; +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_halo_dsp1_regions, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +/* List of sizes in bytes, for each entry above */ +const unsigned int cs_dsp_mock_halo_dsp1_region_sizes[] = { + 0x5000, /* PM_PACKED */ + 0x6000, /* XM_PACKED */ + 0x47F4, /* YM_PACKED */ + 0x8000, /* XM_UNPACKED_24 */ + 0x5FF8, /* YM_UNPACKED_24 */ + + 0 /* terminator */ +}; +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_halo_dsp1_region_sizes, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +const struct cs_dsp_region cs_dsp_mock_adsp2_32bit_dsp1_regions[] = { + { .type = WMFW_ADSP2_PM, .base = 0x080000 }, + { .type = WMFW_ADSP2_XM, .base = 0x0a0000 }, + { .type = WMFW_ADSP2_YM, .base = 0x0c0000 }, + { .type = WMFW_ADSP2_ZM, .base = 0x0e0000 }, +}; +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_32bit_dsp1_regions, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +/* List of sizes in bytes, for each entry above */ +const unsigned int cs_dsp_mock_adsp2_32bit_dsp1_region_sizes[] = { + 0x9000, /* PM */ + 0xa000, /* ZM */ + 0x2000, /* XM */ + 0x2000, /* YM */ + + 0 /* terminator */ +}; +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +const struct cs_dsp_region cs_dsp_mock_adsp2_16bit_dsp1_regions[] = { + { .type = WMFW_ADSP2_PM, .base = 0x100000 }, + { .type = WMFW_ADSP2_ZM, .base = 0x180000 }, + { .type = WMFW_ADSP2_XM, .base = 0x190000 }, + { .type = WMFW_ADSP2_YM, .base = 0x1a8000 }, +}; +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_16bit_dsp1_regions, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +/* List of sizes in bytes, for each entry above */ +const unsigned int cs_dsp_mock_adsp2_16bit_dsp1_region_sizes[] = { + 0x6000, /* PM */ + 0x800, /* ZM */ + 0x800, /* XM */ + 0x800, /* YM */ + + 0 /* terminator */ +}; +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +int cs_dsp_mock_count_regions(const unsigned int *region_sizes) +{ + int i; + + for (i = 0; region_sizes[i]; ++i) + ; + + return i; +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_count_regions, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +/** + * cs_dsp_mock_size_of_region() - Return size of given memory region. + * + * @dsp: Pointer to struct cs_dsp. + * @mem_type: Memory region type. + * + * Return: Size of region in bytes. + */ +unsigned int cs_dsp_mock_size_of_region(const struct cs_dsp *dsp, int mem_type) +{ + const unsigned int *sizes; + int i; + + if (dsp->mem == cs_dsp_mock_halo_dsp1_regions) + sizes = cs_dsp_mock_halo_dsp1_region_sizes; + else if (dsp->mem == cs_dsp_mock_adsp2_32bit_dsp1_regions) + sizes = cs_dsp_mock_adsp2_32bit_dsp1_region_sizes; + else if (dsp->mem == cs_dsp_mock_adsp2_16bit_dsp1_regions) + sizes = cs_dsp_mock_adsp2_16bit_dsp1_region_sizes; + else + return 0; + + for (i = 0; i < dsp->num_mems; ++i) { + if (dsp->mem[i].type == mem_type) + return sizes[i]; + } + + return 0; +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_size_of_region, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +/** + * cs_dsp_mock_base_addr_for_mem() - Base register address for memory region. + * + * @priv: Pointer to struct cs_dsp_test. 
+ * @mem_type: Memory region type. + * + * Return: Base register address of region. + */ +unsigned int cs_dsp_mock_base_addr_for_mem(struct cs_dsp_test *priv, int mem_type) +{ + int num_mems = priv->dsp->num_mems; + const struct cs_dsp_region *region = priv->dsp->mem; + int i; + + for (i = 0; i < num_mems; ++i) { + if (region[i].type == mem_type) + return region[i].base; + } + + KUNIT_FAIL(priv->test, "Unexpected region %d\n", mem_type); + + return 0; +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_base_addr_for_mem, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +/** + * cs_dsp_mock_reg_addr_inc_per_unpacked_word() - Unpacked register address increment per DSP word. + * + * @priv: Pointer to struct cs_dsp_test. + * + * Return: Amount by which register address increments to move to the next + * DSP word in unpacked XM/YM/ZM. + */ +unsigned int cs_dsp_mock_reg_addr_inc_per_unpacked_word(struct cs_dsp_test *priv) +{ + switch (priv->dsp->type) { + case WMFW_ADSP2: + return 2; /* two 16-bit register indexes per XM/YM/ZM word */ + case WMFW_HALO: + return 4; /* one byte-addressed 32-bit register per XM/YM/ZM word */ + default: + KUNIT_FAIL(priv->test, "Unexpected DSP type\n"); + return -1; + } +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_reg_addr_inc_per_unpacked_word, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +/** + * cs_dsp_mock_reg_block_length_bytes() - Number of bytes in an access block. + * + * @priv: Pointer to struct cs_dsp_test. + * @mem_type: Memory region type. + * + * Return: Total number of bytes in a group of registers forming the + * smallest bus access size (including any padding bits). For unpacked + * memory this is the number of registers containing one DSP word. + * For packed memory this is the number of registers in one packed + * access block. + */ +unsigned int cs_dsp_mock_reg_block_length_bytes(struct cs_dsp_test *priv, int mem_type) +{ + switch (priv->dsp->type) { + case WMFW_ADSP2: + switch (mem_type) { + case WMFW_ADSP2_PM: + return 3 * regmap_get_val_bytes(priv->dsp->regmap); + case WMFW_ADSP2_XM: + case WMFW_ADSP2_YM: + case WMFW_ADSP2_ZM: + return sizeof(u32); + default: + break; + } + break; + case WMFW_HALO: + switch (mem_type) { + case WMFW_ADSP2_XM: + case WMFW_ADSP2_YM: + return sizeof(u32); + case WMFW_HALO_PM_PACKED: + return 5 * sizeof(u32); + case WMFW_HALO_XM_PACKED: + case WMFW_HALO_YM_PACKED: + return 3 * sizeof(u32); + default: + break; + } + break; + default: + KUNIT_FAIL(priv->test, "Unexpected DSP type\n"); + return 0; + } + + KUNIT_FAIL(priv->test, "Unexpected mem type\n"); + + return 0; +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_reg_block_length_bytes, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +/** + * cs_dsp_mock_reg_block_length_registers() - Number of registers in an access block. + * + * @priv: Pointer to struct cs_dsp_test. + * @mem_type: Memory region type. + * + * Return: Total number of register forming the smallest bus access size. + * For unpacked memory this is the number of registers containing one + * DSP word. For packed memory this is the number of registers in one + * packed access block. + */ +unsigned int cs_dsp_mock_reg_block_length_registers(struct cs_dsp_test *priv, int mem_type) +{ + return cs_dsp_mock_reg_block_length_bytes(priv, mem_type) / + regmap_get_val_bytes(priv->dsp->regmap); +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_reg_block_length_registers, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +/** + * cs_dsp_mock_reg_block_length_dsp_words() - Number of dsp_words in an access block. + * + * @priv: Pointer to struct cs_dsp_test. + * @mem_type: Memory region type. 
+ * + * Return: Total number of DSP words in a group of registers forming the + * smallest bus access size. + */ +unsigned int cs_dsp_mock_reg_block_length_dsp_words(struct cs_dsp_test *priv, int mem_type) +{ + switch (priv->dsp->type) { + case WMFW_ADSP2: + switch (mem_type) { + case WMFW_ADSP2_PM: + return regmap_get_val_bytes(priv->dsp->regmap) / 2; + case WMFW_ADSP2_XM: + case WMFW_ADSP2_YM: + case WMFW_ADSP2_ZM: + return 1; + default: + break; + } + break; + case WMFW_HALO: + switch (mem_type) { + case WMFW_ADSP2_XM: + case WMFW_ADSP2_YM: + return 1; + case WMFW_HALO_PM_PACKED: + case WMFW_HALO_XM_PACKED: + case WMFW_HALO_YM_PACKED: + return 4; + default: + break; + } + break; + default: + KUNIT_FAIL(priv->test, "Unexpected DSP type\n"); + return 0; + } + + KUNIT_FAIL(priv->test, "Unexpected mem type\n"); + + return 0; +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_reg_block_length_dsp_words, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +/** + * cs_dsp_mock_has_zm() - DSP has ZM + * + * @priv: Pointer to struct cs_dsp_test. + * + * Return: True if DSP has ZM. + */ +bool cs_dsp_mock_has_zm(struct cs_dsp_test *priv) +{ + switch (priv->dsp->type) { + case WMFW_ADSP2: + return true; + default: + return false; + } +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_has_zm, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +/** + * cs_dsp_mock_packed_to_unpacked_mem_type() - Unpacked region that is + * the same memory as a packed region. + * + * @packed_mem_type: Type of packed memory region. + * + * Return: unpacked type that is the same memory as packed_mem_type. + */ +int cs_dsp_mock_packed_to_unpacked_mem_type(int packed_mem_type) +{ + switch (packed_mem_type) { + case WMFW_HALO_XM_PACKED: + return WMFW_ADSP2_XM; + case WMFW_HALO_YM_PACKED: + return WMFW_ADSP2_YM; + default: + return -1; + } +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_packed_to_unpacked_mem_type, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +/** + * cs_dsp_mock_num_dsp_words_to_num_packed_regs() - Number of DSP words + * to number of packed registers. + * + * @num_dsp_words: Number of DSP words. + * + * Convert number of DSP words to number of packed registers rounded + * down to the nearest register. + * + * Return: Number of packed registers. + */ +unsigned int cs_dsp_mock_num_dsp_words_to_num_packed_regs(unsigned int num_dsp_words) +{ + /* There are 3 registers for every 4 packed words */ + return (num_dsp_words * 3) / 4; +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_num_dsp_words_to_num_packed_regs, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +static const struct wmfw_halo_id_hdr cs_dsp_mock_halo_xm_hdr = { + .fw = { + .core_id = cpu_to_be32(WMFW_HALO << 16), + .block_rev = cpu_to_be32(3 << 16), + .vendor_id = cpu_to_be32(0x2), + .id = cpu_to_be32(0xabcdef), + .ver = cpu_to_be32(0x090101), + }, + + /* + * Leave enough space for this header and 40 algorithm descriptors. + * base and size are counted in DSP words. + */ + .xm_base = cpu_to_be32(((sizeof(struct wmfw_halo_id_hdr) + + (40 * sizeof(struct wmfw_halo_alg_hdr))) + / 4) * 3), + .xm_size = cpu_to_be32(0x20), + + /* Allocate a dummy word of YM */ + .ym_base = cpu_to_be32(0), + .ym_size = cpu_to_be32(1), + + .n_algs = 0, +}; + +static const struct wmfw_adsp2_id_hdr cs_dsp_mock_adsp2_xm_hdr = { + .fw = { + .core_id = cpu_to_be32(WMFW_ADSP2 << 16), + .core_rev = cpu_to_be32(2 << 16), + .id = cpu_to_be32(0xabcdef), + .ver = cpu_to_be32(0x090101), + }, + + /* + * Leave enough space for this header and 40 algorithm descriptors. + * base and size are counted in DSP words. 
+ */ + .xm = cpu_to_be32(((sizeof(struct wmfw_adsp2_id_hdr) + + (40 * sizeof(struct wmfw_adsp2_alg_hdr))) + / 4) * 3), + + .ym = cpu_to_be32(0), + .zm = cpu_to_be32(0), + + .n_algs = 0, +}; + +/** + * cs_dsp_mock_xm_header_get_alg_base_in_words() - Algorithm base offset in DSP words. + * + * @priv: Pointer to struct cs_dsp_test. + * @alg_id: Algorithm ID. + * @mem_type: Memory region type. + * + * Lookup an algorithm in the XM header and return the base offset in + * DSP words of the algorithm data in the requested memory region. + * + * Return: Offset in DSP words. + */ +unsigned int cs_dsp_mock_xm_header_get_alg_base_in_words(struct cs_dsp_test *priv, + unsigned int alg_id, + int mem_type) +{ + unsigned int xm = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM); + union { + struct wmfw_adsp2_alg_hdr adsp2; + struct wmfw_halo_alg_hdr halo; + } alg; + unsigned int alg_hdr_addr; + unsigned int val, xm_base = 0, ym_base = 0, zm_base = 0; + int ret; + + switch (priv->dsp->type) { + case WMFW_ADSP2: + alg_hdr_addr = xm + (sizeof(struct wmfw_adsp2_id_hdr) / 2); + for (;; alg_hdr_addr += sizeof(alg.adsp2) / 2) { + ret = regmap_read(priv->dsp->regmap, alg_hdr_addr, &val); + KUNIT_ASSERT_GE(priv->test, ret, 0); + KUNIT_ASSERT_NE(priv->test, val, 0xbedead); + ret = regmap_raw_read(priv->dsp->regmap, alg_hdr_addr, + &alg.adsp2, sizeof(alg.adsp2)); + KUNIT_ASSERT_GE(priv->test, ret, 0); + if (be32_to_cpu(alg.adsp2.alg.id) == alg_id) { + xm_base = be32_to_cpu(alg.adsp2.xm); + ym_base = be32_to_cpu(alg.adsp2.ym); + zm_base = be32_to_cpu(alg.adsp2.zm); + break; + } + } + break; + case WMFW_HALO: + alg_hdr_addr = xm + sizeof(struct wmfw_halo_id_hdr); + for (;; alg_hdr_addr += sizeof(alg.halo)) { + ret = regmap_read(priv->dsp->regmap, alg_hdr_addr, &val); + KUNIT_ASSERT_GE(priv->test, ret, 0); + KUNIT_ASSERT_NE(priv->test, val, 0xbedead); + ret = regmap_raw_read(priv->dsp->regmap, alg_hdr_addr, + &alg.halo, sizeof(alg.halo)); + KUNIT_ASSERT_GE(priv->test, ret, 0); + if (be32_to_cpu(alg.halo.alg.id) == alg_id) { + xm_base = be32_to_cpu(alg.halo.xm_base); + ym_base = be32_to_cpu(alg.halo.ym_base); + break; + } + } + break; + default: + KUNIT_FAIL(priv->test, "Unexpected DSP type %d\n", priv->dsp->type); + return 0; + } + + switch (mem_type) { + case WMFW_ADSP2_XM: + case WMFW_HALO_XM_PACKED: + return xm_base; + case WMFW_ADSP2_YM: + case WMFW_HALO_YM_PACKED: + return ym_base; + case WMFW_ADSP2_ZM: + return zm_base; + default: + KUNIT_FAIL(priv->test, "Bad mem_type\n"); + return 0; + } +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_get_alg_base_in_words, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +/** + * cs_dsp_mock_xm_header_get_fw_version() - Firmware version. + * + * @header: Pointer to struct cs_dsp_mock_xm_header. + * + * Return: Firmware version word value. + */ +unsigned int cs_dsp_mock_xm_header_get_fw_version(struct cs_dsp_mock_xm_header *header) +{ + const struct wmfw_id_hdr *adsp2_hdr; + const struct wmfw_v3_id_hdr *halo_hdr; + + switch (header->test_priv->dsp->type) { + case WMFW_ADSP2: + adsp2_hdr = header->blob_data; + return be32_to_cpu(adsp2_hdr->ver); + case WMFW_HALO: + halo_hdr = header->blob_data; + return be32_to_cpu(halo_hdr->ver); + default: + KUNIT_FAIL(header->test_priv->test, NULL); + return 0; + } +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_get_fw_version, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +/** + * cs_dsp_mock_xm_header_drop_from_regmap_cache() - Drop XM header from regmap cache. + * + * @priv: Pointer to struct cs_dsp_test. 
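+ *
+ * One possible pairing, so that the mock header's registers do not remain
+ * as dirty cache entries when the test later checks for unexpected writes
+ * (a sketch; whether and when a test drops the header depends on what it
+ * is asserting):
+ *
+ *	cs_dsp_mock_xm_header_write_to_regmap(xm_header);
+ *	cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);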
+ */ +void cs_dsp_mock_xm_header_drop_from_regmap_cache(struct cs_dsp_test *priv) +{ + unsigned int xm = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM); + unsigned int bytes; + __be32 num_algs_be32; + unsigned int num_algs; + + switch (priv->dsp->type) { + case WMFW_ADSP2: + /* + * Could be one 32-bit register or two 16-bit registers. + * A raw read will read the requested number of bytes. + */ + KUNIT_ASSERT_GE(priv->test, 0, + regmap_raw_read(priv->dsp->regmap, + xm + + (offsetof(struct wmfw_adsp2_id_hdr, n_algs) / 2), + &num_algs_be32, sizeof(num_algs_be32))); + num_algs = be32_to_cpu(num_algs_be32); + bytes = sizeof(struct wmfw_adsp2_id_hdr) + + (num_algs * sizeof(struct wmfw_adsp2_alg_hdr)) + + 4 /* terminator word */; + + regcache_drop_region(priv->dsp->regmap, xm, xm + (bytes / 2) - 1); + break; + case WMFW_HALO: + KUNIT_ASSERT_GE(priv->test, 0, + regmap_read(priv->dsp->regmap, + xm + offsetof(struct wmfw_halo_id_hdr, n_algs), + &num_algs)); + bytes = sizeof(struct wmfw_halo_id_hdr) + + (num_algs * sizeof(struct wmfw_halo_alg_hdr)) + + 4 /* terminator word */; + + regcache_drop_region(priv->dsp->regmap, xm, xm + bytes - 4); + break; + default: + break; + } +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_drop_from_regmap_cache, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +static void cs_dsp_mock_xm_header_add_adsp2_algs(struct cs_dsp_mock_xm_header *builder, + const struct cs_dsp_mock_alg_def *algs, + size_t num_algs) +{ + struct wmfw_adsp2_id_hdr *hdr = builder->blob_data; + unsigned int next_free_xm_word, next_free_ym_word, next_free_zm_word; + + next_free_xm_word = be32_to_cpu(hdr->xm); + next_free_ym_word = be32_to_cpu(hdr->ym); + next_free_zm_word = be32_to_cpu(hdr->zm); + + /* Set num_algs in XM header. */ + hdr->n_algs = cpu_to_be32(num_algs); + + /* Create algorithm descriptor list */ + struct wmfw_adsp2_alg_hdr *alg_info = + (struct wmfw_adsp2_alg_hdr *)(&hdr[1]); + + for (; num_algs > 0; num_algs--, algs++, alg_info++) { + unsigned int alg_xm_last, alg_ym_last, alg_zm_last; + + alg_info->alg.id = cpu_to_be32(algs->id); + alg_info->alg.ver = cpu_to_be32(algs->ver); + alg_info->xm = cpu_to_be32(algs->xm_base_words); + alg_info->ym = cpu_to_be32(algs->ym_base_words); + alg_info->zm = cpu_to_be32(algs->zm_base_words); + + /* Check if we need to auto-allocate base addresses */ + if (!alg_info->xm && algs->xm_size_words) + alg_info->xm = cpu_to_be32(next_free_xm_word); + + if (!alg_info->ym && algs->ym_size_words) + alg_info->ym = cpu_to_be32(next_free_ym_word); + + if (!alg_info->zm && algs->zm_size_words) + alg_info->zm = cpu_to_be32(next_free_zm_word); + + alg_xm_last = be32_to_cpu(alg_info->xm) + algs->xm_size_words - 1; + if (alg_xm_last > next_free_xm_word) + next_free_xm_word = alg_xm_last; + + alg_ym_last = be32_to_cpu(alg_info->ym) + algs->ym_size_words - 1; + if (alg_ym_last > next_free_ym_word) + next_free_ym_word = alg_ym_last; + + alg_zm_last = be32_to_cpu(alg_info->zm) + algs->zm_size_words - 1; + if (alg_zm_last > next_free_zm_word) + next_free_zm_word = alg_zm_last; + } + + /* Write list terminator */ + *(__be32 *)(alg_info) = cpu_to_be32(0xbedead); +} + +static void cs_dsp_mock_xm_header_add_halo_algs(struct cs_dsp_mock_xm_header *builder, + const struct cs_dsp_mock_alg_def *algs, + size_t num_algs) +{ + struct wmfw_halo_id_hdr *hdr = builder->blob_data; + unsigned int next_free_xm_word, next_free_ym_word; + + /* Assume we're starting with bare header */ + next_free_xm_word = be32_to_cpu(hdr->xm_base) + be32_to_cpu(hdr->xm_size) - 1; + next_free_ym_word = 
be32_to_cpu(hdr->ym_base) + be32_to_cpu(hdr->ym_size) - 1; + + /* Set num_algs in XM header */ + hdr->n_algs = cpu_to_be32(num_algs); + + /* Create algorithm descriptor list */ + struct wmfw_halo_alg_hdr *alg_info = + (struct wmfw_halo_alg_hdr *)(&hdr[1]); + + for (; num_algs > 0; num_algs--, algs++, alg_info++) { + unsigned int alg_xm_last, alg_ym_last; + + alg_info->alg.id = cpu_to_be32(algs->id); + alg_info->alg.ver = cpu_to_be32(algs->ver); + alg_info->xm_base = cpu_to_be32(algs->xm_base_words); + alg_info->xm_size = cpu_to_be32(algs->xm_size_words); + alg_info->ym_base = cpu_to_be32(algs->ym_base_words); + alg_info->ym_size = cpu_to_be32(algs->ym_size_words); + + /* Check if we need to auto-allocate base addresses */ + if (!alg_info->xm_base && alg_info->xm_size) + alg_info->xm_base = cpu_to_be32(next_free_xm_word); + + if (!alg_info->ym_base && alg_info->ym_size) + alg_info->ym_base = cpu_to_be32(next_free_ym_word); + + alg_xm_last = be32_to_cpu(alg_info->xm_base) + be32_to_cpu(alg_info->xm_size) - 1; + if (alg_xm_last > next_free_xm_word) + next_free_xm_word = alg_xm_last; + + alg_ym_last = be32_to_cpu(alg_info->ym_base) + be32_to_cpu(alg_info->ym_size) - 1; + if (alg_ym_last > next_free_ym_word) + next_free_ym_word = alg_ym_last; + } + + /* Write list terminator */ + *(__be32 *)(alg_info) = cpu_to_be32(0xbedead); +} + +/** + * cs_dsp_mock_xm_header_write_to_regmap() - Write XM header to regmap. + * + * @header: Pointer to struct cs_dsp_mock_xm_header. + * + * The data in header is written to the XM addresses in the regmap. + * + * Return: 0 on success, else negative error code. + */ +int cs_dsp_mock_xm_header_write_to_regmap(struct cs_dsp_mock_xm_header *header) +{ + struct cs_dsp_test *priv = header->test_priv; + unsigned int reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM); + + /* + * One 32-bit word corresponds to one 32-bit unpacked XM word so the + * blob can be written directly to the regmap. + */ + return regmap_raw_write(priv->dsp->regmap, reg_addr, + header->blob_data, header->blob_size_bytes); +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_write_to_regmap, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +/** + * cs_dsp_create_mock_xm_header() - Create a dummy XM header. + * + * @priv: Pointer to struct cs_dsp_test. + * @algs: Pointer to array of struct cs_dsp_mock_alg_def listing the + * dummy algorithm entries to include in the XM header. + * @num_algs: Number of entries in the algs array. + * + * Return: Pointer to created struct cs_dsp_mock_xm_header. 
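+ *
+ * Illustrative sketch (the algorithm ID, version and sizes are arbitrary
+ * example values):
+ *
+ *	static const struct cs_dsp_mock_alg_def algs[] = {
+ *		{ .id = 0xfafa, .ver = 0x100000,
+ *		  .xm_size_words = 164, .ym_size_words = 16 },
+ *	};
+ *
+ *	xm_hdr = cs_dsp_create_mock_xm_header(priv, algs, ARRAY_SIZE(algs));
+ *	cs_dsp_mock_xm_header_write_to_regmap(xm_hdr);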
+ */ +struct cs_dsp_mock_xm_header *cs_dsp_create_mock_xm_header(struct cs_dsp_test *priv, + const struct cs_dsp_mock_alg_def *algs, + size_t num_algs) +{ + struct cs_dsp_mock_xm_header *builder; + size_t total_bytes_required; + const void *header; + size_t header_size_bytes; + + builder = kunit_kzalloc(priv->test, sizeof(*builder), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(priv->test, builder); + builder->test_priv = priv; + + switch (priv->dsp->type) { + case WMFW_ADSP2: + header = &cs_dsp_mock_adsp2_xm_hdr; + header_size_bytes = sizeof(cs_dsp_mock_adsp2_xm_hdr); + total_bytes_required = header_size_bytes + + (num_algs * sizeof(struct wmfw_adsp2_alg_hdr)) + + 4; /* terminator word */ + break; + case WMFW_HALO: + header = &cs_dsp_mock_halo_xm_hdr, + header_size_bytes = sizeof(cs_dsp_mock_halo_xm_hdr); + total_bytes_required = header_size_bytes + + (num_algs * sizeof(struct wmfw_halo_alg_hdr)) + + 4; /* terminator word */ + break; + default: + KUNIT_FAIL(priv->test, "%s unexpected DSP type %d\n", + __func__, priv->dsp->type); + return NULL; + } + + builder->blob_data = kunit_kzalloc(priv->test, total_bytes_required, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(priv->test, builder->blob_data); + builder->blob_size_bytes = total_bytes_required; + + memcpy(builder->blob_data, header, header_size_bytes); + + switch (priv->dsp->type) { + case WMFW_ADSP2: + cs_dsp_mock_xm_header_add_adsp2_algs(builder, algs, num_algs); + break; + case WMFW_HALO: + cs_dsp_mock_xm_header_add_halo_algs(builder, algs, num_algs); + break; + default: + break; + } + + return builder; +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_create_mock_xm_header, "FW_CS_DSP_KUNIT_TEST_UTILS"); diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_regmap.c b/drivers/firmware/cirrus/test/cs_dsp_mock_regmap.c new file mode 100644 index 000000000000..fb8e4a5d189a --- /dev/null +++ b/drivers/firmware/cirrus/test/cs_dsp_mock_regmap.c @@ -0,0 +1,367 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Mock regmap for cs_dsp KUnit tests. +// +// Copyright (C) 2024 Cirrus Logic, Inc. and +// Cirrus Logic International Semiconductor Ltd. 
+ +#include <kunit/test.h> +#include <linux/firmware/cirrus/cs_dsp.h> +#include <linux/firmware/cirrus/cs_dsp_test_utils.h> +#include <linux/firmware/cirrus/wmfw.h> +#include <linux/regmap.h> + +static int cs_dsp_mock_regmap_read(void *context, const void *reg_buf, + const size_t reg_size, void *val_buf, + size_t val_size) +{ + struct cs_dsp_test *priv = context; + + /* Should never get here because the regmap is cache-only */ + KUNIT_FAIL(priv->test, "Unexpected bus read @%#x", *(u32 *)reg_buf); + + return -EIO; +} + +static int cs_dsp_mock_regmap_gather_write(void *context, + const void *reg_buf, size_t reg_size, + const void *val_buf, size_t val_size) +{ + struct cs_dsp_test *priv = context; + + priv->saw_bus_write = true; + + /* Should never get here because the regmap is cache-only */ + KUNIT_FAIL(priv->test, "Unexpected bus gather_write @%#x", *(u32 *)reg_buf); + + return -EIO; +} + +static int cs_dsp_mock_regmap_write(void *context, const void *val_buf, size_t val_size) +{ + struct cs_dsp_test *priv = context; + + priv->saw_bus_write = true; + + /* Should never get here because the regmap is cache-only */ + KUNIT_FAIL(priv->test, "Unexpected bus write @%#x", *(u32 *)val_buf); + + return -EIO; +} + +static const struct regmap_bus cs_dsp_mock_regmap_bus = { + .read = cs_dsp_mock_regmap_read, + .write = cs_dsp_mock_regmap_write, + .gather_write = cs_dsp_mock_regmap_gather_write, + .reg_format_endian_default = REGMAP_ENDIAN_LITTLE, + .val_format_endian_default = REGMAP_ENDIAN_LITTLE, +}; + +static const struct reg_default adsp2_32bit_register_defaults[] = { + { 0xffe00, 0x0000 }, /* CONTROL */ + { 0xffe02, 0x0000 }, /* CLOCKING */ + { 0xffe04, 0x0001 }, /* STATUS1: RAM_RDY=1 */ + { 0xffe30, 0x0000 }, /* WDMW_CONFIG_1 */ + { 0xffe32, 0x0000 }, /* WDMA_CONFIG_2 */ + { 0xffe34, 0x0000 }, /* RDMA_CONFIG_1 */ + { 0xffe40, 0x0000 }, /* SCRATCH_0_1 */ + { 0xffe42, 0x0000 }, /* SCRATCH_2_3 */ +}; + +static const struct regmap_range adsp2_32bit_registers[] = { + regmap_reg_range(0x80000, 0x88ffe), /* PM */ + regmap_reg_range(0xa0000, 0xa9ffe), /* XM */ + regmap_reg_range(0xc0000, 0xc1ffe), /* YM */ + regmap_reg_range(0xe0000, 0xe1ffe), /* ZM */ + regmap_reg_range(0xffe00, 0xffe7c), /* CORE CTRL */ +}; + +const unsigned int cs_dsp_mock_adsp2_32bit_sysbase = 0xffe00; +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_32bit_sysbase, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +static const struct regmap_access_table adsp2_32bit_rw = { + .yes_ranges = adsp2_32bit_registers, + .n_yes_ranges = ARRAY_SIZE(adsp2_32bit_registers), +}; + +static const struct regmap_config cs_dsp_mock_regmap_adsp2_32bit = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 2, + .reg_format_endian = REGMAP_ENDIAN_LITTLE, + .val_format_endian = REGMAP_ENDIAN_BIG, + .wr_table = &adsp2_32bit_rw, + .rd_table = &adsp2_32bit_rw, + .max_register = 0xffe7c, + .reg_defaults = adsp2_32bit_register_defaults, + .num_reg_defaults = ARRAY_SIZE(adsp2_32bit_register_defaults), + .cache_type = REGCACHE_MAPLE, +}; + +static const struct reg_default adsp2_16bit_register_defaults[] = { + { 0x1100, 0x0000 }, /* CONTROL */ + { 0x1101, 0x0000 }, /* CLOCKING */ + { 0x1104, 0x0001 }, /* STATUS1: RAM_RDY=1 */ + { 0x1130, 0x0000 }, /* WDMW_CONFIG_1 */ + { 0x1131, 0x0000 }, /* WDMA_CONFIG_2 */ + { 0x1134, 0x0000 }, /* RDMA_CONFIG_1 */ + { 0x1140, 0x0000 }, /* SCRATCH_0 */ + { 0x1141, 0x0000 }, /* SCRATCH_1 */ + { 0x1142, 0x0000 }, /* SCRATCH_2 */ + { 0x1143, 0x0000 }, /* SCRATCH_3 */ +}; + +static const struct regmap_range adsp2_16bit_registers[] = { + 
regmap_reg_range(0x001100, 0x001143), /* CORE CTRL */ + regmap_reg_range(0x100000, 0x105fff), /* PM */ + regmap_reg_range(0x180000, 0x1807ff), /* ZM */ + regmap_reg_range(0x190000, 0x1947ff), /* XM */ + regmap_reg_range(0x1a8000, 0x1a97ff), /* YM */ +}; + +const unsigned int cs_dsp_mock_adsp2_16bit_sysbase = 0x001100; +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_16bit_sysbase, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +static const struct regmap_access_table adsp2_16bit_rw = { + .yes_ranges = adsp2_16bit_registers, + .n_yes_ranges = ARRAY_SIZE(adsp2_16bit_registers), +}; + +static const struct regmap_config cs_dsp_mock_regmap_adsp2_16bit = { + .reg_bits = 32, + .val_bits = 16, + .reg_stride = 1, + .reg_format_endian = REGMAP_ENDIAN_LITTLE, + .val_format_endian = REGMAP_ENDIAN_BIG, + .wr_table = &adsp2_16bit_rw, + .rd_table = &adsp2_16bit_rw, + .max_register = 0x1a97ff, + .reg_defaults = adsp2_16bit_register_defaults, + .num_reg_defaults = ARRAY_SIZE(adsp2_16bit_register_defaults), + .cache_type = REGCACHE_MAPLE, +}; + +static const struct reg_default halo_register_defaults[] = { + /* CORE */ + { 0x2b80010, 0 }, /* HALO_CORE_SOFT_RESET */ + { 0x2b805c0, 0 }, /* HALO_SCRATCH1 */ + { 0x2b805c8, 0 }, /* HALO_SCRATCH2 */ + { 0x2b805d0, 0 }, /* HALO_SCRATCH3 */ + { 0x2b805d8, 0 }, /* HALO_SCRATCH4 */ + { 0x2bc1000, 0 }, /* HALO_CCM_CORE_CONTROL */ + { 0x2bc7000, 0 }, /* HALO_WDT_CONTROL */ + + /* SYSINFO */ + { 0x25e2040, 0 }, /* HALO_AHBM_WINDOW_DEBUG_0 */ + { 0x25e2044, 0 }, /* HALO_AHBM_WINDOW_DEBUG_1 */ +}; + +static const struct regmap_range halo_readable_registers[] = { + regmap_reg_range(0x2000000, 0x2005fff), /* XM_PACKED */ + regmap_reg_range(0x25e0000, 0x25e004f), /* SYSINFO */ + regmap_reg_range(0x25e2000, 0x25e2047), /* SYSINFO */ + regmap_reg_range(0x2800000, 0x2807fff), /* XM */ + regmap_reg_range(0x2b80000, 0x2bc700b), /* CORE CTRL */ + regmap_reg_range(0x2c00000, 0x2c047f3), /* YM_PACKED */ + regmap_reg_range(0x3400000, 0x3405ff7), /* YM */ + regmap_reg_range(0x3800000, 0x3804fff), /* PM_PACKED */ +}; + +static const struct regmap_range halo_writeable_registers[] = { + regmap_reg_range(0x2000000, 0x2005fff), /* XM_PACKED */ + regmap_reg_range(0x2800000, 0x2807fff), /* XM */ + regmap_reg_range(0x2b80000, 0x2bc700b), /* CORE CTRL */ + regmap_reg_range(0x2c00000, 0x2c047f3), /* YM_PACKED */ + regmap_reg_range(0x3400000, 0x3405ff7), /* YM */ + regmap_reg_range(0x3800000, 0x3804fff), /* PM_PACKED */ +}; + +const unsigned int cs_dsp_mock_halo_core_base = 0x2b80000; +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_halo_core_base, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +const unsigned int cs_dsp_mock_halo_sysinfo_base = 0x25e0000; +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_halo_sysinfo_base, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +static const struct regmap_access_table halo_readable = { + .yes_ranges = halo_readable_registers, + .n_yes_ranges = ARRAY_SIZE(halo_readable_registers), +}; + +static const struct regmap_access_table halo_writeable = { + .yes_ranges = halo_writeable_registers, + .n_yes_ranges = ARRAY_SIZE(halo_writeable_registers), +}; + +static const struct regmap_config cs_dsp_mock_regmap_halo = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .reg_format_endian = REGMAP_ENDIAN_LITTLE, + .val_format_endian = REGMAP_ENDIAN_BIG, + .wr_table = &halo_writeable, + .rd_table = &halo_readable, + .max_register = 0x3804ffc, + .reg_defaults = halo_register_defaults, + .num_reg_defaults = ARRAY_SIZE(halo_register_defaults), + .cache_type = REGCACHE_MAPLE, +}; + +/** + * cs_dsp_mock_regmap_drop_range() - drop a range of
registers from the cache. + * + * @priv: Pointer to struct cs_dsp_test object. + * @first_reg: Address of first register to drop. + * @last_reg: Address of last register to drop. + */ +void cs_dsp_mock_regmap_drop_range(struct cs_dsp_test *priv, + unsigned int first_reg, unsigned int last_reg) +{ + regcache_drop_region(priv->dsp->regmap, first_reg, last_reg); +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_drop_range, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +/** + * cs_dsp_mock_regmap_drop_regs() - drop a number of registers from the cache. + * + * @priv: Pointer to struct cs_dsp_test object. + * @first_reg: Address of first register to drop. + * @num_regs: Number of registers to drop. + */ +void cs_dsp_mock_regmap_drop_regs(struct cs_dsp_test *priv, + unsigned int first_reg, size_t num_regs) +{ + int stride = regmap_get_reg_stride(priv->dsp->regmap); + unsigned int last = first_reg + (stride * (num_regs - 1)); + + cs_dsp_mock_regmap_drop_range(priv, first_reg, last); +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_drop_regs, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +/** + * cs_dsp_mock_regmap_drop_bytes() - drop a number of bytes from the cache. + * + * @priv: Pointer to struct cs_dsp_test object. + * @first_reg: Address of first register to drop. + * @num_bytes: Number of bytes to drop from the cache. Will be rounded + * down to a whole number of registers. Trailing bytes that + * are not a multiple of the register size will not be dropped. + * (This is intended to help detect math errors in test code.) + */ +void cs_dsp_mock_regmap_drop_bytes(struct cs_dsp_test *priv, + unsigned int first_reg, size_t num_bytes) +{ + size_t num_regs = num_bytes / regmap_get_val_bytes(priv->dsp->regmap); + + cs_dsp_mock_regmap_drop_regs(priv, first_reg, num_regs); +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_drop_bytes, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +/** + * cs_dsp_mock_regmap_drop_system_regs() - Drop DSP system registers from the cache. + * + * @priv: Pointer to struct cs_dsp_test object. + * + * Drops all DSP system registers from the regmap cache. + */ +void cs_dsp_mock_regmap_drop_system_regs(struct cs_dsp_test *priv) +{ + switch (priv->dsp->type) { + case WMFW_ADSP2: + if (priv->dsp->base) { + regcache_drop_region(priv->dsp->regmap, + priv->dsp->base, + priv->dsp->base + 0x7c); + } + return; + case WMFW_HALO: + if (priv->dsp->base) { + regcache_drop_region(priv->dsp->regmap, + priv->dsp->base, + priv->dsp->base + 0x47000); + } + + /* sysinfo registers are read-only so don't drop them */ + return; + default: + return; + } +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_drop_system_regs, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +/** + * cs_dsp_mock_regmap_is_dirty() - Test for dirty registers in the cache. + * + * @priv: Pointer to struct cs_dsp_test object. + * @drop_system_regs: If true the DSP system regs will be dropped from + * the cache before checking for dirty. + * + * All registers that are expected to be written must have been dropped + * from the cache (DSP system registers can be dropped by passing + * drop_system_regs == true). If any unexpected registers were written + * there will still be dirty entries in the cache and a cache sync will + * cause a write. + * + * Returns: true if there were dirty entries, false if not. 
+ */ +bool cs_dsp_mock_regmap_is_dirty(struct cs_dsp_test *priv, bool drop_system_regs) +{ + if (drop_system_regs) + cs_dsp_mock_regmap_drop_system_regs(priv); + + priv->saw_bus_write = false; + regcache_cache_only(priv->dsp->regmap, false); + regcache_sync(priv->dsp->regmap); + regcache_cache_only(priv->dsp->regmap, true); + + return priv->saw_bus_write; +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_is_dirty, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +/** + * cs_dsp_mock_regmap_init() - Initialize a mock regmap. + * + * @priv: Pointer to struct cs_dsp_test object. This must have a + * valid pointer to a struct cs_dsp in which the type and + * rev fields are set to the type of DSP to be simulated. + * + * On success the priv->dsp->regmap will point to the created + * regmap instance. + * + * Return: zero on success, else negative error code. + */ +int cs_dsp_mock_regmap_init(struct cs_dsp_test *priv) +{ + const struct regmap_config *config; + int ret; + + switch (priv->dsp->type) { + case WMFW_HALO: + config = &cs_dsp_mock_regmap_halo; + break; + case WMFW_ADSP2: + if (priv->dsp->rev == 0) + config = &cs_dsp_mock_regmap_adsp2_16bit; + else + config = &cs_dsp_mock_regmap_adsp2_32bit; + break; + default: + config = NULL; + break; + } + + priv->dsp->regmap = devm_regmap_init(priv->dsp->dev, + &cs_dsp_mock_regmap_bus, + priv, + config); + if (IS_ERR(priv->dsp->regmap)) { + ret = PTR_ERR(priv->dsp->regmap); + kunit_err(priv->test, "Failed to allocate register map: %d\n", ret); + return ret; + } + + /* Put regmap in cache-only so it accumulates the writes done by cs_dsp */ + regcache_cache_only(priv->dsp->regmap, true); + + return 0; +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_init, "FW_CS_DSP_KUNIT_TEST_UTILS"); diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_utils.c b/drivers/firmware/cirrus/test/cs_dsp_mock_utils.c new file mode 100644 index 000000000000..cbd0bf72b7de --- /dev/null +++ b/drivers/firmware/cirrus/test/cs_dsp_mock_utils.c @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Utility module for cs_dsp KUnit testing. +// +// Copyright (C) 2024 Cirrus Logic, Inc. and +// Cirrus Logic International Semiconductor Ltd. + +#include <linux/module.h> + +MODULE_DESCRIPTION("Utilities for Cirrus Logic DSP driver testing"); +MODULE_AUTHOR("Richard Fitzgerald <rf@opensource.cirrus.com>"); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS("FW_CS_DSP"); diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c b/drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c new file mode 100644 index 000000000000..934d40a4d709 --- /dev/null +++ b/drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c @@ -0,0 +1,477 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// wmfw file builder for cs_dsp KUnit tests. +// +// Copyright (C) 2024 Cirrus Logic, Inc. and +// Cirrus Logic International Semiconductor Ltd. 
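Before moving on to the wmfw builder: the cache-only regmap above is the heart of the verification strategy, so here is a minimal sketch of the intended pattern. It assumes a struct cs_dsp_test whose dsp type, rev and dev are already populated, the Halo mock layout (where XM starts at register 0x2800000 in the access tables above), and the test->priv convention used by the test cases later in this series; the register value is illustrative:

	static void example_expected_writes_check(struct kunit *test)
	{
		struct cs_dsp_test *priv = test->priv;
		unsigned int val;

		KUNIT_ASSERT_EQ(test, cs_dsp_mock_regmap_init(priv), 0);

		/* A write from code under test lands only in the cache */
		regmap_write(priv->dsp->regmap, 0x2800000, 0x123456);
		KUNIT_EXPECT_EQ(test, regmap_read(priv->dsp->regmap, 0x2800000, &val), 0);
		KUNIT_EXPECT_EQ(test, val, 0x123456);

		/*
		 * Drop the expected write; any write still left in the cache
		 * would make cs_dsp_mock_regmap_is_dirty() return true, and the
		 * sync it performs would trip KUNIT_FAIL() in the mock bus ops.
		 */
		cs_dsp_mock_regmap_drop_regs(priv, 0x2800000, 1);
		KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, false));
	}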
+ +#include <kunit/resource.h> +#include <kunit/test.h> +#include <linux/firmware/cirrus/cs_dsp.h> +#include <linux/firmware/cirrus/cs_dsp_test_utils.h> +#include <linux/firmware/cirrus/wmfw.h> +#include <linux/firmware.h> +#include <linux/math.h> +#include <linux/overflow.h> +#include <linux/string.h> +#include <linux/vmalloc.h> + +/* Buffer large enough for bin file content */ +#define CS_DSP_MOCK_WMFW_BUF_SIZE 131072 + +struct cs_dsp_mock_wmfw_builder { + struct cs_dsp_test *test_priv; + int format_version; + void *buf; + size_t buf_size_bytes; + void *write_p; + size_t bytes_used; + + void *alg_data_header; + unsigned int num_coeffs; +}; + +struct wmfw_adsp2_halo_header { + struct wmfw_header header; + struct wmfw_adsp2_sizes sizes; + struct wmfw_footer footer; +} __packed; + +struct wmfw_long_string { + __le16 len; + u8 data[] __nonstring __counted_by(len); +} __packed; + +struct wmfw_short_string { + u8 len; + u8 data[] __nonstring __counted_by(len); +} __packed; + +KUNIT_DEFINE_ACTION_WRAPPER(vfree_action_wrapper, vfree, void *) + +/** + * cs_dsp_mock_wmfw_format_version() - Return format version. + * + * @builder: Pointer to struct cs_dsp_mock_wmfw_builder. + * + * Return: Format version. + */ +int cs_dsp_mock_wmfw_format_version(struct cs_dsp_mock_wmfw_builder *builder) +{ + return builder->format_version; +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_format_version, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +/** + * cs_dsp_mock_wmfw_get_firmware() - Get struct firmware wrapper for data. + * + * @builder: Pointer to struct cs_dsp_mock_wmfw_builder. + * + * Return: Pointer to a struct firmware wrapper for the data. + */ +struct firmware *cs_dsp_mock_wmfw_get_firmware(struct cs_dsp_mock_wmfw_builder *builder) +{ + struct firmware *fw; + + if (!builder) + return NULL; + + fw = kunit_kzalloc(builder->test_priv->test, sizeof(*fw), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(builder->test_priv->test, fw); + + fw->data = builder->buf; + fw->size = builder->bytes_used; + + return fw; +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_get_firmware, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +/** + * cs_dsp_mock_wmfw_add_raw_block() - Add a block to the wmfw file. + * + * @builder: Pointer to struct cs_dsp_mock_bin_builder. + * @block_type: Block type. + * @offset: Offset. + * @payload_data: Pointer to buffer containing the payload data, + * or NULL if no data. + * @payload_len_bytes: Length of payload data in bytes, or zero. + */ +void cs_dsp_mock_wmfw_add_raw_block(struct cs_dsp_mock_wmfw_builder *builder, + int block_type, unsigned int offset, + const void *payload_data, size_t payload_len_bytes) +{ + struct wmfw_region *header = builder->write_p; + unsigned int bytes_needed = struct_size_t(struct wmfw_region, data, payload_len_bytes); + + KUNIT_ASSERT_TRUE(builder->test_priv->test, + (builder->write_p + bytes_needed) < + (builder->buf + CS_DSP_MOCK_WMFW_BUF_SIZE)); + + header->offset = cpu_to_le32(offset | (block_type << 24)); + header->len = cpu_to_le32(payload_len_bytes); + if (payload_len_bytes > 0) + memcpy(header->data, payload_data, payload_len_bytes); + + builder->write_p += bytes_needed; + builder->bytes_used += bytes_needed; +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_add_raw_block, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +/** + * cs_dsp_mock_wmfw_add_info() - Add an info block to the wmfw file. + * + * @builder: Pointer to struct cs_dsp_mock_bin_builder. + * @info: Pointer to info string to be copied into the file. + * + * The string will be padded to a length that is a multiple of 4 bytes. 
+ */ +void cs_dsp_mock_wmfw_add_info(struct cs_dsp_mock_wmfw_builder *builder, + const char *info) +{ + size_t info_len = strlen(info); + char *tmp = NULL; + + if (info_len % 4) { + /* Create a padded string with length a multiple of 4 */ + info_len = round_up(info_len, 4); + tmp = kunit_kzalloc(builder->test_priv->test, info_len, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(builder->test_priv->test, tmp); + memcpy(tmp, info, info_len); + info = tmp; + } + + cs_dsp_mock_wmfw_add_raw_block(builder, WMFW_INFO_TEXT, 0, info, info_len); + kunit_kfree(builder->test_priv->test, tmp); +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_add_info, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +/** + * cs_dsp_mock_wmfw_add_data_block() - Add a data block to the wmfw file. + * + * @builder: Pointer to struct cs_dsp_mock_bin_builder. + * @mem_region: Memory region for the block. + * @mem_offset_dsp_words: Offset to start of destination in DSP words. + * @payload_data: Pointer to buffer containing the payload data. + * @payload_len_bytes: Length of payload data in bytes. + */ +void cs_dsp_mock_wmfw_add_data_block(struct cs_dsp_mock_wmfw_builder *builder, + int mem_region, unsigned int mem_offset_dsp_words, + const void *payload_data, size_t payload_len_bytes) +{ + /* Blob payload length must be a multiple of 4 */ + KUNIT_ASSERT_EQ(builder->test_priv->test, payload_len_bytes % 4, 0); + + cs_dsp_mock_wmfw_add_raw_block(builder, mem_region, mem_offset_dsp_words, + payload_data, payload_len_bytes); +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_add_data_block, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +void cs_dsp_mock_wmfw_start_alg_info_block(struct cs_dsp_mock_wmfw_builder *builder, + unsigned int alg_id, + const char *name, + const char *description) +{ + struct wmfw_region *rgn = builder->write_p; + struct wmfw_adsp_alg_data *v1; + struct wmfw_short_string *shortstring; + struct wmfw_long_string *longstring; + size_t bytes_needed, name_len, description_len; + int offset; + + KUNIT_ASSERT_LE(builder->test_priv->test, alg_id, 0xffffff); + + /* Bytes needed for region header */ + bytes_needed = offsetof(struct wmfw_region, data); + + builder->alg_data_header = builder->write_p; + builder->num_coeffs = 0; + + switch (builder->format_version) { + case 0: + KUNIT_FAIL(builder->test_priv->test, "wmfwV0 does not have alg blocks\n"); + return; + case 1: + bytes_needed += offsetof(struct wmfw_adsp_alg_data, data); + KUNIT_ASSERT_TRUE(builder->test_priv->test, + (builder->write_p + bytes_needed) < + (builder->buf + CS_DSP_MOCK_WMFW_BUF_SIZE)); + + memset(builder->write_p, 0, bytes_needed); + + /* Create region header */ + rgn->offset = cpu_to_le32(WMFW_ALGORITHM_DATA << 24); + + /* Create algorithm entry */ + v1 = (struct wmfw_adsp_alg_data *)&rgn->data[0]; + v1->id = cpu_to_le32(alg_id); + if (name) + strscpy(v1->name, name, sizeof(v1->name)); + + if (description) + strscpy(v1->descr, description, sizeof(v1->descr)); + break; + default: + name_len = 0; + description_len = 0; + + if (name) + name_len = strlen(name); + + if (description) + description_len = strlen(description); + + bytes_needed += sizeof(__le32); /* alg id */ + bytes_needed += round_up(name_len + sizeof(u8), sizeof(__le32)); + bytes_needed += round_up(description_len + sizeof(__le16), sizeof(__le32)); + bytes_needed += sizeof(__le32); /* coeff count */ + + KUNIT_ASSERT_TRUE(builder->test_priv->test, + (builder->write_p + bytes_needed) < + (builder->buf + CS_DSP_MOCK_WMFW_BUF_SIZE)); + + memset(builder->write_p, 0, bytes_needed); + + /* Create region header */ + rgn->offset = 
cpu_to_le32(WMFW_ALGORITHM_DATA << 24); + + /* Create algorithm entry */ + *(__force __le32 *)&rgn->data[0] = cpu_to_le32(alg_id); + + shortstring = (struct wmfw_short_string *)&rgn->data[4]; + shortstring->len = name_len; + + if (name_len) + memcpy(shortstring->data, name, name_len); + + /* Round up to next __le32 */ + offset = round_up(4 + struct_size_t(struct wmfw_short_string, data, name_len), + sizeof(__le32)); + + longstring = (struct wmfw_long_string *)&rgn->data[offset]; + longstring->len = cpu_to_le16(description_len); + + if (description_len) + memcpy(longstring->data, description, description_len); + break; + } + + builder->write_p += bytes_needed; + builder->bytes_used += bytes_needed; +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_start_alg_info_block, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +void cs_dsp_mock_wmfw_add_coeff_desc(struct cs_dsp_mock_wmfw_builder *builder, + const struct cs_dsp_mock_coeff_def *def) +{ + struct wmfw_adsp_coeff_data *v1; + struct wmfw_short_string *shortstring; + struct wmfw_long_string *longstring; + size_t bytes_needed, shortname_len, fullname_len, description_len; + __le32 *ple32; + + KUNIT_ASSERT_NOT_NULL(builder->test_priv->test, builder->alg_data_header); + + switch (builder->format_version) { + case 0: + return; + case 1: + bytes_needed = offsetof(struct wmfw_adsp_coeff_data, data); + KUNIT_ASSERT_TRUE(builder->test_priv->test, + (builder->write_p + bytes_needed) < + (builder->buf + CS_DSP_MOCK_WMFW_BUF_SIZE)); + + v1 = (struct wmfw_adsp_coeff_data *)builder->write_p; + memset(v1, 0, sizeof(*v1)); + v1->hdr.offset = cpu_to_le16(def->offset_dsp_words); + v1->hdr.type = cpu_to_le16(def->mem_type); + v1->hdr.size = cpu_to_le32(bytes_needed - sizeof(v1->hdr)); + v1->ctl_type = cpu_to_le16(def->type); + v1->flags = cpu_to_le16(def->flags); + v1->len = cpu_to_le32(def->length_bytes); + + if (def->fullname) + strscpy(v1->name, def->fullname, sizeof(v1->name)); + + if (def->description) + strscpy(v1->descr, def->description, sizeof(v1->descr)); + break; + default: + fullname_len = 0; + description_len = 0; + shortname_len = strlen(def->shortname); + + if (def->fullname) + fullname_len = strlen(def->fullname); + + if (def->description) + description_len = strlen(def->description); + + bytes_needed = sizeof(__le32) * 2; /* type, offset and size */ + bytes_needed += round_up(shortname_len + sizeof(u8), sizeof(__le32)); + bytes_needed += round_up(fullname_len + sizeof(u8), sizeof(__le32)); + bytes_needed += round_up(description_len + sizeof(__le16), sizeof(__le32)); + bytes_needed += sizeof(__le32) * 2; /* flags, type and length */ + KUNIT_ASSERT_TRUE(builder->test_priv->test, + (builder->write_p + bytes_needed) < + (builder->buf + CS_DSP_MOCK_WMFW_BUF_SIZE)); + + ple32 = (__force __le32 *)builder->write_p; + *ple32++ = cpu_to_le32(def->offset_dsp_words | (def->mem_type << 16)); + *ple32++ = cpu_to_le32(bytes_needed - sizeof(__le32) - sizeof(__le32)); + + shortstring = (__force struct wmfw_short_string *)ple32; + shortstring->len = shortname_len; + memcpy(shortstring->data, def->shortname, shortname_len); + + /* Round up to next __le32 multiple */ + ple32 += round_up(struct_size_t(struct wmfw_short_string, data, shortname_len), + sizeof(*ple32)) / sizeof(*ple32); + + shortstring = (__force struct wmfw_short_string *)ple32; + shortstring->len = fullname_len; + memcpy(shortstring->data, def->fullname, fullname_len); + + /* Round up to next __le32 multiple */ + ple32 += round_up(struct_size_t(struct wmfw_short_string, data, fullname_len), + sizeof(*ple32)) / 
sizeof(*ple32); + + longstring = (__force struct wmfw_long_string *)ple32; + longstring->len = cpu_to_le16(description_len); + memcpy(longstring->data, def->description, description_len); + + /* Round up to next __le32 multiple */ + ple32 += round_up(struct_size_t(struct wmfw_long_string, data, description_len), + sizeof(*ple32)) / sizeof(*ple32); + + *ple32++ = cpu_to_le32(def->type | (def->flags << 16)); + *ple32 = cpu_to_le32(def->length_bytes); + break; + } + + builder->write_p += bytes_needed; + builder->bytes_used += bytes_needed; + builder->num_coeffs++; +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_add_coeff_desc, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +void cs_dsp_mock_wmfw_end_alg_info_block(struct cs_dsp_mock_wmfw_builder *builder) +{ + struct wmfw_region *rgn = builder->alg_data_header; + struct wmfw_adsp_alg_data *v1; + const struct wmfw_short_string *shortstring; + const struct wmfw_long_string *longstring; + size_t offset; + + KUNIT_ASSERT_NOT_NULL(builder->test_priv->test, rgn); + + /* Fill in data size */ + rgn->len = cpu_to_le32((u8 *)builder->write_p - (u8 *)rgn->data); + + /* Fill in coefficient count */ + switch (builder->format_version) { + case 0: + return; + case 1: + v1 = (struct wmfw_adsp_alg_data *)&rgn->data[0]; + v1->ncoeff = cpu_to_le32(builder->num_coeffs); + break; + default: + offset = 4; /* skip alg id */ + + /* Get name length and round up to __le32 multiple */ + shortstring = (const struct wmfw_short_string *)&rgn->data[offset]; + offset += round_up(struct_size_t(struct wmfw_short_string, data, shortstring->len), + sizeof(__le32)); + + /* Get description length and round up to __le32 multiple */ + longstring = (const struct wmfw_long_string *)&rgn->data[offset]; + offset += round_up(struct_size_t(struct wmfw_long_string, data, + le16_to_cpu(longstring->len)), + sizeof(__le32)); + + *(__force __le32 *)&rgn->data[offset] = cpu_to_le32(builder->num_coeffs); + break; + } + + builder->alg_data_header = NULL; +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_end_alg_info_block, "FW_CS_DSP_KUNIT_TEST_UTILS"); + +static void cs_dsp_init_adsp2_halo_wmfw(struct cs_dsp_mock_wmfw_builder *builder) +{ + struct wmfw_adsp2_halo_header *hdr = builder->buf; + const struct cs_dsp *dsp = builder->test_priv->dsp; + + memcpy(hdr->header.magic, "WMFW", sizeof(hdr->header.magic)); + hdr->header.len = cpu_to_le32(sizeof(*hdr)); + hdr->header.ver = builder->format_version; + hdr->header.core = dsp->type; + hdr->header.rev = cpu_to_le16(dsp->rev); + + hdr->sizes.pm = cpu_to_le32(cs_dsp_mock_size_of_region(dsp, WMFW_ADSP2_PM)); + hdr->sizes.xm = cpu_to_le32(cs_dsp_mock_size_of_region(dsp, WMFW_ADSP2_XM)); + hdr->sizes.ym = cpu_to_le32(cs_dsp_mock_size_of_region(dsp, WMFW_ADSP2_YM)); + + switch (dsp->type) { + case WMFW_ADSP2: + hdr->sizes.zm = cpu_to_le32(cs_dsp_mock_size_of_region(dsp, WMFW_ADSP2_ZM)); + break; + default: + break; + } + + builder->write_p = &hdr[1]; + builder->bytes_used += sizeof(*hdr); +} + +/** + * cs_dsp_mock_wmfw_init() - Initialize a struct cs_dsp_mock_wmfw_builder. + * + * @priv: Pointer to struct cs_dsp_test. + * @format_version: Required wmfw format version. + * + * Return: Pointer to created struct cs_dsp_mock_wmfw_builder. 
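+ * + * A minimal usage sketch (illustrative only; builder, fw, priv and the + * 4-byte-multiple payload buffer are example locals, not part of this API): + * + *   builder = cs_dsp_mock_wmfw_init(priv, 3); + *   cs_dsp_mock_wmfw_add_data_block(builder, WMFW_ADSP2_YM, 0, + *                                   payload, sizeof(payload)); + *   fw = cs_dsp_mock_wmfw_get_firmware(builder);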
+ */ +struct cs_dsp_mock_wmfw_builder *cs_dsp_mock_wmfw_init(struct cs_dsp_test *priv, + int format_version) +{ + struct cs_dsp_mock_wmfw_builder *builder; + + KUNIT_ASSERT_LE(priv->test, format_version, 0xff); + + /* If format version isn't given use the default for the target core */ + if (format_version < 0) { + switch (priv->dsp->type) { + case WMFW_ADSP2: + format_version = 2; + break; + default: + format_version = 3; + break; + } + } + + builder = kunit_kzalloc(priv->test, sizeof(*builder), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(priv->test, builder); + + builder->test_priv = priv; + builder->format_version = format_version; + + builder->buf = vmalloc(CS_DSP_MOCK_WMFW_BUF_SIZE); + KUNIT_ASSERT_NOT_NULL(priv->test, builder->buf); + kunit_add_action_or_reset(priv->test, vfree_action_wrapper, builder->buf); + + builder->buf_size_bytes = CS_DSP_MOCK_WMFW_BUF_SIZE; + + switch (priv->dsp->type) { + case WMFW_ADSP2: + case WMFW_HALO: + cs_dsp_init_adsp2_halo_wmfw(builder); + break; + default: + break; + } + + return builder; +} +EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_init, "FW_CS_DSP_KUNIT_TEST_UTILS"); diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_bin.c b/drivers/firmware/cirrus/test/cs_dsp_test_bin.c new file mode 100644 index 000000000000..163b7faecff4 --- /dev/null +++ b/drivers/firmware/cirrus/test/cs_dsp_test_bin.c @@ -0,0 +1,2556 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// KUnit tests for cs_dsp. +// +// Copyright (C) 2024 Cirrus Logic, Inc. and +// Cirrus Logic International Semiconductor Ltd. + +#include <kunit/device.h> +#include <kunit/resource.h> +#include <kunit/test.h> +#include <linux/build_bug.h> +#include <linux/firmware/cirrus/cs_dsp.h> +#include <linux/firmware/cirrus/cs_dsp_test_utils.h> +#include <linux/firmware/cirrus/wmfw.h> +#include <linux/firmware.h> +#include <linux/math.h> +#include <linux/random.h> +#include <linux/regmap.h> + +/* + * Test method is: + * + * 1) Create a mock regmap in cache-only mode so that all writes will be cached. + * 2) Create a XM header with an algorithm list in the cached regmap. + * 3) Create dummy wmfw file to satisfy cs_dsp. + * 4) Create bin file content. + * 5) Call cs_dsp_power_up() with the bin file. + * 6) Readback the cached value of registers that should have been written and + * check they have the correct value. + * 7) All the registers that are expected to have been written are dropped from + * the cache (including the XM header). This should leave the cache clean. + * 8) If the cache is still dirty there have been unexpected writes. + * + * There are multiple different schemes used for addressing across + * ADSP2 and Halo Core DSPs: + * + * dsp words: The addressing scheme used by the DSP, pointers and lengths + * in DSP memory use this. A memory region (XM, YM, ZM) is + * also required to create a unique DSP memory address. + * registers: Addresses in the register map. Older ADSP2 devices have + * 16-bit registers with an address stride of 1. Newer ADSP2 + * devices have 32-bit registers with an address stride of 2. + * Halo Core devices have 32-bit registers with a stride of 4. + * unpacked: Registers that have a 1:1 mapping to DSP words + * packed: Registers that pack multiple DSP words more efficiently into + * multiple 32-bit registers. Because of this the relationship + * between a packed _register_ address and the corresponding + * _dsp word_ address is different from unpacked registers. 
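+ * + * As a worked illustration of the packed scheme (example numbers only, not + * taken from a real firmware layout): with 4 DSP words packed into 3 + * consecutive 32-bit registers, DSP word index 8 of a packed region starts + * at packed register index (8 * 3) / 4 = 6, which with the Halo register + * stride of 4 is address offset 6 * 4 = 24 from the base of that packed + * region.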
+ * Packed registers can only be accessed as a group of + * multiple registers, therefore can only read/write a group + * of multiple DSP words. + * Packed registers only exist on Halo Core DSPs. + * + * Addresses can also be relative to the start of an algorithm, and this + * can be expressed in dsp words, register addresses, or bytes. + */ + +KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *) +KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *) + +struct cs_dsp_test_local { + struct cs_dsp_mock_bin_builder *bin_builder; + struct cs_dsp_mock_wmfw_builder *wmfw_builder; + struct firmware *wmfw; +}; + +struct bin_test_param { + const char *name; + int mem_type; + unsigned int offset_words; + int alg_idx; +}; + +static const struct cs_dsp_mock_alg_def bin_test_mock_algs[] = { + { + .id = 0xfafa, + .ver = 0x100000, + .xm_size_words = 164, + .ym_size_words = 164, + .zm_size_words = 164, + }, + { + .id = 0xfbfb, + .ver = 0x100000, + .xm_size_words = 99, + .ym_size_words = 99, + .zm_size_words = 99, + }, + { + .id = 0xc321, + .ver = 0x100000, + .xm_size_words = 120, + .ym_size_words = 120, + .zm_size_words = 120, + }, + { + .id = 0xb123, + .ver = 0x100000, + .xm_size_words = 96, + .ym_size_words = 96, + .zm_size_words = 96, + }, +}; + +/* + * Convert number of DSP words to number of packed registers rounded + * down to the nearest register. + * There are 3 registers for every 4 packed words. + */ +static unsigned int _num_words_to_num_packed_regs(unsigned int num_dsp_words) +{ + return (num_dsp_words * 3) / 4; +} + +/* bin file that patches a single DSP word */ +static void bin_patch_one_word(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + const struct bin_test_param *param = test->param_value; + unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + u32 reg_val, payload_data; + unsigned int alg_base_words, reg_addr; + struct firmware *fw; + + get_random_bytes(&payload_data, sizeof(payload_data)); + + alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv, + bin_test_mock_algs[param->alg_idx].id, + param->mem_type); + + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + param->mem_type, + param->offset_words * reg_inc_per_word, + &payload_data, sizeof(payload_data)); + + fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder); + KUNIT_ASSERT_EQ(test, + cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw", + fw, "mock_bin", "misc"), + 0); + + /* Content of registers should match payload_data */ + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) + + ((alg_base_words + param->offset_words) * reg_inc_per_word); + reg_val = 0; + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, + ®_val, sizeof(reg_val)), + 0); + KUNIT_EXPECT_EQ(test, reg_val, payload_data); + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1); + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* bin file with a single payload that patches consecutive words */ +static void bin_patch_one_multiword(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + const struct bin_test_param *param = test->param_value; + unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + u32 payload_data[16], 
readback[16]; + unsigned int alg_base_words, reg_addr; + struct firmware *fw; + + static_assert(ARRAY_SIZE(readback) == ARRAY_SIZE(payload_data)); + + get_random_bytes(&payload_data, sizeof(payload_data)); + memset(readback, 0, sizeof(readback)); + + alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv, + bin_test_mock_algs[param->alg_idx].id, + param->mem_type); + + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + param->mem_type, + param->offset_words * reg_inc_per_word, + payload_data, sizeof(payload_data)); + + fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder); + KUNIT_ASSERT_EQ(test, + cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw", + fw, "mock_bin", "misc"), + 0); + + /* Content of registers should match payload_data */ + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) + + ((alg_base_words + param->offset_words) * reg_inc_per_word); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, + sizeof(readback)), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, payload_data, sizeof(payload_data)); + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_regmap_drop_range(priv, reg_addr, + reg_addr + (reg_inc_per_word * ARRAY_SIZE(payload_data))); + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* bin file with a multiple one-word payloads that patch consecutive words */ +static void bin_patch_multi_oneword(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + const struct bin_test_param *param = test->param_value; + unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + u32 payload_data[16], readback[16]; + unsigned int alg_base_words, reg_addr; + struct firmware *fw; + int i; + + static_assert(ARRAY_SIZE(readback) == ARRAY_SIZE(payload_data)); + + get_random_bytes(&payload_data, sizeof(payload_data)); + memset(readback, 0, sizeof(readback)); + + alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv, + bin_test_mock_algs[param->alg_idx].id, + param->mem_type); + + /* Add one payload per word */ + for (i = 0; i < ARRAY_SIZE(payload_data); ++i) { + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + param->mem_type, + (param->offset_words + i) * reg_inc_per_word, + &payload_data[i], sizeof(payload_data[i])); + } + + fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder); + KUNIT_ASSERT_EQ(test, + cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw", + fw, "mock_bin", "misc"), + 0); + + /* Content of registers should match payload_data */ + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) + + ((alg_base_words + param->offset_words) * reg_inc_per_word); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, + sizeof(readback)), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, payload_data, sizeof(payload_data)); + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + cs_dsp_mock_regmap_drop_range(priv, reg_addr, + reg_addr + (reg_inc_per_word * ARRAY_SIZE(payload_data))); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * bin file with a multiple one-word payloads that patch a block of consecutive + * words but the payloads are not in address order. 
+ */ +static void bin_patch_multi_oneword_unordered(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + const struct bin_test_param *param = test->param_value; + unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + u32 payload_data[16], readback[16]; + static const u8 word_order[] = { 10, 2, 12, 4, 0, 11, 6, 1, 3, 15, 5, 13, 8, 7, 9, 14 }; + unsigned int alg_base_words, reg_addr; + struct firmware *fw; + int i; + + static_assert(ARRAY_SIZE(readback) == ARRAY_SIZE(payload_data)); + static_assert(ARRAY_SIZE(word_order) == ARRAY_SIZE(payload_data)); + + get_random_bytes(&payload_data, sizeof(payload_data)); + memset(readback, 0, sizeof(readback)); + + alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv, + bin_test_mock_algs[param->alg_idx].id, + param->mem_type); + + /* Add one payload per word */ + for (i = 0; i < ARRAY_SIZE(word_order); ++i) { + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + param->mem_type, + (param->offset_words + word_order[i]) * + reg_inc_per_word, + &payload_data[word_order[i]], sizeof(payload_data[0])); + } + + fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder); + KUNIT_ASSERT_EQ(test, + cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw", + fw, "mock_bin", "misc"), + 0); + + /* Content of registers should match payload_data */ + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) + + ((alg_base_words + param->offset_words) * reg_inc_per_word); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, + sizeof(readback)), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, payload_data, sizeof(payload_data)); + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + cs_dsp_mock_regmap_drop_range(priv, reg_addr, + reg_addr + (reg_inc_per_word * ARRAY_SIZE(payload_data))); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * bin file with a multiple one-word payloads. The payloads are not in address + * order and collectively do not patch a contiguous block of memory. 
+ */ +static void bin_patch_multi_oneword_sparse_unordered(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + const struct bin_test_param *param = test->param_value; + unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + static const u8 word_offsets[] = { + 11, 69, 59, 61, 32, 75, 4, 38, 70, 13, 79, 47, 46, 53, 18, 44, + 54, 35, 51, 21, 26, 45, 27, 41, 66, 2, 17, 56, 40, 9, 8, 20, + 29, 19, 63, 42, 12, 16, 43, 3, 5, 55, 52, 22 + }; + u32 payload_data[44]; + unsigned int alg_base_words, reg_addr; + struct firmware *fw; + u32 reg_val; + int i; + + static_assert(ARRAY_SIZE(word_offsets) == ARRAY_SIZE(payload_data)); + + get_random_bytes(&payload_data, sizeof(payload_data)); + + alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv, + bin_test_mock_algs[param->alg_idx].id, + param->mem_type); + + /* Add one payload per word */ + for (i = 0; i < ARRAY_SIZE(word_offsets); ++i) { + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + param->mem_type, + word_offsets[i] * reg_inc_per_word, + &payload_data[i], sizeof(payload_data[i])); + } + + fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder); + KUNIT_ASSERT_EQ(test, + cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw", + fw, "mock_bin", "misc"), + 0); + + /* Content of registers should match payload_data */ + for (i = 0; i < ARRAY_SIZE(word_offsets); ++i) { + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) + + ((alg_base_words + word_offsets[i]) * reg_inc_per_word); + reg_val = 0; + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, ®_val, + sizeof(reg_val)), + 0); + KUNIT_EXPECT_MEMEQ(test, ®_val, &payload_data[i], sizeof(reg_val)); + + /* Drop expected writes from the cache */ + cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1); + } + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * bin file that patches a single DSP word in each of the memory regions + * of one algorithm. 
+ */ +static void bin_patch_one_word_multiple_mems(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + const struct bin_test_param *param = test->param_value; + unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + unsigned int alg_xm_base_words, alg_ym_base_words, alg_zm_base_words; + unsigned int reg_addr; + u32 payload_data[3]; + struct firmware *fw; + u32 reg_val; + + get_random_bytes(&payload_data, sizeof(payload_data)); + + alg_xm_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv, + bin_test_mock_algs[param->alg_idx].id, + WMFW_ADSP2_XM); + alg_ym_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv, + bin_test_mock_algs[param->alg_idx].id, + WMFW_ADSP2_YM); + + if (cs_dsp_mock_has_zm(priv)) { + alg_zm_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv, + bin_test_mock_algs[param->alg_idx].id, + WMFW_ADSP2_ZM); + } else { + alg_zm_base_words = 0; + } + + /* Add words to XM, YM and ZM */ + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + WMFW_ADSP2_XM, + param->offset_words * reg_inc_per_word, + &payload_data[0], sizeof(payload_data[0])); + + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + WMFW_ADSP2_YM, + param->offset_words * reg_inc_per_word, + &payload_data[1], sizeof(payload_data[1])); + + if (cs_dsp_mock_has_zm(priv)) { + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + WMFW_ADSP2_ZM, + param->offset_words * reg_inc_per_word, + &payload_data[2], sizeof(payload_data[2])); + } + + fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder); + KUNIT_ASSERT_EQ(test, + cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw", + fw, "mock_bin", "misc"), + 0); + + /* Content of registers should match payload_data */ + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM) + + ((alg_xm_base_words + param->offset_words) * reg_inc_per_word); + reg_val = 0; + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, ®_val, sizeof(reg_val)), + 0); + KUNIT_EXPECT_EQ(test, reg_val, payload_data[0]); + + cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1); + + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM) + + ((alg_ym_base_words + param->offset_words) * reg_inc_per_word); + reg_val = 0; + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, ®_val, sizeof(reg_val)), + 0); + KUNIT_EXPECT_EQ(test, reg_val, payload_data[1]); + + cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1); + + if (cs_dsp_mock_has_zm(priv)) { + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_ZM) + + ((alg_zm_base_words + param->offset_words) * reg_inc_per_word); + reg_val = 0; + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, ®_val, + sizeof(reg_val)), + 0); + KUNIT_EXPECT_EQ(test, reg_val, payload_data[2]); + + /* Drop expected writes from the cache */ + cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1); + } + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * bin file that patches a single DSP word in multiple algorithms. 
+ */ +static void bin_patch_one_word_multiple_algs(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + const struct bin_test_param *param = test->param_value; + u32 payload_data[ARRAY_SIZE(bin_test_mock_algs)]; + unsigned int alg_base_words; + unsigned int reg_inc_per_word, reg_addr; + struct firmware *fw; + u32 reg_val; + int i; + + get_random_bytes(&payload_data, sizeof(payload_data)); + + /* Add one payload per algorithm */ + for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) { + reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[i].id, + bin_test_mock_algs[i].ver, + param->mem_type, + param->offset_words * reg_inc_per_word, + &payload_data[i], sizeof(payload_data[i])); + } + + fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder); + KUNIT_ASSERT_EQ(test, + cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw", + fw, "mock_bin", "misc"), + 0); + + /* Content of registers should match payload_data */ + for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) { + alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv, + bin_test_mock_algs[i].id, + param->mem_type); + reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) + + ((alg_base_words + param->offset_words) * reg_inc_per_word); + reg_val = 0; + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, ®_val, + sizeof(reg_val)), + 0); + KUNIT_EXPECT_EQ(test, reg_val, payload_data[i]); + + /* Drop expected writes from the cache */ + cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1); + } + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * bin file that patches a single DSP word in multiple algorithms. + * The algorithms are not patched in the same order they appear in the XM header. 
+ */ +static void bin_patch_one_word_multiple_algs_unordered(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + const struct bin_test_param *param = test->param_value; + static const u8 alg_order[] = { 3, 0, 2, 1 }; + u32 payload_data[ARRAY_SIZE(bin_test_mock_algs)]; + unsigned int alg_base_words; + unsigned int reg_inc_per_word, reg_addr; + struct firmware *fw; + u32 reg_val; + int i, alg_idx; + + static_assert(ARRAY_SIZE(alg_order) == ARRAY_SIZE(bin_test_mock_algs)); + + get_random_bytes(&payload_data, sizeof(payload_data)); + + /* Add one payload per algorithm */ + for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) { + alg_idx = alg_order[i]; + reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[alg_idx].id, + bin_test_mock_algs[alg_idx].ver, + param->mem_type, + param->offset_words * reg_inc_per_word, + &payload_data[i], sizeof(payload_data[i])); + } + + fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder); + KUNIT_ASSERT_EQ(test, + cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw", + fw, "mock_bin", "misc"), + 0); + + /* Content of registers should match payload_data */ + for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) { + alg_idx = alg_order[i]; + alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv, + bin_test_mock_algs[alg_idx].id, + param->mem_type); + reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) + + ((alg_base_words + param->offset_words) * reg_inc_per_word); + reg_val = 0; + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, ®_val, + sizeof(reg_val)), + 0); + KUNIT_EXPECT_EQ(test, reg_val, payload_data[i]); + + /* Drop expected writes from the cache */ + cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1); + } + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* bin file that patches a single packed block of DSP words */ +static void bin_patch_1_packed(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + const struct bin_test_param *param = test->param_value; + u32 packed_payload[3], readback[3]; + unsigned int alg_base_words, patch_pos_words; + unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs; + unsigned int reg_addr; + struct firmware *fw; + + static_assert(sizeof(readback) == sizeof(packed_payload)); + + get_random_bytes(packed_payload, sizeof(packed_payload)); + + alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv, + bin_test_mock_algs[param->alg_idx].id, + param->mem_type); + alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words); + + /* Round patch start word up to a packed boundary */ + patch_pos_words = round_up(alg_base_words + param->offset_words, 4); + patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words); + + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + param->mem_type, + (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4, + packed_payload, sizeof(packed_payload)); + + fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder); + KUNIT_ASSERT_EQ(test, + cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw", + fw, "mock_bin", "misc"), + 0); + + /* Content of registers 
should match payload_data */ + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) + + (patch_pos_in_packed_regs * 4); + memset(readback, 0, sizeof(readback)); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, + sizeof(readback)), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, packed_payload, sizeof(packed_payload)); + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload)); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * Patch data that is one word longer than a packed block using one + * packed block followed by one unpacked word. + */ +static void bin_patch_1_packed_1_single_trailing(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + const struct bin_test_param *param = test->param_value; + unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type); + u32 packed_payload[3], unpacked_payload[1], readback[3]; + unsigned int alg_base_words, patch_pos_words; + unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs; + unsigned int reg_addr; + struct firmware *fw; + + static_assert(sizeof(readback) == sizeof(packed_payload)); + static_assert(sizeof(readback) >= sizeof(unpacked_payload)); + + get_random_bytes(packed_payload, sizeof(packed_payload)); + get_random_bytes(unpacked_payload, sizeof(unpacked_payload)); + + alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv, + bin_test_mock_algs[param->alg_idx].id, + param->mem_type); + alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words); + + /* Round patch start word up to a packed boundary */ + patch_pos_words = round_up(alg_base_words + param->offset_words, 4); + patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words); + + /* Patch packed block */ + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + param->mem_type, + (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4, + &packed_payload, sizeof(packed_payload)); + + /* ... 
and the unpacked word following that */ + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + unpacked_mem_type, + ((patch_pos_words + 4) - alg_base_words) * 4, + unpacked_payload, sizeof(unpacked_payload)); + + fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder); + KUNIT_ASSERT_EQ(test, + cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw", + fw, "mock_bin", "misc"), + 0); + + /* Content of packed registers should match packed_payload */ + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) + + (patch_pos_in_packed_regs * 4); + memset(readback, 0, sizeof(readback)); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)), + 0); + KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload)); + + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload)); + + /* Content of unpacked registers should match unpacked_payload */ + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) + + (patch_pos_words + 4) * 4; + memset(readback, 0, sizeof(readback)); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, + sizeof(unpacked_payload)), + 0); + KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload)); + + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload)); + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * Patch data that is two words longer than a packed block using one + * packed block followed by two blocks of one unpacked word. + */ +static void bin_patch_1_packed_2_single_trailing(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + const struct bin_test_param *param = test->param_value; + unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type); + u32 packed_payload[3], unpacked_payloads[2], readback[3]; + unsigned int alg_base_words, patch_pos_words; + unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs; + unsigned int reg_addr; + struct firmware *fw; + + static_assert(sizeof(readback) == sizeof(packed_payload)); + static_assert(sizeof(readback) >= sizeof(unpacked_payloads)); + + get_random_bytes(packed_payload, sizeof(packed_payload)); + get_random_bytes(unpacked_payloads, sizeof(unpacked_payloads)); + + alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv, + bin_test_mock_algs[param->alg_idx].id, + param->mem_type); + alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words); + + /* Round patch start word up to a packed boundary */ + patch_pos_words = round_up(alg_base_words + param->offset_words, 4); + patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words); + + /* Patch packed block */ + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + param->mem_type, + (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4, + &packed_payload, sizeof(packed_payload)); + + /* ... 
and the unpacked words following that */ + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + unpacked_mem_type, + ((patch_pos_words + 4) - alg_base_words) * 4, + &unpacked_payloads[0], sizeof(unpacked_payloads[0])); + + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + unpacked_mem_type, + ((patch_pos_words + 5) - alg_base_words) * 4, + &unpacked_payloads[1], sizeof(unpacked_payloads[1])); + + fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder); + KUNIT_ASSERT_EQ(test, + cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw", + fw, "mock_bin", "misc"), + 0); + + /* Content of packed registers should match packed_payload */ + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) + + (patch_pos_in_packed_regs * 4); + memset(readback, 0, sizeof(readback)); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)), + 0); + KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload)); + + /* Drop expected writes from the cache */ + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload)); + + /* Content of unpacked registers should match unpacked_payloads */ + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) + + (patch_pos_words + 4) * 4; + memset(readback, 0, sizeof(readback)); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, + sizeof(unpacked_payloads)), + 0); + KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payloads, sizeof(unpacked_payloads)); + + /* Drop expected writes from the cache */ + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payloads)); + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * Patch data that is three words longer than a packed block using one + * packed block followed by three blocks of one unpacked word. 
+ */ +static void bin_patch_1_packed_3_single_trailing(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + const struct bin_test_param *param = test->param_value; + unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type); + u32 packed_payload[3], unpacked_payloads[3], readback[3]; + unsigned int alg_base_words, patch_pos_words; + unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs; + unsigned int reg_addr; + struct firmware *fw; + + static_assert(sizeof(readback) == sizeof(packed_payload)); + static_assert(sizeof(readback) >= sizeof(unpacked_payloads)); + + get_random_bytes(packed_payload, sizeof(packed_payload)); + get_random_bytes(unpacked_payloads, sizeof(unpacked_payloads)); + + alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv, + bin_test_mock_algs[param->alg_idx].id, + param->mem_type); + alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words); + + /* Round patch start word up to a packed boundary */ + patch_pos_words = round_up(alg_base_words + param->offset_words, 4); + patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words); + + /* Patch packed block */ + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + param->mem_type, + (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4, + &packed_payload, sizeof(packed_payload)); + + /* ... and the unpacked words following that */ + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + unpacked_mem_type, + ((patch_pos_words + 4) - alg_base_words) * 4, + &unpacked_payloads[0], sizeof(unpacked_payloads[0])); + + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + unpacked_mem_type, + ((patch_pos_words + 5) - alg_base_words) * 4, + &unpacked_payloads[1], sizeof(unpacked_payloads[1])); + + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + unpacked_mem_type, + ((patch_pos_words + 6) - alg_base_words) * 4, + &unpacked_payloads[2], sizeof(unpacked_payloads[2])); + + fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder); + KUNIT_ASSERT_EQ(test, + cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw", + fw, "mock_bin", "misc"), + 0); + + /* Content of packed registers should match packed_payload */ + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) + + (patch_pos_in_packed_regs * 4); + memset(readback, 0, sizeof(readback)); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)), + 0); + KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload)); + + /* Drop expected writes from the cache */ + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload)); + + /* Content of unpacked registers should match unpacked_payloads */ + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) + + (patch_pos_words + 4) * 4; + memset(readback, 0, sizeof(readback)); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, + sizeof(unpacked_payloads)), + 0); + KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payloads, sizeof(unpacked_payloads)); + + /* Drop expected writes from the cache */ + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payloads)); + + /* Drop expected writes 
and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * Patch data that is two words longer than a packed block using one + * packed block followed by a block of two unpacked words. + */ +static void bin_patch_1_packed_2_trailing(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + const struct bin_test_param *param = test->param_value; + unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type); + u32 packed_payload[3], unpacked_payload[2], readback[3]; + unsigned int alg_base_words, patch_pos_words; + unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs; + unsigned int reg_addr; + struct firmware *fw; + + static_assert(sizeof(readback) == sizeof(packed_payload)); + static_assert(sizeof(readback) >= sizeof(unpacked_payload)); + + get_random_bytes(packed_payload, sizeof(packed_payload)); + get_random_bytes(unpacked_payload, sizeof(unpacked_payload)); + + alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv, + bin_test_mock_algs[param->alg_idx].id, + param->mem_type); + alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words); + + /* Round patch start word up to a packed boundary */ + patch_pos_words = round_up(alg_base_words + param->offset_words, 4); + patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words); + + /* Patch packed block */ + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + param->mem_type, + (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4, + &packed_payload, sizeof(packed_payload)); + + /* ... and the unpacked words following that */ + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + unpacked_mem_type, + ((patch_pos_words + 4) - alg_base_words) * 4, + unpacked_payload, sizeof(unpacked_payload)); + + fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder); + KUNIT_ASSERT_EQ(test, + cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw", + fw, "mock_bin", "misc"), + 0); + + /* Content of packed registers should match packed_payload */ + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) + + (patch_pos_in_packed_regs * 4); + memset(readback, 0, sizeof(readback)); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)), + 0); + KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload)); + + /* Drop expected writes from the cache */ + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload)); + + /* Content of unpacked registers should match unpacked_payload */ + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) + + (patch_pos_words + 4) * 4; + memset(readback, 0, sizeof(readback)); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, + sizeof(unpacked_payload)), + 0); + KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload)); + + /* Drop expected writes from the cache */ + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload)); + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * Patch data that is three words longer than a packed block using one + * packed 
block followed by a block of three unpacked words. + */ +static void bin_patch_1_packed_3_trailing(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + const struct bin_test_param *param = test->param_value; + unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type); + u32 packed_payload[3], unpacked_payload[3], readback[3]; + unsigned int alg_base_words, patch_pos_words; + unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs; + unsigned int reg_addr; + struct firmware *fw; + + static_assert(sizeof(readback) == sizeof(packed_payload)); + static_assert(sizeof(readback) >= sizeof(unpacked_payload)); + + get_random_bytes(packed_payload, sizeof(packed_payload)); + get_random_bytes(unpacked_payload, sizeof(unpacked_payload)); + + alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv, + bin_test_mock_algs[param->alg_idx].id, + param->mem_type); + alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words); + + /* Round patch start word up to a packed boundary */ + patch_pos_words = round_up(alg_base_words + param->offset_words, 4); + patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words); + + /* Patch packed block */ + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + param->mem_type, + (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4, + &packed_payload, sizeof(packed_payload)); + + /* ... and the unpacked words following that */ + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + unpacked_mem_type, + ((patch_pos_words + 4) - alg_base_words) * 4, + unpacked_payload, sizeof(unpacked_payload)); + + fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder); + KUNIT_ASSERT_EQ(test, + cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw", + fw, "mock_bin", "misc"), + 0); + + /* Content of packed registers should match packed_payload */ + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) + + (patch_pos_in_packed_regs * 4); + memset(readback, 0, sizeof(readback)); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)), + 0); + KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload)); + + /* Drop expected writes from the cache */ + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload)); + + /* Content of unpacked registers should match unpacked_payload */ + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) + + (patch_pos_words + 4) * 4; + memset(readback, 0, sizeof(readback)); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, + sizeof(unpacked_payload)), + 0); + KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload)); + + /* Drop expected writes from the cache */ + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload)); + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * Patch data that starts one word before a packed boundary using one + * unpacked word followed by one packed block. 
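+ * The packed block is placed one packed group above the rounded-up offset so
+ * that the leading unpacked word still falls inside the algorithm's region.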
+ */ +static void bin_patch_1_single_leading_1_packed(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + const struct bin_test_param *param = test->param_value; + unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type); + u32 packed_payload[3], unpacked_payload[1], readback[3]; + unsigned int alg_base_words, packed_patch_pos_words; + unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs; + unsigned int reg_addr; + struct firmware *fw; + + static_assert(sizeof(readback) == sizeof(packed_payload)); + static_assert(sizeof(readback) >= sizeof(unpacked_payload)); + + get_random_bytes(packed_payload, sizeof(packed_payload)); + get_random_bytes(unpacked_payload, sizeof(unpacked_payload)); + memset(readback, 0, sizeof(readback)); + + alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv, + bin_test_mock_algs[param->alg_idx].id, + param->mem_type); + alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words); + + /* Round packed start word up to a packed boundary and move to the next boundary */ + packed_patch_pos_words = round_up(alg_base_words + param->offset_words, 4) + 4; + + /* Patch the leading unpacked word */ + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + unpacked_mem_type, + ((packed_patch_pos_words - 1) - alg_base_words) * 4, + unpacked_payload, sizeof(unpacked_payload)); + /* ... then the packed block */ + patch_pos_in_packed_regs = _num_words_to_num_packed_regs(packed_patch_pos_words); + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + param->mem_type, + (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4, + &packed_payload, sizeof(packed_payload)); + + fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder); + KUNIT_ASSERT_EQ(test, + cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw", + fw, "mock_bin", "misc"), + 0); + + /* Content of packed registers should match packed_payload */ + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) + + (patch_pos_in_packed_regs * 4); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)), + 0); + KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload)); + + /* Drop expected writes from the cache */ + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload)); + + /* Content of unpacked registers should match unpacked_payload */ + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) + + (packed_patch_pos_words - 1) * 4; + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, + sizeof(unpacked_payload)), + 0); + KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload)); + + /* Drop expected writes from the cache */ + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload)); + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * Patch data that starts two words before a packed boundary using two + * unpacked words followed by one packed block. 
+ */ +static void bin_patch_2_single_leading_1_packed(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + const struct bin_test_param *param = test->param_value; + unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type); + u32 packed_payload[3], unpacked_payload[2], readback[3]; + unsigned int alg_base_words, packed_patch_pos_words; + unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs; + unsigned int reg_addr; + struct firmware *fw; + + static_assert(sizeof(readback) == sizeof(packed_payload)); + static_assert(sizeof(readback) >= sizeof(unpacked_payload)); + + get_random_bytes(packed_payload, sizeof(packed_payload)); + get_random_bytes(unpacked_payload, sizeof(unpacked_payload)); + + alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv, + bin_test_mock_algs[param->alg_idx].id, + param->mem_type); + alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words); + + /* Round packed start word up to a packed boundary and move to the next boundary */ + packed_patch_pos_words = round_up(alg_base_words + param->offset_words, 4) + 4; + + /* Patch the leading unpacked words */ + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + unpacked_mem_type, + ((packed_patch_pos_words - 2) - alg_base_words) * 4, + &unpacked_payload[0], sizeof(unpacked_payload[0])); + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + unpacked_mem_type, + ((packed_patch_pos_words - 1) - alg_base_words) * 4, + &unpacked_payload[1], sizeof(unpacked_payload[1])); + /* ... then the packed block */ + patch_pos_in_packed_regs = _num_words_to_num_packed_regs(packed_patch_pos_words); + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + param->mem_type, + (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4, + &packed_payload, sizeof(packed_payload)); + + fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder); + KUNIT_ASSERT_EQ(test, + cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw", + fw, "mock_bin", "misc"), + 0); + + /* Content of packed registers should match packed_payload */ + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) + + (patch_pos_in_packed_regs * 4); + memset(readback, 0, sizeof(readback)); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)), + 0); + KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload)); + + /* Drop expected writes from the cache */ + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload)); + + /* Content of unpacked registers should match unpacked_payload */ + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) + + (packed_patch_pos_words - 2) * 4; + memset(readback, 0, sizeof(readback)); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, + sizeof(unpacked_payload)), + 0); + KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload)); + + /* Drop expected writes from the cache */ + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload)); + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * Patch data that starts two words 
before a packed boundary using one + * block of two unpacked words followed by one packed block. + */ +static void bin_patch_2_leading_1_packed(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + const struct bin_test_param *param = test->param_value; + unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type); + u32 packed_payload[3], unpacked_payload[2], readback[3]; + unsigned int alg_base_words, packed_patch_pos_words; + unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs; + unsigned int reg_addr; + struct firmware *fw; + + static_assert(sizeof(readback) == sizeof(packed_payload)); + static_assert(sizeof(readback) >= sizeof(unpacked_payload)); + + get_random_bytes(packed_payload, sizeof(packed_payload)); + get_random_bytes(unpacked_payload, sizeof(unpacked_payload)); + + alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv, + bin_test_mock_algs[param->alg_idx].id, + param->mem_type); + alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words); + + /* Round packed start word up to a packed boundary and move to the next boundary */ + packed_patch_pos_words = round_up(alg_base_words + param->offset_words, 4) + 4; + + /* Patch the leading unpacked words */ + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + unpacked_mem_type, + ((packed_patch_pos_words - 2) - alg_base_words) * 4, + unpacked_payload, sizeof(unpacked_payload)); + /* ... then the packed block */ + patch_pos_in_packed_regs = _num_words_to_num_packed_regs(packed_patch_pos_words); + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + param->mem_type, + (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4, + &packed_payload, sizeof(packed_payload)); + + fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder); + KUNIT_ASSERT_EQ(test, + cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw", + fw, "mock_bin", "misc"), + 0); + + /* Content of packed registers should match packed_payload */ + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) + + (patch_pos_in_packed_regs * 4); + memset(readback, 0, sizeof(readback)); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)), + 0); + KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload)); + + /* Drop expected writes from the cache */ + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload)); + + /* Content of unpacked registers should match unpacked_payload */ + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) + + (packed_patch_pos_words - 2) * 4; + memset(readback, 0, sizeof(readback)); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, + sizeof(unpacked_payload)), + 0); + KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload)); + + /* Drop expected writes from the cache */ + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload)); + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * Patch data that starts three words before a packed boundary using three + * unpacked words followed by one packed block. 
+ */ +static void bin_patch_3_single_leading_1_packed(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + const struct bin_test_param *param = test->param_value; + unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type); + u32 packed_payload[3], unpacked_payload[3], readback[3]; + unsigned int alg_base_words, packed_patch_pos_words; + unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs; + unsigned int reg_addr; + struct firmware *fw; + + static_assert(sizeof(readback) == sizeof(packed_payload)); + static_assert(sizeof(readback) >= sizeof(unpacked_payload)); + + get_random_bytes(packed_payload, sizeof(packed_payload)); + get_random_bytes(unpacked_payload, sizeof(unpacked_payload)); + + alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv, + bin_test_mock_algs[param->alg_idx].id, + param->mem_type); + alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words); + + /* Round packed start word up to a packed boundary and move to the next boundary */ + packed_patch_pos_words = round_up(alg_base_words + param->offset_words, 4) + 4; + + /* Patch the leading unpacked words */ + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + unpacked_mem_type, + ((packed_patch_pos_words - 3) - alg_base_words) * 4, + &unpacked_payload[0], sizeof(unpacked_payload[0])); + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + unpacked_mem_type, + ((packed_patch_pos_words - 2) - alg_base_words) * 4, + &unpacked_payload[1], sizeof(unpacked_payload[1])); + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + unpacked_mem_type, + ((packed_patch_pos_words - 1) - alg_base_words) * 4, + &unpacked_payload[2], sizeof(unpacked_payload[2])); + /* ... 
then the packed block */ + patch_pos_in_packed_regs = _num_words_to_num_packed_regs(packed_patch_pos_words); + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + param->mem_type, + (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4, + &packed_payload, sizeof(packed_payload)); + + fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder); + KUNIT_ASSERT_EQ(test, + cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw", + fw, "mock_bin", "misc"), + 0); + + /* Content of packed registers should match packed_payload */ + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) + + (patch_pos_in_packed_regs * 4); + memset(readback, 0, sizeof(readback)); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)), + 0); + KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload)); + + /* Drop expected writes from the cache */ + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload)); + + /* Content of unpacked registers should match unpacked_payload */ + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) + + (packed_patch_pos_words - 3) * 4; + memset(readback, 0, sizeof(readback)); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, + sizeof(unpacked_payload)), + 0); + KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload)); + + /* Drop expected writes from the cache */ + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload)); + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * Patch data that starts three words before a packed boundary using one + * block of three unpacked words followed by one packed block. + */ +static void bin_patch_3_leading_1_packed(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + const struct bin_test_param *param = test->param_value; + unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type); + u32 packed_payload[3], unpacked_payload[3], readback[3]; + unsigned int alg_base_words, packed_patch_pos_words; + unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs; + unsigned int reg_addr; + struct firmware *fw; + + static_assert(sizeof(readback) == sizeof(packed_payload)); + static_assert(sizeof(readback) >= sizeof(unpacked_payload)); + + get_random_bytes(packed_payload, sizeof(packed_payload)); + get_random_bytes(unpacked_payload, sizeof(unpacked_payload)); + + alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv, + bin_test_mock_algs[param->alg_idx].id, + param->mem_type); + alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words); + + /* Round packed start word up to a packed boundary and move to the next boundary */ + packed_patch_pos_words = round_up(alg_base_words + param->offset_words, 4) + 4; + + /* Patch the leading unpacked words */ + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + unpacked_mem_type, + ((packed_patch_pos_words - 3) - alg_base_words) * 4, + unpacked_payload, sizeof(unpacked_payload)); + /* ... 
then the packed block */ + patch_pos_in_packed_regs = _num_words_to_num_packed_regs(packed_patch_pos_words); + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + param->mem_type, + (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4, + &packed_payload, sizeof(packed_payload)); + + fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder); + KUNIT_ASSERT_EQ(test, + cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw", + fw, "mock_bin", "misc"), + 0); + + /* Content of packed registers should match packed_payload */ + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) + + (patch_pos_in_packed_regs * 4); + memset(readback, 0, sizeof(readback)); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)), + 0); + KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload)); + + /* Drop expected writes from the cache */ + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload)); + + /* Content of unpacked registers should match unpacked_payload */ + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) + + (packed_patch_pos_words - 3) * 4; + memset(readback, 0, sizeof(readback)); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, + sizeof(unpacked_payload)), + 0); + KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload)); + + /* Drop expected writes from the cache */ + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload)); + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* bin file with a multiple payloads that each patch one packed block. 
*/ +static void bin_patch_multi_onepacked(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + const struct bin_test_param *param = test->param_value; + u32 packed_payloads[8][3], readback[8][3]; + unsigned int alg_base_words, patch_pos_words; + unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs; + unsigned int payload_offset; + unsigned int reg_addr; + struct firmware *fw; + int i; + + static_assert(sizeof(readback) == sizeof(packed_payloads)); + + get_random_bytes(packed_payloads, sizeof(packed_payloads)); + + alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv, + bin_test_mock_algs[param->alg_idx].id, + param->mem_type); + alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words); + + /* Round patch start word up to a packed boundary */ + patch_pos_words = round_up(alg_base_words + param->offset_words, 4); + + /* Add one payload per packed block */ + for (i = 0; i < ARRAY_SIZE(packed_payloads); ++i) { + patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words + (i * 4)); + payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4; + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + param->mem_type, + payload_offset, + &packed_payloads[i], sizeof(packed_payloads[i])); + } + + fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder); + KUNIT_ASSERT_EQ(test, + cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw", + fw, "mock_bin", "misc"), + 0); + + /* Content of packed registers should match packed_payloads */ + patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words); + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) + + (patch_pos_in_packed_regs * 4); + memset(readback, 0, sizeof(readback)); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, sizeof(readback)), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, packed_payloads, sizeof(packed_payloads)); + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payloads)); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * bin file with a multiple payloads that each patch one packed block. + * The payloads are not in address order. 
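+ * Each payload still targets the packed group matching its index in
+ * packed_payloads[], so a readback in address order should match packed_payloads[].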
+ */ +static void bin_patch_multi_onepacked_unordered(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + const struct bin_test_param *param = test->param_value; + static const u8 payload_order[] = { 4, 3, 6, 1, 0, 7, 5, 2 }; + u32 packed_payloads[8][3], readback[8][3]; + unsigned int alg_base_words, patch_pos_words; + unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs; + unsigned int payload_offset; + unsigned int reg_addr; + struct firmware *fw; + int i; + + static_assert(ARRAY_SIZE(payload_order) == ARRAY_SIZE(packed_payloads)); + static_assert(sizeof(readback) == sizeof(packed_payloads)); + + get_random_bytes(packed_payloads, sizeof(packed_payloads)); + + alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv, + bin_test_mock_algs[param->alg_idx].id, + param->mem_type); + alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words); + + /* Round patch start word up to a packed boundary */ + patch_pos_words = round_up(alg_base_words + param->offset_words, 4); + + /* Add one payload per packed block */ + for (i = 0; i < ARRAY_SIZE(payload_order); ++i) { + patch_pos_in_packed_regs = + _num_words_to_num_packed_regs(patch_pos_words + (payload_order[i] * 4)); + payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4; + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + param->mem_type, + payload_offset, + &packed_payloads[payload_order[i]], + sizeof(packed_payloads[0])); + } + + fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder); + KUNIT_ASSERT_EQ(test, + cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw", + fw, "mock_bin", "misc"), + 0); + + /* Content in registers should match the order of data in packed_payloads */ + patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words); + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) + + (patch_pos_in_packed_regs * 4); + memset(readback, 0, sizeof(readback)); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, sizeof(readback)), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, packed_payloads, sizeof(packed_payloads)); + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payloads)); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * bin file with a multiple payloads that each patch one packed block. + * The payloads are not in address order. The patched memory is not contiguous. 
+ */ +static void bin_patch_multi_onepacked_sparse_unordered(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + const struct bin_test_param *param = test->param_value; + static const u8 word_offsets[] = { 60, 24, 76, 4, 40, 52, 48, 36, 12 }; + u32 packed_payloads[9][3], readback[3]; + unsigned int alg_base_words, alg_base_in_packed_regs; + unsigned int patch_pos_words, patch_pos_in_packed_regs, payload_offset; + unsigned int reg_addr; + struct firmware *fw; + int i; + + static_assert(ARRAY_SIZE(word_offsets) == ARRAY_SIZE(packed_payloads)); + static_assert(sizeof(readback) == sizeof(packed_payloads[0])); + + get_random_bytes(packed_payloads, sizeof(packed_payloads)); + + alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv, + bin_test_mock_algs[param->alg_idx].id, + param->mem_type); + alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words); + + /* Add one payload per packed block */ + for (i = 0; i < ARRAY_SIZE(word_offsets); ++i) { + /* Round patch start word up to a packed boundary */ + patch_pos_words = round_up(alg_base_words + word_offsets[i], 4); + patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words); + payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4; + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + param->mem_type, + payload_offset, + &packed_payloads[i], + sizeof(packed_payloads[0])); + } + + fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder); + KUNIT_ASSERT_EQ(test, + cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw", + fw, "mock_bin", "misc"), + 0); + + /* Content of packed registers should match packed_payloads */ + for (i = 0; i < ARRAY_SIZE(word_offsets); ++i) { + patch_pos_words = round_up(alg_base_words + word_offsets[i], 4); + patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words); + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) + + (patch_pos_in_packed_regs * 4); + memset(readback, 0, sizeof(readback)); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, + sizeof(readback)), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, packed_payloads[i], sizeof(packed_payloads[i])); + + /* Drop expected writes from the cache */ + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payloads[i])); + } + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * bin file that patches a single packed block in each of the memory regions + * of one algorithm. 
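+ * One block is patched in packed XM and one in packed YM, each at an offset
+ * relative to the algorithm's base address within that region.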
+ */ +static void bin_patch_1_packed_multiple_mems(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + const struct bin_test_param *param = test->param_value; + u32 packed_xm_payload[3], packed_ym_payload[3], readback[3]; + unsigned int alg_xm_base_words, alg_ym_base_words; + unsigned int xm_patch_pos_words, ym_patch_pos_words; + unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs; + unsigned int reg_addr; + struct firmware *fw; + + static_assert(sizeof(readback) == sizeof(packed_xm_payload)); + static_assert(sizeof(readback) == sizeof(packed_ym_payload)); + + get_random_bytes(packed_xm_payload, sizeof(packed_xm_payload)); + get_random_bytes(packed_ym_payload, sizeof(packed_ym_payload)); + + alg_xm_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv, + bin_test_mock_algs[param->alg_idx].id, + WMFW_HALO_XM_PACKED); + alg_ym_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv, + bin_test_mock_algs[param->alg_idx].id, + WMFW_HALO_YM_PACKED); + + /* Round patch start word up to a packed boundary */ + xm_patch_pos_words = round_up(alg_xm_base_words + param->offset_words, 4); + ym_patch_pos_words = round_up(alg_ym_base_words + param->offset_words, 4); + + /* Add XM and YM patches */ + alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_xm_base_words); + patch_pos_in_packed_regs = _num_words_to_num_packed_regs(xm_patch_pos_words); + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + WMFW_HALO_XM_PACKED, + (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4, + packed_xm_payload, sizeof(packed_xm_payload)); + + alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_ym_base_words); + patch_pos_in_packed_regs = _num_words_to_num_packed_regs(ym_patch_pos_words); + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[param->alg_idx].id, + bin_test_mock_algs[param->alg_idx].ver, + WMFW_HALO_YM_PACKED, + (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4, + packed_ym_payload, sizeof(packed_ym_payload)); + + fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder); + KUNIT_ASSERT_EQ(test, + cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw", + fw, "mock_bin", "misc"), + 0); + + /* Content of packed XM registers should match packed_xm_payload */ + patch_pos_in_packed_regs = _num_words_to_num_packed_regs(xm_patch_pos_words); + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_HALO_XM_PACKED) + + (patch_pos_in_packed_regs * 4); + memset(readback, 0, sizeof(readback)); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, sizeof(readback)), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, packed_xm_payload, sizeof(packed_xm_payload)); + + /* Drop expected writes from the cache */ + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_xm_payload)); + + /* Content of packed YM registers should match packed_ym_payload */ + patch_pos_in_packed_regs = _num_words_to_num_packed_regs(ym_patch_pos_words); + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_HALO_YM_PACKED) + + (patch_pos_in_packed_regs * 4); + memset(readback, 0, sizeof(readback)); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, sizeof(readback)), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, packed_ym_payload, sizeof(packed_ym_payload)); + + /* Drop expected writes from the cache */ + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_ym_payload)); + + /* Drop expected writes and the cache should 
then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * bin file that patches a single packed block in multiple algorithms. + */ +static void bin_patch_1_packed_multiple_algs(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + const struct bin_test_param *param = test->param_value; + u32 packed_payload[ARRAY_SIZE(bin_test_mock_algs)][3]; + u32 readback[ARRAY_SIZE(bin_test_mock_algs)][3]; + unsigned int alg_base_words, patch_pos_words; + unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs; + unsigned int reg_addr, payload_offset; + struct firmware *fw; + int i; + + static_assert(sizeof(readback) == sizeof(packed_payload)); + + get_random_bytes(packed_payload, sizeof(packed_payload)); + + /* For each algorithm patch one DSP word to a value from packed_payload */ + for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) { + alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv, + bin_test_mock_algs[i].id, + param->mem_type); + alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words); + + /* Round patch start word up to a packed boundary */ + patch_pos_words = round_up(alg_base_words + param->offset_words, 4); + patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words); + + payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4; + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[i].id, + bin_test_mock_algs[i].ver, + param->mem_type, + payload_offset, + packed_payload[i], sizeof(packed_payload[i])); + } + + fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder); + KUNIT_ASSERT_EQ(test, + cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw", + fw, "mock_bin", "misc"), + 0); + + memset(readback, 0, sizeof(readback)); + + /* + * Readback the registers that should have been written. Place + * the values into the expected location in readback[] so that + * the content of readback[] should match packed_payload[] + */ + for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) { + alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv, + bin_test_mock_algs[i].id, + param->mem_type); + alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words); + + patch_pos_words = round_up(alg_base_words + param->offset_words, 4); + patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words); + + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) + + (patch_pos_in_packed_regs * 4); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, + readback[i], sizeof(readback[i])), + 0); + + /* Drop expected writes from the cache */ + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload[i])); + } + + KUNIT_EXPECT_MEMEQ(test, readback, packed_payload, sizeof(packed_payload)); + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * bin file that patches a single packed block in multiple algorithms. + * The algorithms are not patched in the same order they appear in the XM header. 
+ */ +static void bin_patch_1_packed_multiple_algs_unordered(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + const struct bin_test_param *param = test->param_value; + static const u8 alg_order[] = { 3, 0, 2, 1 }; + u32 packed_payload[ARRAY_SIZE(bin_test_mock_algs)][3]; + u32 readback[ARRAY_SIZE(bin_test_mock_algs)][3]; + unsigned int alg_base_words, patch_pos_words; + unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs; + unsigned int reg_addr, payload_offset; + struct firmware *fw; + int i, alg_idx; + + static_assert(ARRAY_SIZE(alg_order) == ARRAY_SIZE(bin_test_mock_algs)); + static_assert(sizeof(readback) == sizeof(packed_payload)); + + get_random_bytes(packed_payload, sizeof(packed_payload)); + + /* + * For each algorithm index in alg_order[] patch one DSP word in + * that algorithm to a value from packed_payload. + */ + for (i = 0; i < ARRAY_SIZE(alg_order); ++i) { + alg_idx = alg_order[i]; + alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv, + bin_test_mock_algs[alg_idx].id, + param->mem_type); + alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words); + + /* Round patch start word up to a packed boundary */ + patch_pos_words = round_up(alg_base_words + param->offset_words, 4); + patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words); + + payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4; + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[alg_idx].id, + bin_test_mock_algs[alg_idx].ver, + param->mem_type, + payload_offset, + packed_payload[i], sizeof(packed_payload[i])); + } + + fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder); + KUNIT_ASSERT_EQ(test, + cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw", + fw, "mock_bin", "misc"), + 0); + + memset(readback, 0, sizeof(readback)); + + /* + * Readback the registers that should have been written. Place + * the values into the expected location in readback[] so that + * the content of readback[] should match packed_payload[] + */ + for (i = 0; i < ARRAY_SIZE(alg_order); ++i) { + alg_idx = alg_order[i]; + alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv, + bin_test_mock_algs[alg_idx].id, + param->mem_type); + + patch_pos_words = round_up(alg_base_words + param->offset_words, 4); + patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words); + + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) + + (patch_pos_in_packed_regs * 4); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, + readback[i], sizeof(readback[i])), + 0); + + /* Drop expected writes from the cache */ + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload[i])); + } + + KUNIT_EXPECT_MEMEQ(test, readback, packed_payload, sizeof(packed_payload)); + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * bin file that contains a mix of packed and unpacked words. + * payloads are in random offset order. Offsets that are on a packed boundary + * are written as a packed block. Offsets that are not on a packed boundary + * are written as a single unpacked word. 
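+ * An offset is on a packed boundary when (alg_base_words + offset) is a multiple
+ * of four DSP words; each such four-word group spans three consecutive 32-bit
+ * packed registers, which is the conversion _num_words_to_num_packed_regs() makes.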
+ */ +static void bin_patch_mixed_packed_unpacked_random(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + const struct bin_test_param *param = test->param_value; + static const u8 offset_words[] = { + 58, 68, 50, 10, 44, 17, 74, 36, 8, 7, 49, 11, 78, 57, 65, 2, + 48, 38, 22, 70, 77, 21, 61, 56, 75, 34, 27, 3, 31, 20, 43, 63, + 5, 30, 32, 25, 33, 79, 29, 0, 37, 60, 69, 52, 13, 12, 24, 26, + 4, 51, 76, 72, 16, 6, 39, 62, 15, 41, 28, 73, 53, 40, 45, 54, + 14, 55, 46, 66, 64, 59, 23, 9, 67, 47, 19, 71, 35, 18, 42, 1, + }; + struct { + u32 packed[80][3]; + u32 unpacked[80]; + } *payload; + u32 readback[3]; + unsigned int alg_base_words, patch_pos_words; + unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs; + unsigned int reg_addr, payload_offset; + int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type); + struct firmware *fw; + int i; + + payload = kunit_kmalloc(test, sizeof(*payload), GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, payload); + + get_random_bytes(payload->packed, sizeof(payload->packed)); + get_random_bytes(payload->unpacked, sizeof(payload->unpacked)); + + /* Create a patch entry for every offset in offset_words[] */ + for (i = 0; i < ARRAY_SIZE(offset_words); ++i) { + alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv, + bin_test_mock_algs[0].id, + param->mem_type); + /* + * If the offset is on a packed boundary use a packed payload else + * use an unpacked word + */ + patch_pos_words = alg_base_words + offset_words[i]; + if ((patch_pos_words % 4) == 0) { + alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words); + patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words); + payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4; + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[0].id, + bin_test_mock_algs[0].ver, + param->mem_type, + payload_offset, + payload->packed[i], + sizeof(payload->packed[i])); + } else { + payload_offset = offset_words[i] * 4; + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[0].id, + bin_test_mock_algs[0].ver, + unpacked_mem_type, + payload_offset, + &payload->unpacked[i], + sizeof(payload->unpacked[i])); + } + } + + fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder); + KUNIT_ASSERT_EQ(test, + cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw", + fw, "mock_bin", "misc"), + 0); + + /* + * Readback the packed registers that should have been written. 
+ * Place the values into the expected location in readback[] so + * that the content of readback[] should match payload->packed[] + */ + for (i = 0; i < ARRAY_SIZE(offset_words); ++i) { + alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv, + bin_test_mock_algs[0].id, + param->mem_type); + patch_pos_words = alg_base_words + offset_words[i]; + + /* Skip if the offset is not on a packed boundary */ + if ((patch_pos_words % 4) != 0) + continue; + + patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words); + + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) + + (patch_pos_in_packed_regs * 4); + + memset(readback, 0, sizeof(readback)); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, + sizeof(readback)), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, payload->packed[i], sizeof(payload->packed[i])); + + /* Drop expected writes from the cache */ + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(payload->packed[i])); + } + + /* + * Readback the unpacked registers that should have been written. + * Place the values into the expected location in readback[] so + * that the content of readback[] should match payload->unpacked[] + */ + for (i = 0; i < ARRAY_SIZE(offset_words); ++i) { + alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv, + bin_test_mock_algs[0].id, + unpacked_mem_type); + + patch_pos_words = alg_base_words + offset_words[i]; + + /* Skip if the offset is on a packed boundary */ + if ((patch_pos_words % 4) == 0) + continue; + + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) + + ((patch_pos_words) * 4); + + readback[0] = 0; + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, + &readback[0], sizeof(readback[0])), + 0); + KUNIT_EXPECT_EQ(test, readback[0], payload->unpacked[i]); + + /* Drop expected writes from the cache */ + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(payload->unpacked[i])); + } + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* Bin file with name and multiple info blocks */ +static void bin_patch_name_and_info(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + u32 reg_val, payload_data; + char *infobuf; + unsigned int alg_base_words, reg_addr; + struct firmware *fw; + + get_random_bytes(&payload_data, sizeof(payload_data)); + + alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv, + bin_test_mock_algs[0].id, + WMFW_ADSP2_YM); + + /* Add a name block and info block */ + cs_dsp_mock_bin_add_name(priv->local->bin_builder, "The name"); + cs_dsp_mock_bin_add_info(priv->local->bin_builder, "Some info"); + + /* Add a big block of info */ + infobuf = kunit_kzalloc(test, 512, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, infobuf); + + for (; strlcat(infobuf, "Waffle{Blah}\n", 512) < 512; ) + ; + + cs_dsp_mock_bin_add_info(priv->local->bin_builder, infobuf); + + /* Add a patch */ + cs_dsp_mock_bin_add_patch(priv->local->bin_builder, + bin_test_mock_algs[0].id, + bin_test_mock_algs[0].ver, + WMFW_ADSP2_YM, + 0, + &payload_data, sizeof(payload_data)); + + fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder); + KUNIT_ASSERT_EQ(test, + cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw", + fw, "mock_bin", "misc"), + 0); + + /* Content of registers should match payload_data */ 
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM); + reg_addr += alg_base_words * reg_inc_per_word; + reg_val = 0; + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, + &reg_val, sizeof(reg_val)), + 0); + KUNIT_EXPECT_EQ(test, reg_val, payload_data); +} +
+static int cs_dsp_bin_test_common_init(struct kunit *test, struct cs_dsp *dsp) +{ + struct cs_dsp_test *priv; + struct cs_dsp_mock_xm_header *xm_hdr; + struct device *test_dev; + int ret; + + priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL); + if (!priv->local) + return -ENOMEM; + + priv->test = test; + priv->dsp = dsp; + test->priv = priv; + + /* Create dummy struct device */ + test_dev = kunit_device_register(test, "cs_dsp_test_drv"); + if (IS_ERR(test_dev)) + return PTR_ERR(test_dev); + + dsp->dev = get_device(test_dev); + if (!dsp->dev) + return -ENODEV; + + ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev); + if (ret) + return ret; + + dev_set_drvdata(dsp->dev, priv); + + /* Allocate regmap */ + ret = cs_dsp_mock_regmap_init(priv); + if (ret) + return ret; + + /* Create an XM header */ + xm_hdr = cs_dsp_create_mock_xm_header(priv, + bin_test_mock_algs, + ARRAY_SIZE(bin_test_mock_algs)); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xm_hdr); + ret = cs_dsp_mock_xm_header_write_to_regmap(xm_hdr); + KUNIT_ASSERT_EQ(test, ret, 0); + + priv->local->bin_builder = + cs_dsp_mock_bin_init(priv, 1, + cs_dsp_mock_xm_header_get_fw_version(xm_hdr)); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->local->bin_builder); + + /* We must provide a dummy wmfw to load */ + priv->local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, -1); + priv->local->wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + + dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops); + + switch (dsp->type) { + case WMFW_ADSP2: + ret = cs_dsp_adsp2_init(dsp); + break; + case WMFW_HALO: + ret = cs_dsp_halo_init(dsp); + break; + default: + KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type); + return -EINVAL; + } + + if (ret) + return ret; + + /* Automatically call cs_dsp_remove() when test case ends */ + return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp); +} +
+static int cs_dsp_bin_test_halo_init(struct kunit *test) +{ + struct cs_dsp *dsp; + + /* Fill in cs_dsp and initialize */ + dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL); + if (!dsp) + return -ENOMEM; + + dsp->num = 1; + dsp->type = WMFW_HALO; + dsp->mem = cs_dsp_mock_halo_dsp1_regions; + dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes); + dsp->base = cs_dsp_mock_halo_core_base; + dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base; + + return cs_dsp_bin_test_common_init(test, dsp); +} +
+static int cs_dsp_bin_test_adsp2_32bit_init(struct kunit *test) +{ + struct cs_dsp *dsp; + + /* Fill in cs_dsp and initialize */ + dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL); + if (!dsp) + return -ENOMEM; + + dsp->num = 1; + dsp->type = WMFW_ADSP2; + dsp->rev = 1; + dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions; + dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes); + dsp->base = cs_dsp_mock_adsp2_32bit_sysbase; + + return cs_dsp_bin_test_common_init(test, dsp); +} +
+static int cs_dsp_bin_test_adsp2_16bit_init(struct kunit *test) +{ + struct cs_dsp *dsp; + + /* Fill in cs_dsp and initialize */ + dsp
= kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL); + if (!dsp) + return -ENOMEM; + + dsp->num = 1; + dsp->type = WMFW_ADSP2; + dsp->rev = 0; + dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions; + dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes); + dsp->base = cs_dsp_mock_adsp2_16bit_sysbase; + + return cs_dsp_bin_test_common_init(test, dsp); +} + +/* Parameterize on choice of XM or YM with a range of word offsets */ +static const struct bin_test_param x_or_y_and_offset_param_cases[] = { + { .mem_type = WMFW_ADSP2_XM, .offset_words = 0 }, + { .mem_type = WMFW_ADSP2_XM, .offset_words = 1 }, + { .mem_type = WMFW_ADSP2_XM, .offset_words = 2 }, + { .mem_type = WMFW_ADSP2_XM, .offset_words = 3 }, + { .mem_type = WMFW_ADSP2_XM, .offset_words = 4 }, + { .mem_type = WMFW_ADSP2_XM, .offset_words = 23 }, + { .mem_type = WMFW_ADSP2_XM, .offset_words = 22 }, + { .mem_type = WMFW_ADSP2_XM, .offset_words = 21 }, + { .mem_type = WMFW_ADSP2_XM, .offset_words = 20 }, + + { .mem_type = WMFW_ADSP2_YM, .offset_words = 0 }, + { .mem_type = WMFW_ADSP2_YM, .offset_words = 1 }, + { .mem_type = WMFW_ADSP2_YM, .offset_words = 2 }, + { .mem_type = WMFW_ADSP2_YM, .offset_words = 3 }, + { .mem_type = WMFW_ADSP2_YM, .offset_words = 4 }, + { .mem_type = WMFW_ADSP2_YM, .offset_words = 23 }, + { .mem_type = WMFW_ADSP2_YM, .offset_words = 22 }, + { .mem_type = WMFW_ADSP2_YM, .offset_words = 21 }, + { .mem_type = WMFW_ADSP2_YM, .offset_words = 20 }, +}; + +/* Parameterize on ZM with a range of word offsets */ +static const struct bin_test_param z_and_offset_param_cases[] = { + { .mem_type = WMFW_ADSP2_ZM, .offset_words = 0 }, + { .mem_type = WMFW_ADSP2_ZM, .offset_words = 1 }, + { .mem_type = WMFW_ADSP2_ZM, .offset_words = 2 }, + { .mem_type = WMFW_ADSP2_ZM, .offset_words = 3 }, + { .mem_type = WMFW_ADSP2_ZM, .offset_words = 4 }, + { .mem_type = WMFW_ADSP2_ZM, .offset_words = 23 }, + { .mem_type = WMFW_ADSP2_ZM, .offset_words = 22 }, + { .mem_type = WMFW_ADSP2_ZM, .offset_words = 21 }, + { .mem_type = WMFW_ADSP2_ZM, .offset_words = 20 }, +}; + +/* Parameterize on choice of packed XM or YM with a range of word offsets */ +static const struct bin_test_param packed_x_or_y_and_offset_param_cases[] = { + { .mem_type = WMFW_HALO_XM_PACKED, .offset_words = 0 }, + { .mem_type = WMFW_HALO_XM_PACKED, .offset_words = 4 }, + { .mem_type = WMFW_HALO_XM_PACKED, .offset_words = 8 }, + { .mem_type = WMFW_HALO_XM_PACKED, .offset_words = 12 }, + + { .mem_type = WMFW_HALO_YM_PACKED, .offset_words = 0 }, + { .mem_type = WMFW_HALO_YM_PACKED, .offset_words = 4 }, + { .mem_type = WMFW_HALO_YM_PACKED, .offset_words = 8 }, + { .mem_type = WMFW_HALO_YM_PACKED, .offset_words = 12 }, +}; + +static void x_or_y_or_z_and_offset_param_desc(const struct bin_test_param *param, + char *desc) +{ + snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s@%u", + cs_dsp_mem_region_name(param->mem_type), + param->offset_words); +} + +KUNIT_ARRAY_PARAM(x_or_y_and_offset, + x_or_y_and_offset_param_cases, + x_or_y_or_z_and_offset_param_desc); + +KUNIT_ARRAY_PARAM(z_and_offset, + z_and_offset_param_cases, + x_or_y_or_z_and_offset_param_desc); + +KUNIT_ARRAY_PARAM(packed_x_or_y_and_offset, + packed_x_or_y_and_offset_param_cases, + x_or_y_or_z_and_offset_param_desc); + +/* Parameterize on choice of packed XM or YM */ +static const struct bin_test_param packed_x_or_y_param_cases[] = { + { .mem_type = WMFW_HALO_XM_PACKED, .offset_words = 0 }, + { .mem_type = WMFW_HALO_YM_PACKED, .offset_words = 0 }, +}; + +static void x_or_y_or_z_param_desc(const struct 
bin_test_param *param, + char *desc) +{ + snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s", cs_dsp_mem_region_name(param->mem_type)); +} + +KUNIT_ARRAY_PARAM(packed_x_or_y, packed_x_or_y_param_cases, x_or_y_or_z_param_desc); + +static const struct bin_test_param offset_param_cases[] = { + { .offset_words = 0 }, + { .offset_words = 1 }, + { .offset_words = 2 }, + { .offset_words = 3 }, + { .offset_words = 4 }, + { .offset_words = 23 }, + { .offset_words = 22 }, + { .offset_words = 21 }, + { .offset_words = 20 }, +}; + +static void offset_param_desc(const struct bin_test_param *param, char *desc) +{ + snprintf(desc, KUNIT_PARAM_DESC_SIZE, "@%u", param->offset_words); +} + +KUNIT_ARRAY_PARAM(offset, offset_param_cases, offset_param_desc); + +static const struct bin_test_param alg_param_cases[] = { + { .alg_idx = 0 }, + { .alg_idx = 1 }, + { .alg_idx = 2 }, + { .alg_idx = 3 }, +}; + +static void alg_param_desc(const struct bin_test_param *param, char *desc) +{ + WARN_ON(param->alg_idx >= ARRAY_SIZE(bin_test_mock_algs)); + + snprintf(desc, KUNIT_PARAM_DESC_SIZE, "alg[%u] (%#x)", + param->alg_idx, bin_test_mock_algs[param->alg_idx].id); +} + +KUNIT_ARRAY_PARAM(alg, alg_param_cases, alg_param_desc); + +static const struct bin_test_param x_or_y_and_alg_param_cases[] = { + { .mem_type = WMFW_ADSP2_XM, .alg_idx = 0 }, + { .mem_type = WMFW_ADSP2_XM, .alg_idx = 1 }, + { .mem_type = WMFW_ADSP2_XM, .alg_idx = 2 }, + { .mem_type = WMFW_ADSP2_XM, .alg_idx = 3 }, + + { .mem_type = WMFW_ADSP2_YM, .alg_idx = 0 }, + { .mem_type = WMFW_ADSP2_YM, .alg_idx = 1 }, + { .mem_type = WMFW_ADSP2_YM, .alg_idx = 2 }, + { .mem_type = WMFW_ADSP2_YM, .alg_idx = 3 }, +}; + +static void x_or_y_or_z_and_alg_param_desc(const struct bin_test_param *param, char *desc) +{ + WARN_ON(param->alg_idx >= ARRAY_SIZE(bin_test_mock_algs)); + + snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s alg[%u] (%#x)", + cs_dsp_mem_region_name(param->mem_type), + param->alg_idx, bin_test_mock_algs[param->alg_idx].id); +} + +KUNIT_ARRAY_PARAM(x_or_y_and_alg, x_or_y_and_alg_param_cases, x_or_y_or_z_and_alg_param_desc); + +static const struct bin_test_param z_and_alg_param_cases[] = { + { .mem_type = WMFW_ADSP2_ZM, .alg_idx = 0 }, + { .mem_type = WMFW_ADSP2_ZM, .alg_idx = 1 }, + { .mem_type = WMFW_ADSP2_ZM, .alg_idx = 2 }, + { .mem_type = WMFW_ADSP2_ZM, .alg_idx = 3 }, +}; + +KUNIT_ARRAY_PARAM(z_and_alg, z_and_alg_param_cases, x_or_y_or_z_and_alg_param_desc); + +static const struct bin_test_param packed_x_or_y_and_alg_param_cases[] = { + { .mem_type = WMFW_HALO_XM_PACKED, .alg_idx = 0 }, + { .mem_type = WMFW_HALO_XM_PACKED, .alg_idx = 1 }, + { .mem_type = WMFW_HALO_XM_PACKED, .alg_idx = 2 }, + { .mem_type = WMFW_HALO_XM_PACKED, .alg_idx = 3 }, + + { .mem_type = WMFW_HALO_YM_PACKED, .alg_idx = 0 }, + { .mem_type = WMFW_HALO_YM_PACKED, .alg_idx = 1 }, + { .mem_type = WMFW_HALO_YM_PACKED, .alg_idx = 2 }, + { .mem_type = WMFW_HALO_YM_PACKED, .alg_idx = 3 }, +}; + +KUNIT_ARRAY_PARAM(packed_x_or_y_and_alg, packed_x_or_y_and_alg_param_cases, + x_or_y_or_z_and_alg_param_desc); + +static struct kunit_case cs_dsp_bin_test_cases_halo[] = { + /* Unpacked memory */ + KUNIT_CASE_PARAM(bin_patch_one_word, x_or_y_and_offset_gen_params), + KUNIT_CASE_PARAM(bin_patch_one_multiword, x_or_y_and_offset_gen_params), + KUNIT_CASE_PARAM(bin_patch_multi_oneword, x_or_y_and_offset_gen_params), + KUNIT_CASE_PARAM(bin_patch_multi_oneword_unordered, x_or_y_and_offset_gen_params), + KUNIT_CASE_PARAM(bin_patch_one_word_multiple_mems, offset_gen_params), + 
KUNIT_CASE_PARAM(bin_patch_one_word_multiple_mems, alg_gen_params), + KUNIT_CASE_PARAM(bin_patch_multi_oneword_sparse_unordered, x_or_y_and_alg_gen_params), + KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs, x_or_y_and_offset_gen_params), + KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs_unordered, x_or_y_and_offset_gen_params), + + /* Packed memory tests */ + KUNIT_CASE_PARAM(bin_patch_1_packed, + packed_x_or_y_and_offset_gen_params), + KUNIT_CASE_PARAM(bin_patch_1_packed_1_single_trailing, + packed_x_or_y_and_offset_gen_params), + KUNIT_CASE_PARAM(bin_patch_1_packed_2_single_trailing, + packed_x_or_y_and_offset_gen_params), + KUNIT_CASE_PARAM(bin_patch_1_packed_3_single_trailing, + packed_x_or_y_and_offset_gen_params), + KUNIT_CASE_PARAM(bin_patch_1_packed_2_trailing, + packed_x_or_y_and_offset_gen_params), + KUNIT_CASE_PARAM(bin_patch_1_packed_3_trailing, + packed_x_or_y_and_offset_gen_params), + KUNIT_CASE_PARAM(bin_patch_1_single_leading_1_packed, + packed_x_or_y_and_offset_gen_params), + KUNIT_CASE_PARAM(bin_patch_2_single_leading_1_packed, + packed_x_or_y_and_offset_gen_params), + KUNIT_CASE_PARAM(bin_patch_2_leading_1_packed, + packed_x_or_y_and_offset_gen_params), + KUNIT_CASE_PARAM(bin_patch_3_single_leading_1_packed, + packed_x_or_y_and_offset_gen_params), + KUNIT_CASE_PARAM(bin_patch_3_leading_1_packed, + packed_x_or_y_and_offset_gen_params), + KUNIT_CASE_PARAM(bin_patch_multi_onepacked, + packed_x_or_y_and_offset_gen_params), + KUNIT_CASE_PARAM(bin_patch_multi_onepacked_unordered, + packed_x_or_y_and_offset_gen_params), + KUNIT_CASE_PARAM(bin_patch_1_packed_multiple_mems, offset_gen_params), + KUNIT_CASE_PARAM(bin_patch_1_packed_multiple_mems, alg_gen_params), + KUNIT_CASE_PARAM(bin_patch_multi_onepacked_sparse_unordered, + packed_x_or_y_and_alg_gen_params), + KUNIT_CASE_PARAM(bin_patch_1_packed_multiple_algs, + packed_x_or_y_and_offset_gen_params), + KUNIT_CASE_PARAM(bin_patch_1_packed_multiple_algs_unordered, + packed_x_or_y_and_offset_gen_params), + KUNIT_CASE_PARAM(bin_patch_mixed_packed_unpacked_random, + packed_x_or_y_gen_params), + + KUNIT_CASE(bin_patch_name_and_info), + + { } /* terminator */ +}; + +static struct kunit_case cs_dsp_bin_test_cases_adsp2[] = { + /* XM and YM */ + KUNIT_CASE_PARAM(bin_patch_one_word, x_or_y_and_offset_gen_params), + KUNIT_CASE_PARAM(bin_patch_one_multiword, x_or_y_and_offset_gen_params), + KUNIT_CASE_PARAM(bin_patch_multi_oneword, x_or_y_and_offset_gen_params), + KUNIT_CASE_PARAM(bin_patch_multi_oneword_unordered, x_or_y_and_offset_gen_params), + KUNIT_CASE_PARAM(bin_patch_multi_oneword_sparse_unordered, x_or_y_and_alg_gen_params), + KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs, x_or_y_and_offset_gen_params), + KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs_unordered, x_or_y_and_offset_gen_params), + + /* ZM */ + KUNIT_CASE_PARAM(bin_patch_one_word, z_and_offset_gen_params), + KUNIT_CASE_PARAM(bin_patch_one_multiword, z_and_offset_gen_params), + KUNIT_CASE_PARAM(bin_patch_multi_oneword, z_and_offset_gen_params), + KUNIT_CASE_PARAM(bin_patch_multi_oneword_unordered, z_and_offset_gen_params), + KUNIT_CASE_PARAM(bin_patch_multi_oneword_sparse_unordered, z_and_alg_gen_params), + KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs, z_and_offset_gen_params), + KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs_unordered, z_and_offset_gen_params), + + /* Other */ + KUNIT_CASE_PARAM(bin_patch_one_word_multiple_mems, offset_gen_params), + KUNIT_CASE_PARAM(bin_patch_one_word_multiple_mems, alg_gen_params), + + 
KUNIT_CASE(bin_patch_name_and_info), + + { } /* terminator */ +}; + +static struct kunit_suite cs_dsp_bin_test_halo = { + .name = "cs_dsp_bin_halo", + .init = cs_dsp_bin_test_halo_init, + .test_cases = cs_dsp_bin_test_cases_halo, +}; + +static struct kunit_suite cs_dsp_bin_test_adsp2_32bit = { + .name = "cs_dsp_bin_adsp2_32bit", + .init = cs_dsp_bin_test_adsp2_32bit_init, + .test_cases = cs_dsp_bin_test_cases_adsp2, +}; + +static struct kunit_suite cs_dsp_bin_test_adsp2_16bit = { + .name = "cs_dsp_bin_adsp2_16bit", + .init = cs_dsp_bin_test_adsp2_16bit_init, + .test_cases = cs_dsp_bin_test_cases_adsp2, +}; + +kunit_test_suites(&cs_dsp_bin_test_halo, + &cs_dsp_bin_test_adsp2_32bit, + &cs_dsp_bin_test_adsp2_16bit); diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c b/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c new file mode 100644 index 000000000000..a7ec956d2724 --- /dev/null +++ b/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c @@ -0,0 +1,595 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// KUnit tests for cs_dsp. +// +// Copyright (C) 2024 Cirrus Logic, Inc. and +// Cirrus Logic International Semiconductor Ltd. +// + +#include <kunit/device.h> +#include <kunit/resource.h> +#include <kunit/test.h> +#include <linux/build_bug.h> +#include <linux/firmware/cirrus/cs_dsp.h> +#include <linux/firmware/cirrus/cs_dsp_test_utils.h> +#include <linux/firmware/cirrus/wmfw.h> +#include <linux/random.h> +#include <linux/regmap.h> +#include <linux/string.h> +#include <linux/vmalloc.h> + +KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *); +KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *); + +struct cs_dsp_test_local { + struct cs_dsp_mock_bin_builder *bin_builder; + struct cs_dsp_mock_xm_header *xm_header; + struct cs_dsp_mock_wmfw_builder *wmfw_builder; + struct firmware *wmfw; + int wmfw_version; +}; + +struct cs_dsp_bin_test_param { + int block_type; +}; + +static const struct cs_dsp_mock_alg_def cs_dsp_bin_err_test_mock_algs[] = { + { + .id = 0xfafa, + .ver = 0x100000, + .xm_size_words = 164, + .ym_size_words = 164, + .zm_size_words = 164, + }, +}; + +/* Load a bin containing unknown blocks. They should be skipped. 
*/ +static void bin_load_with_unknown_blocks(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *bin; + unsigned int reg_addr; + u8 *payload_data, *readback; + u8 random_data[8]; + const unsigned int payload_size_bytes = 64; + + payload_data = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data); + get_random_bytes(payload_data, payload_size_bytes); + + readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + /* Add some unknown blocks at the start of the bin */ + get_random_bytes(random_data, sizeof(random_data)); + cs_dsp_mock_bin_add_raw_block(local->bin_builder, + cs_dsp_bin_err_test_mock_algs[0].id, + cs_dsp_bin_err_test_mock_algs[0].ver, + 0xf5, 0, + random_data, sizeof(random_data)); + cs_dsp_mock_bin_add_raw_block(local->bin_builder, + cs_dsp_bin_err_test_mock_algs[0].id, + cs_dsp_bin_err_test_mock_algs[0].ver, + 0xf500, 0, + random_data, sizeof(random_data)); + cs_dsp_mock_bin_add_raw_block(local->bin_builder, + cs_dsp_bin_err_test_mock_algs[0].id, + cs_dsp_bin_err_test_mock_algs[0].ver, + 0xc300, 0, + random_data, sizeof(random_data)); + + /* Add a single payload to be written to DSP memory */ + cs_dsp_mock_bin_add_raw_block(local->bin_builder, + cs_dsp_bin_err_test_mock_algs[0].id, + cs_dsp_bin_err_test_mock_algs[0].ver, + WMFW_ADSP2_YM, 0, + payload_data, payload_size_bytes); + + bin = cs_dsp_mock_bin_get_firmware(local->bin_builder); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"), + 0); + + /* Check that the payload was written to memory */ + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes); +} + +/* Load a bin that doesn't have a valid magic marker. */ +static void bin_err_wrong_magic(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *bin; + + /* Sanity-check that the wmfw loads ok without the bin */ + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"), + 0); + cs_dsp_power_down(priv->dsp); + + bin = cs_dsp_mock_bin_get_firmware(local->bin_builder); + + memcpy((void *)bin->data, "WMFW", 4); + KUNIT_EXPECT_LT(test, + cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"), + 0); + + memcpy((void *)bin->data, "xMDR", 4); + KUNIT_EXPECT_LT(test, + cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"), + 0); + + memcpy((void *)bin->data, "WxDR", 4); + KUNIT_EXPECT_LT(test, + cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"), + 0); + + memcpy((void *)bin->data, "WMxR", 4); + KUNIT_EXPECT_LT(test, + cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"), + 0); + + memcpy((void *)bin->data, "WMDx", 4); + KUNIT_EXPECT_LT(test, + cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"), + 0); + + memset((void *)bin->data, 0, 4); + KUNIT_EXPECT_LT(test, + cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"), + 0); +} + +/* Load a bin that is too short for a valid header. 
*/ +static void bin_err_too_short_for_header(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *bin; + + /* Sanity-check that the wmfw loads ok without the bin */ + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"), + 0); + cs_dsp_power_down(priv->dsp); + + bin = cs_dsp_mock_bin_get_firmware(local->bin_builder); + do { + bin->size--; + + KUNIT_EXPECT_LT(test, + cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"), + 0); + } while (bin->size > 0); +} + +/* Header length field isn't a valid header length. */ +static void bin_err_bad_header_length(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *bin; + struct wmfw_coeff_hdr *header; + unsigned int real_len, len; + + /* Sanity-check that the wmfw loads ok without the bin */ + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"), + 0); + cs_dsp_power_down(priv->dsp); + + bin = cs_dsp_mock_bin_get_firmware(local->bin_builder); + header = (struct wmfw_coeff_hdr *)bin->data; + real_len = le32_to_cpu(header->len); + + for (len = 0; len < real_len; len++) { + header->len = cpu_to_le32(len); + KUNIT_EXPECT_LT(test, + cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"), + 0); + } + + for (len = real_len + 1; len < real_len + 7; len++) { + header->len = cpu_to_le32(len); + KUNIT_EXPECT_LT(test, + cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"), + 0); + } + + header->len = cpu_to_le32(0xffffffff); + KUNIT_EXPECT_LT(test, + cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"), + 0); + + header->len = cpu_to_le32(0x80000000); + KUNIT_EXPECT_LT(test, + cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"), + 0); + + header->len = cpu_to_le32(0x7fffffff); + KUNIT_EXPECT_LT(test, + cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"), + 0); +} + +/* Wrong core type in header. 
*/ +static void bin_err_bad_core_type(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *bin; + struct wmfw_coeff_hdr *header; + + /* Sanity-check that the wmfw loads ok without the bin */ + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"), + 0); + cs_dsp_power_down(priv->dsp); + + bin = cs_dsp_mock_bin_get_firmware(local->bin_builder); + header = (struct wmfw_coeff_hdr *)bin->data; + + header->core_ver = cpu_to_le32(0); + KUNIT_EXPECT_LT(test, + cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"), + 0); + + header->core_ver = cpu_to_le32(1); + KUNIT_EXPECT_LT(test, + cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"), + 0); + + header->core_ver = cpu_to_le32(priv->dsp->type + 1); + KUNIT_EXPECT_LT(test, + cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"), + 0); + + header->core_ver = cpu_to_le32(0xff); + KUNIT_EXPECT_LT(test, + cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"), + 0); +} + +/* File too short to contain a full block header */ +static void bin_too_short_for_block_header(struct kunit *test) +{ + const struct cs_dsp_bin_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *bin; + unsigned int header_length; + + /* Sanity-check that the wmfw loads ok without the bin */ + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"), + 0); + cs_dsp_power_down(priv->dsp); + + bin = cs_dsp_mock_bin_get_firmware(local->bin_builder); + header_length = bin->size; + kunit_kfree(test, bin); + + cs_dsp_mock_bin_add_raw_block(local->bin_builder, + cs_dsp_bin_err_test_mock_algs[0].id, + cs_dsp_bin_err_test_mock_algs[0].ver, + param->block_type, 0, + NULL, 0); + + bin = cs_dsp_mock_bin_get_firmware(local->bin_builder); + KUNIT_ASSERT_GT(test, bin->size, header_length); + + for (bin->size--; bin->size > header_length; bin->size--) { + KUNIT_EXPECT_LT(test, + cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"), + 0); + } +} + +/* File too short to contain the block payload */ +static void bin_too_short_for_block_payload(struct kunit *test) +{ + const struct cs_dsp_bin_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *bin; + static const u8 payload[256] = { }; + int i; + + /* Sanity-check that the wmfw loads ok without the bin */ + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"), + 0); + cs_dsp_power_down(priv->dsp); + + cs_dsp_mock_bin_add_raw_block(local->bin_builder, + cs_dsp_bin_err_test_mock_algs[0].id, + cs_dsp_bin_err_test_mock_algs[0].ver, + param->block_type, 0, + payload, sizeof(payload)); + + bin = cs_dsp_mock_bin_get_firmware(local->bin_builder); + for (i = 0; i < sizeof(payload); i++) { + bin->size--; + KUNIT_EXPECT_LT(test, + cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"), + 0); + } +} + +/* Block payload length is a garbage value */ +static void bin_block_payload_len_garbage(struct kunit *test) +{ + const struct cs_dsp_bin_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *bin; + struct wmfw_coeff_hdr *header; + struct wmfw_coeff_item *block; + u32 payload = 0; + + /* Sanity-check that the wmfw 
loads ok without the bin */ + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"), + 0); + cs_dsp_power_down(priv->dsp); + + cs_dsp_mock_bin_add_raw_block(local->bin_builder, + cs_dsp_bin_err_test_mock_algs[0].id, + cs_dsp_bin_err_test_mock_algs[0].ver, + param->block_type, 0, + &payload, sizeof(payload)); + + bin = cs_dsp_mock_bin_get_firmware(local->bin_builder); + header = (struct wmfw_coeff_hdr *)bin->data; + block = (struct wmfw_coeff_item *)&bin->data[le32_to_cpu(header->len)]; + + /* Sanity check that we're looking at the correct part of the bin */ + KUNIT_ASSERT_EQ(test, le16_to_cpu(block->type), param->block_type); + KUNIT_ASSERT_EQ(test, le32_to_cpu(block->len), sizeof(payload)); + + block->len = cpu_to_le32(0x8000); + KUNIT_EXPECT_LT(test, + cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"), + 0); + + block->len = cpu_to_le32(0xffff); + KUNIT_EXPECT_LT(test, + cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"), + 0); + + block->len = cpu_to_le32(0x7fffffff); + KUNIT_EXPECT_LT(test, + cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"), + 0); + + block->len = cpu_to_le32(0x80000000); + KUNIT_EXPECT_LT(test, + cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"), + 0); + + block->len = cpu_to_le32(0xffffffff); + KUNIT_EXPECT_LT(test, + cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"), + 0); +} + +static void cs_dsp_bin_err_test_exit(struct kunit *test) +{ + /* + * Testing error conditions can produce a lot of log output + * from cs_dsp error messages, so rate limit the test cases. + */ + usleep_range(200, 500); +} + +static int cs_dsp_bin_err_test_common_init(struct kunit *test, struct cs_dsp *dsp, + int wmfw_version) +{ + struct cs_dsp_test *priv; + struct cs_dsp_test_local *local; + struct device *test_dev; + int ret; + + priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL); + if (!local) + return -ENOMEM; + + priv->test = test; + priv->dsp = dsp; + test->priv = priv; + priv->local = local; + priv->local->wmfw_version = wmfw_version; + + /* Create dummy struct device */ + test_dev = kunit_device_register(test, "cs_dsp_test_drv"); + if (IS_ERR(test_dev)) + return PTR_ERR(test_dev); + + dsp->dev = get_device(test_dev); + if (!dsp->dev) + return -ENODEV; + + ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev); + if (ret) + return ret; + + dev_set_drvdata(dsp->dev, priv); + + /* Allocate regmap */ + ret = cs_dsp_mock_regmap_init(priv); + if (ret) + return ret; + + /* + * There must always be a XM header with at least 1 algorithm, so create + * a dummy one that tests can use and extract it to a data payload. 
+ */ + local->xm_header = cs_dsp_create_mock_xm_header(priv, + cs_dsp_bin_err_test_mock_algs, + ARRAY_SIZE(cs_dsp_bin_err_test_mock_algs)); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header); + + local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, priv->local->wmfw_version); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->wmfw_builder); + + /* Add dummy XM header payload to wmfw */ + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + WMFW_ADSP2_XM, 0, + local->xm_header->blob_data, + local->xm_header->blob_size_bytes); + + local->wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + + local->bin_builder = + cs_dsp_mock_bin_init(priv, 1, + cs_dsp_mock_xm_header_get_fw_version(local->xm_header)); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->bin_builder); + + /* Init cs_dsp */ + dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops); + + switch (dsp->type) { + case WMFW_ADSP2: + ret = cs_dsp_adsp2_init(dsp); + break; + case WMFW_HALO: + ret = cs_dsp_halo_init(dsp); + break; + default: + KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type); + return -EINVAL; + } + + if (ret) + return ret; + + /* Automatically call cs_dsp_remove() when test case ends */ + return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp); +} + +static int cs_dsp_bin_err_test_halo_init(struct kunit *test) +{ + struct cs_dsp *dsp; + + /* Fill in cs_dsp and initialize */ + dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL); + if (!dsp) + return -ENOMEM; + + dsp->num = 1; + dsp->type = WMFW_HALO; + dsp->mem = cs_dsp_mock_halo_dsp1_regions; + dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes); + dsp->base = cs_dsp_mock_halo_core_base; + dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base; + + return cs_dsp_bin_err_test_common_init(test, dsp, 3); +} + +static int cs_dsp_bin_err_test_adsp2_32bit_init(struct kunit *test) +{ + struct cs_dsp *dsp; + + /* Fill in cs_dsp and initialize */ + dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL); + if (!dsp) + return -ENOMEM; + + dsp->num = 1; + dsp->type = WMFW_ADSP2; + dsp->rev = 1; + dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions; + dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes); + dsp->base = cs_dsp_mock_adsp2_32bit_sysbase; + + return cs_dsp_bin_err_test_common_init(test, dsp, 2); +} + +static int cs_dsp_bin_err_test_adsp2_16bit_init(struct kunit *test) +{ + struct cs_dsp *dsp; + + /* Fill in cs_dsp and initialize */ + dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL); + if (!dsp) + return -ENOMEM; + + dsp->num = 1; + dsp->type = WMFW_ADSP2; + dsp->rev = 0; + dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions; + dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes); + dsp->base = cs_dsp_mock_adsp2_16bit_sysbase; + + return cs_dsp_bin_err_test_common_init(test, dsp, 1); +} + +static void cs_dsp_bin_err_block_types_desc(const struct cs_dsp_bin_test_param *param, + char *desc) +{ + snprintf(desc, KUNIT_PARAM_DESC_SIZE, "block_type:%#x", param->block_type); +} + +/* Some block types to test against, including illegal types */ +static const struct cs_dsp_bin_test_param bin_test_block_types_cases[] = { + { .block_type = WMFW_INFO_TEXT << 8 }, + { .block_type = WMFW_METADATA << 8 }, + { .block_type = WMFW_ADSP2_PM }, + { .block_type = WMFW_ADSP2_XM }, + { .block_type = 0x33 }, + { .block_type = 0xf500 }, + { .block_type = 0xc000 }, +}; + +KUNIT_ARRAY_PARAM(bin_test_block_types, + 
bin_test_block_types_cases, + cs_dsp_bin_err_block_types_desc); + +static struct kunit_case cs_dsp_bin_err_test_cases[] = { + KUNIT_CASE(bin_load_with_unknown_blocks), + KUNIT_CASE(bin_err_wrong_magic), + KUNIT_CASE(bin_err_too_short_for_header), + KUNIT_CASE(bin_err_bad_header_length), + KUNIT_CASE(bin_err_bad_core_type), + + KUNIT_CASE_PARAM(bin_too_short_for_block_header, bin_test_block_types_gen_params), + KUNIT_CASE_PARAM(bin_too_short_for_block_payload, bin_test_block_types_gen_params), + KUNIT_CASE_PARAM(bin_block_payload_len_garbage, bin_test_block_types_gen_params), + + { } /* terminator */ +}; + +static struct kunit_suite cs_dsp_bin_err_test_halo = { + .name = "cs_dsp_bin_err_halo", + .init = cs_dsp_bin_err_test_halo_init, + .exit = cs_dsp_bin_err_test_exit, + .test_cases = cs_dsp_bin_err_test_cases, +}; + +static struct kunit_suite cs_dsp_bin_err_test_adsp2_32bit = { + .name = "cs_dsp_bin_err_adsp2_32bit", + .init = cs_dsp_bin_err_test_adsp2_32bit_init, + .exit = cs_dsp_bin_err_test_exit, + .test_cases = cs_dsp_bin_err_test_cases, +}; + +static struct kunit_suite cs_dsp_bin_err_test_adsp2_16bit = { + .name = "cs_dsp_bin_err_adsp2_16bit", + .init = cs_dsp_bin_err_test_adsp2_16bit_init, + .exit = cs_dsp_bin_err_test_exit, + .test_cases = cs_dsp_bin_err_test_cases, +}; + +kunit_test_suites(&cs_dsp_bin_err_test_halo, + &cs_dsp_bin_err_test_adsp2_32bit, + &cs_dsp_bin_err_test_adsp2_16bit); diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c b/drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c new file mode 100644 index 000000000000..8a9b66a3b7d3 --- /dev/null +++ b/drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c @@ -0,0 +1,688 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// KUnit tests for cs_dsp. +// +// Copyright (C) 2024 Cirrus Logic, Inc. and +// Cirrus Logic International Semiconductor Ltd. 
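For reference, every suite in this series relies on KUnit's deferred-action mechanism for teardown: KUNIT_DEFINE_ACTION_WRAPPER() generates a void(void *) trampoline around a typed release function, and kunit_add_action_or_reset() either schedules that wrapper to run when the test case ends or invokes it immediately if registration fails. The sketch below shows the idiom in isolation; it is illustrative only — my_resource and my_resource_release are hypothetical names, while the KUnit macros and calls are the same ones used in these files.

/*
 * Minimal sketch of the KUnit deferred-cleanup idiom (illustrative only).
 * my_resource and my_resource_release are hypothetical; the KUnit API is real.
 */
#include <kunit/resource.h>
#include <kunit/test.h>

struct my_resource {
	int id;
};

static void my_resource_release(struct my_resource *res)
{
	/* Typed destructor: undo whatever the test set up for this resource */
}

/* Defines _my_release_wrapper(void *), which casts and calls my_resource_release() */
KUNIT_DEFINE_ACTION_WRAPPER(_my_release_wrapper, my_resource_release, struct my_resource *);

static int my_test_init(struct kunit *test)
{
	struct my_resource *res = kunit_kzalloc(test, sizeof(*res), GFP_KERNEL);

	if (!res)
		return -ENOMEM;

	/* Wrapper runs automatically at the end of the test case (or now, on failure) */
	return kunit_add_action_or_reset(test, _my_release_wrapper, res);
}

This is also why test bodies that call cs_dsp_remove() explicitly first call kunit_remove_action() on the registered wrapper, so the DSP is not torn down twice.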
+// + +#include <kunit/device.h> +#include <kunit/resource.h> +#include <kunit/test.h> +#include <kunit/test-bug.h> +#include <linux/build_bug.h> +#include <linux/firmware/cirrus/cs_dsp.h> +#include <linux/firmware/cirrus/cs_dsp_test_utils.h> +#include <linux/firmware/cirrus/wmfw.h> +#include <linux/random.h> +#include <linux/regmap.h> +#include <linux/string.h> +#include <linux/vmalloc.h> + +#define ADSP2_LOCK_REGION_CTRL 0x7A +#define ADSP2_WDT_TIMEOUT_STS_MASK 0x2000 + +KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *) +KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *) + +struct cs_dsp_test_local { + struct cs_dsp_mock_wmfw_builder *wmfw_builder; + + int num_control_add; + int num_control_remove; + int num_pre_run; + int num_post_run; + int num_pre_stop; + int num_post_stop; + int num_watchdog_expired; + + struct cs_dsp_coeff_ctl *passed_ctl[16]; + struct cs_dsp *passed_dsp; +}; + +struct cs_dsp_callbacks_test_param { + const struct cs_dsp_client_ops *ops; + const char *case_name; +}; + +static const struct cs_dsp_mock_alg_def cs_dsp_callbacks_test_mock_algs[] = { + { + .id = 0xfafa, + .ver = 0x100000, + .xm_size_words = 164, + .ym_size_words = 164, + .zm_size_words = 164, + }, +}; + +static const struct cs_dsp_mock_coeff_def mock_coeff_template = { + .shortname = "Dummy Coeff", + .type = WMFW_CTL_TYPE_BYTES, + .mem_type = WMFW_ADSP2_YM, + .flags = WMFW_CTL_FLAG_VOLATILE, + .length_bytes = 4, +}; + +static int cs_dsp_test_control_add_callback(struct cs_dsp_coeff_ctl *ctl) +{ + struct kunit *test = kunit_get_current_test(); + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + + local->passed_ctl[local->num_control_add] = ctl; + local->num_control_add++; + + return 0; +} + +static void cs_dsp_test_control_remove_callback(struct cs_dsp_coeff_ctl *ctl) +{ + struct kunit *test = kunit_get_current_test(); + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + + local->passed_ctl[local->num_control_remove] = ctl; + local->num_control_remove++; +} + +static int cs_dsp_test_pre_run_callback(struct cs_dsp *dsp) +{ + struct kunit *test = kunit_get_current_test(); + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + + local->passed_dsp = dsp; + local->num_pre_run++; + + return 0; +} + +static int cs_dsp_test_post_run_callback(struct cs_dsp *dsp) +{ + struct kunit *test = kunit_get_current_test(); + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + + local->passed_dsp = dsp; + local->num_post_run++; + + return 0; +} + +static void cs_dsp_test_pre_stop_callback(struct cs_dsp *dsp) +{ + struct kunit *test = kunit_get_current_test(); + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + + local->passed_dsp = dsp; + local->num_pre_stop++; +} + +static void cs_dsp_test_post_stop_callback(struct cs_dsp *dsp) +{ + struct kunit *test = kunit_get_current_test(); + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + + local->passed_dsp = dsp; + local->num_post_stop++; +} + +static void cs_dsp_test_watchdog_expired_callback(struct cs_dsp *dsp) +{ + struct kunit *test = kunit_get_current_test(); + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + + local->passed_dsp = dsp; + local->num_watchdog_expired++; +} + +static const struct cs_dsp_client_ops cs_dsp_callback_test_client_ops = { 
+ .control_add = cs_dsp_test_control_add_callback, + .control_remove = cs_dsp_test_control_remove_callback, + .pre_run = cs_dsp_test_pre_run_callback, + .post_run = cs_dsp_test_post_run_callback, + .pre_stop = cs_dsp_test_pre_stop_callback, + .post_stop = cs_dsp_test_post_stop_callback, + .watchdog_expired = cs_dsp_test_watchdog_expired_callback, +}; + +static const struct cs_dsp_client_ops cs_dsp_callback_test_empty_client_ops = { + /* No entries */ +}; + +static void cs_dsp_test_run_stop_callbacks(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *wmfw; + + wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder); + + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"), + 0); + + KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0); + KUNIT_EXPECT_EQ(test, local->num_pre_run, 1); + KUNIT_EXPECT_EQ(test, local->num_post_run, 1); + KUNIT_EXPECT_EQ(test, local->num_pre_stop, 0); + KUNIT_EXPECT_EQ(test, local->num_post_stop, 0); + KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp); + local->passed_dsp = NULL; + + cs_dsp_stop(priv->dsp); + KUNIT_EXPECT_EQ(test, local->num_pre_run, 1); + KUNIT_EXPECT_EQ(test, local->num_post_run, 1); + KUNIT_EXPECT_EQ(test, local->num_pre_stop, 1); + KUNIT_EXPECT_EQ(test, local->num_post_stop, 1); + KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp); + local->passed_dsp = NULL; + + KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0); + KUNIT_EXPECT_EQ(test, local->num_pre_run, 2); + KUNIT_EXPECT_EQ(test, local->num_post_run, 2); + KUNIT_EXPECT_EQ(test, local->num_pre_stop, 1); + KUNIT_EXPECT_EQ(test, local->num_post_stop, 1); + KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp); + local->passed_dsp = NULL; + + cs_dsp_stop(priv->dsp); + KUNIT_EXPECT_EQ(test, local->num_pre_run, 2); + KUNIT_EXPECT_EQ(test, local->num_post_run, 2); + KUNIT_EXPECT_EQ(test, local->num_pre_stop, 2); + KUNIT_EXPECT_EQ(test, local->num_post_stop, 2); + KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp); + local->passed_dsp = NULL; +} + +static void cs_dsp_test_ctl_v1_callbacks(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + int i; + + /* Add a control for each memory */ + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_callbacks_test_mock_algs[0].id, + "dummyalg", NULL); + def.shortname = "zm"; + def.mem_type = WMFW_ADSP2_ZM; + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + + def.shortname = "ym"; + def.mem_type = WMFW_ADSP2_YM; + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + + def.shortname = "xm"; + def.mem_type = WMFW_ADSP2_XM; + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"), + 0); + + /* There should have been an add callback for each control */ + KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), 3); + KUNIT_EXPECT_EQ(test, local->num_control_add, 3); + KUNIT_EXPECT_EQ(test, local->num_control_remove, 0); + + i = 0; + list_for_each_entry_reverse(ctl, &priv->dsp->ctl_list, list) + KUNIT_EXPECT_PTR_EQ(test, local->passed_ctl[i++], ctl); + + /* + * Call cs_dsp_remove() and there should be a remove 
callback + * for each control + */ + memset(local->passed_ctl, 0, sizeof(local->passed_ctl)); + cs_dsp_remove(priv->dsp); + + /* Prevent double cleanup */ + kunit_remove_action(priv->test, _cs_dsp_remove_wrapper, priv->dsp); + + KUNIT_EXPECT_EQ(test, local->num_control_add, 3); + KUNIT_EXPECT_EQ(test, local->num_control_remove, 3); + + i = 0; + list_for_each_entry_reverse(ctl, &priv->dsp->ctl_list, list) + KUNIT_EXPECT_PTR_EQ(test, local->passed_ctl[i++], ctl); +} + +static void cs_dsp_test_ctl_v2_callbacks(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + char name[2] = { }; + int i; + + /* Add some controls */ + def.shortname = name; + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_callbacks_test_mock_algs[0].id, + "dummyalg", NULL); + for (i = 0; i < ARRAY_SIZE(local->passed_ctl); ++i) { + name[0] = 'A' + i; + def.offset_dsp_words = i; + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + } + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"), + 0); + + /* There should have been an add callback for each control */ + KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), + ARRAY_SIZE(local->passed_ctl)); + KUNIT_EXPECT_EQ(test, local->num_control_add, ARRAY_SIZE(local->passed_ctl)); + KUNIT_EXPECT_EQ(test, local->num_control_remove, 0); + + i = 0; + list_for_each_entry_reverse(ctl, &priv->dsp->ctl_list, list) + KUNIT_EXPECT_PTR_EQ(test, local->passed_ctl[i++], ctl); + + /* + * Call cs_dsp_remove() and there should be a remove callback + * for each control + */ + memset(local->passed_ctl, 0, sizeof(local->passed_ctl)); + cs_dsp_remove(priv->dsp); + + /* Prevent double cleanup */ + kunit_remove_action(priv->test, _cs_dsp_remove_wrapper, priv->dsp); + + KUNIT_EXPECT_EQ(test, local->num_control_add, ARRAY_SIZE(local->passed_ctl)); + KUNIT_EXPECT_EQ(test, local->num_control_remove, ARRAY_SIZE(local->passed_ctl)); + + i = 0; + list_for_each_entry_reverse(ctl, &priv->dsp->ctl_list, list) + KUNIT_EXPECT_PTR_EQ(test, local->passed_ctl[i++], ctl); +} + +static void cs_dsp_test_no_callbacks(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct firmware *wmfw; + + /* Add a controls */ + def.shortname = "A"; + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_callbacks_test_mock_algs[0].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder); + + /* Run a sequence of ops that would invoke callbacks */ + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"), + 0); + KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0); + cs_dsp_stop(priv->dsp); + cs_dsp_remove(priv->dsp); + + /* Prevent double cleanup */ + kunit_remove_action(priv->test, _cs_dsp_remove_wrapper, priv->dsp); + + /* Something went very wrong if any of our callbacks were called */ + KUNIT_EXPECT_EQ(test, local->num_control_add, 0); + KUNIT_EXPECT_EQ(test, local->num_control_remove, 0); + KUNIT_EXPECT_EQ(test, local->num_pre_run, 0); + 
KUNIT_EXPECT_EQ(test, local->num_post_run, 0); + KUNIT_EXPECT_EQ(test, local->num_pre_stop, 0); + KUNIT_EXPECT_EQ(test, local->num_post_stop, 0); +} + +static void cs_dsp_test_adsp2v2_watchdog_callback(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *wmfw; + + wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder); + + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"), + 0); + + KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0); + + /* Set the watchdog timeout bit */ + regmap_write(priv->dsp->regmap, priv->dsp->base + ADSP2_LOCK_REGION_CTRL, + ADSP2_WDT_TIMEOUT_STS_MASK); + + /* Notify an interrupt and the watchdog callback should be called */ + cs_dsp_adsp2_bus_error(priv->dsp); + KUNIT_EXPECT_EQ(test, local->num_watchdog_expired, 1); + KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp); +} + +static void cs_dsp_test_adsp2v2_watchdog_no_callbacks(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *wmfw; + + wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"), + 0); + KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0); + + /* Set the watchdog timeout bit */ + regmap_write(priv->dsp->regmap, priv->dsp->base + ADSP2_LOCK_REGION_CTRL, + ADSP2_WDT_TIMEOUT_STS_MASK); + + /* Notify an interrupt, which will look for a watchdog callback */ + cs_dsp_adsp2_bus_error(priv->dsp); + KUNIT_EXPECT_EQ(test, local->num_watchdog_expired, 0); +} + +static void cs_dsp_test_halo_watchdog_callback(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *wmfw; + + wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder); + + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"), + 0); + + KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0); + + /* Notify an interrupt and the watchdog callback should be called */ + cs_dsp_halo_wdt_expire(priv->dsp); + KUNIT_EXPECT_EQ(test, local->num_watchdog_expired, 1); + KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp); +} + +static void cs_dsp_test_halo_watchdog_no_callbacks(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *wmfw; + + wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"), + 0); + KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0); + + /* Notify an interrupt, which will look for a watchdog callback */ + cs_dsp_halo_wdt_expire(priv->dsp); + KUNIT_EXPECT_EQ(test, local->num_watchdog_expired, 0); +} + +static int cs_dsp_callbacks_test_common_init(struct kunit *test, struct cs_dsp *dsp, + int wmfw_version) +{ + const struct cs_dsp_callbacks_test_param *param = test->param_value; + struct cs_dsp_test *priv; + struct cs_dsp_test_local *local; + struct device *test_dev; + struct cs_dsp_mock_xm_header *xm_header; + int ret; + + priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL); + if (!local) + return -ENOMEM; + + priv->test = test; + priv->dsp = dsp; + test->priv = priv; + priv->local = local; + + /* Create dummy struct device */ + test_dev = kunit_device_register(test, "cs_dsp_test_drv"); + 
if (IS_ERR(test_dev)) + return PTR_ERR(test_dev); + + dsp->dev = get_device(test_dev); + if (!dsp->dev) + return -ENODEV; + + ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev); + if (ret) + return ret; + + dev_set_drvdata(dsp->dev, priv); + + /* Allocate regmap */ + ret = cs_dsp_mock_regmap_init(priv); + if (ret) + return ret; + + /* + * There must always be a XM header with at least 1 algorithm, + * so create a dummy one and pre-populate XM so the wmfw doesn't + * have to contain an XM blob. + */ + xm_header = cs_dsp_create_mock_xm_header(priv, + cs_dsp_callbacks_test_mock_algs, + ARRAY_SIZE(cs_dsp_callbacks_test_mock_algs)); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xm_header); + cs_dsp_mock_xm_header_write_to_regmap(xm_header); + + local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, wmfw_version); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->wmfw_builder); + + /* Add dummy XM header payload to wmfw */ + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + WMFW_ADSP2_XM, 0, + xm_header->blob_data, + xm_header->blob_size_bytes); + + /* Init cs_dsp */ + dsp->client_ops = param->ops; + + switch (dsp->type) { + case WMFW_ADSP2: + ret = cs_dsp_adsp2_init(dsp); + break; + case WMFW_HALO: + ret = cs_dsp_halo_init(dsp); + break; + default: + KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type); + return -EINVAL; + } + + if (ret) + return ret; + + /* Automatically call cs_dsp_remove() when test case ends */ + return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp); +} + +static int cs_dsp_callbacks_test_halo_init(struct kunit *test) +{ + struct cs_dsp *dsp; + + /* Fill in cs_dsp and initialize */ + dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL); + if (!dsp) + return -ENOMEM; + + dsp->num = 1; + dsp->type = WMFW_HALO; + dsp->mem = cs_dsp_mock_halo_dsp1_regions; + dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes); + dsp->base = cs_dsp_mock_halo_core_base; + dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base; + + return cs_dsp_callbacks_test_common_init(test, dsp, 3); +} + +static int cs_dsp_callbacks_test_adsp2_32bit_init(struct kunit *test, int rev) +{ + struct cs_dsp *dsp; + + /* Fill in cs_dsp and initialize */ + dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL); + if (!dsp) + return -ENOMEM; + + dsp->num = 1; + dsp->type = WMFW_ADSP2; + dsp->rev = rev; + dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions; + dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes); + dsp->base = cs_dsp_mock_adsp2_32bit_sysbase; + + return cs_dsp_callbacks_test_common_init(test, dsp, 2); +} + +static int cs_dsp_callbacks_test_adsp2v2_32bit_init(struct kunit *test) +{ + return cs_dsp_callbacks_test_adsp2_32bit_init(test, 2); +} + +static int cs_dsp_callbacks_test_adsp2v1_32bit_init(struct kunit *test) +{ + return cs_dsp_callbacks_test_adsp2_32bit_init(test, 1); +} + +static int cs_dsp_callbacks_test_adsp2_16bit_init(struct kunit *test) +{ + struct cs_dsp *dsp; + + /* Fill in cs_dsp and initialize */ + dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL); + if (!dsp) + return -ENOMEM; + + dsp->num = 1; + dsp->type = WMFW_ADSP2; + dsp->rev = 0; + dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions; + dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes); + dsp->base = cs_dsp_mock_adsp2_16bit_sysbase; + + return cs_dsp_callbacks_test_common_init(test, dsp, 1); +} + +static void cs_dsp_callbacks_param_desc(const struct cs_dsp_callbacks_test_param *param, + char *desc) +{ + snprintf(desc, 
KUNIT_PARAM_DESC_SIZE, "%s", param->case_name); +} + +/* Parameterize on different client callback ops tables */ +static const struct cs_dsp_callbacks_test_param cs_dsp_callbacks_ops_cases[] = { + { .ops = &cs_dsp_callback_test_client_ops, .case_name = "all ops" }, +}; + +KUNIT_ARRAY_PARAM(cs_dsp_callbacks_ops, + cs_dsp_callbacks_ops_cases, + cs_dsp_callbacks_param_desc); + +static const struct cs_dsp_callbacks_test_param cs_dsp_no_callbacks_cases[] = { + { .ops = &cs_dsp_callback_test_empty_client_ops, .case_name = "empty ops" }, +}; + +KUNIT_ARRAY_PARAM(cs_dsp_no_callbacks, + cs_dsp_no_callbacks_cases, + cs_dsp_callbacks_param_desc); + +static struct kunit_case cs_dsp_callbacks_adsp2_wmfwv1_test_cases[] = { + KUNIT_CASE_PARAM(cs_dsp_test_run_stop_callbacks, cs_dsp_callbacks_ops_gen_params), + KUNIT_CASE_PARAM(cs_dsp_test_ctl_v1_callbacks, cs_dsp_callbacks_ops_gen_params), + KUNIT_CASE_PARAM(cs_dsp_test_no_callbacks, cs_dsp_no_callbacks_gen_params), + + { } /* terminator */ +}; + +static struct kunit_case cs_dsp_callbacks_adsp2_wmfwv2_test_cases[] = { + KUNIT_CASE_PARAM(cs_dsp_test_run_stop_callbacks, cs_dsp_callbacks_ops_gen_params), + KUNIT_CASE_PARAM(cs_dsp_test_ctl_v2_callbacks, cs_dsp_callbacks_ops_gen_params), + KUNIT_CASE_PARAM(cs_dsp_test_no_callbacks, cs_dsp_no_callbacks_gen_params), + + { } /* terminator */ +}; + +static struct kunit_case cs_dsp_callbacks_halo_test_cases[] = { + KUNIT_CASE_PARAM(cs_dsp_test_run_stop_callbacks, cs_dsp_callbacks_ops_gen_params), + KUNIT_CASE_PARAM(cs_dsp_test_ctl_v2_callbacks, cs_dsp_callbacks_ops_gen_params), + KUNIT_CASE_PARAM(cs_dsp_test_no_callbacks, cs_dsp_no_callbacks_gen_params), + + { } /* terminator */ +}; + +static struct kunit_case cs_dsp_watchdog_adsp2v2_test_cases[] = { + KUNIT_CASE_PARAM(cs_dsp_test_adsp2v2_watchdog_callback, cs_dsp_callbacks_ops_gen_params), + KUNIT_CASE_PARAM(cs_dsp_test_adsp2v2_watchdog_no_callbacks, cs_dsp_no_callbacks_gen_params), + + { } /* terminator */ +}; + +static struct kunit_case cs_dsp_watchdog_halo_test_cases[] = { + KUNIT_CASE_PARAM(cs_dsp_test_halo_watchdog_callback, cs_dsp_callbacks_ops_gen_params), + KUNIT_CASE_PARAM(cs_dsp_test_halo_watchdog_no_callbacks, cs_dsp_no_callbacks_gen_params), + + { } /* terminator */ +}; + +static struct kunit_suite cs_dsp_callbacks_test_halo = { + .name = "cs_dsp_callbacks_halo", + .init = cs_dsp_callbacks_test_halo_init, + .test_cases = cs_dsp_callbacks_halo_test_cases, +}; + +static struct kunit_suite cs_dsp_callbacks_test_adsp2v2_32bit = { + .name = "cs_dsp_callbacks_adsp2v2_32bit_wmfwv2", + .init = cs_dsp_callbacks_test_adsp2v2_32bit_init, + .test_cases = cs_dsp_callbacks_adsp2_wmfwv2_test_cases, +}; + +static struct kunit_suite cs_dsp_callbacks_test_adsp2v1_32bit = { + .name = "cs_dsp_callbacks_adsp2v1_32bit_wmfwv2", + .init = cs_dsp_callbacks_test_adsp2v1_32bit_init, + .test_cases = cs_dsp_callbacks_adsp2_wmfwv2_test_cases, +}; + +static struct kunit_suite cs_dsp_callbacks_test_adsp2_16bit = { + .name = "cs_dsp_callbacks_adsp2_16bit_wmfwv1", + .init = cs_dsp_callbacks_test_adsp2_16bit_init, + .test_cases = cs_dsp_callbacks_adsp2_wmfwv1_test_cases, +}; + +static struct kunit_suite cs_dsp_watchdog_test_adsp2v2_32bit = { + .name = "cs_dsp_watchdog_adsp2v2_32bit", + .init = cs_dsp_callbacks_test_adsp2v2_32bit_init, + .test_cases = cs_dsp_watchdog_adsp2v2_test_cases, +}; + +static struct kunit_suite cs_dsp_watchdog_test_halo_32bit = { + .name = "cs_dsp_watchdog_halo", + .init = cs_dsp_callbacks_test_halo_init, + .test_cases = cs_dsp_watchdog_halo_test_cases, +}; + 
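The callback suites above drive the cs_dsp_client_ops hooks through the full cs_dsp_power_up()/cs_dsp_run()/cs_dsp_stop() sequence. For readers coming to this from the client side, the sketch below shows roughly how a driver would wire up the same hooks; it is an illustrative sketch only — the ops members and the call sequence mirror the tests above, but my_pre_run, my_wdt_expired, my_dsp_boot and the firmware name are assumptions, and the caller is assumed to have filled in the cs_dsp struct (dev, regmap, memory map) beforehand, much as the suite init functions do.

/*
 * Illustrative sketch of a cs_dsp client wiring up the same hooks the
 * suites above exercise. Helper names and the firmware name are
 * hypothetical; the cs_dsp calls mirror the tests.
 */
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/firmware/cirrus/cs_dsp.h>

static int my_pre_run(struct cs_dsp *dsp)
{
	/* Invoked from cs_dsp_run() before the core is started */
	return 0;
}

static void my_wdt_expired(struct cs_dsp *dsp)
{
	/* Reported from the ADSP2 bus-error / Halo WDT interrupt paths */
	dev_err(dsp->dev, "DSP watchdog expired\n");
}

static const struct cs_dsp_client_ops my_client_ops = {
	.pre_run = my_pre_run,
	.watchdog_expired = my_wdt_expired,
	/* Hooks not provided are simply skipped; the "empty ops" cases cover that */
};

static int my_dsp_boot(struct cs_dsp *dsp)
{
	const struct firmware *wmfw;
	int ret;

	/* dsp->dev, dsp->regmap and the memory map are assumed to be set up already */
	dsp->client_ops = &my_client_ops;
	ret = cs_dsp_halo_init(dsp);	/* or cs_dsp_adsp2_init() for ADSP2 cores */
	if (ret)
		return ret;

	ret = request_firmware(&wmfw, "my-dsp.wmfw", dsp->dev);
	if (ret)
		return ret;

	ret = cs_dsp_power_up(dsp, wmfw, "my-dsp.wmfw", NULL, NULL, "misc");
	release_firmware(wmfw);
	if (ret)
		return ret;

	return cs_dsp_run(dsp);		/* pre_run/post_run callbacks fire here */
}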
+kunit_test_suites(&cs_dsp_callbacks_test_halo, + &cs_dsp_callbacks_test_adsp2v2_32bit, + &cs_dsp_callbacks_test_adsp2v1_32bit, + &cs_dsp_callbacks_test_adsp2_16bit, + &cs_dsp_watchdog_test_adsp2v2_32bit, + &cs_dsp_watchdog_test_halo_32bit); diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_control_cache.c b/drivers/firmware/cirrus/test/cs_dsp_test_control_cache.c new file mode 100644 index 000000000000..83386cc978e3 --- /dev/null +++ b/drivers/firmware/cirrus/test/cs_dsp_test_control_cache.c @@ -0,0 +1,3282 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// KUnit tests for cs_dsp. +// +// Copyright (C) 2024 Cirrus Logic, Inc. and +// Cirrus Logic International Semiconductor Ltd. + +#include <kunit/device.h> +#include <kunit/resource.h> +#include <kunit/test.h> +#include <linux/build_bug.h> +#include <linux/firmware/cirrus/cs_dsp.h> +#include <linux/firmware/cirrus/cs_dsp_test_utils.h> +#include <linux/firmware/cirrus/wmfw.h> +#include <linux/list.h> +#include <linux/random.h> +#include <linux/regmap.h> + +KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *); +KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_stop_wrapper, cs_dsp_stop, struct cs_dsp *); +KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *); + +struct cs_dsp_test_local { + struct cs_dsp_mock_xm_header *xm_header; + struct cs_dsp_mock_wmfw_builder *wmfw_builder; + int wmfw_version; +}; + +struct cs_dsp_ctl_cache_test_param { + int mem_type; + int alg_id; + unsigned int offs_words; + unsigned int len_bytes; + u16 ctl_type; + u16 flags; +}; + +static const struct cs_dsp_mock_alg_def cs_dsp_ctl_cache_test_algs[] = { + { + .id = 0xfafa, + .ver = 0x100000, + .xm_base_words = 60, + .xm_size_words = 1000, + .ym_base_words = 0, + .ym_size_words = 1000, + .zm_base_words = 0, + .zm_size_words = 1000, + }, + { + .id = 0xb, + .ver = 0x100001, + .xm_base_words = 1060, + .xm_size_words = 1000, + .ym_base_words = 1000, + .ym_size_words = 1000, + .zm_base_words = 1000, + .zm_size_words = 1000, + }, + { + .id = 0x9f1234, + .ver = 0x100500, + .xm_base_words = 2060, + .xm_size_words = 32, + .ym_base_words = 2000, + .ym_size_words = 32, + .zm_base_words = 2000, + .zm_size_words = 32, + }, + { + .id = 0xff00ff, + .ver = 0x300113, + .xm_base_words = 2100, + .xm_size_words = 32, + .ym_base_words = 2032, + .ym_size_words = 32, + .zm_base_words = 2032, + .zm_size_words = 32, + }, +}; + +static const struct cs_dsp_mock_coeff_def mock_coeff_template = { + .shortname = "Dummy Coeff", + .type = WMFW_CTL_TYPE_BYTES, + .mem_type = WMFW_ADSP2_YM, + .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE, + .length_bytes = 4, +}; + +static const char * const cs_dsp_ctl_cache_test_fw_names[] = { + "misc", "mbc/vss", "haps", +}; + +static int _find_alg_entry(struct kunit *test, unsigned int alg_id) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_cache_test_algs); ++i) { + if (cs_dsp_ctl_cache_test_algs[i].id == alg_id) + break; + } + + KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(cs_dsp_ctl_cache_test_algs)); + + return i; +} + +static int _get_alg_mem_base_words(struct kunit *test, int alg_index, int mem_type) +{ + switch (mem_type) { + case WMFW_ADSP2_XM: + return cs_dsp_ctl_cache_test_algs[alg_index].xm_base_words; + case WMFW_ADSP2_YM: + return cs_dsp_ctl_cache_test_algs[alg_index].ym_base_words; + case WMFW_ADSP2_ZM: + return cs_dsp_ctl_cache_test_algs[alg_index].zm_base_words; + default: + KUNIT_FAIL(test, "Bug in test: illegal memory type %d\n", mem_type); + return 0; + } +} + +static struct 
cs_dsp_mock_wmfw_builder *_create_dummy_wmfw(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_wmfw_builder *builder; + + builder = cs_dsp_mock_wmfw_init(priv, local->wmfw_version); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder); + + /* Init an XM header */ + cs_dsp_mock_wmfw_add_data_block(builder, + WMFW_ADSP2_XM, 0, + local->xm_header->blob_data, + local->xm_header->blob_size_bytes); + + return builder; +} + +/* + * Memory allocated for control cache must be large enough. + * This creates multiple controls of different sizes so only works on + * wmfw V2 and later. + */ +static void cs_dsp_ctl_v2_cache_alloc(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + unsigned int reg, alg_base_words, alg_size_bytes; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + char ctl_name[4]; + u32 *reg_vals; + int num_ctls; + + /* Create some DSP data to initialize the control cache */ + alg_base_words = _get_alg_mem_base_words(test, 0, WMFW_ADSP2_YM); + alg_size_bytes = cs_dsp_ctl_cache_test_algs[0].ym_size_words * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + reg_vals = kunit_kzalloc(test, alg_size_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + reg = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM); + reg += alg_base_words * cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + regmap_raw_write(dsp->regmap, reg, reg_vals, alg_size_bytes); + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_cache_test_algs[0].id, + "dummyalg", NULL); + + /* Create controls of different sizes */ + def.mem_type = WMFW_ADSP2_YM; + def.shortname = ctl_name; + num_ctls = 0; + for (def.length_bytes = 4; def.length_bytes <= 64; def.length_bytes += 4) { + snprintf(ctl_name, ARRAY_SIZE(ctl_name), "%x", def.length_bytes); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + num_ctls++; + def.offset_dsp_words += def.length_bytes / sizeof(u32); + } + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + KUNIT_EXPECT_EQ(test, list_count_nodes(&dsp->ctl_list), num_ctls); + + /* Check that the block allocated for the cache is large enough */ + list_for_each_entry(ctl, &dsp->ctl_list, list) + KUNIT_EXPECT_GE(test, ksize(ctl->cache), ctl->len); +} + +/* + * Content of registers backing a control should be read into the + * control cache when the firmware is downloaded. 
+ */ +static void cs_dsp_ctl_cache_init(struct kunit *test) +{ + const struct cs_dsp_ctl_cache_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals, *readback; + + reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + /* Create some DSP data to be read into the control cache */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + get_random_bytes(reg_vals, param->len_bytes); + regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_cache_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + + /* + * The data should have been populated into the control cache + * so should be readable through the control. + */ + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes); +} + +/* + * For a non-volatile write-only control the cache should be zero-filled + * when the firmware is downloaded. 
+ */ +static void cs_dsp_ctl_cache_init_write_only(struct kunit *test) +{ + const struct cs_dsp_ctl_cache_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *readback, *zeros; + + zeros = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, zeros); + + readback = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + /* Create a non-volatile write-only control */ + def.flags = param->flags & ~WMFW_CTL_FLAG_VOLATILE; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_cache_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + + /* + * The control cache should have been zero-filled so should be + * readable through the control. + */ + get_random_bytes(readback, param->len_bytes); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, zeros, param->len_bytes); +} + +/* + * Multiple different firmware with identical controls. + * This is legal because different firmwares could contain the same + * algorithm. + * The control cache should be initialized only with the data from + * the firmware containing it. + */ +static void cs_dsp_ctl_cache_init_multiple_fw_same_controls(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_mock_wmfw_builder *builder[3]; + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *walkctl, *ctl[3]; + struct firmware *wmfw; + u32 *reg_vals[3], *readback; + int i; + + static_assert(ARRAY_SIZE(ctl) == ARRAY_SIZE(builder)); + static_assert(ARRAY_SIZE(reg_vals) == ARRAY_SIZE(builder)); + static_assert(ARRAY_SIZE(cs_dsp_ctl_cache_test_fw_names) >= ARRAY_SIZE(builder)); + + /* Create an identical control in each firmware but with different alg id */ + for (i = 0; i < ARRAY_SIZE(builder); i++) { + builder[i] = _create_dummy_wmfw(test); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder[i]); + + cs_dsp_mock_wmfw_start_alg_info_block(builder[i], + cs_dsp_ctl_cache_test_algs[0].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(builder[i], &def); + cs_dsp_mock_wmfw_end_alg_info_block(builder[i]); + } + + for (i = 0; i < ARRAY_SIZE(reg_vals); i++) { + reg_vals[i] = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals[i]); + } + + readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + /* + * For each firmware create random content in the register backing + * the control. Then download, start, stop and power-down. 
+ */ + for (i = 0; i < ARRAY_SIZE(builder); i++) { + alg_base_words = _get_alg_mem_base_words(test, 0, def.mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, def.mem_type); + reg += (alg_base_words + def.offset_dsp_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + + get_random_bytes(reg_vals[i], def.length_bytes); + regmap_raw_write(dsp->regmap, reg, reg_vals[i], def.length_bytes); + wmfw = cs_dsp_mock_wmfw_get_firmware(builder[i]); + KUNIT_ASSERT_EQ(test, + cs_dsp_power_up(dsp, wmfw, + cs_dsp_ctl_cache_test_fw_names[i], + NULL, NULL, + cs_dsp_ctl_cache_test_fw_names[i]), + 0); + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + cs_dsp_stop(dsp); + cs_dsp_power_down(dsp); + } + + /* There should now be 3 controls */ + KUNIT_ASSERT_EQ(test, list_count_nodes(&dsp->ctl_list), 3); + + /* + * There's no requirement for the control list to be in any + * particular order, so don't assume the order. + */ + for (i = 0; i < ARRAY_SIZE(ctl); i++) + ctl[i] = NULL; + + list_for_each_entry(walkctl, &dsp->ctl_list, list) { + if (strcmp(walkctl->fw_name, cs_dsp_ctl_cache_test_fw_names[0]) == 0) + ctl[0] = walkctl; + else if (strcmp(walkctl->fw_name, cs_dsp_ctl_cache_test_fw_names[1]) == 0) + ctl[1] = walkctl; + else if (strcmp(walkctl->fw_name, cs_dsp_ctl_cache_test_fw_names[2]) == 0) + ctl[2] = walkctl; + } + + KUNIT_ASSERT_NOT_NULL(test, ctl[0]); + KUNIT_ASSERT_NOT_NULL(test, ctl[1]); + KUNIT_ASSERT_NOT_NULL(test, ctl[2]); + + /* + * The data should have been populated into the control cache + * so should be readable through the control. + */ + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl[0], 0, readback, def.length_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[0], def.length_bytes); + + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl[1], 0, readback, def.length_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[1], def.length_bytes); + + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl[2], 0, readback, def.length_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[2], def.length_bytes); +} + +/* + * Multiple different firmware with controls identical except for alg id. + * This is legal because the controls are qualified by algorithm id. + * The control cache should be initialized only with the data from + * the firmware containing it. 
+ */ +static void cs_dsp_ctl_cache_init_multiple_fwalgid_same_controls(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_mock_wmfw_builder *builder[3]; + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *walkctl, *ctl[3]; + struct firmware *wmfw; + u32 *reg_vals[3], *readback; + int i; + + static_assert(ARRAY_SIZE(ctl) == ARRAY_SIZE(builder)); + static_assert(ARRAY_SIZE(reg_vals) == ARRAY_SIZE(builder)); + static_assert(ARRAY_SIZE(cs_dsp_ctl_cache_test_fw_names) >= ARRAY_SIZE(builder)); + + /* Create an identical control in each firmware but with different alg id */ + for (i = 0; i < ARRAY_SIZE(builder); i++) { + builder[i] = _create_dummy_wmfw(test); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder[i]); + + cs_dsp_mock_wmfw_start_alg_info_block(builder[i], + cs_dsp_ctl_cache_test_algs[i].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(builder[i], &def); + cs_dsp_mock_wmfw_end_alg_info_block(builder[i]); + } + + for (i = 0; i < ARRAY_SIZE(reg_vals); i++) { + reg_vals[i] = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals[i]); + } + + readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + /* + * For each firmware create random content in the register backing + * the control. Then download, start, stop and power-down. + */ + for (i = 0; i < ARRAY_SIZE(builder); i++) { + alg_base_words = _get_alg_mem_base_words(test, i, def.mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, def.mem_type); + reg += (alg_base_words + def.offset_dsp_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + + get_random_bytes(reg_vals[i], def.length_bytes); + regmap_raw_write(dsp->regmap, reg, reg_vals[i], def.length_bytes); + wmfw = cs_dsp_mock_wmfw_get_firmware(builder[i]); + KUNIT_ASSERT_EQ(test, + cs_dsp_power_up(dsp, wmfw, + cs_dsp_ctl_cache_test_fw_names[i], + NULL, NULL, + cs_dsp_ctl_cache_test_fw_names[i]), + 0); + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + cs_dsp_stop(dsp); + cs_dsp_power_down(dsp); + } + + /* There should now be 3 controls */ + KUNIT_ASSERT_EQ(test, list_count_nodes(&dsp->ctl_list), 3); + + /* + * There's no requirement for the control list to be in any + * particular order, so don't assume the order. + */ + for (i = 0; i < ARRAY_SIZE(ctl); i++) + ctl[i] = NULL; + + list_for_each_entry(walkctl, &dsp->ctl_list, list) { + if (cs_dsp_ctl_cache_test_algs[0].id == walkctl->alg_region.alg) + ctl[0] = walkctl; + else if (cs_dsp_ctl_cache_test_algs[1].id == walkctl->alg_region.alg) + ctl[1] = walkctl; + else if (cs_dsp_ctl_cache_test_algs[2].id == walkctl->alg_region.alg) + ctl[2] = walkctl; + } + + KUNIT_ASSERT_NOT_NULL(test, ctl[0]); + KUNIT_ASSERT_NOT_NULL(test, ctl[1]); + KUNIT_ASSERT_NOT_NULL(test, ctl[2]); + + /* + * The data should have been populated into the control cache + * so should be readable through the control. 
+ */ + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl[0], 0, readback, def.length_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[0], def.length_bytes); + + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl[1], 0, readback, def.length_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[1], def.length_bytes); + + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl[2], 0, readback, def.length_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[2], def.length_bytes); +} + +/* + * Firmware with controls at the same position in different memories. + * The control cache should be initialized with content from the + * correct memory region. + */ +static void cs_dsp_ctl_cache_init_multiple_mems(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *walkctl, *ctl[3]; + struct firmware *wmfw; + u32 *reg_vals[3], *readback; + int i; + + static_assert(ARRAY_SIZE(ctl) == ARRAY_SIZE(reg_vals)); + + for (i = 0; i < ARRAY_SIZE(reg_vals); i++) { + reg_vals[i] = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals[i]); + get_random_bytes(reg_vals[i], def.length_bytes); + } + + readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_cache_test_algs[0].id, + "dummyalg", NULL); + + /* Create controls identical except for memory region */ + def.mem_type = WMFW_ADSP2_YM; + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + + def.mem_type = WMFW_ADSP2_XM; + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + + if (cs_dsp_mock_has_zm(priv)) { + def.mem_type = WMFW_ADSP2_ZM; + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + } + + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + /* Create random content in the registers backing each control */ + alg_base_words = _get_alg_mem_base_words(test, 0, WMFW_ADSP2_YM); + reg = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM); + reg += (alg_base_words + def.offset_dsp_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + regmap_raw_write(dsp->regmap, reg, reg_vals[0], def.length_bytes); + + alg_base_words = _get_alg_mem_base_words(test, 0, WMFW_ADSP2_XM); + reg = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM); + reg += (alg_base_words + def.offset_dsp_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + regmap_raw_write(dsp->regmap, reg, reg_vals[1], def.length_bytes); + + if (cs_dsp_mock_has_zm(priv)) { + alg_base_words = _get_alg_mem_base_words(test, 0, WMFW_ADSP2_ZM); + reg = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_ZM); + reg += (alg_base_words + def.offset_dsp_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + regmap_raw_write(dsp->regmap, reg, reg_vals[2], def.length_bytes); + } + + /* Download, run, stop and power-down the firmware */ + wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + cs_dsp_stop(dsp); + cs_dsp_power_down(dsp); + + /* There should now be 2 or 3 controls */ + KUNIT_ASSERT_EQ(test, list_count_nodes(&dsp->ctl_list), + cs_dsp_mock_has_zm(priv) ? 
3 : 2); + + /* + * There's no requirement for the control list to be in any + * particular order, so don't assume the order. + */ + for (i = 0; i < ARRAY_SIZE(ctl); i++) + ctl[i] = NULL; + + list_for_each_entry(walkctl, &dsp->ctl_list, list) { + if (walkctl->alg_region.type == WMFW_ADSP2_YM) + ctl[0] = walkctl; + if (walkctl->alg_region.type == WMFW_ADSP2_XM) + ctl[1] = walkctl; + if (walkctl->alg_region.type == WMFW_ADSP2_ZM) + ctl[2] = walkctl; + } + + + /* + * The data should have been populated into the control cache + * so should be readable through the control. + */ + KUNIT_ASSERT_NOT_NULL(test, ctl[0]); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl[0], 0, readback, def.length_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[0], def.length_bytes); + + KUNIT_ASSERT_NOT_NULL(test, ctl[1]); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl[1], 0, readback, def.length_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[1], def.length_bytes); + + if (cs_dsp_mock_has_zm(priv)) { + KUNIT_ASSERT_NOT_NULL(test, ctl[2]); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl[2], 0, readback, + def.length_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[2], def.length_bytes); + } +} + +/* + * Firmware with controls at the same position in different algorithms + * The control cache should be initialized with content from the + * memory of the algorithm it points to. + */ +static void cs_dsp_ctl_cache_init_multiple_algs(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *walkctl, *ctl[3]; + struct firmware *wmfw; + u32 *reg_vals[3], *readback; + int i; + + static_assert(ARRAY_SIZE(ctl) == ARRAY_SIZE(reg_vals)); + static_assert(ARRAY_SIZE(reg_vals) <= ARRAY_SIZE(cs_dsp_ctl_cache_test_algs)); + + for (i = 0; i < ARRAY_SIZE(reg_vals); i++) { + reg_vals[i] = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals[i]); + get_random_bytes(reg_vals[i], def.length_bytes); + } + + readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + /* Create controls identical except for algorithm */ + for (i = 0; i < ARRAY_SIZE(reg_vals); i++) { + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_cache_test_algs[i].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + } + + /* Create random content in the registers backing each control */ + for (i = 0; i < ARRAY_SIZE(reg_vals); i++) { + alg_base_words = _get_alg_mem_base_words(test, i, def.mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, def.mem_type); + reg += (alg_base_words + def.offset_dsp_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + regmap_raw_write(dsp->regmap, reg, reg_vals[i], def.length_bytes); + } + + /* Download, run, stop and power-down the firmware */ + wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + cs_dsp_stop(dsp); + cs_dsp_power_down(dsp); + + /* There should now be 3 controls */ + KUNIT_ASSERT_EQ(test, list_count_nodes(&dsp->ctl_list), 3); + + /* + * There's no requirement for the control list to 
be in any + * particular order, so don't assume the order. + */ + for (i = 0; i < ARRAY_SIZE(ctl); i++) + ctl[i] = NULL; + + list_for_each_entry(walkctl, &dsp->ctl_list, list) { + if (walkctl->alg_region.alg == cs_dsp_ctl_cache_test_algs[0].id) + ctl[0] = walkctl; + if (walkctl->alg_region.alg == cs_dsp_ctl_cache_test_algs[1].id) + ctl[1] = walkctl; + if (walkctl->alg_region.alg == cs_dsp_ctl_cache_test_algs[2].id) + ctl[2] = walkctl; + } + + KUNIT_ASSERT_NOT_NULL(test, ctl[0]); + KUNIT_ASSERT_NOT_NULL(test, ctl[1]); + KUNIT_ASSERT_NOT_NULL(test, ctl[2]); + + /* + * The data should have been populated into the control cache + * so should be readable through the control. + */ + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl[0], 0, readback, def.length_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[0], def.length_bytes); + + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl[1], 0, readback, def.length_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[1], def.length_bytes); + + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl[2], 0, readback, + def.length_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[2], def.length_bytes); +} + +/* + * Firmware with controls in the same algorithm and memory but at + * different offsets. + * The control cache should be initialized with content from the + * correct offset. + * Only for wmfw format V2 and later. V1 only supports one control per + * memory per algorithm. + */ +static void cs_dsp_ctl_cache_init_multiple_offsets(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + unsigned int reg, alg_base_words, alg_base_reg; + struct cs_dsp_coeff_ctl *walkctl, *ctl[3]; + struct firmware *wmfw; + u32 *reg_vals[3], *readback; + int i; + + static_assert(ARRAY_SIZE(ctl) == ARRAY_SIZE(reg_vals)); + static_assert(ARRAY_SIZE(reg_vals) <= ARRAY_SIZE(cs_dsp_ctl_cache_test_algs)); + + for (i = 0; i < ARRAY_SIZE(reg_vals); i++) { + reg_vals[i] = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals[i]); + get_random_bytes(reg_vals[i], def.length_bytes); + } + + readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_cache_test_algs[0].id, + "dummyalg", NULL); + + /* Create controls identical except for offset */ + def.length_bytes = 8; + def.offset_dsp_words = 0; + def.shortname = "CtlA"; + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + + def.offset_dsp_words = 5; + def.shortname = "CtlB"; + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + + def.offset_dsp_words = 8; + def.shortname = "CtlC"; + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + /* Create random content in the registers backing each control */ + alg_base_words = _get_alg_mem_base_words(test, 0, def.mem_type); + alg_base_reg = cs_dsp_mock_base_addr_for_mem(priv, def.mem_type); + alg_base_reg += alg_base_words * cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + + reg = alg_base_reg; + regmap_raw_write(dsp->regmap, reg, reg_vals[0], def.length_bytes); + reg = alg_base_reg + (5 * cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv)); + regmap_raw_write(dsp->regmap, reg, reg_vals[1], def.length_bytes); + reg = alg_base_reg 
+ (8 * cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv)); + regmap_raw_write(dsp->regmap, reg, reg_vals[2], def.length_bytes); + + /* Download, run, stop and power-down the firmware */ + wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + cs_dsp_stop(dsp); + cs_dsp_power_down(dsp); + + /* There should now be 3 controls */ + KUNIT_ASSERT_EQ(test, list_count_nodes(&dsp->ctl_list), 3); + + /* + * There's no requirement for the control list to be in any + * particular order, so don't assume the order. + */ + for (i = 0; i < ARRAY_SIZE(ctl); i++) + ctl[i] = NULL; + + list_for_each_entry(walkctl, &dsp->ctl_list, list) { + if (walkctl->offset == 0) + ctl[0] = walkctl; + if (walkctl->offset == 5) + ctl[1] = walkctl; + if (walkctl->offset == 8) + ctl[2] = walkctl; + } + + KUNIT_ASSERT_NOT_NULL(test, ctl[0]); + KUNIT_ASSERT_NOT_NULL(test, ctl[1]); + KUNIT_ASSERT_NOT_NULL(test, ctl[2]); + + /* + * The data should have been populated into the control cache + * so should be readable through the control. + */ + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl[0], 0, readback, def.length_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[0], def.length_bytes); + + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl[1], 0, readback, def.length_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[1], def.length_bytes); + + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl[2], 0, readback, + def.length_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[2], def.length_bytes); +} + +/* + * Read from a cached control before the firmware is started. + * Should return the data in the cache. 
+ */ +static void cs_dsp_ctl_cache_read_not_started(struct kunit *test) +{ + const struct cs_dsp_ctl_cache_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals, *readback; + + reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + /* Create some DSP data to be read into the control cache */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + get_random_bytes(reg_vals, param->len_bytes); + regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_cache_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + /* Power-up DSP but don't start firmware */ + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + /* Drop expected writes and the regmap cache should be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); + + /* Control should readback the data from the control cache */ + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes); +} + +/* + * Read from a cached control after the firmware has been stopped. + * Should return the data in the cache. 
+ */ +static void cs_dsp_ctl_cache_read_stopped(struct kunit *test) +{ + const struct cs_dsp_ctl_cache_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals, *readback; + + reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + /* Create some DSP data to be read into the control cache */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + get_random_bytes(reg_vals, param->len_bytes); + regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_cache_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + /* Power-up DSP */ + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + /* Start and stop the firmware */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + cs_dsp_stop(dsp); + + /* Drop expected writes and the regmap cache should be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); + + /* Control should readback the data from the control cache */ + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes); +} + +/* + * Read from a cached control after the DSP has been powered-up and + * then powered-down without running. + * Should return the data in the cache. 
+ */ +static void cs_dsp_ctl_cache_read_powered_down(struct kunit *test) +{ + const struct cs_dsp_ctl_cache_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals, *readback; + + reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + /* Create some DSP data to be read into the control cache */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + get_random_bytes(reg_vals, param->len_bytes); + regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_cache_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + /* Power-up DSP then power-down */ + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + cs_dsp_power_down(dsp); + + /* Drop expected writes and the regmap cache should be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); + + /* Control should readback the data from the control cache */ + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes); +} + +/* + * Read from a cached control after the firmware has been run and + * stopped, then the DSP has been powered-down. + * Should return the data in the cache. 
+ */ +static void cs_dsp_ctl_cache_read_stopped_powered_down(struct kunit *test) +{ + const struct cs_dsp_ctl_cache_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals, *readback; + + reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + /* Create some DSP data to be read into the control cache */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + get_random_bytes(reg_vals, param->len_bytes); + regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_cache_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + /* Power-up DSP */ + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + /* Start and stop the firmware then power-down */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + cs_dsp_stop(dsp); + cs_dsp_power_down(dsp); + + /* Drop expected writes and the regmap cache should be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); + + /* Control should readback the data from the control cache */ + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes); +} + +/* + * Read from a cached control when a different firmware is currently + * loaded into the DSP. + * Should return the data in the cache. 
+ */ +static void cs_dsp_ctl_cache_read_not_current_loaded_fw(struct kunit *test) +{ + const struct cs_dsp_ctl_cache_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test); + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals, *readback; + + reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + /* Create some DSP data to be read into the control cache */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + get_random_bytes(reg_vals, param->len_bytes); + regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_cache_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + /* Power-up DSP */ + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + /* Power-down DSP then power-up with a different firmware */ + cs_dsp_power_down(dsp); + wmfw = cs_dsp_mock_wmfw_get_firmware(builder2); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0); + + /* Drop expected writes and the regmap cache should be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); + + /* Control should readback the data from the control cache */ + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes); +} + +/* + * Read from a cached control when a different firmware is currently + * running. + * Should return the data in the cache. 
+ */ +static void cs_dsp_ctl_cache_read_not_current_running_fw(struct kunit *test) +{ + const struct cs_dsp_ctl_cache_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test); + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals, *readback; + + reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + /* Create some DSP data to be read into the control cache */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + get_random_bytes(reg_vals, param->len_bytes); + regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_cache_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + /* Power-up DSP then power-down */ + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + cs_dsp_power_down(dsp); + + /* Power-up with a different firmware and run it */ + wmfw = cs_dsp_mock_wmfw_get_firmware(builder2); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0); + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0); + + /* Drop expected writes and the regmap cache should be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); + + /* Control should readback the data from the control cache */ + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes); +} + +/* + * Read from a cached control with non-zero flags while the firmware is + * running. + * Should return the data in the cache, not from the registers. 
+ */ +static void cs_dsp_ctl_cache_read_running(struct kunit *test) +{ + const struct cs_dsp_ctl_cache_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *init_reg_vals, *new_reg_vals, *readback; + + init_reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_reg_vals); + + new_reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_reg_vals); + + readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + /* Create data in the registers backing the control */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + get_random_bytes(init_reg_vals, param->len_bytes); + regmap_raw_write(dsp->regmap, reg, init_reg_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_cache_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + /* Power-up DSP */ + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + /* Start the firmware running */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0); + + /* + * Change the values in the registers backing the control then drop + * them from the regmap cache. This allows checking that the control + * read is returning values from the control cache and not accessing + * the registers. + */ + KUNIT_ASSERT_EQ(test, + regmap_raw_write(dsp->regmap, reg, new_reg_vals, param->len_bytes), + 0); + cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes); + + /* Control should readback the origin data from its cache */ + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, init_reg_vals, param->len_bytes); + + /* Stop and power-down the DSP */ + kunit_release_action(test, _cs_dsp_stop_wrapper, dsp); + cs_dsp_power_down(dsp); + + /* Control should readback from the cache */ + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, init_reg_vals, param->len_bytes); +} + +/* + * Read from a cached control with flags == 0 while the firmware is + * running. + * Should behave as volatile and read from the registers. 
+ * (This is for backwards compatibility with old firmware versions) + */ +static void cs_dsp_ctl_cache_read_running_zero_flags(struct kunit *test) +{ + const struct cs_dsp_ctl_cache_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *init_reg_vals, *new_reg_vals, *readback; + + init_reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_reg_vals); + + new_reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_reg_vals); + + readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + /* Zero-fill the registers backing the control */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + regmap_raw_write(dsp->regmap, reg, init_reg_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = 0; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_cache_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + /* Power-up DSP */ + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + /* Start the firmware running */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0); + + /* Change the values in the registers backing the control */ + get_random_bytes(new_reg_vals, param->len_bytes); + regmap_raw_write(dsp->regmap, reg, new_reg_vals, param->len_bytes); + + /* Control should readback the new data from the registers */ + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, new_reg_vals, param->len_bytes); + + /* Stop and power-down the DSP */ + kunit_release_action(test, _cs_dsp_stop_wrapper, dsp); + cs_dsp_power_down(dsp); + + /* Change the values in the registers backing the control */ + regmap_raw_write(dsp->regmap, reg, init_reg_vals, param->len_bytes); + + /* Control should readback from the cache */ + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, new_reg_vals, param->len_bytes); +} + +/* + * Write to a cached control while the firmware is running. + * This should be a writethrough operation, writing to the cache and + * the registers. 
+ */ +static void cs_dsp_ctl_cache_writethrough(struct kunit *test) +{ + const struct cs_dsp_ctl_cache_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals, *readback; + + reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + /* Create some DSP data to be read into the control cache */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + memset(reg_vals, 0, param->len_bytes); + regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_cache_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + + /* Start the firmware and add an action to stop it during cleanup */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0); + + /* Write new data to the control, it should be written to the registers */ + get_random_bytes(reg_vals, param->len_bytes); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes), + 1); + KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes); +} + +/* + * Write unchanged data to a cached control while the firmware is running. + * The control write should return 0 to indicate that the content + * didn't change. 
+ */ +static void cs_dsp_ctl_cache_writethrough_unchanged(struct kunit *test) +{ + const struct cs_dsp_ctl_cache_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals, *readback; + + reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + /* Create some DSP data to be read into the control cache */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + get_random_bytes(reg_vals, param->len_bytes); + regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_cache_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + + /* Start the firmware and add an action to stop it during cleanup */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0); + + /* + * If the control is write-only the cache will have been zero-initialized + * so the first write will always indicate a change. + */ + if (def.flags && !(def.flags & WMFW_CTL_FLAG_READABLE)) { + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, + param->len_bytes), + 1); + } + + /* + * Write the same data to the control, cs_dsp_coeff_lock_and_write_ctrl() + * should return 0 to indicate the content didn't change. + */ + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes), + 0); + KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes); +} + +/* + * Write unchanged data to a cached control while the firmware is not started. + * The control write should return 0 to indicate that the cache content + * didn't change. 
+ */ +static void cs_dsp_ctl_cache_write_unchanged_not_started(struct kunit *test) +{ + const struct cs_dsp_ctl_cache_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals, *readback; + + reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + /* Create some DSP data to be read into the control cache */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + get_random_bytes(reg_vals, param->len_bytes); + regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_cache_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + + /* + * If the control is write-only the cache will have been zero-initialized + * so the first write will always indicate a change. + */ + if (def.flags && !(def.flags & WMFW_CTL_FLAG_READABLE)) { + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, + param->len_bytes), + 1); + } + + /* + * Write the same data to the control, cs_dsp_coeff_lock_and_write_ctrl() + * should return 0 to indicate the content didn't change. + */ + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes), + 0); + KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes); +} + +/* + * Write to a cached control while the firmware is loaded but not + * started. + * This should write to the cache only. 
+ */ +static void cs_dsp_ctl_cache_write_not_started(struct kunit *test) +{ + const struct cs_dsp_ctl_cache_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals, *readback; + + reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + /* Create some DSP data to be read into the control cache */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_cache_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + /* Power-up DSP but don't start firmware */ + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + /* Drop expected writes and the regmap cache should be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); + + /* Write new data to the control, it should not be written to the registers */ + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + + get_random_bytes(reg_vals, param->len_bytes); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes), + 1); + + /* Registers should not have been written so regmap cache should still be clean */ + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); + + /* Control should readback the new data from the control cache */ + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes); +} + +/* + * Write to a cached control after the firmware has been loaded, + * started and stopped. + * This should write to the cache only. 
+ */ +static void cs_dsp_ctl_cache_write_stopped(struct kunit *test) +{ + const struct cs_dsp_ctl_cache_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals, *readback; + + reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + /* Create some DSP data to be read into the control cache */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_cache_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + /* Power-up DSP */ + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + /* Start and stop the firmware */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + cs_dsp_stop(dsp); + + /* Drop expected writes and the regmap cache should be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); + + /* Write new data to the control, it should not be written to the registers */ + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + + get_random_bytes(reg_vals, param->len_bytes); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes), + 1); + + /* Registers should not have been written so regmap cache should still be clean */ + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); + + /* Control should readback the new data from the control cache */ + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes); +} + +/* + * Write to a cached control after the firmware has been loaded, + * then the DSP powered-down. + * This should write to the cache only. 
+ */ +static void cs_dsp_ctl_cache_write_powered_down(struct kunit *test) +{ + const struct cs_dsp_ctl_cache_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals, *readback; + + reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + /* Create some DSP data to be read into the control cache */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_cache_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + /* Power-up DSP then power-down */ + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + cs_dsp_power_down(dsp); + + /* Drop expected writes and the regmap cache should be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); + + /* Write new data to the control, it should not be written to the registers */ + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + + get_random_bytes(reg_vals, param->len_bytes); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes), + 1); + + /* Registers should not have been written so regmap cache should still be clean */ + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); + + /* Control should readback the new data from the control cache */ + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes); +} + +/* + * Write to a cached control after the firmware has been loaded, + * started, stopped, and then the DSP powered-down. + * This should write to the cache only. 
+ */ +static void cs_dsp_ctl_cache_write_stopped_powered_down(struct kunit *test) +{ + const struct cs_dsp_ctl_cache_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals, *readback; + + reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + /* Create some DSP data to be read into the control cache */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_cache_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + /* Power-up DSP */ + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + /* Start and stop the firmware then power-down */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + cs_dsp_stop(dsp); + cs_dsp_power_down(dsp); + + /* Drop expected writes and the regmap cache should be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); + + /* Write new data to the control, it should not be written to the registers */ + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + + get_random_bytes(reg_vals, param->len_bytes); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes), + 1); + + /* Registers should not have been written so regmap cache should still be clean */ + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); + + /* Control should readback the new data from the control cache */ + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes); +} + +/* + * Write to a cached control that is not in the currently loaded firmware. + * This should write to the cache only. 
+ */ +static void cs_dsp_ctl_cache_write_not_current_loaded_fw(struct kunit *test) +{ + const struct cs_dsp_ctl_cache_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test); + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals, *readback; + + reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + /* Create some DSP data to be read into the control cache */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_cache_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + /* Power-up DSP */ + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + /* Get the control */ + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + + /* Power-down DSP then power-up with a different firmware */ + cs_dsp_power_down(dsp); + wmfw = cs_dsp_mock_wmfw_get_firmware(builder2); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0); + + /* Control from unloaded firmware should be disabled */ + KUNIT_EXPECT_FALSE(test, ctl->enabled); + + /* Drop expected writes and the regmap cache should be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); + + /* + * It should be possible to write new data to the control from + * the first firmware. But this should not be written to the + * registers. + */ + get_random_bytes(reg_vals, param->len_bytes); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes), + 1); + + /* Registers should not have been written so regmap cache should still be clean */ + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); + + /* Control should readback the new data from the control cache */ + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes); +} + +/* + * Write to a cached control that is not in the currently running firmware. + * This should write to the cache only. 
+ */ +static void cs_dsp_ctl_cache_write_not_current_running_fw(struct kunit *test) +{ + const struct cs_dsp_ctl_cache_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test); + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals, *readback; + + reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + /* Create some DSP data to be read into the control cache */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_cache_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + /* Power-up DSP then power-down */ + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + cs_dsp_power_down(dsp); + + /* Get the control */ + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + + /* Power-up with a different firmware and run it */ + wmfw = cs_dsp_mock_wmfw_get_firmware(builder2); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0); + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0); + + /* Control from unloaded firmware should be disabled */ + KUNIT_EXPECT_FALSE(test, ctl->enabled); + + /* Drop expected writes and the regmap cache should be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); + + /* + * It should be possible to write new data to the control from + * the first firmware. But this should not be written to the + * registers. + */ + get_random_bytes(reg_vals, param->len_bytes); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes), + 1); + + /* Registers should not have been written so regmap cache should still be clean */ + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); + + /* Control should readback the new data from the control cache */ + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes); +} + +/* + * Write to a cached control before running the firmware. 
+ * The value written to the cache should be synced out to the registers + * backing the control when the firmware is run. + */ +static void cs_dsp_ctl_cache_sync_write_before_run(struct kunit *test) +{ + const struct cs_dsp_ctl_cache_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals, *readback; + + reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + /* Create some DSP data to be read into the control cache */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_cache_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + /* Power-up DSP but don't start firmware */ + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + /* Write new data to the control, it should not be written to the registers */ + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + + get_random_bytes(reg_vals, param->len_bytes); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes), + 1); + + KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0); + KUNIT_EXPECT_MEMNEQ(test, readback, reg_vals, param->len_bytes); + + /* Start the firmware and the cached data should be written to registers */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0); + + KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes); + + /* Control should readback the new data from the control cache */ + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes); +} + +/* + * Write to a cached control while the firmware is running. + * The value written should be synced out to the registers + * backing the control when the firmware is next run. 
+ */ +static void cs_dsp_ctl_cache_sync_write_while_running(struct kunit *test) +{ + const struct cs_dsp_ctl_cache_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *init_vals, *ctl_vals, *readback; + + init_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_vals); + + ctl_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctl_vals); + + readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + /* Zero-fill the registers backing the control */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_cache_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + /* Power-up DSP and start firmware */ + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0); + + /* Write new data to the control */ + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + + get_random_bytes(ctl_vals, param->len_bytes); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, ctl_vals, param->len_bytes), + 1); + + /* Stop firmware and zero the registers backing the control */ + kunit_release_action(test, _cs_dsp_stop_wrapper, dsp); + regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes); + KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0); + KUNIT_EXPECT_MEMEQ(test, readback, init_vals, param->len_bytes); + + /* Start the firmware and the cached data should be written to registers */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0); + + KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0); + KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes); + + /* Control should readback the new data from the control cache */ + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes); +} + +/* + * Write to a cached control after stopping the firmware. + * The value written to the cache should be synced out to the registers + * backing the control when the firmware is next run. 
+ */ +static void cs_dsp_ctl_cache_sync_write_after_stop(struct kunit *test) +{ + const struct cs_dsp_ctl_cache_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals, *readback; + + reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + /* Create some DSP data to be read into the control cache */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_cache_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + /* Power-up DSP but don't start firmware */ + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + /* Start and stop the firmware */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + cs_dsp_stop(dsp); + + /* Write new data to the control, it should not be written to the registers */ + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + + get_random_bytes(reg_vals, param->len_bytes); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes), + 1); + + KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0); + KUNIT_EXPECT_MEMNEQ(test, readback, reg_vals, param->len_bytes); + + /* Start the firmware and the cached data should be written to registers */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0); + + KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes); + + /* Control should readback the new data from the control cache */ + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes); +} + +/* + * Write to a cached control that is not in the currently loaded firmware. + * The value written to the cache should be synced out to the registers + * backing the control the next time the firmware containing the + * control is run. 
+ */ +static void cs_dsp_ctl_cache_sync_write_not_current_fw(struct kunit *test) +{ + const struct cs_dsp_ctl_cache_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals, *readback; + + reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + /* Create some DSP data to be read into the control cache */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_cache_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + /* Power-up DSP but don't start firmware */ + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + /* Get the control */ + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + + /* Power-down DSP then power-up with a different firmware */ + cs_dsp_power_down(dsp); + wmfw = cs_dsp_mock_wmfw_get_firmware(builder2); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0); + + /* Write new data to the control, it should not be written to the registers */ + get_random_bytes(reg_vals, param->len_bytes); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes), + 1); + + KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0); + KUNIT_EXPECT_MEMNEQ(test, readback, reg_vals, param->len_bytes); + + /* Power-down DSP then power-up with the original firmware */ + cs_dsp_power_down(dsp); + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + /* Start the firmware and the cached data should be written to registers */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0); + + KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes); + + /* Control should readback the new data from the control cache */ + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes); +} + +/* + * The value in the control cache should be synced out to the registers + * backing the control 
every time the firmware containing the control + * is run. + */ +static void cs_dsp_ctl_cache_sync_reapply_every_run(struct kunit *test) +{ + const struct cs_dsp_ctl_cache_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *init_vals, *readback, *ctl_vals; + + init_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_vals); + + readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + ctl_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctl_vals); + + /* Zero-fill the registers backing the control */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_cache_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + /* Power-up DSP but don't start firmware */ + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + /* Write new data to the control */ + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + + get_random_bytes(ctl_vals, param->len_bytes); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, ctl_vals, param->len_bytes), + 1); + + /* Start the firmware and the cached data should be written to registers */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0); + KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0); + KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes); + + /* Stop the firmware and reset the registers */ + kunit_release_action(test, _cs_dsp_stop_wrapper, dsp); + regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes); + + /* Start the firmware again and the cached data should be written to registers */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0); + KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0); + KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes); + + /* Control should readback the new data from the control cache */ + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes); +} + +/* + * The value in the control cache should be retained if the same + * firmware is downloaded again. 
It should be synced out to the + * registers backing the control after the firmware containing the + * control is downloaded again and run. + */ +static void cs_dsp_ctl_cache_sync_reapply_after_fw_reload(struct kunit *test) +{ + const struct cs_dsp_ctl_cache_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *init_vals, *readback, *ctl_vals; + + init_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_vals); + + readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + ctl_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctl_vals); + + /* Zero-fill the registers backing the control */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_cache_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + /* Power-up DSP but don't start firmware */ + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + /* Write new data to the control */ + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + + get_random_bytes(ctl_vals, param->len_bytes); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, ctl_vals, param->len_bytes), + 1); + + /* Start the firmware and the cached data should be written to registers */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0); + KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0); + KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes); + + /* Stop the firmware and power-down the DSP */ + kunit_release_action(test, _cs_dsp_stop_wrapper, dsp); + cs_dsp_power_down(dsp); + + /* Reset the registers */ + regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes); + + /* Download the firmware again, the cache content should not change */ + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + /* Start the firmware and the cached data should be written to registers */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0); + KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0); + KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes); + + /* Control should readback the new data from the control cache */ + KUNIT_EXPECT_EQ(test, + 
cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes); +} + +/* + * The value in the control cache should be retained after a different + * firmware is downloaded. + * When the firmware containing the control is downloaded and run + * the value in the control cache should be synced out to the registers + * backing the control. + */ +static void cs_dsp_ctl_cache_sync_reapply_after_fw_swap(struct kunit *test) +{ + const struct cs_dsp_ctl_cache_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *init_vals, *readback, *ctl_vals; + + init_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_vals); + + readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + ctl_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctl_vals); + + /* Zero-fill the registers backing the control */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_cache_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + /* Power-up DSP but don't start firmware */ + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + /* Write new data to the control */ + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + + get_random_bytes(ctl_vals, param->len_bytes); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, ctl_vals, param->len_bytes), + 1); + + /* Start the firmware and the cached data should be written to registers */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0); + KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0); + KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes); + + /* Stop the firmware and power-down the DSP */ + kunit_release_action(test, _cs_dsp_stop_wrapper, dsp); + cs_dsp_power_down(dsp); + + /* Reset the registers */ + regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes); + + /* Download and run a different firmware */ + wmfw = cs_dsp_mock_wmfw_get_firmware(builder2); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0); + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + cs_dsp_power_down(dsp); + + /* Reset the registers 
*/ + regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes); + + /* Download the original firmware again */ + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + KUNIT_EXPECT_TRUE(test, ctl->set); + + /* Start the firmware and the cached data should be written to registers */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0); + KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0); + KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes); + + /* Control should readback the new data from the control cache */ + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes); +} + +static int cs_dsp_ctl_cache_test_common_init(struct kunit *test, struct cs_dsp *dsp, + int wmfw_version) +{ + struct cs_dsp_test *priv; + struct cs_dsp_test_local *local; + struct device *test_dev; + int ret; + + priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL); + if (!local) + return -ENOMEM; + + priv->test = test; + priv->dsp = dsp; + test->priv = priv; + priv->local = local; + priv->local->wmfw_version = wmfw_version; + + /* Create dummy struct device */ + test_dev = kunit_device_register(test, "cs_dsp_test_drv"); + if (IS_ERR(test_dev)) + return PTR_ERR(test_dev); + + dsp->dev = get_device(test_dev); + if (!dsp->dev) + return -ENODEV; + + ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev); + if (ret) + return ret; + + dev_set_drvdata(dsp->dev, priv); + + /* Allocate regmap */ + ret = cs_dsp_mock_regmap_init(priv); + if (ret) + return ret; + + /* + * There must always be a XM header with at least 1 algorithm, so create + * a dummy one that tests can use and extract it to a data blob. 
+ */ + local->xm_header = cs_dsp_create_mock_xm_header(priv, + cs_dsp_ctl_cache_test_algs, + ARRAY_SIZE(cs_dsp_ctl_cache_test_algs)); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header); + + /* Create wmfw builder */ + local->wmfw_builder = _create_dummy_wmfw(test); + + /* Init cs_dsp */ + dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops); + + switch (dsp->type) { + case WMFW_ADSP2: + ret = cs_dsp_adsp2_init(dsp); + break; + case WMFW_HALO: + ret = cs_dsp_halo_init(dsp); + break; + default: + KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type); + return -EINVAL; + } + + if (ret) + return ret; + + /* Automatically call cs_dsp_remove() when test case ends */ + return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp); +} + +static int cs_dsp_ctl_cache_test_halo_init(struct kunit *test) +{ + struct cs_dsp *dsp; + + /* Fill in cs_dsp and initialize */ + dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL); + if (!dsp) + return -ENOMEM; + + dsp->num = 1; + dsp->type = WMFW_HALO; + dsp->mem = cs_dsp_mock_halo_dsp1_regions; + dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes); + dsp->base = cs_dsp_mock_halo_core_base; + dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base; + + return cs_dsp_ctl_cache_test_common_init(test, dsp, 3); +} + +static int cs_dsp_ctl_cache_test_adsp2_32bit_init(struct kunit *test, int wmfw_ver) +{ + struct cs_dsp *dsp; + + /* Fill in cs_dsp and initialize */ + dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL); + if (!dsp) + return -ENOMEM; + + dsp->num = 1; + dsp->type = WMFW_ADSP2; + dsp->rev = 1; + dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions; + dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes); + dsp->base = cs_dsp_mock_adsp2_32bit_sysbase; + + return cs_dsp_ctl_cache_test_common_init(test, dsp, wmfw_ver); +} + +static int cs_dsp_ctl_cache_test_adsp2_32bit_wmfw1_init(struct kunit *test) +{ + return cs_dsp_ctl_cache_test_adsp2_32bit_init(test, 1); +} + +static int cs_dsp_ctl_cache_test_adsp2_32bit_wmfw2_init(struct kunit *test) +{ + return cs_dsp_ctl_cache_test_adsp2_32bit_init(test, 2); +} + +static int cs_dsp_ctl_cache_test_adsp2_16bit_init(struct kunit *test, int wmfw_ver) +{ + struct cs_dsp *dsp; + + /* Fill in cs_dsp and initialize */ + dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL); + if (!dsp) + return -ENOMEM; + + dsp->num = 1; + dsp->type = WMFW_ADSP2; + dsp->rev = 0; + dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions; + dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes); + dsp->base = cs_dsp_mock_adsp2_16bit_sysbase; + + return cs_dsp_ctl_cache_test_common_init(test, dsp, wmfw_ver); +} + +static int cs_dsp_ctl_cache_test_adsp2_16bit_wmfw1_init(struct kunit *test) +{ + return cs_dsp_ctl_cache_test_adsp2_16bit_init(test, 1); +} + +static int cs_dsp_ctl_cache_test_adsp2_16bit_wmfw2_init(struct kunit *test) +{ + return cs_dsp_ctl_cache_test_adsp2_16bit_init(test, 2); +} + +static void cs_dsp_ctl_all_param_desc(const struct cs_dsp_ctl_cache_test_param *param, + char *desc) +{ + snprintf(desc, KUNIT_PARAM_DESC_SIZE, "alg:%#x %s@%u len:%u flags:%#x", + param->alg_id, cs_dsp_mem_region_name(param->mem_type), + param->offs_words, param->len_bytes, param->flags); +} + +/* All parameters populated, with various lengths */ +static const struct cs_dsp_ctl_cache_test_param all_pop_varying_len_cases[] = { + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, 
.len_bytes = 4 }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 8 }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 12 }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 16 }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 48 }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 100 }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 512 }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 1000 }, +}; +KUNIT_ARRAY_PARAM(all_pop_varying_len, all_pop_varying_len_cases, + cs_dsp_ctl_all_param_desc); + +/* All parameters populated, with various offsets */ +static const struct cs_dsp_ctl_cache_test_param all_pop_varying_offset_cases[] = { + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 0, .len_bytes = 4 }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 2, .len_bytes = 4 }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 3, .len_bytes = 4 }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 8, .len_bytes = 4 }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 10, .len_bytes = 4 }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 128, .len_bytes = 4 }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 180, .len_bytes = 4 }, +}; +KUNIT_ARRAY_PARAM(all_pop_varying_offset, all_pop_varying_offset_cases, + cs_dsp_ctl_all_param_desc); + +/* All parameters populated, with various X and Y memory regions */ +static const struct cs_dsp_ctl_cache_test_param all_pop_varying_xy_cases[] = { + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_XM, .offs_words = 1, .len_bytes = 4 }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 }, +}; +KUNIT_ARRAY_PARAM(all_pop_varying_xy, all_pop_varying_xy_cases, + cs_dsp_ctl_all_param_desc); + +/* All parameters populated, using ZM */ +static const struct cs_dsp_ctl_cache_test_param all_pop_z_cases[] = { + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_ZM, .offs_words = 1, .len_bytes = 4 }, +}; +KUNIT_ARRAY_PARAM(all_pop_z, all_pop_z_cases, cs_dsp_ctl_all_param_desc); + +/* All parameters populated, with various algorithm ids */ +static const struct cs_dsp_ctl_cache_test_param all_pop_varying_alg_cases[] = { + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 }, + { .alg_id = 0xb, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 }, + { .alg_id = 0x9f1234, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 }, + { .alg_id = 0xff00ff, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 }, +}; +KUNIT_ARRAY_PARAM(all_pop_varying_alg, all_pop_varying_alg_cases, + cs_dsp_ctl_all_param_desc); + +/* + * All parameters populated, with all combinations of flags for a + * non-volatile readable control + */ +static const struct cs_dsp_ctl_cache_test_param all_pop_nonvol_readable_flags_cases[] = { + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = 0 + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_READABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 
1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE, + }, +}; +KUNIT_ARRAY_PARAM(all_pop_nonvol_readable_flags, + all_pop_nonvol_readable_flags_cases, + cs_dsp_ctl_all_param_desc); + +/* + * All parameters populated, with all combinations of flags for a + * non-volatile readable control, except flags==0 + */ +static const struct cs_dsp_ctl_cache_test_param all_pop_nonvol_readable_nonzero_flags_cases[] = { + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_READABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE, + }, +}; +KUNIT_ARRAY_PARAM(all_pop_nonvol_readable_nonzero_flags, + all_pop_nonvol_readable_nonzero_flags_cases, + cs_dsp_ctl_all_param_desc); + +/* + * All parameters populated, with all combinations of flags for a + * non-volatile writeable control + */ +static const struct cs_dsp_ctl_cache_test_param all_pop_nonvol_writeable_flags_cases[] = { + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = 0 + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_WRITEABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE, + }, +}; +KUNIT_ARRAY_PARAM(all_pop_nonvol_writeable_flags, + all_pop_nonvol_writeable_flags_cases, + cs_dsp_ctl_all_param_desc); + +/* + * All parameters populated, with all combinations of flags for a + * non-volatile write-only control of varying lengths + */ +static const struct cs_dsp_ctl_cache_test_param all_pop_nonvol_write_only_length_cases[] = { + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_WRITEABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 512, + .flags = WMFW_CTL_FLAG_WRITEABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 512, + .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE, + }, +}; +KUNIT_ARRAY_PARAM(all_pop_nonvol_write_only_length, + all_pop_nonvol_write_only_length_cases, + cs_dsp_ctl_all_param_desc); + +static struct kunit_case cs_dsp_ctl_cache_test_cases_v1[] = { + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_len_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_offset_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_xy_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, 
all_pop_z_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_alg_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_nonvol_readable_flags_gen_params), + + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init_write_only, + all_pop_nonvol_write_only_length_gen_params), + + KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fw_same_controls), + KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fwalgid_same_controls), + KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_mems), + KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_algs), + + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_started, + all_pop_nonvol_readable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped, + all_pop_nonvol_readable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_powered_down, + all_pop_nonvol_readable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped_powered_down, + all_pop_nonvol_readable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_loaded_fw, + all_pop_nonvol_readable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_running_fw, + all_pop_nonvol_readable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_running, + all_pop_nonvol_readable_nonzero_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_running_zero_flags, + all_pop_varying_len_gen_params), + + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_len_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_offset_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_xy_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_z_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_alg_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_nonvol_writeable_flags_gen_params), + + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged, + all_pop_varying_len_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged, + all_pop_varying_offset_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged, + all_pop_varying_xy_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged, + all_pop_z_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged, + all_pop_varying_alg_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged, + all_pop_nonvol_writeable_flags_gen_params), + + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_unchanged_not_started, + all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_started, + all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped, + all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_powered_down, + all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped_powered_down, + all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_loaded_fw, + all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_running_fw, + all_pop_nonvol_writeable_flags_gen_params), + + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_before_run, + all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_while_running, + all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_after_stop, + all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_not_current_fw, + 
all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_every_run, + all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_reload, + all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_swap, + all_pop_nonvol_writeable_flags_gen_params), + + { } /* terminator */ +}; + +static struct kunit_case cs_dsp_ctl_cache_test_cases_v2[] = { + KUNIT_CASE(cs_dsp_ctl_v2_cache_alloc), + + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_len_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_offset_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_xy_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_z_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_alg_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_nonvol_readable_flags_gen_params), + + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init_write_only, + all_pop_nonvol_write_only_length_gen_params), + + KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fw_same_controls), + KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fwalgid_same_controls), + KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_mems), + KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_algs), + KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_offsets), + + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_started, + all_pop_nonvol_readable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped, + all_pop_nonvol_readable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_powered_down, + all_pop_nonvol_readable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped_powered_down, + all_pop_nonvol_readable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_loaded_fw, + all_pop_nonvol_readable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_running_fw, + all_pop_nonvol_readable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_running, + all_pop_nonvol_readable_nonzero_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_running_zero_flags, + all_pop_varying_len_gen_params), + + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_len_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_offset_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_xy_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_z_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_alg_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_nonvol_writeable_flags_gen_params), + + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged, + all_pop_varying_len_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged, + all_pop_varying_offset_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged, + all_pop_varying_xy_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged, + all_pop_z_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged, + all_pop_varying_alg_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged, + all_pop_nonvol_writeable_flags_gen_params), + + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_unchanged_not_started, + all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_started, + all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped, + 
all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_powered_down, + all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped_powered_down, + all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_loaded_fw, + all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_running_fw, + all_pop_nonvol_writeable_flags_gen_params), + + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_before_run, + all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_while_running, + all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_after_stop, + all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_not_current_fw, + all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_every_run, + all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_reload, + all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_swap, + all_pop_nonvol_writeable_flags_gen_params), + + { } /* terminator */ +}; + +static struct kunit_case cs_dsp_ctl_cache_test_cases_v3[] = { + KUNIT_CASE(cs_dsp_ctl_v2_cache_alloc), + + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_len_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_offset_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_xy_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_alg_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_nonvol_readable_flags_gen_params), + + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init_write_only, + all_pop_nonvol_write_only_length_gen_params), + + KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fw_same_controls), + KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fwalgid_same_controls), + KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_mems), + KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_algs), + KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_offsets), + + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_started, + all_pop_nonvol_readable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped, + all_pop_nonvol_readable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_powered_down, + all_pop_nonvol_readable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped_powered_down, + all_pop_nonvol_readable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_loaded_fw, + all_pop_nonvol_readable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_running_fw, + all_pop_nonvol_readable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_running, + all_pop_nonvol_readable_nonzero_flags_gen_params), + + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_len_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_offset_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_xy_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_alg_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_nonvol_writeable_flags_gen_params), + + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged, + all_pop_varying_len_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged, + all_pop_varying_offset_gen_params), + 
KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged, + all_pop_varying_xy_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged, + all_pop_varying_alg_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged, + all_pop_nonvol_writeable_flags_gen_params), + + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_unchanged_not_started, + all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_started, + all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped, + all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_powered_down, + all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped_powered_down, + all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_loaded_fw, + all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_running_fw, + all_pop_nonvol_writeable_flags_gen_params), + + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_before_run, + all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_while_running, + all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_after_stop, + all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_not_current_fw, + all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_every_run, + all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_reload, + all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_swap, + all_pop_nonvol_writeable_flags_gen_params), + + { } /* terminator */ +}; + +static struct kunit_suite cs_dsp_ctl_cache_test_halo = { + .name = "cs_dsp_ctl_cache_wmfwV3_halo", + .init = cs_dsp_ctl_cache_test_halo_init, + .test_cases = cs_dsp_ctl_cache_test_cases_v3, +}; + +static struct kunit_suite cs_dsp_ctl_cache_test_adsp2_32bit_wmfw1 = { + .name = "cs_dsp_ctl_cache_wmfwV1_adsp2_32bit", + .init = cs_dsp_ctl_cache_test_adsp2_32bit_wmfw1_init, + .test_cases = cs_dsp_ctl_cache_test_cases_v1, +}; + +static struct kunit_suite cs_dsp_ctl_cache_test_adsp2_32bit_wmfw2 = { + .name = "cs_dsp_ctl_cache_wmfwV2_adsp2_32bit", + .init = cs_dsp_ctl_cache_test_adsp2_32bit_wmfw2_init, + .test_cases = cs_dsp_ctl_cache_test_cases_v2, +}; + +static struct kunit_suite cs_dsp_ctl_cache_test_adsp2_16bit_wmfw1 = { + .name = "cs_dsp_ctl_cache_wmfwV1_adsp2_16bit", + .init = cs_dsp_ctl_cache_test_adsp2_16bit_wmfw1_init, + .test_cases = cs_dsp_ctl_cache_test_cases_v1, +}; + +static struct kunit_suite cs_dsp_ctl_cache_test_adsp2_16bit_wmfw2 = { + .name = "cs_dsp_ctl_cache_wmfwV2_adsp2_16bit", + .init = cs_dsp_ctl_cache_test_adsp2_16bit_wmfw2_init, + .test_cases = cs_dsp_ctl_cache_test_cases_v2, +}; + +kunit_test_suites(&cs_dsp_ctl_cache_test_halo, + &cs_dsp_ctl_cache_test_adsp2_32bit_wmfw1, + &cs_dsp_ctl_cache_test_adsp2_32bit_wmfw2, + &cs_dsp_ctl_cache_test_adsp2_16bit_wmfw1, + &cs_dsp_ctl_cache_test_adsp2_16bit_wmfw2); diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c b/drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c new file mode 100644 index 000000000000..942ba1af5e7c --- /dev/null +++ b/drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c @@ -0,0 +1,1838 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// KUnit tests for cs_dsp. 
+// +// Copyright (C) 2024 Cirrus Logic, Inc. and +// Cirrus Logic International Semiconductor Ltd. + +#include <kunit/device.h> +#include <kunit/resource.h> +#include <kunit/test.h> +#include <linux/build_bug.h> +#include <linux/firmware/cirrus/cs_dsp.h> +#include <linux/firmware/cirrus/cs_dsp_test_utils.h> +#include <linux/firmware/cirrus/wmfw.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/regmap.h> + +KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *); +KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *); + +struct cs_dsp_test_local { + struct cs_dsp_mock_xm_header *xm_header; + struct cs_dsp_mock_wmfw_builder *wmfw_builder; + int wmfw_version; +}; + +struct cs_dsp_ctl_parse_test_param { + int mem_type; + int alg_id; + unsigned int offset; + unsigned int length; + u16 ctl_type; + u16 flags; +}; + +static const struct cs_dsp_mock_alg_def cs_dsp_ctl_parse_test_algs[] = { + { + .id = 0xfafa, + .ver = 0x100000, + .xm_size_words = 164, + .ym_size_words = 164, + .zm_size_words = 164, + }, + { + .id = 0xb, + .ver = 0x100001, + .xm_size_words = 8, + .ym_size_words = 8, + .zm_size_words = 8, + }, + { + .id = 0x9f1234, + .ver = 0x100500, + .xm_size_words = 16, + .ym_size_words = 16, + .zm_size_words = 16, + }, + { + .id = 0xff00ff, + .ver = 0x300113, + .xm_size_words = 16, + .ym_size_words = 16, + .zm_size_words = 16, + }, +}; + +static const struct cs_dsp_mock_coeff_def mock_coeff_template = { + .shortname = "Dummy Coeff", + .type = WMFW_CTL_TYPE_BYTES, + .mem_type = WMFW_ADSP2_YM, + .flags = WMFW_CTL_FLAG_VOLATILE, + .length_bytes = 4, +}; + +static char *cs_dsp_ctl_alloc_test_string(struct kunit *test, char c, size_t len) +{ + char *str; + + str = kunit_kmalloc(test, len + 1, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, str); + memset(str, c, len); + str[len] = '\0'; + + return str; +} + +/* Algorithm info block without controls should load */ +static void cs_dsp_ctl_parse_no_coeffs(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *wmfw; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_parse_test_algs[0].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); +} + +/* + * V1 controls do not have names, the name field in the coefficient entry + * should be ignored. 
+ */ +static void cs_dsp_ctl_parse_v1_name(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + + def.fullname = "Dummy"; + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_parse_test_algs[0].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_EQ(test, ctl->subname_len, 0); + KUNIT_EXPECT_EQ(test, ctl->flags, def.flags); + KUNIT_EXPECT_EQ(test, ctl->type, def.type); + KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes); +} + +/* + * V1 controls do not have names, the name field in the coefficient entry + * should be ignored. Test with a zero-length name string. + */ +static void cs_dsp_ctl_parse_empty_v1_name(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + + def.fullname = "\0"; + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_parse_test_algs[0].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_EQ(test, ctl->subname_len, 0); + KUNIT_EXPECT_EQ(test, ctl->flags, def.flags); + KUNIT_EXPECT_EQ(test, ctl->type, def.type); + KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes); +} + +/* + * V1 controls do not have names, the name field in the coefficient entry + * should be ignored. Test with a maximum length name string. + */ +static void cs_dsp_ctl_parse_max_v1_name(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + + def.fullname = cs_dsp_ctl_alloc_test_string(test, 'A', 255); + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_parse_test_algs[0].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_EQ(test, ctl->subname_len, 0); + KUNIT_EXPECT_EQ(test, ctl->flags, def.flags); + KUNIT_EXPECT_EQ(test, ctl->type, def.type); + KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes); +} + +/* Short name from coeff descriptor should be used as control name. 
*/ +static void cs_dsp_ctl_parse_short_name(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_parse_test_algs[0].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname)); + KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len); + KUNIT_EXPECT_EQ(test, ctl->flags, def.flags); + KUNIT_EXPECT_EQ(test, ctl->type, def.type); + KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes); +} + +/* + * Short name from coeff descriptor should be used as control name. + * Test with a short name that is a single character. + */ +static void cs_dsp_ctl_parse_min_short_name(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + + def.shortname = "Q"; + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_parse_test_algs[0].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_EQ(test, ctl->subname_len, 1); + KUNIT_EXPECT_EQ(test, ctl->subname[0], 'Q'); + KUNIT_EXPECT_EQ(test, ctl->flags, def.flags); + KUNIT_EXPECT_EQ(test, ctl->type, def.type); + KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes); +} + +/* + * Short name from coeff descriptor should be used as control name. + * Test with a maximum length name. 
+ */ +static void cs_dsp_ctl_parse_max_short_name(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + + def.shortname = cs_dsp_ctl_alloc_test_string(test, 'A', 255); + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_parse_test_algs[0].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_EQ(test, ctl->subname_len, 255); + KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len); + KUNIT_EXPECT_EQ(test, ctl->flags, def.flags); + KUNIT_EXPECT_EQ(test, ctl->type, def.type); + KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes); +} + +/* + * Full name from coeff descriptor should be ignored. It is a variable + * length field so affects the position of subsequent fields. + * Test with a 1-character full name. + */ +static void cs_dsp_ctl_parse_with_min_fullname(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + + def.fullname = "Q"; + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_parse_test_algs[0].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname)); + KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len); + KUNIT_EXPECT_EQ(test, ctl->flags, def.flags); + KUNIT_EXPECT_EQ(test, ctl->type, def.type); + KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes); +} + +/* + * Full name from coeff descriptor should be ignored. It is a variable + * length field so affects the position of subsequent fields. + * Test with a maximum length full name. 
+ */ +static void cs_dsp_ctl_parse_with_max_fullname(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + + def.fullname = cs_dsp_ctl_alloc_test_string(test, 'A', 255); + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_parse_test_algs[0].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname)); + KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len); + KUNIT_EXPECT_EQ(test, ctl->flags, def.flags); + KUNIT_EXPECT_EQ(test, ctl->type, def.type); + KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes); +} + +/* + * Description from coeff descriptor should be ignored. It is a variable + * length field so affects the position of subsequent fields. + * Test with a 1-character description + */ +static void cs_dsp_ctl_parse_with_min_description(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + + def.description = "Q"; + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_parse_test_algs[0].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname)); + KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len); + KUNIT_EXPECT_EQ(test, ctl->flags, def.flags); + KUNIT_EXPECT_EQ(test, ctl->type, def.type); + KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes); +} + +/* + * Description from coeff descriptor should be ignored. It is a variable + * length field so affects the position of subsequent fields. 
+ * Test with a maximum length description + */ +static void cs_dsp_ctl_parse_with_max_description(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + + def.description = cs_dsp_ctl_alloc_test_string(test, 'A', 65535); + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_parse_test_algs[0].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname)); + KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len); + KUNIT_EXPECT_EQ(test, ctl->flags, def.flags); + KUNIT_EXPECT_EQ(test, ctl->type, def.type); + KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes); +} + +/* + * Full name and description from coeff descriptor are variable length + * fields so affect the position of subsequent fields. + * Test with a maximum length full name and description + */ +static void cs_dsp_ctl_parse_with_max_fullname_and_description(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + + def.fullname = cs_dsp_ctl_alloc_test_string(test, 'A', 255); + def.description = cs_dsp_ctl_alloc_test_string(test, 'A', 65535); + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_parse_test_algs[0].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname)); + KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len); + KUNIT_EXPECT_EQ(test, ctl->flags, def.flags); + KUNIT_EXPECT_EQ(test, ctl->type, def.type); + KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes); +} + +static const char * const cs_dsp_ctl_alignment_test_names[] = { + "1", "12", "123", "1234", "12345", "123456", "1234567", + "12345678", "123456789", "123456789A", "123456789AB", + "123456789ABC", "123456789ABCD", "123456789ABCDE", + "123456789ABCDEF", +}; + +/* + * Variable-length string fields are padded to a multiple of 4-bytes. + * Test this with various lengths of short name.
+ */ +static void cs_dsp_ctl_shortname_alignment(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + int i; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_parse_test_algs[0].id, + "dummyalg", NULL); + + for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) { + def.shortname = cs_dsp_ctl_alignment_test_names[i]; + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + } + + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) { + mutex_lock(&priv->dsp->pwr_lock); + ctl = cs_dsp_get_ctl(priv->dsp, cs_dsp_ctl_alignment_test_names[i], + def.mem_type, cs_dsp_ctl_parse_test_algs[0].id); + mutex_unlock(&priv->dsp->pwr_lock); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_EQ(test, ctl->subname_len, i + 1); + KUNIT_EXPECT_MEMEQ(test, ctl->subname, cs_dsp_ctl_alignment_test_names[i], + ctl->subname_len); + /* Test fields that are parsed after the variable-length fields */ + KUNIT_EXPECT_EQ(test, ctl->flags, def.flags); + KUNIT_EXPECT_EQ(test, ctl->type, def.type); + KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes); + } +} + +/* + * Variable-length string fields are padded to a multiple of 4-bytes. + * Test this with various lengths of full name. + */ +static void cs_dsp_ctl_fullname_alignment(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_coeff_ctl *ctl; + char ctl_name[4]; + struct firmware *wmfw; + int i; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_parse_test_algs[0].id, + "dummyalg", NULL); + + for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) { + /* + * Create a unique control name of 3 characters so that + * the shortname field is exactly 4 bytes long including + * the length byte. + */ + snprintf(ctl_name, sizeof(ctl_name), "%03d", i); + KUNIT_ASSERT_EQ(test, strlen(ctl_name), 3); + def.shortname = ctl_name; + + def.fullname = cs_dsp_ctl_alignment_test_names[i]; + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + } + + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) { + snprintf(ctl_name, sizeof(ctl_name), "%03d", i); + + mutex_lock(&priv->dsp->pwr_lock); + ctl = cs_dsp_get_ctl(priv->dsp, ctl_name, def.mem_type, + cs_dsp_ctl_parse_test_algs[0].id); + mutex_unlock(&priv->dsp->pwr_lock); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_EQ(test, ctl->subname_len, 3); + KUNIT_EXPECT_MEMEQ(test, ctl->subname, ctl_name, ctl->subname_len); + /* Test fields that are parsed after the variable-length fields */ + KUNIT_EXPECT_EQ(test, ctl->flags, def.flags); + KUNIT_EXPECT_EQ(test, ctl->type, def.type); + KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes); + } +} + +/* + * Variable-length string fields are padded to a multiple of 4-bytes. + * Test this with various lengths of description. 
+ */ +static void cs_dsp_ctl_description_alignment(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_coeff_ctl *ctl; + char ctl_name[4]; + struct firmware *wmfw; + int i; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_parse_test_algs[0].id, + "dummyalg", NULL); + + for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) { + /* + * Create a unique control name of 3 characters so that + * the shortname field is exactly 4 bytes long including + * the length byte. + */ + snprintf(ctl_name, sizeof(ctl_name), "%03d", i); + KUNIT_ASSERT_EQ(test, strlen(ctl_name), 3); + def.shortname = ctl_name; + + def.description = cs_dsp_ctl_alignment_test_names[i]; + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + } + + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) { + snprintf(ctl_name, sizeof(ctl_name), "%03d", i); + + mutex_lock(&priv->dsp->pwr_lock); + ctl = cs_dsp_get_ctl(priv->dsp, ctl_name, def.mem_type, + cs_dsp_ctl_parse_test_algs[0].id); + mutex_unlock(&priv->dsp->pwr_lock); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_EQ(test, ctl->subname_len, 3); + KUNIT_EXPECT_MEMEQ(test, ctl->subname, ctl_name, ctl->subname_len); + /* Test fields that are parsed after the variable-length fields */ + KUNIT_EXPECT_EQ(test, ctl->flags, def.flags); + KUNIT_EXPECT_EQ(test, ctl->type, def.type); + KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes); + } +} + +static const char * const cs_dsp_get_ctl_test_names[] = { + "Up", "Down", "Switch", "Mute", + "Left Up", "Left Down", "Right Up", "Right Down", + "Left Mute", "Right Mute", + "_trunc_1", "_trunc_2", " trunc", +}; + +/* Test using cs_dsp_get_ctl() to lookup various controls. 
*/ +static void cs_dsp_get_ctl_test(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + int i; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_parse_test_algs[0].id, + "dummyalg", NULL); + + for (i = 0; i < ARRAY_SIZE(cs_dsp_get_ctl_test_names); i++) { + def.shortname = cs_dsp_get_ctl_test_names[i]; + def.offset_dsp_words = i; + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + } + + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + for (i = 0; i < ARRAY_SIZE(cs_dsp_get_ctl_test_names); i++) { + mutex_lock(&priv->dsp->pwr_lock); + ctl = cs_dsp_get_ctl(priv->dsp, cs_dsp_get_ctl_test_names[i], + def.mem_type, cs_dsp_ctl_parse_test_algs[0].id); + mutex_unlock(&priv->dsp->pwr_lock); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(cs_dsp_get_ctl_test_names[i])); + KUNIT_EXPECT_MEMEQ(test, ctl->subname, cs_dsp_get_ctl_test_names[i], + ctl->subname_len); + KUNIT_EXPECT_EQ(test, ctl->offset, i); + } +} + +/* + * cs_dsp_get_ctl() searches for the control in the currently loaded + * firmware, so create identical controls in multiple firmware and + * test that the correct one is found. + */ +static void cs_dsp_get_ctl_test_multiple_wmfw(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_coeff_ctl *ctl; + struct cs_dsp_mock_wmfw_builder *builder2; + struct firmware *wmfw; + + def.shortname = "_A_CONTROL"; + + /* Create a second mock wmfw builder */ + builder2 = cs_dsp_mock_wmfw_init(priv, + cs_dsp_mock_wmfw_format_version(local->wmfw_builder)); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder2); + cs_dsp_mock_wmfw_add_data_block(builder2, + WMFW_ADSP2_XM, 0, + local->xm_header->blob_data, + local->xm_header->blob_size_bytes); + + /* Load a 'misc' firmware with a control */ + def.offset_dsp_words = 1; + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_parse_test_algs[0].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + cs_dsp_power_down(priv->dsp); + + /* Load a 'mbc/vss' firmware with a control of the same name */ + def.offset_dsp_words = 2; + cs_dsp_mock_wmfw_start_alg_info_block(builder2, + cs_dsp_ctl_parse_test_algs[0].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(builder2, &def); + cs_dsp_mock_wmfw_end_alg_info_block(builder2); + wmfw = cs_dsp_mock_wmfw_get_firmware(builder2); + KUNIT_ASSERT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_fw2", NULL, NULL, "mbc/vss"), 0); + + /* A lookup should return the control for the current firmware */ + mutex_lock(&priv->dsp->pwr_lock); + ctl = cs_dsp_get_ctl(priv->dsp, def.shortname, + def.mem_type, cs_dsp_ctl_parse_test_algs[0].id); + mutex_unlock(&priv->dsp->pwr_lock); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_EQ(test, ctl->offset, 2); + + /* Re-load the 'misc' firmware and a lookup should return its 
control */ + cs_dsp_power_down(priv->dsp); + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + mutex_lock(&priv->dsp->pwr_lock); + ctl = cs_dsp_get_ctl(priv->dsp, def.shortname, + def.mem_type, cs_dsp_ctl_parse_test_algs[0].id); + mutex_unlock(&priv->dsp->pwr_lock); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_EQ(test, ctl->offset, 1); +} + +/* Test that the value of the memory type field is parsed correctly. */ +static void cs_dsp_ctl_parse_memory_type(struct kunit *test) +{ + const struct cs_dsp_ctl_parse_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + + /* kunit_skip() marks the test skipped forever, so just return */ + if ((param->mem_type == WMFW_ADSP2_ZM) && !cs_dsp_mock_has_zm(priv)) + return; + + def.mem_type = param->mem_type; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_parse_test_algs[0].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_EQ(test, ctl->alg_region.type, param->mem_type); + KUNIT_EXPECT_EQ(test, ctl->flags, def.flags); + KUNIT_EXPECT_EQ(test, ctl->type, def.type); + KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes); +} + +/* + * Test that the algorithm id from the parent alg-info block is + * correctly stored in the cs_dsp_coeff_ctl. + */ +static void cs_dsp_ctl_parse_alg_id(struct kunit *test) +{ + const struct cs_dsp_ctl_parse_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + param->alg_id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_EQ(test, ctl->alg_region.alg, param->alg_id); + KUNIT_EXPECT_EQ(test, ctl->alg_region.type, def.mem_type); + KUNIT_EXPECT_EQ(test, ctl->flags, def.flags); + KUNIT_EXPECT_EQ(test, ctl->type, def.type); + KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes); +} + +/* + * Test that the value of the (alg id, memory type) tuple is parsed correctly. + * The alg id is parsed from the alg-info block, but the memory type is + * parsed from the coefficient info descriptor.
+ */ +static void cs_dsp_ctl_parse_alg_mem(struct kunit *test) +{ + const struct cs_dsp_ctl_parse_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + + /* kunit_skip() marks the test skipped forever, so just return */ + if ((param->mem_type == WMFW_ADSP2_ZM) && !cs_dsp_mock_has_zm(priv)) + return; + + def.mem_type = param->mem_type; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + param->alg_id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_EQ(test, ctl->alg_region.alg, param->alg_id); + KUNIT_EXPECT_EQ(test, ctl->alg_region.type, param->mem_type); +} + +/* Test that the value of the offset field is parsed correctly. */ +static void cs_dsp_ctl_parse_offset(struct kunit *test) +{ + const struct cs_dsp_ctl_parse_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + + def.offset_dsp_words = param->offset; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_parse_test_algs[0].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_EQ(test, ctl->offset, param->offset); + KUNIT_EXPECT_EQ(test, ctl->flags, def.flags); + KUNIT_EXPECT_EQ(test, ctl->type, def.type); + KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes); +} + +/* Test that the value of the length field is parsed correctly. 
*/ +static void cs_dsp_ctl_parse_length(struct kunit *test) +{ + const struct cs_dsp_ctl_parse_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + + def.length_bytes = param->length; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_parse_test_algs[0].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_EQ(test, ctl->offset, def.offset_dsp_words); + KUNIT_EXPECT_EQ(test, ctl->flags, def.flags); + KUNIT_EXPECT_EQ(test, ctl->type, def.type); + KUNIT_EXPECT_EQ(test, ctl->len, param->length); +} + +/* Test that the value of the control type field is parsed correctly. */ +static void cs_dsp_ctl_parse_ctl_type(struct kunit *test) +{ + const struct cs_dsp_ctl_parse_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + + def.type = param->ctl_type; + def.flags = param->flags; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_parse_test_algs[0].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_EQ(test, ctl->type, param->ctl_type); + KUNIT_EXPECT_EQ(test, ctl->flags, def.flags); + KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes); +} + +/* Test that the value of the flags field is parsed correctly. */ +static void cs_dsp_ctl_parse_flags(struct kunit *test) +{ + const struct cs_dsp_ctl_parse_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 reg_val; + + /* + * Non volatile controls will be read to initialize the cache + * so the regmap cache must contain something to read. 
+ */ + reg_val = 0xf11100; + regmap_raw_write(priv->dsp->regmap, + cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM), + ®_val, sizeof(reg_val)); + + def.flags = param->flags; + def.mem_type = WMFW_ADSP2_YM; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_parse_test_algs[0].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_EQ(test, ctl->type, def.type); + KUNIT_EXPECT_EQ(test, ctl->flags, param->flags); + KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes); +} + +/* Test that invalid combinations of (control type, flags) are rejected. */ +static void cs_dsp_ctl_illegal_type_flags(struct kunit *test) +{ + const struct cs_dsp_ctl_parse_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct firmware *wmfw; + u32 reg_val; + + /* + * Non volatile controls will be read to initialize the cache + * so the regmap cache must contain something to read. + */ + reg_val = 0xf11100; + regmap_raw_write(priv->dsp->regmap, + cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM), + ®_val, sizeof(reg_val)); + + def.type = param->ctl_type; + def.flags = param->flags; + def.mem_type = WMFW_ADSP2_YM; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_parse_test_algs[0].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_LT(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); +} + +/* Test that the correct firmware name is entered in the cs_dsp_coeff_ctl. 
*/ +static void cs_dsp_ctl_parse_fw_name(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_coeff_ctl *walkctl, *ctl1, *ctl2; + struct cs_dsp_mock_wmfw_builder *builder2; + struct firmware *wmfw; + + /* Create a second mock wmfw builder */ + builder2 = cs_dsp_mock_wmfw_init(priv, + cs_dsp_mock_wmfw_format_version(local->wmfw_builder)); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder2); + cs_dsp_mock_wmfw_add_data_block(builder2, + WMFW_ADSP2_XM, 0, + local->xm_header->blob_data, + local->xm_header->blob_size_bytes); + + /* Load a 'misc' firmware with a control */ + def.offset_dsp_words = 1; + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_parse_test_algs[0].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + cs_dsp_power_down(priv->dsp); + + /* Load a 'mbc/vss' firmware with a control */ + def.offset_dsp_words = 2; + cs_dsp_mock_wmfw_start_alg_info_block(builder2, + cs_dsp_ctl_parse_test_algs[0].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(builder2, &def); + cs_dsp_mock_wmfw_end_alg_info_block(builder2); + wmfw = cs_dsp_mock_wmfw_get_firmware(builder2); + KUNIT_ASSERT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_fw2", NULL, NULL, "mbc/vss"), 0); + + /* Both controls should be in the list (order not guaranteed) */ + KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), 2); + ctl1 = NULL; + ctl2 = NULL; + list_for_each_entry(walkctl, &priv->dsp->ctl_list, list) { + if (strcmp(walkctl->fw_name, "misc") == 0) + ctl1 = walkctl; + else if (strcmp(walkctl->fw_name, "mbc/vss") == 0) + ctl2 = walkctl; + } + + KUNIT_EXPECT_NOT_NULL(test, ctl1); + KUNIT_EXPECT_NOT_NULL(test, ctl2); + KUNIT_EXPECT_EQ(test, ctl1->offset, 1); + KUNIT_EXPECT_EQ(test, ctl2->offset, 2); +} + +/* Controls are unique if the algorithm ID is different */ +static void cs_dsp_ctl_alg_id_uniqueness(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_coeff_ctl *ctl1, *ctl2; + struct firmware *wmfw; + + /* Create an algorithm containing the control */ + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_parse_test_algs[0].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + /* Create a different algorithm containing an identical control */ + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_parse_test_algs[1].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + cs_dsp_power_down(priv->dsp); + + /* Both controls should be in the list */ + KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), 2); + ctl1 = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + ctl2 = list_next_entry(ctl1, list); + KUNIT_EXPECT_NOT_NULL(test, ctl1); + 
KUNIT_EXPECT_NOT_NULL(test, ctl2); + KUNIT_EXPECT_NE(test, ctl1->alg_region.alg, ctl2->alg_region.alg); + KUNIT_EXPECT_EQ(test, ctl1->alg_region.type, ctl2->alg_region.type); + KUNIT_EXPECT_EQ(test, ctl1->offset, ctl2->offset); + KUNIT_EXPECT_EQ(test, ctl1->type, ctl2->type); + KUNIT_EXPECT_EQ(test, ctl1->flags, ctl2->flags); + KUNIT_EXPECT_EQ(test, ctl1->len, ctl2->len); + KUNIT_EXPECT_STREQ(test, ctl1->fw_name, ctl2->fw_name); + KUNIT_EXPECT_EQ(test, ctl1->subname_len, ctl2->subname_len); + if (ctl1->subname_len) + KUNIT_EXPECT_MEMEQ(test, ctl1->subname, ctl2->subname, ctl1->subname_len); +} + +/* Controls are unique if the memory region is different */ +static void cs_dsp_ctl_mem_uniqueness(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_coeff_ctl *ctl1, *ctl2; + struct firmware *wmfw; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_parse_test_algs[0].id, + "dummyalg", NULL); + /* Create control in XM */ + def.mem_type = WMFW_ADSP2_XM; + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + + /* Create control in YM */ + def.mem_type = WMFW_ADSP2_YM; + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + cs_dsp_power_down(priv->dsp); + + /* Both controls should be in the list */ + KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), 2); + ctl1 = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + ctl2 = list_next_entry(ctl1, list); + KUNIT_EXPECT_NOT_NULL(test, ctl1); + KUNIT_EXPECT_NOT_NULL(test, ctl2); + KUNIT_EXPECT_EQ(test, ctl1->alg_region.alg, ctl2->alg_region.alg); + KUNIT_EXPECT_NE(test, ctl1->alg_region.type, ctl2->alg_region.type); + KUNIT_EXPECT_EQ(test, ctl1->offset, ctl2->offset); + KUNIT_EXPECT_EQ(test, ctl1->type, ctl2->type); + KUNIT_EXPECT_EQ(test, ctl1->flags, ctl2->flags); + KUNIT_EXPECT_EQ(test, ctl1->len, ctl2->len); + KUNIT_EXPECT_STREQ(test, ctl1->fw_name, ctl2->fw_name); + KUNIT_EXPECT_EQ(test, ctl1->subname_len, ctl2->subname_len); + if (ctl1->subname_len) + KUNIT_EXPECT_MEMEQ(test, ctl1->subname, ctl2->subname, ctl1->subname_len); +} + +/* Controls are unique if they are in different firmware */ +static void cs_dsp_ctl_fw_uniqueness(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_coeff_ctl *ctl1, *ctl2; + struct cs_dsp_mock_wmfw_builder *builder2; + struct firmware *wmfw; + + /* Create a second mock wmfw builder */ + builder2 = cs_dsp_mock_wmfw_init(priv, + cs_dsp_mock_wmfw_format_version(local->wmfw_builder)); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder2); + cs_dsp_mock_wmfw_add_data_block(builder2, + WMFW_ADSP2_XM, 0, + local->xm_header->blob_data, + local->xm_header->blob_size_bytes); + + /* Load a 'misc' firmware with a control */ + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_parse_test_algs[0].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, 
wmfw, "mock_fw", NULL, NULL, "misc"), 0); + cs_dsp_power_down(priv->dsp); + + /* Load a 'mbc/vss' firmware with the same control */ + cs_dsp_mock_wmfw_start_alg_info_block(builder2, + cs_dsp_ctl_parse_test_algs[0].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(builder2, &def); + cs_dsp_mock_wmfw_end_alg_info_block(builder2); + wmfw = cs_dsp_mock_wmfw_get_firmware(builder2); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw2", + NULL, NULL, "mbc/vss"), 0); + cs_dsp_power_down(priv->dsp); + + /* Both controls should be in the list */ + KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), 2); + ctl1 = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + ctl2 = list_next_entry(ctl1, list); + KUNIT_EXPECT_NOT_NULL(test, ctl1); + KUNIT_EXPECT_NOT_NULL(test, ctl2); + KUNIT_EXPECT_EQ(test, ctl1->alg_region.alg, ctl2->alg_region.alg); + KUNIT_EXPECT_EQ(test, ctl1->alg_region.type, ctl2->alg_region.type); + KUNIT_EXPECT_EQ(test, ctl1->offset, ctl2->offset); + KUNIT_EXPECT_EQ(test, ctl1->type, ctl2->type); + KUNIT_EXPECT_EQ(test, ctl1->flags, ctl2->flags); + KUNIT_EXPECT_EQ(test, ctl1->len, ctl2->len); + KUNIT_EXPECT_STRNEQ(test, ctl1->fw_name, ctl2->fw_name); + KUNIT_EXPECT_EQ(test, ctl1->subname_len, ctl2->subname_len); + if (ctl1->subname_len) + KUNIT_EXPECT_MEMEQ(test, ctl1->subname, ctl2->subname, ctl1->subname_len); +} + +/* + * Controls from a wmfw are only added to the list once. If the same + * wmfw is reloaded the controls are not added again. + * This creates multiple algorithms with one control each, which will + * work on both V1 format and >=V2 format controls. + */ +static void cs_dsp_ctl_squash_reloaded_controls(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_coeff_ctl *ctls[ARRAY_SIZE(cs_dsp_ctl_parse_test_algs)]; + struct cs_dsp_coeff_ctl *walkctl; + struct firmware *wmfw; + int i; + + /* Create some algorithms with a control */ + for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_parse_test_algs); i++) { + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_parse_test_algs[i].id, + "dummyalg", NULL); + def.mem_type = WMFW_ADSP2_YM; + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + } + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + cs_dsp_power_down(priv->dsp); + + /* All controls should be in the list */ + KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), + ARRAY_SIZE(cs_dsp_ctl_parse_test_algs)); + + /* Take a copy of the pointers to controls to compare against. 
*/ + i = 0; + list_for_each_entry(walkctl, &priv->dsp->ctl_list, list) { + KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(ctls)); + ctls[i++] = walkctl; + } + + + /* Load the wmfw again */ + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + cs_dsp_power_down(priv->dsp); + + /* The number of controls should be the same */ + KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), + ARRAY_SIZE(cs_dsp_ctl_parse_test_algs)); + + /* And they should be the same objects */ + i = 0; + list_for_each_entry(walkctl, &priv->dsp->ctl_list, list) { + KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(ctls)); + KUNIT_ASSERT_PTR_EQ(test, walkctl, ctls[i++]); + } +} + +/* + * Controls from a wmfw are only added to the list once. If the same + * wmfw is reloaded the controls are not added again. + * This tests >=V2 firmware that can have multiple named controls in + * the same algorithm. + */ +static void cs_dsp_ctl_v2_squash_reloaded_controls(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_coeff_ctl *ctls[ARRAY_SIZE(cs_dsp_get_ctl_test_names)]; + struct cs_dsp_coeff_ctl *walkctl; + struct firmware *wmfw; + int i; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_parse_test_algs[0].id, + "dummyalg", NULL); + + /* Create some controls */ + for (i = 0; i < ARRAY_SIZE(cs_dsp_get_ctl_test_names); i++) { + def.shortname = cs_dsp_get_ctl_test_names[i]; + def.offset_dsp_words = i; + if (i & BIT(0)) + def.mem_type = WMFW_ADSP2_XM; + else + def.mem_type = WMFW_ADSP2_YM; + + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + } + + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + cs_dsp_power_down(priv->dsp); + + /* All controls should be in the list */ + KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), + ARRAY_SIZE(cs_dsp_get_ctl_test_names)); + + /* Take a copy of the pointers to controls to compare against. */ + i = 0; + list_for_each_entry(walkctl, &priv->dsp->ctl_list, list) { + KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(ctls)); + ctls[i++] = walkctl; + } + + + /* Load the wmfw again */ + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + cs_dsp_power_down(priv->dsp); + + /* The number of controls should be the same */ + KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), + ARRAY_SIZE(cs_dsp_get_ctl_test_names)); + + /* And they should be the same objects */ + i = 0; + list_for_each_entry(walkctl, &priv->dsp->ctl_list, list) { + KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(ctls)); + KUNIT_ASSERT_PTR_EQ(test, walkctl, ctls[i++]); + } +} + +static const char * const cs_dsp_ctl_v2_compare_len_names[] = { + "LEFT", + "LEFT_", + "LEFT_SPK", + "LEFT_SPK_V", + "LEFT_SPK_VOL", + "LEFT_SPK_MUTE", + "LEFT_SPK_1", + "LEFT_X", + "LEFT2", +}; + +/* + * When comparing shortnames the full length of both strings is + * considered, not only the characters in the shortest string. + * So that "LEFT" is not the same as "LEFT2".
+ * This is specifically to test for the bug that was fixed by commit: + * 7ac1102b227b ("firmware: cs_dsp: Fix new control name check") + */ +static void cs_dsp_ctl_v2_compare_len(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + int i; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_parse_test_algs[0].id, + "dummyalg", NULL); + + for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_v2_compare_len_names); i++) { + def.shortname = cs_dsp_ctl_v2_compare_len_names[i]; + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + } + + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_v2_compare_len_names); i++) { + mutex_lock(&priv->dsp->pwr_lock); + ctl = cs_dsp_get_ctl(priv->dsp, cs_dsp_ctl_v2_compare_len_names[i], + def.mem_type, cs_dsp_ctl_parse_test_algs[0].id); + mutex_unlock(&priv->dsp->pwr_lock); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_EQ(test, ctl->subname_len, + strlen(cs_dsp_ctl_v2_compare_len_names[i])); + KUNIT_EXPECT_MEMEQ(test, ctl->subname, cs_dsp_ctl_v2_compare_len_names[i], + ctl->subname_len); + } +} + +static int cs_dsp_ctl_parse_test_common_init(struct kunit *test, struct cs_dsp *dsp, + int wmfw_version) +{ + struct cs_dsp_test *priv; + struct cs_dsp_test_local *local; + struct device *test_dev; + int ret; + + priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL); + if (!local) + return -ENOMEM; + + priv->test = test; + priv->dsp = dsp; + test->priv = priv; + priv->local = local; + priv->local->wmfw_version = wmfw_version; + + /* Create dummy struct device */ + test_dev = kunit_device_register(test, "cs_dsp_test_drv"); + if (IS_ERR(test_dev)) + return PTR_ERR(test_dev); + + dsp->dev = get_device(test_dev); + if (!dsp->dev) + return -ENODEV; + + ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev); + if (ret) + return ret; + + dev_set_drvdata(dsp->dev, priv); + + /* Allocate regmap */ + ret = cs_dsp_mock_regmap_init(priv); + if (ret) + return ret; + + /* + * There must always be a XM header with at least 1 algorithm, so create + * a dummy one that tests can use and extract it to a data blob. 
+ */ + local->xm_header = cs_dsp_create_mock_xm_header(priv, + cs_dsp_ctl_parse_test_algs, + ARRAY_SIZE(cs_dsp_ctl_parse_test_algs)); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header); + + local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, priv->local->wmfw_version); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->wmfw_builder); + + /* Add dummy XM header blob to wmfw */ + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + WMFW_ADSP2_XM, 0, + local->xm_header->blob_data, + local->xm_header->blob_size_bytes); + + /* Init cs_dsp */ + dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops); + + switch (dsp->type) { + case WMFW_ADSP2: + ret = cs_dsp_adsp2_init(dsp); + break; + case WMFW_HALO: + ret = cs_dsp_halo_init(dsp); + break; + default: + KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type); + return -EINVAL; + } + + if (ret) + return ret; + + /* Automatically call cs_dsp_remove() when test case ends */ + return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp); +} + +static int cs_dsp_ctl_parse_test_halo_init(struct kunit *test) +{ + struct cs_dsp *dsp; + + /* Fill in cs_dsp and initialize */ + dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL); + if (!dsp) + return -ENOMEM; + + dsp->num = 1; + dsp->type = WMFW_HALO; + dsp->mem = cs_dsp_mock_halo_dsp1_regions; + dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes); + dsp->base = cs_dsp_mock_halo_core_base; + dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base; + + return cs_dsp_ctl_parse_test_common_init(test, dsp, 3); +} + +static int cs_dsp_ctl_parse_test_adsp2_32bit_init(struct kunit *test, int wmfw_ver) +{ + struct cs_dsp *dsp; + + /* Fill in cs_dsp and initialize */ + dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL); + if (!dsp) + return -ENOMEM; + + dsp->num = 1; + dsp->type = WMFW_ADSP2; + dsp->rev = 1; + dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions; + dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes); + dsp->base = cs_dsp_mock_adsp2_32bit_sysbase; + + return cs_dsp_ctl_parse_test_common_init(test, dsp, wmfw_ver); +} + +static int cs_dsp_ctl_parse_test_adsp2_32bit_wmfw1_init(struct kunit *test) +{ + return cs_dsp_ctl_parse_test_adsp2_32bit_init(test, 1); +} + +static int cs_dsp_ctl_parse_test_adsp2_32bit_wmfw2_init(struct kunit *test) +{ + return cs_dsp_ctl_parse_test_adsp2_32bit_init(test, 2); +} + +static int cs_dsp_ctl_parse_test_adsp2_16bit_init(struct kunit *test, int wmfw_ver) +{ + struct cs_dsp *dsp; + + /* Fill in cs_dsp and initialize */ + dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL); + if (!dsp) + return -ENOMEM; + + dsp->num = 1; + dsp->type = WMFW_ADSP2; + dsp->rev = 0; + dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions; + dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes); + dsp->base = cs_dsp_mock_adsp2_16bit_sysbase; + + return cs_dsp_ctl_parse_test_common_init(test, dsp, wmfw_ver); +} + +static int cs_dsp_ctl_parse_test_adsp2_16bit_wmfw1_init(struct kunit *test) +{ + return cs_dsp_ctl_parse_test_adsp2_16bit_init(test, 1); +} + +static int cs_dsp_ctl_parse_test_adsp2_16bit_wmfw2_init(struct kunit *test) +{ + return cs_dsp_ctl_parse_test_adsp2_16bit_init(test, 2); +} + +static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_mem_type_param_cases[] = { + { .mem_type = WMFW_ADSP2_XM }, + { .mem_type = WMFW_ADSP2_YM }, + { .mem_type = WMFW_ADSP2_ZM }, +}; + +static void cs_dsp_ctl_mem_type_desc(const struct 
cs_dsp_ctl_parse_test_param *param, + char *desc) +{ + snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s", + cs_dsp_mem_region_name(param->mem_type)); +} + +KUNIT_ARRAY_PARAM(cs_dsp_ctl_mem_type, + cs_dsp_ctl_mem_type_param_cases, + cs_dsp_ctl_mem_type_desc); + +static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_alg_id_param_cases[] = { + { .alg_id = 0xb }, + { .alg_id = 0xfafa }, + { .alg_id = 0x9f1234 }, + { .alg_id = 0xff00ff }, +}; + +static void cs_dsp_ctl_alg_id_desc(const struct cs_dsp_ctl_parse_test_param *param, + char *desc) +{ + snprintf(desc, KUNIT_PARAM_DESC_SIZE, "alg_id:%#x", param->alg_id); +} + +KUNIT_ARRAY_PARAM(cs_dsp_ctl_alg_id, + cs_dsp_ctl_alg_id_param_cases, + cs_dsp_ctl_alg_id_desc); + +static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_offset_param_cases[] = { + { .offset = 0x0 }, + { .offset = 0x1 }, + { .offset = 0x2 }, + { .offset = 0x3 }, + { .offset = 0x4 }, + { .offset = 0x5 }, + { .offset = 0x6 }, + { .offset = 0x7 }, + { .offset = 0xe0 }, + { .offset = 0xf1 }, + { .offset = 0xfffe }, + { .offset = 0xffff }, +}; + +static void cs_dsp_ctl_offset_desc(const struct cs_dsp_ctl_parse_test_param *param, + char *desc) +{ + snprintf(desc, KUNIT_PARAM_DESC_SIZE, "offset:%#x", param->offset); +} + +KUNIT_ARRAY_PARAM(cs_dsp_ctl_offset, + cs_dsp_ctl_offset_param_cases, + cs_dsp_ctl_offset_desc); + +static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_length_param_cases[] = { + { .length = 0x4 }, + { .length = 0x8 }, + { .length = 0x18 }, + { .length = 0xf000 }, +}; + +static void cs_dsp_ctl_length_desc(const struct cs_dsp_ctl_parse_test_param *param, + char *desc) +{ + snprintf(desc, KUNIT_PARAM_DESC_SIZE, "length:%#x", param->length); +} + +KUNIT_ARRAY_PARAM(cs_dsp_ctl_length, + cs_dsp_ctl_length_param_cases, + cs_dsp_ctl_length_desc); + +/* Note: some control types mandate specific flags settings */ +static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_type_param_cases[] = { + { .ctl_type = WMFW_CTL_TYPE_BYTES, + .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE }, + { .ctl_type = WMFW_CTL_TYPE_ACKED, + .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE }, + { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, + .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE | + WMFW_CTL_FLAG_SYS }, + { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, + .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_SYS }, + { .ctl_type = WMFW_CTL_TYPE_FWEVENT, + .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE | + WMFW_CTL_FLAG_SYS }, +}; + +static void cs_dsp_ctl_type_flags_desc(const struct cs_dsp_ctl_parse_test_param *param, + char *desc) +{ + snprintf(desc, KUNIT_PARAM_DESC_SIZE, "ctl_type:%#x flags:%#x", + param->ctl_type, param->flags); +} + +KUNIT_ARRAY_PARAM(cs_dsp_ctl_type, + cs_dsp_ctl_type_param_cases, + cs_dsp_ctl_type_flags_desc); + +static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_flags_param_cases[] = { + { .flags = 0 }, + { .flags = WMFW_CTL_FLAG_READABLE }, + { .flags = WMFW_CTL_FLAG_WRITEABLE }, + { .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE }, + { .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE }, + { .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE }, + { .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE }, + { .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE }, + { .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE }, + { .flags = WMFW_CTL_FLAG_SYS | 
WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE }, + { .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | + WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE }, +}; + +static void cs_dsp_ctl_flags_desc(const struct cs_dsp_ctl_parse_test_param *param, + char *desc) +{ + snprintf(desc, KUNIT_PARAM_DESC_SIZE, "flags:%#x", param->flags); +} + +KUNIT_ARRAY_PARAM(cs_dsp_ctl_flags, + cs_dsp_ctl_flags_param_cases, + cs_dsp_ctl_flags_desc); + +static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_illegal_type_flags_param_cases[] = { + /* ACKED control must be volatile + read + write */ + { .ctl_type = WMFW_CTL_TYPE_ACKED, .flags = 0 }, + { .ctl_type = WMFW_CTL_TYPE_ACKED, .flags = WMFW_CTL_FLAG_READABLE }, + { .ctl_type = WMFW_CTL_TYPE_ACKED, .flags = WMFW_CTL_FLAG_WRITEABLE }, + { .ctl_type = WMFW_CTL_TYPE_ACKED, .flags = WMFW_CTL_FLAG_VOLATILE }, + { .ctl_type = WMFW_CTL_TYPE_ACKED, + .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE }, + { .ctl_type = WMFW_CTL_TYPE_ACKED, + .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE }, + + /* HOSTEVENT must be system + volatile + read + write */ + { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, .flags = 0 }, + { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, .flags = WMFW_CTL_FLAG_READABLE }, + { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, .flags = WMFW_CTL_FLAG_WRITEABLE }, + { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, + .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE }, + { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, .flags = WMFW_CTL_FLAG_VOLATILE }, + { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, + .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE }, + { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, + .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE }, + { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, + .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE }, + { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, .flags = WMFW_CTL_FLAG_SYS }, + { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, + .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE }, + { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, + .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE }, + { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, + .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE }, + { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, + .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE }, + { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, + .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE }, + + /* FWEVENT rules same as HOSTEVENT */ + { .ctl_type = WMFW_CTL_TYPE_FWEVENT, .flags = 0 }, + { .ctl_type = WMFW_CTL_TYPE_FWEVENT, .flags = WMFW_CTL_FLAG_READABLE }, + { .ctl_type = WMFW_CTL_TYPE_FWEVENT, .flags = WMFW_CTL_FLAG_WRITEABLE }, + { .ctl_type = WMFW_CTL_TYPE_FWEVENT, + .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE }, + { .ctl_type = WMFW_CTL_TYPE_FWEVENT, .flags = WMFW_CTL_FLAG_VOLATILE }, + { .ctl_type = WMFW_CTL_TYPE_FWEVENT, + .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE }, + { .ctl_type = WMFW_CTL_TYPE_FWEVENT, + .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE }, + { .ctl_type = WMFW_CTL_TYPE_FWEVENT, + .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE }, + { .ctl_type = WMFW_CTL_TYPE_FWEVENT, .flags = WMFW_CTL_FLAG_SYS }, + { .ctl_type = WMFW_CTL_TYPE_FWEVENT, + .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE }, + { .ctl_type = WMFW_CTL_TYPE_FWEVENT, + .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE }, + { .ctl_type = WMFW_CTL_TYPE_FWEVENT, + .flags = WMFW_CTL_FLAG_SYS | 
WMFW_CTL_FLAG_VOLATILE }, + { .ctl_type = WMFW_CTL_TYPE_FWEVENT, + .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE }, + { .ctl_type = WMFW_CTL_TYPE_FWEVENT, + .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE }, + + /* + * HOSTBUFFER must be system + volatile + readable or + * system + volatile + readable + writeable + */ + { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, .flags = 0 }, + { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, .flags = WMFW_CTL_FLAG_READABLE }, + { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, .flags = WMFW_CTL_FLAG_WRITEABLE }, + { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, + .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE}, + { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, .flags = WMFW_CTL_FLAG_VOLATILE }, + { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, + .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE }, + { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, + .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE }, + { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, + .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE }, + { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, .flags = WMFW_CTL_FLAG_SYS }, + { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, + .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE }, + { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, + .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE }, + { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, + .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE }, + { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, + .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE }, + { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, + .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE }, +}; + +KUNIT_ARRAY_PARAM(cs_dsp_ctl_illegal_type_flags, + cs_dsp_ctl_illegal_type_flags_param_cases, + cs_dsp_ctl_type_flags_desc); + +static struct kunit_case cs_dsp_ctl_parse_test_cases_v1[] = { + KUNIT_CASE(cs_dsp_ctl_parse_no_coeffs), + KUNIT_CASE(cs_dsp_ctl_parse_v1_name), + KUNIT_CASE(cs_dsp_ctl_parse_empty_v1_name), + KUNIT_CASE(cs_dsp_ctl_parse_max_v1_name), + + KUNIT_CASE_PARAM(cs_dsp_ctl_parse_memory_type, cs_dsp_ctl_mem_type_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_parse_alg_id, cs_dsp_ctl_alg_id_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_parse_alg_mem, cs_dsp_ctl_mem_type_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_parse_offset, cs_dsp_ctl_offset_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_parse_length, cs_dsp_ctl_length_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_parse_ctl_type, cs_dsp_ctl_type_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_parse_flags, cs_dsp_ctl_flags_gen_params), + KUNIT_CASE(cs_dsp_ctl_parse_fw_name), + + KUNIT_CASE(cs_dsp_ctl_alg_id_uniqueness), + KUNIT_CASE(cs_dsp_ctl_mem_uniqueness), + KUNIT_CASE(cs_dsp_ctl_fw_uniqueness), + KUNIT_CASE(cs_dsp_ctl_squash_reloaded_controls), + + { } /* terminator */ +}; + +static struct kunit_case cs_dsp_ctl_parse_test_cases_v2_v3[] = { + KUNIT_CASE(cs_dsp_ctl_parse_no_coeffs), + KUNIT_CASE(cs_dsp_ctl_parse_short_name), + KUNIT_CASE(cs_dsp_ctl_parse_min_short_name), + KUNIT_CASE(cs_dsp_ctl_parse_max_short_name), + KUNIT_CASE(cs_dsp_ctl_parse_with_min_fullname), + KUNIT_CASE(cs_dsp_ctl_parse_with_max_fullname), + KUNIT_CASE(cs_dsp_ctl_parse_with_min_description), + KUNIT_CASE(cs_dsp_ctl_parse_with_max_description), + KUNIT_CASE(cs_dsp_ctl_parse_with_max_fullname_and_description), + KUNIT_CASE(cs_dsp_ctl_shortname_alignment), + KUNIT_CASE(cs_dsp_ctl_fullname_alignment), + KUNIT_CASE(cs_dsp_ctl_description_alignment), 
+ KUNIT_CASE(cs_dsp_get_ctl_test), + KUNIT_CASE(cs_dsp_get_ctl_test_multiple_wmfw), + + KUNIT_CASE_PARAM(cs_dsp_ctl_parse_memory_type, cs_dsp_ctl_mem_type_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_parse_alg_id, cs_dsp_ctl_alg_id_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_parse_alg_mem, cs_dsp_ctl_mem_type_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_parse_offset, cs_dsp_ctl_offset_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_parse_length, cs_dsp_ctl_length_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_parse_ctl_type, cs_dsp_ctl_type_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_parse_flags, cs_dsp_ctl_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_illegal_type_flags, + cs_dsp_ctl_illegal_type_flags_gen_params), + KUNIT_CASE(cs_dsp_ctl_parse_fw_name), + + KUNIT_CASE(cs_dsp_ctl_alg_id_uniqueness), + KUNIT_CASE(cs_dsp_ctl_mem_uniqueness), + KUNIT_CASE(cs_dsp_ctl_fw_uniqueness), + KUNIT_CASE(cs_dsp_ctl_squash_reloaded_controls), + KUNIT_CASE(cs_dsp_ctl_v2_squash_reloaded_controls), + KUNIT_CASE(cs_dsp_ctl_v2_compare_len), + + { } /* terminator */ +}; + +static struct kunit_suite cs_dsp_ctl_parse_test_halo = { + .name = "cs_dsp_ctl_parse_wmfwV3_halo", + .init = cs_dsp_ctl_parse_test_halo_init, + .test_cases = cs_dsp_ctl_parse_test_cases_v2_v3, +}; + +static struct kunit_suite cs_dsp_ctl_parse_test_adsp2_32bit_wmfw1 = { + .name = "cs_dsp_ctl_parse_wmfwV1_adsp2_32bit", + .init = cs_dsp_ctl_parse_test_adsp2_32bit_wmfw1_init, + .test_cases = cs_dsp_ctl_parse_test_cases_v1, +}; + +static struct kunit_suite cs_dsp_ctl_parse_test_adsp2_32bit_wmfw2 = { + .name = "cs_dsp_ctl_parse_wmfwV2_adsp2_32bit", + .init = cs_dsp_ctl_parse_test_adsp2_32bit_wmfw2_init, + .test_cases = cs_dsp_ctl_parse_test_cases_v2_v3, +}; + +static struct kunit_suite cs_dsp_ctl_parse_test_adsp2_16bit_wmfw1 = { + .name = "cs_dsp_ctl_parse_wmfwV1_adsp2_16bit", + .init = cs_dsp_ctl_parse_test_adsp2_16bit_wmfw1_init, + .test_cases = cs_dsp_ctl_parse_test_cases_v1, +}; + +static struct kunit_suite cs_dsp_ctl_parse_test_adsp2_16bit_wmfw2 = { + .name = "cs_dsp_ctl_parse_wmfwV2_adsp2_16bit", + .init = cs_dsp_ctl_parse_test_adsp2_16bit_wmfw2_init, + .test_cases = cs_dsp_ctl_parse_test_cases_v2_v3, +}; + +kunit_test_suites(&cs_dsp_ctl_parse_test_halo, + &cs_dsp_ctl_parse_test_adsp2_32bit_wmfw1, + &cs_dsp_ctl_parse_test_adsp2_32bit_wmfw2, + &cs_dsp_ctl_parse_test_adsp2_16bit_wmfw1, + &cs_dsp_ctl_parse_test_adsp2_16bit_wmfw2); diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_control_rw.c b/drivers/firmware/cirrus/test/cs_dsp_test_control_rw.c new file mode 100644 index 000000000000..bda00a95d4f9 --- /dev/null +++ b/drivers/firmware/cirrus/test/cs_dsp_test_control_rw.c @@ -0,0 +1,2669 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// KUnit tests for cs_dsp. +// +// Copyright (C) 2024 Cirrus Logic, Inc. and +// Cirrus Logic International Semiconductor Ltd. 
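Both of these test files are built around KUnit's parameterised-test helpers: KUNIT_ARRAY_PARAM() turns an array of parameter structs plus a description callback into a *_gen_params generator, and KUNIT_CASE_PARAM() re-runs a test function once per generated parameter. The self-contained sketch below shows that pattern in isolation; it is not part of this patch, and every example_* identifier is invented for illustration.

#include <kunit/test.h>
#include <linux/kernel.h>

struct example_param {
	unsigned int value;
};

static const struct example_param example_param_cases[] = {
	{ .value = 1 },
	{ .value = 2 },
	{ .value = 3 },
};

/* Printed as the per-case name in the test output */
static void example_param_desc(const struct example_param *param, char *desc)
{
	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "value:%u", param->value);
}

/* Defines a generator function named example_gen_params() */
KUNIT_ARRAY_PARAM(example, example_param_cases, example_param_desc);

static void example_param_test(struct kunit *test)
{
	const struct example_param *param = test->param_value;

	KUNIT_EXPECT_GT(test, param->value, 0u);
}

static struct kunit_case example_test_cases[] = {
	/* Runs example_param_test() once per entry in example_param_cases[] */
	KUNIT_CASE_PARAM(example_param_test, example_gen_params),
	{ } /* terminator */
};

static struct kunit_suite example_test_suite = {
	.name = "example_param_suite",
	.test_cases = example_test_cases,
};

kunit_test_suite(example_test_suite);

The suites in this patch follow the same shape, only with cs_dsp-specific parameter structs and far larger case tables.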
+ +#include <kunit/device.h> +#include <kunit/resource.h> +#include <kunit/test.h> +#include <linux/build_bug.h> +#include <linux/firmware/cirrus/cs_dsp.h> +#include <linux/firmware/cirrus/cs_dsp_test_utils.h> +#include <linux/firmware/cirrus/wmfw.h> +#include <linux/list.h> +#include <linux/random.h> +#include <linux/regmap.h> + +KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *); +KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_stop_wrapper, cs_dsp_stop, struct cs_dsp *); +KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *); + +struct cs_dsp_test_local { + struct cs_dsp_mock_xm_header *xm_header; + struct cs_dsp_mock_wmfw_builder *wmfw_builder; + int wmfw_version; +}; + +struct cs_dsp_ctl_rw_test_param { + int mem_type; + int alg_id; + unsigned int offs_words; + unsigned int len_bytes; + u16 ctl_type; + u16 flags; +}; + +static const struct cs_dsp_mock_alg_def cs_dsp_ctl_rw_test_algs[] = { + { + .id = 0xfafa, + .ver = 0x100000, + .xm_base_words = 60, + .xm_size_words = 1000, + .ym_base_words = 0, + .ym_size_words = 1000, + .zm_base_words = 0, + .zm_size_words = 1000, + }, + { + .id = 0xb, + .ver = 0x100001, + .xm_base_words = 1060, + .xm_size_words = 1000, + .ym_base_words = 1000, + .ym_size_words = 1000, + .zm_base_words = 1000, + .zm_size_words = 1000, + }, + { + .id = 0x9f1234, + .ver = 0x100500, + .xm_base_words = 2060, + .xm_size_words = 32, + .ym_base_words = 2000, + .ym_size_words = 32, + .zm_base_words = 2000, + .zm_size_words = 32, + }, + { + .id = 0xff00ff, + .ver = 0x300113, + .xm_base_words = 2100, + .xm_size_words = 32, + .ym_base_words = 2032, + .ym_size_words = 32, + .zm_base_words = 2032, + .zm_size_words = 32, + }, +}; + +static const struct cs_dsp_mock_coeff_def mock_coeff_template = { + .shortname = "Dummy Coeff", + .type = WMFW_CTL_TYPE_BYTES, + .mem_type = WMFW_ADSP2_YM, + .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE, + .length_bytes = 4, +}; + +static int _find_alg_entry(struct kunit *test, unsigned int alg_id) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_rw_test_algs); ++i) { + if (cs_dsp_ctl_rw_test_algs[i].id == alg_id) + break; + } + + KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(cs_dsp_ctl_rw_test_algs)); + + return i; +} + +static int _get_alg_mem_base_words(struct kunit *test, int alg_index, int mem_type) +{ + switch (mem_type) { + case WMFW_ADSP2_XM: + return cs_dsp_ctl_rw_test_algs[alg_index].xm_base_words; + case WMFW_ADSP2_YM: + return cs_dsp_ctl_rw_test_algs[alg_index].ym_base_words; + case WMFW_ADSP2_ZM: + return cs_dsp_ctl_rw_test_algs[alg_index].zm_base_words; + default: + KUNIT_FAIL(test, "Bug in test: illegal memory type %d\n", mem_type); + return 0; + } +} + +static struct cs_dsp_mock_wmfw_builder *_create_dummy_wmfw(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp_mock_wmfw_builder *builder; + + builder = cs_dsp_mock_wmfw_init(priv, local->wmfw_version); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder); + + /* Init an XM header */ + cs_dsp_mock_wmfw_add_data_block(builder, + WMFW_ADSP2_XM, 0, + local->xm_header->blob_data, + local->xm_header->blob_size_bytes); + + return builder; +} + +/* + * Write to a control while the firmware is running. + * This should write to the underlying registers. 
+ */ +static void cs_dsp_ctl_write_running(struct kunit *test) +{ + const struct cs_dsp_ctl_rw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals, *readback; + + reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + /* Create some initial register content */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + memset(reg_vals, 0, param->len_bytes); + regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_rw_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + + /* Start the firmware and add an action to stop it during cleanup */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0); + + /* + * Write new data to the control, it should be written to the registers + * and cs_dsp_coeff_lock_and_write_ctrl() should return 1 to indicate + * that the control content changed. + */ + get_random_bytes(reg_vals, param->len_bytes); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes), + 1); + KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes); + + /* Drop expected writes and the regmap cache should be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * Read from a volatile control while the firmware is running. + * This should return the current state of the underlying registers. 
+ */ +static void cs_dsp_ctl_read_volatile_running(struct kunit *test) +{ + const struct cs_dsp_ctl_rw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals, *readback; + + reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + /* Create some initial register content */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + memset(reg_vals, 0, param->len_bytes); + regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_rw_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + + /* Start the firmware and add an action to stop it during cleanup */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0); + + /* Read the control, it should return the current register content */ + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes); + + /* + * Change the register content and read the control, it should return + * the new register content + */ + get_random_bytes(reg_vals, param->len_bytes); + KUNIT_ASSERT_EQ(test, regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes), 0); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes); +} + +/* + * Read from a volatile control before the firmware is started. + * This should return an error. 
+ */ +static void cs_dsp_ctl_read_volatile_not_started(struct kunit *test) +{ + const struct cs_dsp_ctl_rw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals; + + reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + /* Create some initial register content */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_rw_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + /* Read the control, it should return an error */ + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_LT(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, param->len_bytes), + 0); +} + +/* + * Read from a volatile control after the firmware has stopped. + * This should return an error. 
+ */ +static void cs_dsp_ctl_read_volatile_stopped(struct kunit *test) +{ + const struct cs_dsp_ctl_rw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals; + + reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + /* Create some initial register content */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_rw_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + /* Start and stop the firmware */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + cs_dsp_stop(dsp); + + /* Read the control, it should return an error */ + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_LT(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, param->len_bytes), + 0); +} + +/* + * Read from a volatile control after the DSP has been powered down. + * This should return an error. 
+ */ +static void cs_dsp_ctl_read_volatile_stopped_powered_down(struct kunit *test) +{ + const struct cs_dsp_ctl_rw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals; + + reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + /* Create some initial register content */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_rw_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + /* Start and stop the firmware then power down */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + cs_dsp_stop(dsp); + cs_dsp_power_down(dsp); + + /* Read the control, it should return an error */ + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_LT(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, param->len_bytes), + 0); +} + +/* + * Read from a volatile control when a different firmware is currently + * loaded into the DSP. + * Should return an error. 
+ */ +static void cs_dsp_ctl_read_volatile_not_current_loaded_fw(struct kunit *test) +{ + const struct cs_dsp_ctl_rw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test); + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals; + + reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + /* Create some DSP data to be read into the control cache */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_rw_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + /* Power-up DSP */ + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + /* Power-down DSP then power-up with a different firmware */ + cs_dsp_power_down(dsp); + wmfw = cs_dsp_mock_wmfw_get_firmware(builder2); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0); + + /* Read the control, it should return an error */ + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_LT(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, param->len_bytes), + 0); +} + +/* + * Read from a volatile control when a different firmware is currently + * running. + * Should return an error. 
+ */ +static void cs_dsp_ctl_read_volatile_not_current_running_fw(struct kunit *test) +{ + const struct cs_dsp_ctl_rw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test); + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals; + + reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + /* Create some DSP data to be read into the control cache */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_rw_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + /* Power-up DSP */ + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + /* Power-down DSP then power-up with a different firmware */ + cs_dsp_power_down(dsp); + wmfw = cs_dsp_mock_wmfw_get_firmware(builder2); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0); + + /* Start the firmware and add an action to stop it during cleanup */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0); + + /* Read the control, it should return an error */ + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_LT(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, param->len_bytes), + 0); +} + +/* + * Write to a volatile control before the firmware is started. + * This should return an error. 
+ */ +static void cs_dsp_ctl_write_volatile_not_started(struct kunit *test) +{ + const struct cs_dsp_ctl_rw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals; + + reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + /* Create some initial register content */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_rw_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + /* Drop expected writes and the regmap cache should be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes); + + /* Write the control, it should return an error */ + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_LT(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes), + 0); + + /* Should not have been any writes to registers */ + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * Write to a volatile control after the firmware has stopped. + * This should return an error. 
+ */ +static void cs_dsp_ctl_write_volatile_stopped(struct kunit *test) +{ + const struct cs_dsp_ctl_rw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals; + + reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + /* Create some initial register content */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_rw_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + /* Start and stop the firmware */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + cs_dsp_stop(dsp); + + /* Drop expected writes and the regmap cache should be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes); + + /* Write the control, it should return an error */ + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_LT(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes), + 0); + + /* Should not have been any writes to registers */ + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * Write to a volatile control after the DSP has been powered down. + * This should return an error. 
+ */ +static void cs_dsp_ctl_write_volatile_stopped_powered_down(struct kunit *test) +{ + const struct cs_dsp_ctl_rw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals; + + reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + /* Create some initial register content */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_rw_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + /* Start and stop the firmware then power down */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + cs_dsp_stop(dsp); + cs_dsp_power_down(dsp); + + /* Drop expected writes and the regmap cache should be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes); + + /* Write the control, it should return an error */ + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_LT(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes), + 0); + + /* Should not have been any writes to registers */ + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * Write to a volatile control when a different firmware is currently + * loaded into the DSP. + * Should return an error. 
+ */ +static void cs_dsp_ctl_write_volatile_not_current_loaded_fw(struct kunit *test) +{ + const struct cs_dsp_ctl_rw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test); + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals; + + reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + /* Create some DSP data to be read into the control cache */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_rw_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + /* Power-up DSP */ + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + /* Power-down DSP then power-up with a different firmware */ + cs_dsp_power_down(dsp); + wmfw = cs_dsp_mock_wmfw_get_firmware(builder2); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0); + + /* Drop expected writes and the regmap cache should be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes); + + /* Write the control, it should return an error */ + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_LT(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes), + 0); + + /* Should not have been any writes to registers */ + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * Write to a volatile control when a different firmware is currently + * running. + * Should return an error. 
+ */ +static void cs_dsp_ctl_write_volatile_not_current_running_fw(struct kunit *test) +{ + const struct cs_dsp_ctl_rw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test); + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals; + + reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + /* Create some DSP data to be read into the control cache */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes); + + /* Create control pointing to this data */ + def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_rw_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + /* Power-up DSP */ + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + /* Power-down DSP then power-up with a different firmware */ + cs_dsp_power_down(dsp); + wmfw = cs_dsp_mock_wmfw_get_firmware(builder2); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0); + + /* Start the firmware and add an action to stop it during cleanup */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0); + + /* Drop expected writes and the regmap cache should be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes); + + /* Write the control, it should return an error */ + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_LT(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes), + 0); + + /* Should not have been any writes to registers */ + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * Read from an offset into the control data. Should return only the + * portion of data from the offset position. 
+ */
+static void cs_dsp_ctl_read_with_seek(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ for (seek_words = 1; seek_words < (def.length_bytes / sizeof(u32)); seek_words++) {
+ unsigned int len_bytes = def.length_bytes - (seek_words * sizeof(u32));
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, seek_words,
+ readback, len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, &reg_vals[seek_words], len_bytes);
+ }
+}
+
+/*
+ * Read from an offset into the control cache. Should return only the
+ * portion of data from the offset position.
+ * Same as cs_dsp_ctl_read_with_seek() except the control is cached
+ * and the firmware is not running.
+ */
+static void cs_dsp_ctl_read_cache_with_seek(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start and stop the firmware so the read will come from the cache */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ for (seek_words = 1; seek_words < (def.length_bytes / sizeof(u32)); seek_words++) {
+ unsigned int len_bytes = def.length_bytes - (seek_words * sizeof(u32));
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, seek_words,
+ readback, len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, &reg_vals[seek_words], len_bytes);
+ }
+}
+
+/*
+ * Read less than the full length of data from a control. Should return
+ * only the requested number of bytes.
+ */ +static void cs_dsp_ctl_read_truncated(struct kunit *test) +{ + const struct cs_dsp_ctl_rw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals, *readback; + unsigned int len_bytes; + + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = 48; + + reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + /* Create some initial register content */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + get_random_bytes(reg_vals, def.length_bytes); + regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes); + + /* Create control pointing to this data */ + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_rw_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + + /* Start the firmware and add an action to stop it during cleanup */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0); + + /* Reads are only allowed to be a multiple of the DSP word length */ + for (len_bytes = sizeof(u32); len_bytes < def.length_bytes; len_bytes += sizeof(u32)) { + memset(readback, 0, def.length_bytes); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, len_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, len_bytes); + KUNIT_EXPECT_MEMNEQ(test, + (u8 *)readback + len_bytes, + (u8 *)reg_vals + len_bytes, + def.length_bytes - len_bytes); + } +} + +/* + * Read less than the full length of data from a cached control. + * Should return only the requested number of bytes. + * Same as cs_dsp_ctl_read_truncated() except the control is cached + * and the firmware is not running. 
+ */ +static void cs_dsp_ctl_read_cache_truncated(struct kunit *test) +{ + const struct cs_dsp_ctl_rw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals, *readback; + unsigned int len_bytes; + + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = 48; + + reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + /* Create some initial register content */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + get_random_bytes(reg_vals, def.length_bytes); + regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes); + + /* Create control pointing to this data */ + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_rw_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + + /* Start and stop the firmware so the read will come from the cache */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + cs_dsp_stop(dsp); + + /* Reads are only allowed to be a multiple of the DSP word length */ + for (len_bytes = sizeof(u32); len_bytes < def.length_bytes; len_bytes += sizeof(u32)) { + memset(readback, 0, def.length_bytes); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, len_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, len_bytes); + KUNIT_EXPECT_MEMNEQ(test, + (u8 *)readback + len_bytes, + (u8 *)reg_vals + len_bytes, + def.length_bytes - len_bytes); + } +} + +/* + * Write to an offset into the control data. Should only change the + * portion of data from the offset position. 
+ */ +static void cs_dsp_ctl_write_with_seek(struct kunit *test) +{ + const struct cs_dsp_ctl_rw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals, *readback, *new_data; + unsigned int seek_words; + + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = 48; + + reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + new_data = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_data); + + /* Create some initial register content */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + get_random_bytes(reg_vals, def.length_bytes); + regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes); + + /* Create control pointing to this data */ + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_rw_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + + /* Start the firmware and add an action to stop it during cleanup */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0); + + for (seek_words = 1; seek_words < (def.length_bytes / sizeof(u32)); seek_words++) { + unsigned int len_bytes = def.length_bytes - (seek_words * sizeof(u32)); + + /* Reset the register values to the test data */ + regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes); + + get_random_bytes(new_data, def.length_bytes); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, seek_words, + new_data, len_bytes), + 1); + KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, def.length_bytes), + 0); + /* Initial portion of readback should be unchanged */ + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, seek_words * sizeof(u32)); + KUNIT_EXPECT_MEMEQ(test, &readback[seek_words], new_data, len_bytes); + } +} + +/* + * Write to an offset into the control cache. Should only change the + * portion of data from the offset position. + * Same as cs_dsp_ctl_write_with_seek() except the control is cached + * and the firmware is not running. 
+ */ +static void cs_dsp_ctl_write_cache_with_seek(struct kunit *test) +{ + const struct cs_dsp_ctl_rw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals, *readback, *new_data; + unsigned int seek_words; + + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = 48; + + reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + readback = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + new_data = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_data); + + /* Create some initial register content */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + get_random_bytes(reg_vals, def.length_bytes); + regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes); + + /* Create control pointing to this data */ + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_rw_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + + /* Start and stop the firmware so the read will come from the cache */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + cs_dsp_stop(dsp); + + for (seek_words = 1; seek_words < (def.length_bytes / sizeof(u32)); seek_words++) { + unsigned int len_bytes = def.length_bytes - (seek_words * sizeof(u32)); + + /* Reset the cache to the test data */ + KUNIT_EXPECT_GE(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, + def.length_bytes), + 0); + + get_random_bytes(new_data, def.length_bytes); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, seek_words, + new_data, len_bytes), + 1); + + memset(readback, 0, def.length_bytes); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, + def.length_bytes), + 0); + /* Initial portion of readback should be unchanged */ + KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, seek_words * sizeof(u32)); + KUNIT_EXPECT_MEMEQ(test, &readback[seek_words], new_data, len_bytes); + } +} + +/* + * Write less than the full length of data to a control. Should only + * change the requested number of bytes. 
+ */ +static void cs_dsp_ctl_write_truncated(struct kunit *test) +{ + const struct cs_dsp_ctl_rw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals, *readback, *new_data; + unsigned int len_bytes; + + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = 48; + + reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + readback = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + new_data = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_data); + + /* Create some initial register content */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + get_random_bytes(reg_vals, def.length_bytes); + regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes); + + /* Create control pointing to this data */ + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_rw_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + + /* Start the firmware and add an action to stop it during cleanup */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0); + + /* Writes are only allowed to be a multiple of the DSP word length */ + for (len_bytes = sizeof(u32); len_bytes < def.length_bytes; len_bytes += sizeof(u32)) { + /* Reset the register values to the test data */ + regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes); + + get_random_bytes(new_data, def.length_bytes); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, new_data, len_bytes), + 1); + + memset(readback, 0, def.length_bytes); + KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, def.length_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, new_data, len_bytes); + KUNIT_EXPECT_MEMEQ(test, + (u8 *)readback + len_bytes, + (u8 *)reg_vals + len_bytes, + def.length_bytes - len_bytes); + } +} + +/* + * Write less than the full length of data to a cached control. + * Should only change the requested number of bytes. + * Same as cs_dsp_ctl_write_truncated() except the control is cached + * and the firmware is not running. 
+ */ +static void cs_dsp_ctl_write_cache_truncated(struct kunit *test) +{ + const struct cs_dsp_ctl_rw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals, *readback, *new_data; + unsigned int len_bytes; + + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = 48; + + reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + readback = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + new_data = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_data); + + /* Create some initial register content */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + get_random_bytes(reg_vals, def.length_bytes); + regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes); + + /* Create control pointing to this data */ + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_rw_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + + /* Start and stop the firmware so the read will come from the cache */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + cs_dsp_stop(dsp); + + /* Writes are only allowed to be a multiple of the DSP word length */ + for (len_bytes = sizeof(u32); len_bytes < def.length_bytes; len_bytes += sizeof(u32)) { + /* Reset the cache to the test data */ + KUNIT_EXPECT_GE(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, + def.length_bytes), + 0); + + get_random_bytes(new_data, def.length_bytes); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, new_data, len_bytes), + 1); + + memset(readback, 0, def.length_bytes); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, + def.length_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, new_data, len_bytes); + KUNIT_EXPECT_MEMEQ(test, + (u8 *)readback + len_bytes, + (u8 *)reg_vals + len_bytes, + def.length_bytes - len_bytes); + } +} + +/* + * Read from an offset that is beyond the end of the control data. + * Should return an error. 
+ */ +static void cs_dsp_ctl_read_with_seek_oob(struct kunit *test) +{ + const struct cs_dsp_ctl_rw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals; + unsigned int seek_words; + + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + /* Create some initial register content */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes); + + /* Create control pointing to this data */ + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_rw_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + + /* Start the firmware and add an action to stop it during cleanup */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0); + + seek_words = def.length_bytes / sizeof(u32); + KUNIT_EXPECT_LT(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, seek_words, + reg_vals, def.length_bytes), + 0); + + if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) { + /* Stop firmware and repeat the read from the cache */ + kunit_release_action(test, _cs_dsp_stop_wrapper, dsp); + KUNIT_ASSERT_FALSE(test, dsp->running); + + KUNIT_EXPECT_LT(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, seek_words, + reg_vals, def.length_bytes), + 0); + } +} + +/* + * Read more data than the length of the control data. + * Should return an error. 
+ */ +static void cs_dsp_ctl_read_with_length_overflow(struct kunit *test) +{ + const struct cs_dsp_ctl_rw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals; + + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + /* Create some initial register content */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes); + + /* Create control pointing to this data */ + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_rw_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + + /* Start the firmware and add an action to stop it during cleanup */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0); + + KUNIT_EXPECT_LT(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, def.length_bytes + 1), + 0); + + if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) { + /* Stop firmware and repeat the read from the cache */ + kunit_release_action(test, _cs_dsp_stop_wrapper, dsp); + KUNIT_ASSERT_FALSE(test, dsp->running); + + KUNIT_EXPECT_LT(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, + def.length_bytes + 1), + 0); + } +} + +/* + * Read with a seek and length that ends beyond the end of control data. + * Should return an error. 
+ */ +static void cs_dsp_ctl_read_with_seek_and_length_oob(struct kunit *test) +{ + const struct cs_dsp_ctl_rw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals; + + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + /* Create some initial register content */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes); + + /* Create control pointing to this data */ + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_rw_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + + /* Start the firmware and add an action to stop it during cleanup */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0); + + /* + * Read full control length but at a start offset of 1 so that + * offset + length exceeds the length of the control. + */ + KUNIT_EXPECT_LT(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 1, reg_vals, def.length_bytes), + 0); + + if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) { + /* Stop firmware and repeat the read from the cache */ + kunit_release_action(test, _cs_dsp_stop_wrapper, dsp); + KUNIT_ASSERT_FALSE(test, dsp->running); + + KUNIT_EXPECT_LT(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 1, reg_vals, + def.length_bytes), + 0); + } +} + +/* + * Write to an offset that is beyond the end of the control data. + * Should return an error without touching any registers. 
+ */ +static void cs_dsp_ctl_write_with_seek_oob(struct kunit *test) +{ + const struct cs_dsp_ctl_rw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals; + unsigned int seek_words; + + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + /* Create some initial register content */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes); + + /* Create control pointing to this data */ + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_rw_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + + /* Start the firmware and add an action to stop it during cleanup */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0); + + /* Drop expected writes and the regmap cache should be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes); + + get_random_bytes(reg_vals, def.length_bytes); + seek_words = def.length_bytes / sizeof(u32); + KUNIT_EXPECT_LT(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, seek_words, + reg_vals, def.length_bytes), + 0); + + if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) { + /* Stop firmware and repeat the write to the cache */ + kunit_release_action(test, _cs_dsp_stop_wrapper, dsp); + KUNIT_ASSERT_FALSE(test, dsp->running); + + get_random_bytes(reg_vals, def.length_bytes); + KUNIT_EXPECT_LT(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, seek_words, + reg_vals, def.length_bytes), + 0); + } + + /* Check that it didn't write any registers */ + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * Write more data than the length of the control data. + * Should return an error. 
+ */ +static void cs_dsp_ctl_write_with_length_overflow(struct kunit *test) +{ + const struct cs_dsp_ctl_rw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals; + + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + /* Create some initial register content */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes); + + /* Create control pointing to this data */ + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_rw_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + + /* Start the firmware and add an action to stop it during cleanup */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0); + + /* Drop expected writes and the regmap cache should be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes); + + get_random_bytes(reg_vals, def.length_bytes); + KUNIT_EXPECT_LT(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, def.length_bytes + 1), + 0); + + if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) { + /* Stop firmware and repeat the write to the cache */ + kunit_release_action(test, _cs_dsp_stop_wrapper, dsp); + KUNIT_ASSERT_FALSE(test, dsp->running); + + get_random_bytes(reg_vals, def.length_bytes); + KUNIT_EXPECT_LT(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, + def.length_bytes + 1), + 0); + } + + /* Check that it didn't write any registers */ + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * Write with a seek and length that ends beyond the end of control data. + * Should return an error. 
+ */ +static void cs_dsp_ctl_write_with_seek_and_length_oob(struct kunit *test) +{ + const struct cs_dsp_ctl_rw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals; + + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + /* Create some initial register content */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes); + + /* Create control pointing to this data */ + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_rw_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + + /* Start the firmware and add an action to stop it during cleanup */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0); + + /* Drop expected writes and the regmap cache should be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes); + + /* + * Write full control length but at a start offset of 1 so that + * offset + length exceeds the length of the control. + */ + get_random_bytes(reg_vals, def.length_bytes); + KUNIT_EXPECT_LT(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, 1, reg_vals, def.length_bytes), + 0); + + if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) { + /* Stop firmware and repeat the write to the cache */ + kunit_release_action(test, _cs_dsp_stop_wrapper, dsp); + KUNIT_ASSERT_FALSE(test, dsp->running); + + get_random_bytes(reg_vals, def.length_bytes); + KUNIT_EXPECT_LT(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, 1, reg_vals, + def.length_bytes), + 0); + } + + /* Check that it didn't write any registers */ + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * Read from a write-only control. This is legal because controls can + * always be read. Write-only only indicates that it is not useful to + * populate the cache from the DSP memory.
+ */ +static void cs_dsp_ctl_read_from_writeonly(struct kunit *test) +{ + const struct cs_dsp_ctl_rw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *ctl_vals, *readback; + + /* Sanity check parameters */ + KUNIT_ASSERT_TRUE(test, param->flags & WMFW_CTL_FLAG_WRITEABLE); + KUNIT_ASSERT_FALSE(test, param->flags & WMFW_CTL_FLAG_READABLE); + + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + ctl_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctl_vals); + + readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + /* Create control pointing to this data */ + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_rw_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + + /* Start the firmware and add an action to stop it during cleanup */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0); + + /* Write some test data to the control */ + get_random_bytes(ctl_vals, def.length_bytes); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, ctl_vals, def.length_bytes), + 1); + + /* Read back the data */ + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, def.length_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, def.length_bytes); + + if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) { + /* Stop firmware and repeat the read from the cache */ + kunit_release_action(test, _cs_dsp_stop_wrapper, dsp); + KUNIT_ASSERT_FALSE(test, dsp->running); + + memset(readback, 0, def.length_bytes); + KUNIT_EXPECT_EQ(test, + cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, + def.length_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, def.length_bytes); + } +} + +/* + * Write to a read-only control. + * This should return an error without writing registers. 
+ */ +static void cs_dsp_ctl_write_to_readonly(struct kunit *test) +{ + const struct cs_dsp_ctl_rw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct cs_dsp *dsp = priv->dsp; + struct cs_dsp_mock_coeff_def def = mock_coeff_template; + int alg_idx = _find_alg_entry(test, param->alg_id); + unsigned int reg, alg_base_words; + struct cs_dsp_coeff_ctl *ctl; + struct firmware *wmfw; + u32 *reg_vals; + + /* Sanity check parameters */ + KUNIT_ASSERT_FALSE(test, param->flags & WMFW_CTL_FLAG_WRITEABLE); + KUNIT_ASSERT_TRUE(test, param->flags & WMFW_CTL_FLAG_READABLE); + + def.flags = param->flags; + def.mem_type = param->mem_type; + def.offset_dsp_words = param->offs_words; + def.length_bytes = param->len_bytes; + + reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals); + + /* Create some initial register content */ + alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type); + reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg += (alg_base_words + param->offs_words) * + cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv); + regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes); + + /* Create control pointing to this data */ + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_ctl_rw_test_algs[alg_idx].id, + "dummyalg", NULL); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0); + + ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + + /* Start the firmware and add an action to stop it during cleanup */ + KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0); + KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0); + + /* Drop expected writes and the regmap cache should be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes); + + get_random_bytes(reg_vals, def.length_bytes); + KUNIT_EXPECT_LT(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, def.length_bytes), + 0); + + if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) { + /* Stop firmware and repeat the write to the cache */ + kunit_release_action(test, _cs_dsp_stop_wrapper, dsp); + KUNIT_ASSERT_FALSE(test, dsp->running); + + get_random_bytes(reg_vals, def.length_bytes); + KUNIT_EXPECT_LT(test, + cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, + def.length_bytes), + 0); + } + + /* Check that it didn't write any registers */ + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +static int cs_dsp_ctl_rw_test_common_init(struct kunit *test, struct cs_dsp *dsp, + int wmfw_version) +{ + struct cs_dsp_test *priv; + struct cs_dsp_test_local *local; + struct device *test_dev; + int ret; + + priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL); + if (!local) + return -ENOMEM; + + priv->test = test; + priv->dsp = dsp; + test->priv = priv; + priv->local = local; + priv->local->wmfw_version = wmfw_version; + + /* Create dummy struct device */ + test_dev = kunit_device_register(test, "cs_dsp_test_drv"); + if (IS_ERR(test_dev)) + return PTR_ERR(test_dev); + + 
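+	/* Take an extra reference on the dummy device; the put_device action registered below releases it when the test ends */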
dsp->dev = get_device(test_dev); + if (!dsp->dev) + return -ENODEV; + + ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev); + if (ret) + return ret; + + dev_set_drvdata(dsp->dev, priv); + + /* Allocate regmap */ + ret = cs_dsp_mock_regmap_init(priv); + if (ret) + return ret; + + /* + * There must always be a XM header with at least 1 algorithm, so create + * a dummy one that tests can use and extract it to a data blob. + */ + local->xm_header = cs_dsp_create_mock_xm_header(priv, + cs_dsp_ctl_rw_test_algs, + ARRAY_SIZE(cs_dsp_ctl_rw_test_algs)); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header); + + /* Create wmfw builder */ + local->wmfw_builder = _create_dummy_wmfw(test); + + /* Init cs_dsp */ + dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops); + + switch (dsp->type) { + case WMFW_ADSP2: + ret = cs_dsp_adsp2_init(dsp); + break; + case WMFW_HALO: + ret = cs_dsp_halo_init(dsp); + break; + default: + KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type); + return -EINVAL; + } + + if (ret) + return ret; + + /* Automatically call cs_dsp_remove() when test case ends */ + return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp); +} + +static int cs_dsp_ctl_rw_test_halo_init(struct kunit *test) +{ + struct cs_dsp *dsp; + + /* Fill in cs_dsp and initialize */ + dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL); + if (!dsp) + return -ENOMEM; + + dsp->num = 1; + dsp->type = WMFW_HALO; + dsp->mem = cs_dsp_mock_halo_dsp1_regions; + dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes); + dsp->base = cs_dsp_mock_halo_core_base; + dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base; + + return cs_dsp_ctl_rw_test_common_init(test, dsp, 3); +} + +static int cs_dsp_ctl_rw_test_adsp2_32bit_init(struct kunit *test, int wmfw_ver) +{ + struct cs_dsp *dsp; + + /* Fill in cs_dsp and initialize */ + dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL); + if (!dsp) + return -ENOMEM; + + dsp->num = 1; + dsp->type = WMFW_ADSP2; + dsp->rev = 1; + dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions; + dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes); + dsp->base = cs_dsp_mock_adsp2_32bit_sysbase; + + return cs_dsp_ctl_rw_test_common_init(test, dsp, wmfw_ver); +} + +static int cs_dsp_ctl_rw_test_adsp2_32bit_wmfw1_init(struct kunit *test) +{ + return cs_dsp_ctl_rw_test_adsp2_32bit_init(test, 1); +} + +static int cs_dsp_ctl_rw_test_adsp2_32bit_wmfw2_init(struct kunit *test) +{ + return cs_dsp_ctl_rw_test_adsp2_32bit_init(test, 2); +} + +static int cs_dsp_ctl_rw_test_adsp2_16bit_init(struct kunit *test, int wmfw_ver) +{ + struct cs_dsp *dsp; + + /* Fill in cs_dsp and initialize */ + dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL); + if (!dsp) + return -ENOMEM; + + dsp->num = 1; + dsp->type = WMFW_ADSP2; + dsp->rev = 0; + dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions; + dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes); + dsp->base = cs_dsp_mock_adsp2_16bit_sysbase; + + return cs_dsp_ctl_rw_test_common_init(test, dsp, wmfw_ver); +} + +static int cs_dsp_ctl_rw_test_adsp2_16bit_wmfw1_init(struct kunit *test) +{ + return cs_dsp_ctl_rw_test_adsp2_16bit_init(test, 1); +} + +static int cs_dsp_ctl_rw_test_adsp2_16bit_wmfw2_init(struct kunit *test) +{ + return cs_dsp_ctl_rw_test_adsp2_16bit_init(test, 2); +} + +static void cs_dsp_ctl_all_param_desc(const struct cs_dsp_ctl_rw_test_param *param, + char *desc) +{ + 
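+	/* Format the parameters (algorithm, memory region, offset, length, flags) into a readable KUnit case description */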
snprintf(desc, KUNIT_PARAM_DESC_SIZE, "alg:%#x %s@%u len:%u flags:%#x", + param->alg_id, cs_dsp_mem_region_name(param->mem_type), + param->offs_words, param->len_bytes, param->flags); +} + +/* All parameters populated, with various lengths */ +static const struct cs_dsp_ctl_rw_test_param all_pop_varying_len_cases[] = { + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 8 }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 12 }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 16 }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 48 }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 100 }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 512 }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 1000 }, +}; +KUNIT_ARRAY_PARAM(all_pop_varying_len, all_pop_varying_len_cases, + cs_dsp_ctl_all_param_desc); + +/* All parameters populated, with various offsets */ +static const struct cs_dsp_ctl_rw_test_param all_pop_varying_offset_cases[] = { + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 0, .len_bytes = 4 }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 2, .len_bytes = 4 }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 3, .len_bytes = 4 }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 8, .len_bytes = 4 }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 10, .len_bytes = 4 }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 128, .len_bytes = 4 }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 180, .len_bytes = 4 }, +}; +KUNIT_ARRAY_PARAM(all_pop_varying_offset, all_pop_varying_offset_cases, + cs_dsp_ctl_all_param_desc); + +/* All parameters populated, with various X and Y memory regions */ +static const struct cs_dsp_ctl_rw_test_param all_pop_varying_xy_cases[] = { + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_XM, .offs_words = 1, .len_bytes = 4 }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 }, +}; +KUNIT_ARRAY_PARAM(all_pop_varying_xy, all_pop_varying_xy_cases, + cs_dsp_ctl_all_param_desc); + +/* All parameters populated, using ZM */ +static const struct cs_dsp_ctl_rw_test_param all_pop_z_cases[] = { + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_ZM, .offs_words = 1, .len_bytes = 4 }, +}; +KUNIT_ARRAY_PARAM(all_pop_z, all_pop_z_cases, cs_dsp_ctl_all_param_desc); + +/* All parameters populated, with various algorithm ids */ +static const struct cs_dsp_ctl_rw_test_param all_pop_varying_alg_cases[] = { + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 }, + { .alg_id = 0xb, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 }, + { .alg_id = 0x9f1234, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 }, + { .alg_id = 0xff00ff, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 }, +}; +KUNIT_ARRAY_PARAM(all_pop_varying_alg, all_pop_varying_alg_cases, + cs_dsp_ctl_all_param_desc); + +/* + * All parameters populated, with all combinations of flags for a + * readable control. 
+ */ +static const struct cs_dsp_ctl_rw_test_param all_pop_readable_flags_cases[] = { + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = 0 + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_READABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | + WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE, + }, +}; +KUNIT_ARRAY_PARAM(all_pop_readable_flags, + all_pop_readable_flags_cases, + cs_dsp_ctl_all_param_desc); + +/* + * All parameters populated, with all combinations of flags for a + * read-only control + */ +static const struct cs_dsp_ctl_rw_test_param all_pop_readonly_flags_cases[] = { + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_READABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE, + }, +}; +KUNIT_ARRAY_PARAM(all_pop_readonly_flags, + all_pop_readonly_flags_cases, + cs_dsp_ctl_all_param_desc); + +/* + * All parameters populated, with all combinations of flags for a + * non-volatile readable control + */ +static const struct cs_dsp_ctl_rw_test_param all_pop_nonvol_readable_flags_cases[] = { + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = 0 + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_READABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE, + }, +}; +KUNIT_ARRAY_PARAM(all_pop_nonvol_readable_flags, + all_pop_nonvol_readable_flags_cases, + cs_dsp_ctl_all_param_desc); + +/* + * All parameters populated, with all combinations of flags for a + * writeable control + */ +static const struct 
cs_dsp_ctl_rw_test_param all_pop_writeable_flags_cases[] = { + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = 0 + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_WRITEABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | + WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE, + }, +}; +KUNIT_ARRAY_PARAM(all_pop_writeable_flags, + all_pop_writeable_flags_cases, + cs_dsp_ctl_all_param_desc); + +/* + * All parameters populated, with all combinations of flags for a + * write-only control + */ +static const struct cs_dsp_ctl_rw_test_param all_pop_writeonly_flags_cases[] = { + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_WRITEABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE, + }, +}; +KUNIT_ARRAY_PARAM(all_pop_writeonly_flags, + all_pop_writeonly_flags_cases, + cs_dsp_ctl_all_param_desc); + +/* + * All parameters populated, with all combinations of flags for a + * non-volatile writeable control + */ +static const struct cs_dsp_ctl_rw_test_param all_pop_nonvol_writeable_flags_cases[] = { + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = 0 + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_WRITEABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE, + }, +}; +KUNIT_ARRAY_PARAM(all_pop_nonvol_writeable_flags, + all_pop_nonvol_writeable_flags_cases, + cs_dsp_ctl_all_param_desc); + +/* + * All parameters populated, with all combinations of flags for a + * volatile readable control. 
+ */ +static const struct cs_dsp_ctl_rw_test_param all_pop_volatile_readable_flags_cases[] = { + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = 0 /* flags == 0 is volatile while firmware is running */ + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | + WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE, + }, +}; +KUNIT_ARRAY_PARAM(all_pop_volatile_readable_flags, + all_pop_volatile_readable_flags_cases, + cs_dsp_ctl_all_param_desc); + +/* + * All parameters populated, with all combinations of flags for a + * volatile writeable control. + */ +static const struct cs_dsp_ctl_rw_test_param all_pop_volatile_writeable_flags_cases[] = { + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = 0 /* flags == 0 is volatile while firmware is running */ + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE, + }, + { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4, + .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | + WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE, + }, +}; +KUNIT_ARRAY_PARAM(all_pop_volatile_writeable_flags, + all_pop_volatile_writeable_flags_cases, + cs_dsp_ctl_all_param_desc); + +static struct kunit_case cs_dsp_ctl_rw_test_cases_adsp[] = { + KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_len_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_offset_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_xy_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_z_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_alg_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_writeable_flags_gen_params), + + KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_len_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_offset_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_xy_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_z_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, + all_pop_volatile_readable_flags_gen_params), + + KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_started, + all_pop_volatile_readable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_stopped, + all_pop_volatile_readable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_stopped_powered_down, + all_pop_volatile_readable_flags_gen_params), +
KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_current_loaded_fw, + all_pop_volatile_readable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_current_running_fw, + all_pop_volatile_readable_flags_gen_params), + + KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_started, + all_pop_volatile_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_stopped, + all_pop_volatile_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_stopped_powered_down, + all_pop_volatile_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_current_loaded_fw, + all_pop_volatile_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_current_running_fw, + all_pop_volatile_writeable_flags_gen_params), + + KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek, + all_pop_readable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_read_cache_with_seek, + all_pop_nonvol_readable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_read_truncated, + all_pop_readable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_read_cache_truncated, + all_pop_nonvol_readable_flags_gen_params), + + KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek, + all_pop_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_write_cache_with_seek, + all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_write_truncated, + all_pop_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_write_cache_truncated, + all_pop_nonvol_writeable_flags_gen_params), + + KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek_oob, + all_pop_varying_len_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_length_overflow, + all_pop_varying_len_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek_and_length_oob, + all_pop_varying_len_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek_oob, + all_pop_varying_len_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_length_overflow, + all_pop_varying_len_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek_and_length_oob, + all_pop_varying_len_gen_params), + + KUNIT_CASE_PARAM(cs_dsp_ctl_read_from_writeonly, + all_pop_writeonly_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_write_to_readonly, + all_pop_readonly_flags_gen_params), + + { } /* terminator */ +}; + +static struct kunit_case cs_dsp_ctl_rw_test_cases_halo[] = { + KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_len_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_offset_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_xy_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_alg_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_writeable_flags_gen_params), + + KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_len_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_offset_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_xy_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, + all_pop_volatile_readable_flags_gen_params), + + KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_started, + all_pop_volatile_readable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_stopped, + all_pop_volatile_readable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_stopped_powered_down, + all_pop_volatile_readable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_current_loaded_fw, + all_pop_volatile_readable_flags_gen_params), + 
KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_current_running_fw, + all_pop_volatile_readable_flags_gen_params), + + KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_started, + all_pop_volatile_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_stopped, + all_pop_volatile_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_stopped_powered_down, + all_pop_volatile_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_current_loaded_fw, + all_pop_volatile_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_current_running_fw, + all_pop_volatile_writeable_flags_gen_params), + + KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek, + all_pop_readable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_read_cache_with_seek, + all_pop_nonvol_readable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_read_truncated, + all_pop_readable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_read_cache_truncated, + all_pop_nonvol_readable_flags_gen_params), + + KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek, + all_pop_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_write_cache_with_seek, + all_pop_nonvol_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_write_truncated, + all_pop_writeable_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_write_cache_truncated, + all_pop_nonvol_writeable_flags_gen_params), + + KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek_oob, + all_pop_varying_len_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_length_overflow, + all_pop_varying_len_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek_and_length_oob, + all_pop_varying_len_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek_oob, + all_pop_varying_len_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_length_overflow, + all_pop_varying_len_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek_and_length_oob, + all_pop_varying_len_gen_params), + + KUNIT_CASE_PARAM(cs_dsp_ctl_read_from_writeonly, + all_pop_writeonly_flags_gen_params), + KUNIT_CASE_PARAM(cs_dsp_ctl_write_to_readonly, + all_pop_readonly_flags_gen_params), + + { } /* terminator */ +}; + +static struct kunit_suite cs_dsp_ctl_rw_test_halo = { + .name = "cs_dsp_ctl_rw_wmfwV3_halo", + .init = cs_dsp_ctl_rw_test_halo_init, + .test_cases = cs_dsp_ctl_rw_test_cases_halo, +}; + +static struct kunit_suite cs_dsp_ctl_rw_test_adsp2_32bit_wmfw1 = { + .name = "cs_dsp_ctl_rw_wmfwV1_adsp2_32bit", + .init = cs_dsp_ctl_rw_test_adsp2_32bit_wmfw1_init, + .test_cases = cs_dsp_ctl_rw_test_cases_adsp, +}; + +static struct kunit_suite cs_dsp_ctl_rw_test_adsp2_32bit_wmfw2 = { + .name = "cs_dsp_ctl_rw_wmfwV2_adsp2_32bit", + .init = cs_dsp_ctl_rw_test_adsp2_32bit_wmfw2_init, + .test_cases = cs_dsp_ctl_rw_test_cases_adsp, +}; + +static struct kunit_suite cs_dsp_ctl_rw_test_adsp2_16bit_wmfw1 = { + .name = "cs_dsp_ctl_rw_wmfwV1_adsp2_16bit", + .init = cs_dsp_ctl_rw_test_adsp2_16bit_wmfw1_init, + .test_cases = cs_dsp_ctl_rw_test_cases_adsp, +}; + +static struct kunit_suite cs_dsp_ctl_rw_test_adsp2_16bit_wmfw2 = { + .name = "cs_dsp_ctl_rw_wmfwV2_adsp2_16bit", + .init = cs_dsp_ctl_rw_test_adsp2_16bit_wmfw2_init, + .test_cases = cs_dsp_ctl_rw_test_cases_adsp, +}; + +kunit_test_suites(&cs_dsp_ctl_rw_test_halo, + &cs_dsp_ctl_rw_test_adsp2_32bit_wmfw1, + &cs_dsp_ctl_rw_test_adsp2_32bit_wmfw2, + &cs_dsp_ctl_rw_test_adsp2_16bit_wmfw1, + &cs_dsp_ctl_rw_test_adsp2_16bit_wmfw2); diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_wmfw.c 
b/drivers/firmware/cirrus/test/cs_dsp_test_wmfw.c new file mode 100644 index 000000000000..9e997c4ee2d6 --- /dev/null +++ b/drivers/firmware/cirrus/test/cs_dsp_test_wmfw.c @@ -0,0 +1,2211 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// KUnit tests for cs_dsp. +// +// Copyright (C) 2024 Cirrus Logic, Inc. and +// Cirrus Logic International Semiconductor Ltd. +// + +#include <kunit/device.h> +#include <kunit/resource.h> +#include <kunit/test.h> +#include <linux/build_bug.h> +#include <linux/firmware/cirrus/cs_dsp.h> +#include <linux/firmware/cirrus/cs_dsp_test_utils.h> +#include <linux/firmware/cirrus/wmfw.h> +#include <linux/random.h> +#include <linux/regmap.h> +#include <linux/string.h> +#include <linux/vmalloc.h> + +/* + * Test method is: + * + * 1) Create a mock regmap in cache-only mode so that all writes will be cached. + * 2) Create dummy wmfw file. + * 3) Call cs_dsp_power_up() with the wmfw file. + * 4) Read back the cached value of registers that should have been written and + * check they have the correct value. + * 5) All the registers that are expected to have been written are dropped from + * the cache. This should leave the cache clean. + * 6) If the cache is still dirty there have been unexpected writes. + */ + +KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *) +KUNIT_DEFINE_ACTION_WRAPPER(_vfree_wrapper, vfree, void *) +KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *) + +struct cs_dsp_test_local { + struct cs_dsp_mock_xm_header *xm_header; + struct cs_dsp_mock_wmfw_builder *wmfw_builder; + int wmfw_version; +}; + +struct cs_dsp_wmfw_test_param { + unsigned int num_blocks; + int mem_type; +}; + +static const struct cs_dsp_mock_alg_def cs_dsp_wmfw_test_mock_algs[] = { + { + .id = 0xfafa, + .ver = 0x100000, + .xm_size_words = 164, + .ym_size_words = 164, + .zm_size_words = 164, + }, +}; + +/* + * wmfw that writes the XM header. + * cs_dsp always reads this back from unpacked XM.
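+ * The header blob is generated by the test case init function and the readback is checked against it.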
+ */ +static void wmfw_write_xm_header_unpacked(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + unsigned int reg_addr; + u8 *readback; + + /* XM header payload was added to wmfw by test case init function */ + + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + 0); + + /* Read raw so endianness and register width don't matter */ + readback = kunit_kzalloc(test, local->xm_header->blob_size_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, + local->xm_header->blob_size_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, local->xm_header->blob_data, + local->xm_header->blob_size_bytes); + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* Write one payload of length param->num_blocks */ +static void wmfw_write_one_payload(struct kunit *test) +{ + const struct cs_dsp_wmfw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *wmfw; + unsigned int reg_addr; + u8 *payload_data, *readback; + unsigned int mem_offset_dsp_words = 0; + unsigned int payload_size_bytes; + + payload_size_bytes = param->num_blocks * + cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type); + + /* payloads must be a multiple of 4 bytes and a whole number of DSP registers */ + do { + payload_size_bytes += cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type); + } while (payload_size_bytes % 4); + + payload_data = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data); + get_random_bytes(payload_data, payload_size_bytes); + + readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + /* Tests on XM must be after the XM header */ + if (param->mem_type == WMFW_ADSP2_XM) + mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32); + + /* Add a single payload */ + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + param->mem_type, mem_offset_dsp_words, + payload_data, payload_size_bytes); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + 0); + + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg_addr += cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv) * mem_offset_dsp_words; + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes); + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes); + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* Write several smallest possible payloads for the given memory type */ +static void wmfw_write_multiple_oneblock_payloads(struct kunit *test) +{ + const struct cs_dsp_wmfw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct 
cs_dsp_test_local *local = priv->local; + struct firmware *wmfw; + unsigned int reg_addr; + u8 *payload_data, *readback; + unsigned int mem_offset_dsp_words = 0; + unsigned int payload_size_bytes, payload_size_dsp_words; + const unsigned int num_payloads = param->num_blocks; + int i; + + /* payloads must be a multiple of 4 bytes and a whole number of DSP registers */ + payload_size_dsp_words = 0; + payload_size_bytes = 0; + do { + payload_size_dsp_words += cs_dsp_mock_reg_block_length_dsp_words(priv, + param->mem_type); + payload_size_bytes += cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type); + } while (payload_size_bytes % 4); + + payload_data = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data); + + readback = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + get_random_bytes(payload_data, num_payloads * payload_size_bytes); + + /* Tests on XM must be after the XM header */ + if (param->mem_type == WMFW_ADSP2_XM) + mem_offset_dsp_words += local->xm_header->blob_size_bytes / payload_size_bytes; + + /* Add multiple payloads of one block each */ + for (i = 0; i < num_payloads; ++i) { + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + param->mem_type, + mem_offset_dsp_words + (i * payload_size_dsp_words), + &payload_data[i * payload_size_bytes], + payload_size_bytes); + } + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + 0); + + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg_addr += cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv) * mem_offset_dsp_words; + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, + num_payloads * payload_size_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, payload_data, num_payloads * payload_size_bytes); + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, num_payloads * payload_size_bytes); + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * Write several smallest possible payloads of the given memory type + * in reverse address order + */ +static void wmfw_write_multiple_oneblock_payloads_reverse(struct kunit *test) +{ + const struct cs_dsp_wmfw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *wmfw; + unsigned int reg_addr; + u8 *payload_data, *readback; + unsigned int mem_offset_dsp_words = 0; + unsigned int payload_size_bytes, payload_size_dsp_words; + const unsigned int num_payloads = param->num_blocks; + int i; + + /* payloads must be a multiple of 4 bytes and a whole number of DSP registers */ + payload_size_dsp_words = 0; + payload_size_bytes = 0; + do { + payload_size_dsp_words += cs_dsp_mock_reg_block_length_dsp_words(priv, + param->mem_type); + payload_size_bytes += cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type); + } while (payload_size_bytes % 4); + + payload_data = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data); + + readback = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + get_random_bytes(payload_data, num_payloads * payload_size_bytes); + + /* 
Tests on XM must be after the XM header */ + if (param->mem_type == WMFW_ADSP2_XM) + mem_offset_dsp_words += local->xm_header->blob_size_bytes / payload_size_bytes; + + /* Add multiple payloads of one block each */ + for (i = num_payloads - 1; i >= 0; --i) { + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + param->mem_type, + mem_offset_dsp_words + (i * payload_size_dsp_words), + &payload_data[i * payload_size_bytes], + payload_size_bytes); + } + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + 0); + + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg_addr += cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv) * mem_offset_dsp_words; + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, + num_payloads * payload_size_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, payload_data, num_payloads * payload_size_bytes); + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, num_payloads * payload_size_bytes); + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * Write multiple payloads of length param->num_blocks. + * The payloads are not in address order and collectively do not patch + * a contiguous block of memory. + */ +static void wmfw_write_multiple_payloads_sparse_unordered(struct kunit *test) +{ + static const unsigned int random_offsets[] = { + 11, 69, 59, 61, 32, 75, 4, 38, 70, 13, 79, 47, 46, 53, 18, 44, + 54, 35, 51, 21, 26, 45, 27, 41, 66, 2, 17, 56, 40, 9, 8, 20, + 29, 19, 63, 42, 12, 16, 43, 3, 5, 55, 52, 22 + }; + const struct cs_dsp_wmfw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *wmfw; + unsigned int reg_addr; + u8 *payload_data, *readback; + unsigned int mem_offset_dsp_words = 0; + unsigned int payload_size_bytes, payload_size_dsp_words; + const int num_payloads = ARRAY_SIZE(random_offsets); + int i; + + payload_size_bytes = param->num_blocks * + cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type); + payload_size_dsp_words = param->num_blocks * + cs_dsp_mock_reg_block_length_dsp_words(priv, param->mem_type); + + /* payloads must be a multiple of 4 bytes and a whole number of DSP registers */ + do { + payload_size_dsp_words += cs_dsp_mock_reg_block_length_dsp_words(priv, + param->mem_type); + payload_size_bytes += cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type); + } while (payload_size_bytes % 4); + + payload_data = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data); + get_random_bytes(payload_data, payload_size_bytes); + + readback = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + /* Tests on XM must be after the XM header */ + if (param->mem_type == WMFW_ADSP2_XM) + mem_offset_dsp_words += local->xm_header->blob_size_bytes / payload_size_bytes; + + /* Add multiple payloads of one block each at "random" locations */ + for (i = 0; i < num_payloads; ++i) { + unsigned int offset = random_offsets[i] * payload_size_dsp_words; + + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + param->mem_type, + mem_offset_dsp_words + offset, + &payload_data[i * payload_size_bytes], + payload_size_bytes); + } + + wmfw = 
cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + 0); + + for (i = 0; i < num_payloads; ++i) { + unsigned int offset_num_regs = (random_offsets[i] * payload_size_bytes) / + regmap_get_val_bytes(priv->dsp->regmap); + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type); + reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap); + reg_addr += cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv) * mem_offset_dsp_words; + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, + &readback[i * payload_size_bytes], + payload_size_bytes), + 0); + + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes); + } + + KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes); + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* Write the whole of PM in a single unpacked payload */ +static void wmfw_write_all_unpacked_pm(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *wmfw; + unsigned int reg_addr; + u8 *payload_data, *readback; + unsigned int payload_size_bytes; + + payload_size_bytes = cs_dsp_mock_size_of_region(priv->dsp, WMFW_ADSP2_PM); + payload_data = vmalloc(payload_size_bytes); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data); + kunit_add_action_or_reset(priv->test, _vfree_wrapper, payload_data); + + readback = vmalloc(payload_size_bytes); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + kunit_add_action_or_reset(priv->test, _vfree_wrapper, readback); + memset(readback, 0, payload_size_bytes); + + /* Add a single PM payload */ + get_random_bytes(payload_data, payload_size_bytes); + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + WMFW_ADSP2_PM, 0, + payload_data, payload_size_bytes); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + 0); + + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_PM); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes); + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes); + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* Write the whole of PM in a single packed payload */ +static void wmfw_write_all_packed_pm(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *wmfw; + unsigned int reg_addr; + u8 *payload_data, *readback; + unsigned int payload_size_bytes; + + payload_size_bytes = cs_dsp_mock_size_of_region(priv->dsp, WMFW_HALO_PM_PACKED); + payload_data = vmalloc(payload_size_bytes); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data); + kunit_add_action_or_reset(priv->test, _vfree_wrapper, payload_data); + + readback = vmalloc(payload_size_bytes); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + kunit_add_action_or_reset(priv->test, _vfree_wrapper, readback); + memset(readback, 0, payload_size_bytes); + + /* Add a single PM payload */ + get_random_bytes(payload_data, payload_size_bytes); + 
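/* The single payload spans the whole WMFW_HALO_PM_PACKED region */ +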
cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + WMFW_HALO_PM_PACKED, 0, + payload_data, payload_size_bytes); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + 0); + + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_HALO_PM_PACKED); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes); + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes); + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * Write a series of payloads to various unpacked memory regions. + * The payloads are of various lengths and offsets, driven by the + * payload_defs table. The offset and length are both given as a + * number of minimum-sized register blocks to keep the maths simpler. + * (Where a minimum-sized register block is the smallest number of + * registers that contain a whole number of DSP words.) + */ +static void wmfw_write_multiple_unpacked_mem(struct kunit *test) +{ + static const struct { + int mem_type; + unsigned int offset_num_blocks; + unsigned int num_blocks; + } payload_defs[] = { + { WMFW_ADSP2_PM, 11, 60 }, + { WMFW_ADSP2_ZM, 69, 8 }, + { WMFW_ADSP2_YM, 32, 74 }, + { WMFW_ADSP2_XM, 70, 38 }, + { WMFW_ADSP2_PM, 84, 48 }, + { WMFW_ADSP2_XM, 46, 18 }, + { WMFW_ADSP2_PM, 0, 8 }, + { WMFW_ADSP2_YM, 0, 30 }, + { WMFW_ADSP2_PM, 160, 50 }, + { WMFW_ADSP2_ZM, 21, 26 }, + }; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *wmfw; + unsigned int payload_size_bytes, offset_num_dsp_words; + unsigned int reg_addr, offset_bytes, offset_num_regs; + void **payload_data; + void *readback; + int i, ret; + + payload_data = kunit_kcalloc(test, ARRAY_SIZE(payload_defs), sizeof(*payload_data), + GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data); + + for (i = 0; i < ARRAY_SIZE(payload_defs); ++i) { + payload_size_bytes = payload_defs[i].num_blocks * + cs_dsp_mock_reg_block_length_bytes(priv, + payload_defs[i].mem_type); + + payload_data[i] = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data[i]); + get_random_bytes(payload_data[i], payload_size_bytes); + + offset_num_dsp_words = payload_defs[i].offset_num_blocks * + cs_dsp_mock_reg_block_length_dsp_words(priv, + payload_defs[i].mem_type); + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + payload_defs[i].mem_type, + offset_num_dsp_words, + payload_data[i], + payload_size_bytes); + } + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + 0); + + for (i = 0; i < ARRAY_SIZE(payload_defs); ++i) { + payload_size_bytes = payload_defs[i].num_blocks * + cs_dsp_mock_reg_block_length_bytes(priv, + payload_defs[i].mem_type); + + readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + offset_bytes = payload_defs[i].offset_num_blocks * + cs_dsp_mock_reg_block_length_bytes(priv, payload_defs[i].mem_type); + offset_num_regs = offset_bytes / regmap_get_val_bytes(priv->dsp->regmap); + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, payload_defs[i].mem_type); + reg_addr += 
offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap); + ret = regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes); + KUNIT_EXPECT_EQ_MSG(test, ret, 0, "%s @%u num:%u\n", + cs_dsp_mem_region_name(payload_defs[i].mem_type), + payload_defs[i].offset_num_blocks, payload_defs[i].num_blocks); + KUNIT_EXPECT_MEMEQ_MSG(test, readback, payload_data[i], payload_size_bytes, + "%s @%u num:%u\n", + cs_dsp_mem_region_name(payload_defs[i].mem_type), + payload_defs[i].offset_num_blocks, + payload_defs[i].num_blocks); + + kunit_kfree(test, readback); + + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes); + } + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * Write a series of payloads to various packed and unpacked memory regions. + * The payloads are of various lengths and offsets, driven by the + * payload_defs table. The offset and length are both given as a + * number of minimum-sized register blocks to keep the maths simpler. + * (Where a minimum-sized register block is the smallest number of + * registers that contain a whole number of DSP words.) + */ +static void wmfw_write_multiple_packed_unpacked_mem(struct kunit *test) +{ + static const struct { + int mem_type; + unsigned int offset_num_blocks; + unsigned int num_blocks; + } payload_defs[] = { + { WMFW_HALO_PM_PACKED, 11, 60 }, + { WMFW_ADSP2_YM, 69, 8 }, + { WMFW_HALO_YM_PACKED, 32, 74 }, + { WMFW_HALO_XM_PACKED, 70, 38 }, + { WMFW_HALO_PM_PACKED, 84, 48 }, + { WMFW_HALO_XM_PACKED, 46, 18 }, + { WMFW_HALO_PM_PACKED, 0, 8 }, + { WMFW_HALO_YM_PACKED, 0, 30 }, + { WMFW_HALO_PM_PACKED, 160, 50 }, + { WMFW_ADSP2_XM, 21, 26 }, + }; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *wmfw; + unsigned int payload_size_bytes, offset_num_dsp_words; + unsigned int reg_addr, offset_bytes, offset_num_regs; + void **payload_data; + void *readback; + int i, ret; + + payload_data = kunit_kcalloc(test, ARRAY_SIZE(payload_defs), sizeof(*payload_data), + GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data); + + for (i = 0; i < ARRAY_SIZE(payload_defs); ++i) { + payload_size_bytes = payload_defs[i].num_blocks * + cs_dsp_mock_reg_block_length_bytes(priv, + payload_defs[i].mem_type); + + payload_data[i] = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data[i]); + get_random_bytes(payload_data[i], payload_size_bytes); + + offset_num_dsp_words = payload_defs[i].offset_num_blocks * + cs_dsp_mock_reg_block_length_dsp_words(priv, + payload_defs[i].mem_type); + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + payload_defs[i].mem_type, + offset_num_dsp_words, + payload_data[i], + payload_size_bytes); + } + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + 0); + + for (i = 0; i < ARRAY_SIZE(payload_defs); ++i) { + payload_size_bytes = payload_defs[i].num_blocks * + cs_dsp_mock_reg_block_length_bytes(priv, + payload_defs[i].mem_type); + + readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + offset_bytes = payload_defs[i].offset_num_blocks * + cs_dsp_mock_reg_block_length_bytes(priv, payload_defs[i].mem_type); + offset_num_regs = offset_bytes / regmap_get_val_bytes(priv->dsp->regmap); + 
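/* Convert the register offset into an absolute address in this memory region */ +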
reg_addr = cs_dsp_mock_base_addr_for_mem(priv, payload_defs[i].mem_type); + reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap); + ret = regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes); + KUNIT_EXPECT_EQ_MSG(test, ret, 0, "%s @%u num:%u\n", + cs_dsp_mem_region_name(payload_defs[i].mem_type), + payload_defs[i].offset_num_blocks, + payload_defs[i].num_blocks); + KUNIT_EXPECT_MEMEQ_MSG(test, readback, payload_data[i], payload_size_bytes, + "%s @%u num:%u\n", + cs_dsp_mem_region_name(payload_defs[i].mem_type), + payload_defs[i].offset_num_blocks, + payload_defs[i].num_blocks); + + kunit_kfree(test, readback); + + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes); + } + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * Write XM/YM data that is one word longer than a packed block multiple, + * using one packed payload followed by one unpacked word. + */ +static void wmfw_write_packed_1_unpacked_trailing(struct kunit *test) +{ + const struct cs_dsp_wmfw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + int packed_mem_type = param->mem_type; + int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type); + unsigned int dsp_words_per_packed_block = + cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type); + unsigned int dsp_words_per_unpacked_block = + cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type); + unsigned int mem_offset_dsp_words = 0; + struct firmware *wmfw; + unsigned int reg_addr; + void *packed_payload_data, *readback; + u32 unpacked_payload_data[1]; + unsigned int packed_payload_size_bytes, packed_payload_size_dsp_words; + unsigned int offset_num_regs; + + packed_payload_size_bytes = param->num_blocks * + cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type); + packed_payload_size_dsp_words = param->num_blocks * dsp_words_per_packed_block; + + packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data); + get_random_bytes(packed_payload_data, packed_payload_size_bytes); + + get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data)); + + readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL); + + /* Tests on XM must be after the XM header */ + if (unpacked_mem_type == WMFW_ADSP2_XM) { + mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32); + + /* Round up to multiple of packed block length */ + mem_offset_dsp_words = roundup(mem_offset_dsp_words, dsp_words_per_packed_block); + } + + /* Add a single packed payload */ + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + packed_mem_type, mem_offset_dsp_words, + packed_payload_data, packed_payload_size_bytes); + /* + * Add payload of one unpacked word to DSP memory right after + * the packed payload words. 
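+ * The trailing word is too short to fill a packed block, so the wmfw delivers + * it as an unpacked data block immediately after the packed data.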
+ */ + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + unpacked_mem_type, + mem_offset_dsp_words + packed_payload_size_dsp_words, + unpacked_payload_data, sizeof(unpacked_payload_data)); + + /* Download the wmfw */ + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + 0); + + /* + * Check that the packed payload was written correctly and drop + * it from the regmap cache. + */ + offset_num_regs = (mem_offset_dsp_words / dsp_words_per_packed_block) * + cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type); + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type); + reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, + packed_payload_size_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes); + + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes); + + /* + * Check that the unpacked word was written correctly and drop + * it from the regmap cache. The unpacked payload is offset within + * unpacked register space by the number of DSP words that were + * written in the packed payload. + */ + offset_num_regs = (mem_offset_dsp_words / dsp_words_per_unpacked_block) * + cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type); + offset_num_regs += (packed_payload_size_dsp_words / dsp_words_per_unpacked_block) * + cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type); + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type); + reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, + sizeof(unpacked_payload_data)), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data)); + + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data)); + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * Write XM/YM data that is two words longer than a packed block multiple, + * using one packed payload followed by one payload of two unpacked words. 
+ */ +static void wmfw_write_packed_2_unpacked_trailing(struct kunit *test) +{ + const struct cs_dsp_wmfw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + int packed_mem_type = param->mem_type; + int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type); + unsigned int dsp_words_per_packed_block = + cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type); + unsigned int dsp_words_per_unpacked_block = + cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type); + unsigned int mem_offset_dsp_words = 0; + struct firmware *wmfw; + unsigned int reg_addr; + void *packed_payload_data, *readback; + u32 unpacked_payload_data[2]; + unsigned int packed_payload_size_bytes, packed_payload_size_dsp_words; + unsigned int offset_num_regs; + + packed_payload_size_bytes = param->num_blocks * + cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type); + packed_payload_size_dsp_words = param->num_blocks * dsp_words_per_packed_block; + + packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data); + get_random_bytes(packed_payload_data, packed_payload_size_bytes); + + get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data)); + + readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL); + + /* Tests on XM must be after the XM header */ + if (unpacked_mem_type == WMFW_ADSP2_XM) { + mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32); + + /* Round up to multiple of packed block length */ + mem_offset_dsp_words = roundup(mem_offset_dsp_words, dsp_words_per_packed_block); + } + + /* Add a single packed payload */ + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + packed_mem_type, mem_offset_dsp_words, + packed_payload_data, packed_payload_size_bytes); + /* + * Add payload of two unpacked words to DSP memory right after + * the packed payload words. + */ + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + unpacked_mem_type, + mem_offset_dsp_words + packed_payload_size_dsp_words, + unpacked_payload_data, sizeof(unpacked_payload_data)); + + /* Download the wmfw */ + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + 0); + + /* + * Check that the packed payload was written correctly and drop + * it from the regmap cache. + */ + offset_num_regs = (mem_offset_dsp_words / dsp_words_per_packed_block) * + cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type); + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type); + reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, + packed_payload_size_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes); + + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes); + + /* + * Check that the unpacked words were written correctly and drop + * them from the regmap cache. The unpacked payload is offset + * within unpacked register space by the number of DSP words + * that were written in the packed payload. 
+ */ + offset_num_regs = (mem_offset_dsp_words / dsp_words_per_unpacked_block) * + cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type); + offset_num_regs += (packed_payload_size_dsp_words / dsp_words_per_unpacked_block) * + cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type); + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type); + reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, + sizeof(unpacked_payload_data)), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data)); + + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data)); + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * Write XM/YM data that is three words longer than a packed block multiple, + * using one packed payload followed by one payload of three unpacked words. + */ +static void wmfw_write_packed_3_unpacked_trailing(struct kunit *test) +{ + const struct cs_dsp_wmfw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + int packed_mem_type = param->mem_type; + int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type); + unsigned int dsp_words_per_packed_block = + cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type); + unsigned int dsp_words_per_unpacked_block = + cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type); + unsigned int mem_offset_dsp_words = 0; + struct firmware *wmfw; + unsigned int reg_addr; + void *packed_payload_data, *readback; + u32 unpacked_payload_data[3]; + unsigned int packed_payload_size_bytes, packed_payload_size_dsp_words; + unsigned int offset_num_regs; + + packed_payload_size_bytes = param->num_blocks * + cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type); + packed_payload_size_dsp_words = param->num_blocks * dsp_words_per_packed_block; + + packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data); + get_random_bytes(packed_payload_data, packed_payload_size_bytes); + + get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data)); + + readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL); + + /* Tests on XM must be after the XM header */ + if (unpacked_mem_type == WMFW_ADSP2_XM) { + mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32); + + /* Round up to multiple of packed block length */ + mem_offset_dsp_words = roundup(mem_offset_dsp_words, dsp_words_per_packed_block); + } + + /* Add a single packed payload */ + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + packed_mem_type, mem_offset_dsp_words, + packed_payload_data, packed_payload_size_bytes); + /* + * Add payload of three unpacked words to DSP memory right after + * the packed payload words. 
+ */ + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + unpacked_mem_type, + mem_offset_dsp_words + packed_payload_size_dsp_words, + unpacked_payload_data, sizeof(unpacked_payload_data)); + + /* Download the wmfw */ + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + 0); + + /* + * Check that the packed payload was written correctly and drop + * it from the regmap cache. + */ + offset_num_regs = (mem_offset_dsp_words / dsp_words_per_packed_block) * + cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type); + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type); + reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, + packed_payload_size_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes); + + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes); + + /* + * Check that the unpacked words were written correctly and drop + * them from the regmap cache. The unpacked payload is offset + * within unpacked register space by the number of DSP words + * that were written in the packed payload. + */ + offset_num_regs = (mem_offset_dsp_words / dsp_words_per_unpacked_block) * + cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type); + offset_num_regs += (packed_payload_size_dsp_words / dsp_words_per_unpacked_block) * + cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type); + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type); + reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, + sizeof(unpacked_payload_data)), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data)); + + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data)); + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * Write XM/YM data that is two words longer than a packed block multiple, + * using one packed payload followed by two payloads of one unpacked word each. 
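+ * Unlike wmfw_write_packed_2_unpacked_trailing(), each trailing word is + * delivered in its own data block.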
+ */ +static void wmfw_write_packed_2_single_unpacked_trailing(struct kunit *test) +{ + const struct cs_dsp_wmfw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + int packed_mem_type = param->mem_type; + int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type); + unsigned int dsp_words_per_packed_block = + cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type); + unsigned int dsp_words_per_unpacked_block = + cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type); + unsigned int mem_offset_dsp_words = 0; + struct firmware *wmfw; + unsigned int reg_addr; + void *packed_payload_data, *readback; + u32 unpacked_payload_data[2]; + unsigned int packed_payload_size_bytes, packed_payload_size_dsp_words; + unsigned int offset_num_regs; + + packed_payload_size_bytes = param->num_blocks * + cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type); + packed_payload_size_dsp_words = param->num_blocks * dsp_words_per_packed_block; + + packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data); + get_random_bytes(packed_payload_data, packed_payload_size_bytes); + + get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data)); + + readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL); + + /* Tests on XM must be after the XM header */ + if (unpacked_mem_type == WMFW_ADSP2_XM) { + mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32); + + /* Round up to multiple of packed block length */ + mem_offset_dsp_words = roundup(mem_offset_dsp_words, dsp_words_per_packed_block); + } + + /* Add a single packed payload */ + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + packed_mem_type, mem_offset_dsp_words, + packed_payload_data, packed_payload_size_bytes); + /* + * Add two unpacked words to DSP memory right after the packed + * payload words. Each unpacked word in its own payload. + */ + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + unpacked_mem_type, + mem_offset_dsp_words + packed_payload_size_dsp_words, + &unpacked_payload_data[0], + sizeof(unpacked_payload_data[0])); + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + unpacked_mem_type, + mem_offset_dsp_words + packed_payload_size_dsp_words + 1, + &unpacked_payload_data[1], + sizeof(unpacked_payload_data[1])); + + /* Download the wmfw */ + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + 0); + + /* + * Check that the packed payload was written correctly and drop + * it from the regmap cache. + */ + offset_num_regs = (mem_offset_dsp_words / dsp_words_per_packed_block) * + cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type); + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type); + reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, + packed_payload_size_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes); + + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes); + + /* + * Check that the unpacked words were written correctly and drop + * them from the regmap cache. The unpacked words are offset + * within unpacked register space by the number of DSP words + * that were written in the packed payload. 
+ */ + offset_num_regs = (mem_offset_dsp_words / dsp_words_per_unpacked_block) * + cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type); + offset_num_regs += (packed_payload_size_dsp_words / dsp_words_per_unpacked_block) * + cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type); + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type); + reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, + sizeof(unpacked_payload_data)), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data)); + + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data)); + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * Write XM/YM data that is three words longer than a packed block multiple, + * using one packed payload followed by three payloads of one unpacked word each. + */ +static void wmfw_write_packed_3_single_unpacked_trailing(struct kunit *test) +{ + const struct cs_dsp_wmfw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + int packed_mem_type = param->mem_type; + int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type); + unsigned int dsp_words_per_packed_block = + cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type); + unsigned int dsp_words_per_unpacked_block = + cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type); + unsigned int mem_offset_dsp_words = 0; + struct firmware *wmfw; + unsigned int reg_addr; + void *packed_payload_data, *readback; + u32 unpacked_payload_data[3]; + unsigned int packed_payload_size_bytes, packed_payload_size_dsp_words; + unsigned int offset_num_regs; + + packed_payload_size_bytes = param->num_blocks * + cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type); + packed_payload_size_dsp_words = param->num_blocks * dsp_words_per_packed_block; + + packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data); + get_random_bytes(packed_payload_data, packed_payload_size_bytes); + + get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data)); + + readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL); + + /* Tests on XM must be after the XM header */ + if (unpacked_mem_type == WMFW_ADSP2_XM) { + mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32); + + /* Round up to multiple of packed block length */ + mem_offset_dsp_words = roundup(mem_offset_dsp_words, dsp_words_per_packed_block); + } + + /* Add a single packed payload */ + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + packed_mem_type, mem_offset_dsp_words, + packed_payload_data, packed_payload_size_bytes); + /* + * Add three unpacked words to DSP memory right after the packed + * payload words. Each unpacked word in its own payload. 
+ */ + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + unpacked_mem_type, + mem_offset_dsp_words + packed_payload_size_dsp_words, + &unpacked_payload_data[0], + sizeof(unpacked_payload_data[0])); + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + unpacked_mem_type, + mem_offset_dsp_words + packed_payload_size_dsp_words + 1, + &unpacked_payload_data[1], + sizeof(unpacked_payload_data[1])); + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + unpacked_mem_type, + mem_offset_dsp_words + packed_payload_size_dsp_words + 2, + &unpacked_payload_data[2], + sizeof(unpacked_payload_data[2])); + + /* Download the wmfw */ + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + 0); + /* + * Check that the packed payload was written correctly and drop + * it from the regmap cache. + */ + offset_num_regs = (mem_offset_dsp_words / dsp_words_per_packed_block) * + cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type); + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type); + reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, + packed_payload_size_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes); + + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes); + + /* + * Check that the unpacked words were written correctly and drop + * them from the regmap cache. The unpacked words are offset + * within unpacked register space by the number of DSP words + * that were written in the packed payload. + */ + offset_num_regs = (mem_offset_dsp_words / dsp_words_per_unpacked_block) * + cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type); + offset_num_regs += (packed_payload_size_dsp_words / dsp_words_per_unpacked_block) * + cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type); + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type); + reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, + sizeof(unpacked_payload_data)), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data)); + + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data)); + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * Write XM/YM data that is one word longer than a packed block multiple, + * and does not start on a packed alignment. Use one unpacked word + * followed by a packed payload. 
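+ * The leading word sits below the packed alignment boundary, so the wmfw + * delivers it as an unpacked data block ahead of the packed payload.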
+ */ +static void wmfw_write_packed_1_unpacked_leading(struct kunit *test) +{ + const struct cs_dsp_wmfw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + int packed_mem_type = param->mem_type; + int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type); + unsigned int dsp_words_per_packed_block = + cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type); + unsigned int dsp_words_per_unpacked_block = + cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type); + unsigned int packed_payload_offset_dsp_words = 0; + struct firmware *wmfw; + unsigned int reg_addr; + void *packed_payload_data, *readback; + u32 unpacked_payload_data[1]; + unsigned int packed_payload_size_bytes; + unsigned int offset_num_regs; + + packed_payload_size_bytes = param->num_blocks * + cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type); + + packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data); + get_random_bytes(packed_payload_data, packed_payload_size_bytes); + + get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data)); + + readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL); + + /* Tests on XM must be after the XM header */ + if (unpacked_mem_type == WMFW_ADSP2_XM) + packed_payload_offset_dsp_words += local->xm_header->blob_size_bytes / + sizeof(u32); + /* + * Leave space for an unaligned word before the packed block and + * round the packed block start to multiple of packed block length. + */ + packed_payload_offset_dsp_words += 1; + packed_payload_offset_dsp_words = roundup(packed_payload_offset_dsp_words, + dsp_words_per_packed_block); + + /* Add a single unpacked word right before the first word of packed data */ + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + unpacked_mem_type, + packed_payload_offset_dsp_words - 1, + unpacked_payload_data, sizeof(unpacked_payload_data)); + + /* Add payload of packed data to the DSP memory after the unpacked word. */ + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + packed_mem_type, + packed_payload_offset_dsp_words, + packed_payload_data, packed_payload_size_bytes); + + /* Download the wmfw */ + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + 0); + /* + * Check that the packed payload was written correctly and drop + * it from the regmap cache. + */ + offset_num_regs = (packed_payload_offset_dsp_words / dsp_words_per_packed_block) * + cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type); + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type); + reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, + packed_payload_size_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes); + + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes); + + /* + * Check that the unpacked word was written correctly and drop + * it from the regmap cache. 
+ */ + offset_num_regs = ((packed_payload_offset_dsp_words - 1) / dsp_words_per_unpacked_block) * + cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type); + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type); + reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, + sizeof(unpacked_payload_data)), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data)); + + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data)); + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * Write XM/YM data that is two words longer than a packed block multiple, + * and does not start on a packed alignment. Use one payload of two unpacked + * words followed by a packed payload. + */ +static void wmfw_write_packed_2_unpacked_leading(struct kunit *test) +{ + const struct cs_dsp_wmfw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + int packed_mem_type = param->mem_type; + int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type); + unsigned int dsp_words_per_packed_block = + cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type); + unsigned int dsp_words_per_unpacked_block = + cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type); + unsigned int packed_payload_offset_dsp_words = 0; + struct firmware *wmfw; + unsigned int reg_addr; + void *packed_payload_data, *readback; + u32 unpacked_payload_data[2]; + unsigned int packed_payload_size_bytes; + unsigned int offset_num_regs; + + packed_payload_size_bytes = param->num_blocks * + cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type); + + packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data); + get_random_bytes(packed_payload_data, packed_payload_size_bytes); + + get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data)); + + readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL); + + /* Tests on XM must be after the XM header */ + if (unpacked_mem_type == WMFW_ADSP2_XM) + packed_payload_offset_dsp_words += local->xm_header->blob_size_bytes / + sizeof(u32); + /* + * Leave space for two unaligned words before the packed block and + * round the packed block start to multiple of packed block length. + */ + packed_payload_offset_dsp_words += 2; + packed_payload_offset_dsp_words = roundup(packed_payload_offset_dsp_words, + dsp_words_per_packed_block); + + /* + * Add two unpacked words as a single payload right before the + * first word of packed data + */ + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + unpacked_mem_type, + packed_payload_offset_dsp_words - 2, + unpacked_payload_data, sizeof(unpacked_payload_data)); + + /* Add payload of packed data to the DSP memory after the unpacked words. 
*/ + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + packed_mem_type, + packed_payload_offset_dsp_words, + packed_payload_data, packed_payload_size_bytes); + + /* Download the wmfw */ + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + 0); + /* + * Check that the packed payload was written correctly and drop + * it from the regmap cache. + */ + offset_num_regs = (packed_payload_offset_dsp_words / dsp_words_per_packed_block) * + cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type); + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type); + reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, + packed_payload_size_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes); + + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes); + + /* + * Check that the unpacked words were written correctly and drop + * them from the regmap cache. + */ + offset_num_regs = ((packed_payload_offset_dsp_words - 2) / dsp_words_per_unpacked_block) * + cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type); + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type); + reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, + sizeof(unpacked_payload_data)), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data)); + + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data)); + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * Write XM/YM data that is three words longer than a packed block multiple, + * and does not start on a packed alignment. Use one payload of three unpacked + * words followed by a packed payload. 
+ */ +static void wmfw_write_packed_3_unpacked_leading(struct kunit *test) +{ + const struct cs_dsp_wmfw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + int packed_mem_type = param->mem_type; + int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type); + unsigned int dsp_words_per_packed_block = + cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type); + unsigned int dsp_words_per_unpacked_block = + cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type); + unsigned int packed_payload_offset_dsp_words = 0; + struct firmware *wmfw; + unsigned int reg_addr; + void *packed_payload_data, *readback; + u32 unpacked_payload_data[3]; + unsigned int packed_payload_size_bytes; + unsigned int offset_num_regs; + + packed_payload_size_bytes = param->num_blocks * + cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type); + + packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data); + get_random_bytes(packed_payload_data, packed_payload_size_bytes); + + get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data)); + + readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL); + + /* Tests on XM must be after the XM header */ + if (unpacked_mem_type == WMFW_ADSP2_XM) + packed_payload_offset_dsp_words += local->xm_header->blob_size_bytes / + sizeof(u32); + /* + * Leave space for three unaligned words before the packed block and + * round the packed block start to multiple of packed block length. + */ + packed_payload_offset_dsp_words += 3; + packed_payload_offset_dsp_words = roundup(packed_payload_offset_dsp_words, + dsp_words_per_packed_block); + + /* + * Add three unpacked words as a single payload right before the + * first word of packed data + */ + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + unpacked_mem_type, + packed_payload_offset_dsp_words - 3, + unpacked_payload_data, sizeof(unpacked_payload_data)); + + /* Add payload of packed data to the DSP memory after the unpacked words. */ + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + packed_mem_type, + packed_payload_offset_dsp_words, + packed_payload_data, packed_payload_size_bytes); + + /* Download the wmfw */ + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + 0); + /* + * Check that the packed payload was written correctly and drop + * it from the regmap cache. + */ + offset_num_regs = (packed_payload_offset_dsp_words / dsp_words_per_packed_block) * + cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type); + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type); + reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, + packed_payload_size_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes); + + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes); + + /* + * Check that the unpacked words were written correctly and drop + * them from the regmap cache. 
+ */ + offset_num_regs = ((packed_payload_offset_dsp_words - 3) / dsp_words_per_unpacked_block) * + cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type); + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type); + reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, + sizeof(unpacked_payload_data)), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data)); + + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data)); + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * Write XM/YM data that is two words longer than a packed block multiple, + * and does not start on a packed alignment. Use two payloads of one unpacked + * word each, followed by a packed payload. + */ +static void wmfw_write_packed_2_single_unpacked_leading(struct kunit *test) +{ + const struct cs_dsp_wmfw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + int packed_mem_type = param->mem_type; + int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type); + unsigned int dsp_words_per_packed_block = + cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type); + unsigned int dsp_words_per_unpacked_block = + cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type); + unsigned int packed_payload_offset_dsp_words = 0; + struct firmware *wmfw; + unsigned int reg_addr; + void *packed_payload_data, *readback; + u32 unpacked_payload_data[2]; + unsigned int packed_payload_size_bytes; + unsigned int offset_num_regs; + + packed_payload_size_bytes = param->num_blocks * + cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type); + + packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data); + get_random_bytes(packed_payload_data, packed_payload_size_bytes); + + get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data)); + + readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL); + + /* Tests on XM must be after the XM header */ + if (unpacked_mem_type == WMFW_ADSP2_XM) + packed_payload_offset_dsp_words += local->xm_header->blob_size_bytes / + sizeof(u32); + /* + * Leave space for two unaligned words before the packed block and + * round the packed block start to multiple of packed block length. + */ + packed_payload_offset_dsp_words += 2; + packed_payload_offset_dsp_words = roundup(packed_payload_offset_dsp_words, + dsp_words_per_packed_block); + + /* + * Add two unpacked words as two payloads each containing a single + * unpacked word. + */ + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + unpacked_mem_type, + packed_payload_offset_dsp_words - 2, + &unpacked_payload_data[0], + sizeof(unpacked_payload_data[0])); + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + unpacked_mem_type, + packed_payload_offset_dsp_words - 1, + &unpacked_payload_data[1], + sizeof(unpacked_payload_data[1])); + + /* Add payload of packed data to the DSP memory after the unpacked words. 
*/ + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + packed_mem_type, + packed_payload_offset_dsp_words, + packed_payload_data, packed_payload_size_bytes); + + /* Download the wmfw */ + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + 0); + /* + * Check that the packed payload was written correctly and drop + * it from the regmap cache. + */ + offset_num_regs = (packed_payload_offset_dsp_words / dsp_words_per_packed_block) * + cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type); + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type); + reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, + packed_payload_size_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes); + + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes); + + /* + * Check that the unpacked words were written correctly and drop + * them from the regmap cache. + */ + offset_num_regs = ((packed_payload_offset_dsp_words - 2) / dsp_words_per_unpacked_block) * + cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type); + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type); + reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, + sizeof(unpacked_payload_data)), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data)); + + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data)); + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* + * Write XM/YM data that is three words longer than a packed block multiple, + * and does not start on a packed alignment. Use three payloads of one unpacked + * word each, followed by a packed payload. 
+ */
+static void wmfw_write_packed_3_single_unpacked_leading(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int packed_payload_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[3];
+ unsigned int packed_payload_size_bytes;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM)
+ packed_payload_offset_dsp_words += local->xm_header->blob_size_bytes /
+ sizeof(u32);
+ /*
+ * Leave space for three unaligned words before the packed block and
+ * round the packed block start to multiple of packed block length.
+ */
+ packed_payload_offset_dsp_words += 3;
+ packed_payload_offset_dsp_words = roundup(packed_payload_offset_dsp_words,
+ dsp_words_per_packed_block);
+
+ /*
+ * Add three unpacked words as three payloads each containing a single
+ * unpacked word.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 3,
+ &unpacked_payload_data[0],
+ sizeof(unpacked_payload_data[0]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 2,
+ &unpacked_payload_data[1],
+ sizeof(unpacked_payload_data[1]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 1,
+ &unpacked_payload_data[2],
+ sizeof(unpacked_payload_data[2]));
+
+ /* Add payload of packed data to the DSP memory after the unpacked words. */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type,
+ packed_payload_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
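+ *
+ * The expected register address below is, restated informally:
+ *
+ *   reg_addr = mem_base +
+ *              (offset_dsp_words / dsp_words_per_packed_block) *
+ *              registers_per_packed_block * reg_stride
+ *
+ * e.g. on Halo packed XM/YM memory, where a packed block is assumed to
+ * hold four 24-bit DSP words in three 32-bit registers.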
+ */ + offset_num_regs = (packed_payload_offset_dsp_words / dsp_words_per_packed_block) * + cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type); + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type); + reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, + packed_payload_size_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes); + + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes); + + /* + * Check that the unpacked words were written correctly and drop + * them from the regmap cache. + */ + offset_num_regs = ((packed_payload_offset_dsp_words - 3) / dsp_words_per_unpacked_block) * + cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type); + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type); + reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, + sizeof(unpacked_payload_data)), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data)); + + cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data)); + + /* Drop expected writes and the cache should then be clean */ + cs_dsp_mock_xm_header_drop_from_regmap_cache(priv); + KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true)); +} + +/* Load a wmfw containing multiple info blocks */ +static void wmfw_load_with_info(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *wmfw; + unsigned int reg_addr; + u8 *payload_data, *readback; + char *infobuf; + const unsigned int payload_size_bytes = 48; + int ret; + + payload_data = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data); + get_random_bytes(payload_data, payload_size_bytes); + + readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + /* Add a couple of info blocks at the start of the wmfw */ + cs_dsp_mock_wmfw_add_info(local->wmfw_builder, "This is a timestamp"); + cs_dsp_mock_wmfw_add_info(local->wmfw_builder, "This is some more info"); + + /* Add a single payload */ + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + WMFW_ADSP2_YM, 0, + payload_data, payload_size_bytes); + + /* Add a bigger info block then another small one*/ + infobuf = kunit_kzalloc(test, 512, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, infobuf); + + for (; strlcat(infobuf, "Waffle{Blah}\n", 512) < 512;) + ; + + cs_dsp_mock_wmfw_add_info(local->wmfw_builder, infobuf); + cs_dsp_mock_wmfw_add_info(local->wmfw_builder, "Another block of info"); + + /* Add another payload */ + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + WMFW_ADSP2_YM, 64, + payload_data, payload_size_bytes); + + wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder); + + ret = cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"); + KUNIT_EXPECT_EQ_MSG(test, ret, 0, "cs_dsp_power_up failed: %d\n", ret); + + /* Check first payload was written */ + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes); + + /* Check second payload was written */ + reg_addr += 
cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv) * 64; + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes); +} + +static int cs_dsp_wmfw_test_common_init(struct kunit *test, struct cs_dsp *dsp, + int wmfw_version) +{ + struct cs_dsp_test *priv; + struct cs_dsp_test_local *local; + struct device *test_dev; + int ret; + + priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL); + if (!local) + return -ENOMEM; + + priv->test = test; + priv->dsp = dsp; + test->priv = priv; + priv->local = local; + priv->local->wmfw_version = wmfw_version; + + /* Create dummy struct device */ + test_dev = kunit_device_register(test, "cs_dsp_test_drv"); + if (IS_ERR(test_dev)) + return PTR_ERR(test_dev); + + dsp->dev = get_device(test_dev); + if (!dsp->dev) + return -ENODEV; + + ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev); + if (ret) + return ret; + + dev_set_drvdata(dsp->dev, priv); + + /* Allocate regmap */ + ret = cs_dsp_mock_regmap_init(priv); + if (ret) + return ret; + + /* + * There must always be a XM header with at least 1 algorithm, so create + * a dummy one that tests can use and extract it to a data payload. + */ + local->xm_header = cs_dsp_create_mock_xm_header(priv, + cs_dsp_wmfw_test_mock_algs, + ARRAY_SIZE(cs_dsp_wmfw_test_mock_algs)); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header); + + local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, priv->local->wmfw_version); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->wmfw_builder); + + /* Add dummy XM header payload to wmfw */ + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + WMFW_ADSP2_XM, 0, + local->xm_header->blob_data, + local->xm_header->blob_size_bytes); + + /* Init cs_dsp */ + dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops); + + switch (dsp->type) { + case WMFW_ADSP2: + ret = cs_dsp_adsp2_init(dsp); + break; + case WMFW_HALO: + ret = cs_dsp_halo_init(dsp); + break; + default: + KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type); + return -EINVAL; + } + + if (ret) + return ret; + + /* Automatically call cs_dsp_remove() when test case ends */ + return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp); +} + +static int cs_dsp_wmfw_test_halo_init(struct kunit *test) +{ + struct cs_dsp *dsp; + + /* Fill in cs_dsp and initialize */ + dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL); + if (!dsp) + return -ENOMEM; + + dsp->num = 1; + dsp->type = WMFW_HALO; + dsp->mem = cs_dsp_mock_halo_dsp1_regions; + dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes); + dsp->base = cs_dsp_mock_halo_core_base; + dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base; + + return cs_dsp_wmfw_test_common_init(test, dsp, 3); +} + +static int cs_dsp_wmfw_test_adsp2_32bit_init(struct kunit *test, int wmfw_ver) +{ + struct cs_dsp *dsp; + + /* Fill in cs_dsp and initialize */ + dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL); + if (!dsp) + return -ENOMEM; + + dsp->num = 1; + dsp->type = WMFW_ADSP2; + dsp->rev = 1; + dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions; + dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes); + dsp->base = cs_dsp_mock_adsp2_32bit_sysbase; + + return cs_dsp_wmfw_test_common_init(test, dsp, wmfw_ver); +} + +static int 
cs_dsp_wmfw_test_adsp2_32bit_wmfw0_init(struct kunit *test) +{ + return cs_dsp_wmfw_test_adsp2_32bit_init(test, 0); +} + +static int cs_dsp_wmfw_test_adsp2_32bit_wmfw1_init(struct kunit *test) +{ + return cs_dsp_wmfw_test_adsp2_32bit_init(test, 1); +} + +static int cs_dsp_wmfw_test_adsp2_32bit_wmfw2_init(struct kunit *test) +{ + return cs_dsp_wmfw_test_adsp2_32bit_init(test, 2); +} + +static int cs_dsp_wmfw_test_adsp2_16bit_init(struct kunit *test, int wmfw_ver) +{ + struct cs_dsp *dsp; + + /* Fill in cs_dsp and initialize */ + dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL); + if (!dsp) + return -ENOMEM; + + dsp->num = 1; + dsp->type = WMFW_ADSP2; + dsp->rev = 0; + dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions; + dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes); + dsp->base = cs_dsp_mock_adsp2_16bit_sysbase; + + return cs_dsp_wmfw_test_common_init(test, dsp, wmfw_ver); +} + +static int cs_dsp_wmfw_test_adsp2_16bit_wmfw0_init(struct kunit *test) +{ + return cs_dsp_wmfw_test_adsp2_16bit_init(test, 0); +} + +static int cs_dsp_wmfw_test_adsp2_16bit_wmfw1_init(struct kunit *test) +{ + return cs_dsp_wmfw_test_adsp2_16bit_init(test, 1); +} + +static int cs_dsp_wmfw_test_adsp2_16bit_wmfw2_init(struct kunit *test) +{ + return cs_dsp_wmfw_test_adsp2_16bit_init(test, 2); +} + +static void cs_dsp_mem_param_desc(const struct cs_dsp_wmfw_test_param *param, char *desc) +{ + snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s num_blocks:%u", + cs_dsp_mem_region_name(param->mem_type), + param->num_blocks); +} + +static const struct cs_dsp_wmfw_test_param adsp2_all_num_blocks_param_cases[] = { + { .mem_type = WMFW_ADSP2_PM, .num_blocks = 1 }, + { .mem_type = WMFW_ADSP2_PM, .num_blocks = 2 }, + { .mem_type = WMFW_ADSP2_PM, .num_blocks = 3 }, + { .mem_type = WMFW_ADSP2_PM, .num_blocks = 4 }, + { .mem_type = WMFW_ADSP2_PM, .num_blocks = 5 }, + { .mem_type = WMFW_ADSP2_PM, .num_blocks = 6 }, + { .mem_type = WMFW_ADSP2_PM, .num_blocks = 12 }, + { .mem_type = WMFW_ADSP2_PM, .num_blocks = 13 }, + { .mem_type = WMFW_ADSP2_PM, .num_blocks = 14 }, + { .mem_type = WMFW_ADSP2_PM, .num_blocks = 15 }, + { .mem_type = WMFW_ADSP2_PM, .num_blocks = 16 }, + + { .mem_type = WMFW_ADSP2_XM, .num_blocks = 1 }, + { .mem_type = WMFW_ADSP2_XM, .num_blocks = 2 }, + { .mem_type = WMFW_ADSP2_XM, .num_blocks = 3 }, + { .mem_type = WMFW_ADSP2_XM, .num_blocks = 4 }, + { .mem_type = WMFW_ADSP2_XM, .num_blocks = 5 }, + { .mem_type = WMFW_ADSP2_XM, .num_blocks = 6 }, + { .mem_type = WMFW_ADSP2_XM, .num_blocks = 12 }, + { .mem_type = WMFW_ADSP2_XM, .num_blocks = 13 }, + { .mem_type = WMFW_ADSP2_XM, .num_blocks = 14 }, + { .mem_type = WMFW_ADSP2_XM, .num_blocks = 15 }, + { .mem_type = WMFW_ADSP2_XM, .num_blocks = 16 }, + + { .mem_type = WMFW_ADSP2_YM, .num_blocks = 1 }, + { .mem_type = WMFW_ADSP2_YM, .num_blocks = 2 }, + { .mem_type = WMFW_ADSP2_YM, .num_blocks = 3 }, + { .mem_type = WMFW_ADSP2_YM, .num_blocks = 4 }, + { .mem_type = WMFW_ADSP2_YM, .num_blocks = 5 }, + { .mem_type = WMFW_ADSP2_YM, .num_blocks = 6 }, + { .mem_type = WMFW_ADSP2_YM, .num_blocks = 12 }, + { .mem_type = WMFW_ADSP2_YM, .num_blocks = 13 }, + { .mem_type = WMFW_ADSP2_YM, .num_blocks = 14 }, + { .mem_type = WMFW_ADSP2_YM, .num_blocks = 15 }, + { .mem_type = WMFW_ADSP2_YM, .num_blocks = 16 }, + + { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 1 }, + { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 2 }, + { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 3 }, + { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 4 }, + { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 5 
}, + { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 6 }, + { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 12 }, + { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 13 }, + { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 14 }, + { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 15 }, + { .mem_type = WMFW_ADSP2_XM, .num_blocks = 16 }, +}; + +KUNIT_ARRAY_PARAM(adsp2_all_num_blocks, + adsp2_all_num_blocks_param_cases, + cs_dsp_mem_param_desc); + +static const struct cs_dsp_wmfw_test_param halo_all_num_blocks_param_cases[] = { + { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 1 }, + { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 2 }, + { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 3 }, + { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 4 }, + { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 5 }, + { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 6 }, + { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 12 }, + { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 13 }, + { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 14 }, + { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 15 }, + { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 16 }, + + { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 1 }, + { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 2 }, + { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 3 }, + { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 4 }, + { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 5 }, + { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 6 }, + { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 12 }, + { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 13 }, + { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 14 }, + { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 15 }, + { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 16 }, + + { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 1 }, + { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 2 }, + { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 3 }, + { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 4 }, + { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 5 }, + { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 6 }, + { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 12 }, + { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 13 }, + { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 14 }, + { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 15 }, + { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 16 }, + + { .mem_type = WMFW_ADSP2_XM, .num_blocks = 1 }, + { .mem_type = WMFW_ADSP2_XM, .num_blocks = 2 }, + { .mem_type = WMFW_ADSP2_XM, .num_blocks = 3 }, + { .mem_type = WMFW_ADSP2_XM, .num_blocks = 4 }, + { .mem_type = WMFW_ADSP2_XM, .num_blocks = 5 }, + { .mem_type = WMFW_ADSP2_XM, .num_blocks = 6 }, + { .mem_type = WMFW_ADSP2_XM, .num_blocks = 12 }, + { .mem_type = WMFW_ADSP2_XM, .num_blocks = 13 }, + { .mem_type = WMFW_ADSP2_XM, .num_blocks = 14 }, + { .mem_type = WMFW_ADSP2_XM, .num_blocks = 15 }, + { .mem_type = WMFW_ADSP2_XM, .num_blocks = 16 }, + + { .mem_type = WMFW_ADSP2_YM, .num_blocks = 1 }, + { .mem_type = WMFW_ADSP2_YM, .num_blocks = 2 }, + { .mem_type = WMFW_ADSP2_YM, .num_blocks = 3 }, + { .mem_type = WMFW_ADSP2_YM, .num_blocks = 4 }, + { .mem_type = WMFW_ADSP2_YM, .num_blocks = 5 }, + { .mem_type = WMFW_ADSP2_YM, .num_blocks = 6 }, + { .mem_type = WMFW_ADSP2_YM, .num_blocks = 12 }, + { .mem_type = WMFW_ADSP2_YM, .num_blocks = 13 }, + { .mem_type = WMFW_ADSP2_YM, .num_blocks = 14 }, + { .mem_type = WMFW_ADSP2_YM, .num_blocks = 15 }, + { .mem_type = WMFW_ADSP2_YM, .num_blocks = 16 }, +}; + +KUNIT_ARRAY_PARAM(halo_all_num_blocks, + 
halo_all_num_blocks_param_cases, + cs_dsp_mem_param_desc); + +static const struct cs_dsp_wmfw_test_param packed_xy_num_blocks_param_cases[] = { + { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 1 }, + { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 2 }, + { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 3 }, + { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 4 }, + { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 5 }, + { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 6 }, + { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 12 }, + { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 13 }, + { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 14 }, + { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 15 }, + { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 16 }, + + { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 1 }, + { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 2 }, + { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 3 }, + { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 4 }, + { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 5 }, + { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 6 }, + { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 12 }, + { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 13 }, + { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 14 }, + { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 15 }, + { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 16 }, +}; + +KUNIT_ARRAY_PARAM(packed_xy_num_blocks, + packed_xy_num_blocks_param_cases, + cs_dsp_mem_param_desc); + +static struct kunit_case cs_dsp_wmfw_test_cases_halo[] = { + KUNIT_CASE(wmfw_write_xm_header_unpacked), + + KUNIT_CASE_PARAM(wmfw_write_one_payload, + halo_all_num_blocks_gen_params), + KUNIT_CASE_PARAM(wmfw_write_multiple_oneblock_payloads, + halo_all_num_blocks_gen_params), + KUNIT_CASE_PARAM(wmfw_write_multiple_oneblock_payloads_reverse, + halo_all_num_blocks_gen_params), + KUNIT_CASE_PARAM(wmfw_write_multiple_payloads_sparse_unordered, + halo_all_num_blocks_gen_params), + + KUNIT_CASE(wmfw_write_all_packed_pm), + KUNIT_CASE(wmfw_write_multiple_packed_unpacked_mem), + + KUNIT_CASE_PARAM(wmfw_write_packed_1_unpacked_trailing, + packed_xy_num_blocks_gen_params), + KUNIT_CASE_PARAM(wmfw_write_packed_2_unpacked_trailing, + packed_xy_num_blocks_gen_params), + KUNIT_CASE_PARAM(wmfw_write_packed_3_unpacked_trailing, + packed_xy_num_blocks_gen_params), + KUNIT_CASE_PARAM(wmfw_write_packed_2_single_unpacked_trailing, + packed_xy_num_blocks_gen_params), + KUNIT_CASE_PARAM(wmfw_write_packed_3_single_unpacked_trailing, + packed_xy_num_blocks_gen_params), + KUNIT_CASE_PARAM(wmfw_write_packed_1_unpacked_leading, + packed_xy_num_blocks_gen_params), + KUNIT_CASE_PARAM(wmfw_write_packed_2_unpacked_leading, + packed_xy_num_blocks_gen_params), + KUNIT_CASE_PARAM(wmfw_write_packed_3_unpacked_leading, + packed_xy_num_blocks_gen_params), + KUNIT_CASE_PARAM(wmfw_write_packed_2_single_unpacked_leading, + packed_xy_num_blocks_gen_params), + KUNIT_CASE_PARAM(wmfw_write_packed_3_single_unpacked_leading, + packed_xy_num_blocks_gen_params), + + KUNIT_CASE(wmfw_load_with_info), + + { } /* terminator */ +}; + +static struct kunit_case cs_dsp_wmfw_test_cases_adsp2[] = { + KUNIT_CASE(wmfw_write_xm_header_unpacked), + KUNIT_CASE_PARAM(wmfw_write_one_payload, + adsp2_all_num_blocks_gen_params), + KUNIT_CASE_PARAM(wmfw_write_multiple_oneblock_payloads, + adsp2_all_num_blocks_gen_params), + KUNIT_CASE_PARAM(wmfw_write_multiple_oneblock_payloads_reverse, + adsp2_all_num_blocks_gen_params), + 
KUNIT_CASE_PARAM(wmfw_write_multiple_payloads_sparse_unordered, + adsp2_all_num_blocks_gen_params), + + KUNIT_CASE(wmfw_write_all_unpacked_pm), + KUNIT_CASE(wmfw_write_multiple_unpacked_mem), + + KUNIT_CASE(wmfw_load_with_info), + + { } /* terminator */ +}; + +static struct kunit_suite cs_dsp_wmfw_test_halo = { + .name = "cs_dsp_wmfwV3_halo", + .init = cs_dsp_wmfw_test_halo_init, + .test_cases = cs_dsp_wmfw_test_cases_halo, +}; + +static struct kunit_suite cs_dsp_wmfw_test_adsp2_32bit_wmfw0 = { + .name = "cs_dsp_wmfwV0_adsp2_32bit", + .init = cs_dsp_wmfw_test_adsp2_32bit_wmfw0_init, + .test_cases = cs_dsp_wmfw_test_cases_adsp2, +}; + +static struct kunit_suite cs_dsp_wmfw_test_adsp2_32bit_wmfw1 = { + .name = "cs_dsp_wmfwV1_adsp2_32bit", + .init = cs_dsp_wmfw_test_adsp2_32bit_wmfw1_init, + .test_cases = cs_dsp_wmfw_test_cases_adsp2, +}; + +static struct kunit_suite cs_dsp_wmfw_test_adsp2_32bit_wmfw2 = { + .name = "cs_dsp_wmfwV2_adsp2_32bit", + .init = cs_dsp_wmfw_test_adsp2_32bit_wmfw2_init, + .test_cases = cs_dsp_wmfw_test_cases_adsp2, +}; + +static struct kunit_suite cs_dsp_wmfw_test_adsp2_16bit_wmfw0 = { + .name = "cs_dsp_wmfwV0_adsp2_16bit", + .init = cs_dsp_wmfw_test_adsp2_16bit_wmfw0_init, + .test_cases = cs_dsp_wmfw_test_cases_adsp2, +}; + +static struct kunit_suite cs_dsp_wmfw_test_adsp2_16bit_wmfw1 = { + .name = "cs_dsp_wmfwV1_adsp2_16bit", + .init = cs_dsp_wmfw_test_adsp2_16bit_wmfw1_init, + .test_cases = cs_dsp_wmfw_test_cases_adsp2, +}; + +static struct kunit_suite cs_dsp_wmfw_test_adsp2_16bit_wmfw2 = { + .name = "cs_dsp_wmfwV2_adsp2_16bit", + .init = cs_dsp_wmfw_test_adsp2_16bit_wmfw2_init, + .test_cases = cs_dsp_wmfw_test_cases_adsp2, +}; + +kunit_test_suites(&cs_dsp_wmfw_test_halo, + &cs_dsp_wmfw_test_adsp2_32bit_wmfw0, + &cs_dsp_wmfw_test_adsp2_32bit_wmfw1, + &cs_dsp_wmfw_test_adsp2_32bit_wmfw2, + &cs_dsp_wmfw_test_adsp2_16bit_wmfw0, + &cs_dsp_wmfw_test_adsp2_16bit_wmfw1, + &cs_dsp_wmfw_test_adsp2_16bit_wmfw2); diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_wmfw_error.c b/drivers/firmware/cirrus/test/cs_dsp_test_wmfw_error.c new file mode 100644 index 000000000000..c309843261d7 --- /dev/null +++ b/drivers/firmware/cirrus/test/cs_dsp_test_wmfw_error.c @@ -0,0 +1,1347 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// KUnit tests for cs_dsp. +// +// Copyright (C) 2024 Cirrus Logic, Inc. and +// Cirrus Logic International Semiconductor Ltd. 
+// + +#include <kunit/device.h> +#include <kunit/resource.h> +#include <kunit/test.h> +#include <linux/build_bug.h> +#include <linux/firmware/cirrus/cs_dsp.h> +#include <linux/firmware/cirrus/cs_dsp_test_utils.h> +#include <linux/firmware/cirrus/wmfw.h> +#include <linux/random.h> +#include <linux/regmap.h> +#include <linux/string.h> +#include <linux/vmalloc.h> + +KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *); +KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *); + +struct cs_dsp_test_local { + struct cs_dsp_mock_xm_header *xm_header; + struct cs_dsp_mock_wmfw_builder *wmfw_builder; + int wmfw_version; +}; + +struct cs_dsp_wmfw_test_param { + int block_type; +}; + +static const struct cs_dsp_mock_alg_def cs_dsp_wmfw_err_test_mock_algs[] = { + { + .id = 0xfafa, + .ver = 0x100000, + .xm_size_words = 164, + .ym_size_words = 164, + .zm_size_words = 164, + }, +}; + +static const struct cs_dsp_mock_coeff_def mock_coeff_template = { + .shortname = "Dummy Coeff", + .type = WMFW_CTL_TYPE_BYTES, + .mem_type = WMFW_ADSP2_YM, + .flags = WMFW_CTL_FLAG_VOLATILE, + .length_bytes = 4, +}; + +/* Load a wmfw containing unknown blocks. They should be skipped. */ +static void wmfw_load_with_unknown_blocks(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *wmfw; + unsigned int reg_addr; + u8 *payload_data, *readback; + u8 random_data[8]; + const unsigned int payload_size_bytes = 64; + + /* Add dummy XM header payload to wmfw */ + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + WMFW_ADSP2_XM, 0, + local->xm_header->blob_data, + local->xm_header->blob_size_bytes); + + payload_data = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data); + get_random_bytes(payload_data, payload_size_bytes); + + readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback); + + /* Add some unknown blocks at the start of the wmfw */ + get_random_bytes(random_data, sizeof(random_data)); + cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, 0xf5, 0, + random_data, sizeof(random_data)); + cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, 0xc0, 0, random_data, + sizeof(random_data)); + cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, 0x33, 0, NULL, 0); + + /* Add a single payload to be written to DSP memory */ + cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder, + WMFW_ADSP2_YM, 0, + payload_data, payload_size_bytes); + + wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder); + + /* Sanity-check that the good wmfw loads ok */ + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"), + 0); + cs_dsp_power_down(priv->dsp); + + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + 0); + + /* Check that the payload was written to memory */ + reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM); + KUNIT_EXPECT_EQ(test, + regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes), + 0); + KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes); +} + +/* Load a wmfw that doesn't have a valid magic marker. 
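+ *
+ * A valid wmfw header begins with the 4-byte ASCII magic "WMFW". Each
+ * corruption written below (the "WMDR" magic used by coefficient/bin
+ * files, single-character substitutions, and all-zero bytes) is expected
+ * to make cs_dsp_power_up() fail.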
*/ +static void wmfw_err_wrong_magic(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *wmfw; + + wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder); + + /* Sanity-check that the good wmfw loads ok */ + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"), + 0); + cs_dsp_power_down(priv->dsp); + + memcpy((void *)wmfw->data, "WMDR", 4); + KUNIT_EXPECT_LT(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + 0); + + memcpy((void *)wmfw->data, "xMFW", 4); + KUNIT_EXPECT_LT(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + 0); + + memcpy((void *)wmfw->data, "WxFW", 4); + KUNIT_EXPECT_LT(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + 0); + + memcpy((void *)wmfw->data, "WMxW", 4); + KUNIT_EXPECT_LT(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + 0); + + memcpy((void *)wmfw->data, "WMFx", 4); + KUNIT_EXPECT_LT(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + 0); + + memset((void *)wmfw->data, 0, 4); + KUNIT_EXPECT_LT(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + 0); +} + +/* Load a wmfw that is too short for a valid header. */ +static void wmfw_err_too_short_for_header(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *wmfw; + + wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder); + + /* Sanity-check that the good wmfw loads ok */ + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"), + 0); + cs_dsp_power_down(priv->dsp); + + do { + wmfw->size--; + + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); + } while (wmfw->size > 0); +} + +/* Header length field isn't a valid header length. */ +static void wmfw_err_bad_header_length(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *wmfw; + struct wmfw_header *header; + unsigned int real_len, len; + + wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder); + + /* Sanity-check that the good wmfw loads ok */ + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"), + 0); + cs_dsp_power_down(priv->dsp); + + header = (struct wmfw_header *)wmfw->data; + real_len = le32_to_cpu(header->len); + + for (len = 0; len < real_len; len++) { + header->len = cpu_to_le32(len); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); + } + + for (len = real_len + 1; len < real_len + 7; len++) { + header->len = cpu_to_le32(len); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); + } + + header->len = cpu_to_le32(0xffffffff); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); + + header->len = cpu_to_le32(0x80000000); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); + + header->len = cpu_to_le32(0x7fffffff); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); +} + +/* Wrong core type in header. 
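+ *
+ * None of the core values written below matches the core type this
+ * cs_dsp instance was initialized for, so every load attempt is
+ * expected to be rejected.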
*/ +static void wmfw_err_bad_core_type(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *wmfw; + struct wmfw_header *header; + + wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder); + + /* Sanity-check that the good wmfw loads ok */ + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"), + 0); + cs_dsp_power_down(priv->dsp); + + header = (struct wmfw_header *)wmfw->data; + + header->core = 0; + KUNIT_EXPECT_LT(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + 0); + + header->core = 1; + KUNIT_EXPECT_LT(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + 0); + + header->core = priv->dsp->type + 1; + KUNIT_EXPECT_LT(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + 0); + + header->core = 0xff; + KUNIT_EXPECT_LT(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + 0); +} + +/* File too short to contain a full block header */ +static void wmfw_too_short_for_block_header(struct kunit *test) +{ + const struct cs_dsp_wmfw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *wmfw; + unsigned int header_length; + u32 dummy_payload = 0; + + wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder); + header_length = wmfw->size; + kunit_kfree(test, wmfw); + + /* Add the block. A block must have at least 4 bytes of payload */ + cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, param->block_type, 0, + &dummy_payload, sizeof(dummy_payload)); + + wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder); + KUNIT_ASSERT_GT(test, wmfw->size, header_length); + + /* Sanity-check that the good wmfw loads ok */ + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"), + 0); + cs_dsp_power_down(priv->dsp); + + for (wmfw->size--; wmfw->size > header_length; wmfw->size--) { + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); + } +} + +/* File too short to contain the block payload */ +static void wmfw_too_short_for_block_payload(struct kunit *test) +{ + const struct cs_dsp_wmfw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *wmfw; + static const u8 payload[256] = { }; + int i; + + cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, param->block_type, 0, + payload, sizeof(payload)); + + wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder); + + /* Sanity-check that the good wmfw loads ok */ + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"), + 0); + cs_dsp_power_down(priv->dsp); + + for (i = 0; i < sizeof(payload); i++) { + wmfw->size--; + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); + } +} + +/* Block payload length is a garbage value */ +static void wmfw_block_payload_len_garbage(struct kunit *test) +{ + const struct cs_dsp_wmfw_test_param *param = test->param_value; + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *wmfw; + struct wmfw_header *header; + struct wmfw_region *region; + u32 payload = 0; + + + cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, param->block_type, 0, + &payload, sizeof(payload)); + + wmfw = 
cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder); + + /* Sanity-check that the good wmfw loads ok */ + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"), + 0); + cs_dsp_power_down(priv->dsp); + + header = (struct wmfw_header *)wmfw->data; + region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)]; + + /* Sanity check that we're looking at the correct part of the wmfw */ + KUNIT_ASSERT_EQ(test, le32_to_cpu(region->offset) >> 24, param->block_type); + KUNIT_ASSERT_EQ(test, le32_to_cpu(region->len), sizeof(payload)); + + region->len = cpu_to_le32(0x8000); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); + + region->len = cpu_to_le32(0xffff); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); + + region->len = cpu_to_le32(0x7fffffff); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); + + region->len = cpu_to_le32(0x80000000); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); + + region->len = cpu_to_le32(0xffffffff); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); +} + +/* File too short to contain an algorithm header */ +static void wmfw_too_short_for_alg_header(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *wmfw; + unsigned int header_length; + + wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder); + header_length = wmfw->size; + kunit_kfree(test, wmfw); + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_wmfw_err_test_mock_algs[0].id, + NULL, NULL); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder); + KUNIT_ASSERT_GT(test, wmfw->size, header_length); + + /* Sanity-check that the good wmfw loads ok */ + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"), + 0); + cs_dsp_power_down(priv->dsp); + + for (wmfw->size--; wmfw->size > header_length; wmfw->size--) { + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); + } +} + +/* V1 algorithm name does not have NUL terminator */ +static void wmfw_v1_alg_name_unterminated(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *wmfw; + struct wmfw_header *header; + struct wmfw_region *region; + struct wmfw_adsp_alg_data *alg_data; + struct cs_dsp_coeff_ctl *ctl; + + /* Create alg info block with a coefficient */ + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_wmfw_err_test_mock_algs[0].id, + "abc", "de"); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder); + + header = (struct wmfw_header *)wmfw->data; + region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)]; + alg_data = (struct wmfw_adsp_alg_data *)region->data; + + /* Sanity check we're pointing at the alg header */ + KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data->id), cs_dsp_wmfw_err_test_mock_algs[0].id); + + /* Write a string to the alg name that overflows the array */ + memset(alg_data->descr, 0, sizeof(alg_data->descr)); + 
memset(alg_data->name, 'A', sizeof(alg_data->name)); + memset(alg_data->descr, 'A', sizeof(alg_data->descr) - 1); + + /* + * Sanity-check that a strlen would overflow alg_data->name. + * FORTIFY_STRING obstructs testing what strlen() would actually + * return, so instead verify that a strnlen() returns + * sizeof(alg_data->name[]), therefore it doesn't have a NUL. + */ + KUNIT_ASSERT_EQ(test, strnlen(alg_data->name, sizeof(alg_data->name)), + sizeof(alg_data->name)); + + /* + * The alg name isn't stored, but cs_dsp parses the name field. + * It should load the file successfully and create the control. + * If FORTIFY_STRING is enabled it will detect a buffer overflow + * if cs_dsp string length walks past end of alg name array. + */ + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + 0); + ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_EQ(test, ctl->subname_len, 0); +} + +/* V2+ algorithm name exceeds length of containing block */ +static void wmfw_v2_alg_name_exceeds_block(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *wmfw; + struct wmfw_header *header; + struct wmfw_region *region; + __le32 *alg_data; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_wmfw_err_test_mock_algs[0].id, + "abc", NULL); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder); + + /* Sanity-check that the good wmfw loads ok */ + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"), + 0); + cs_dsp_power_down(priv->dsp); + + header = (struct wmfw_header *)wmfw->data; + region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)]; + alg_data = (__force __le32 *)region->data; + + /* + * Sanity check we're pointing at the alg header of + * [ alg_id ][name_len]abc + */ + KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id); + KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[1]), 3 | ('a' << 8) | ('b' << 16) | ('c' << 24)); + KUNIT_ASSERT_EQ(test, *(u8 *)&alg_data[1], 3); + + /* Set name string length longer than available space */ + *(u8 *)&alg_data[1] = 4; + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); + + *(u8 *)&alg_data[1] = 7; + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); + + *(u8 *)&alg_data[1] = 0x80; + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); + + *(u8 *)&alg_data[1] = 0xff; + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); +} + +/* V2+ algorithm description exceeds length of containing block */ +static void wmfw_v2_alg_description_exceeds_block(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *wmfw; + struct wmfw_header *header; + struct wmfw_region *region; + __le32 *alg_data; + + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_wmfw_err_test_mock_algs[0].id, + "abc", "de"); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder); + + /* Sanity-check that the good wmfw loads ok */ + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, 
"wmfw", NULL, NULL, "misc"), + 0); + cs_dsp_power_down(priv->dsp); + + header = (struct wmfw_header *)wmfw->data; + region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)]; + alg_data = (__force __le32 *)region->data; + + /* + * Sanity check we're pointing at the alg header of + * [ alg_id ][name_len]abc[desc_len]de + */ + KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id); + KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[2]), 2 | ('d' << 16) | ('e' << 24)); + KUNIT_ASSERT_EQ(test, le16_to_cpu(*(__le16 *)&alg_data[2]), 2); + + /* Set name string length longer than available space */ + *(__le16 *)&alg_data[2] = cpu_to_le16(4); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); + + *(__le16 *)&alg_data[2] = cpu_to_le16(7); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); + + *(__le16 *)&alg_data[2] = cpu_to_le16(0x80); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); + + *(__le16 *)&alg_data[2] = cpu_to_le16(0xff); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); + + *(__le16 *)&alg_data[2] = cpu_to_le16(0x8000); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); + + *(__le16 *)&alg_data[2] = cpu_to_le16(0xffff); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); +} + +/* V1 coefficient count exceeds length of containing block */ +static void wmfw_v1_coeff_count_exceeds_block(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *wmfw; + struct wmfw_header *header; + struct wmfw_region *region; + struct wmfw_adsp_alg_data *alg_data; + + /* Create alg info block with a coefficient */ + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_wmfw_err_test_mock_algs[0].id, + "abc", "de"); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder); + + /* Sanity-check that the good wmfw loads ok */ + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"), + 0); + cs_dsp_power_down(priv->dsp); + + header = (struct wmfw_header *)wmfw->data; + region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)]; + alg_data = (struct wmfw_adsp_alg_data *)region->data; + + /* Sanity check we're pointing at the alg header */ + KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data->id), cs_dsp_wmfw_err_test_mock_algs[0].id); + + /* Add one to the coefficient count */ + alg_data->ncoeff = cpu_to_le32(le32_to_cpu(alg_data->ncoeff) + 1); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); + + /* Make the coefficient count garbage */ + alg_data->ncoeff = cpu_to_le32(0xffffffff); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); + + alg_data->ncoeff = cpu_to_le32(0x7fffffff); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); + + alg_data->ncoeff = cpu_to_le32(0x80000000); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); +} + +/* V2+ coefficient count 
exceeds length of containing block */ +static void wmfw_v2_coeff_count_exceeds_block(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *wmfw; + struct wmfw_header *header; + struct wmfw_region *region; + __le32 *alg_data, *ncoeff; + + /* Create alg info block with a coefficient */ + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_wmfw_err_test_mock_algs[0].id, + "abc", "de"); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder); + + /* Sanity-check that the good wmfw loads ok */ + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"), + 0); + cs_dsp_power_down(priv->dsp); + + header = (struct wmfw_header *)wmfw->data; + region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)]; + alg_data = (__force __le32 *)region->data; + + /* Sanity check we're pointing at the alg header */ + KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id); + + ncoeff = (__force __le32 *)&alg_data[3]; + KUNIT_ASSERT_EQ(test, le32_to_cpu(*ncoeff), 1); + + /* Add one to the coefficient count */ + *ncoeff = cpu_to_le32(2); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); + + /* Make the coefficient count garbage */ + *ncoeff = cpu_to_le32(0xffffffff); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); + + *ncoeff = cpu_to_le32(0x7fffffff); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); + + *ncoeff = cpu_to_le32(0x80000000); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); +} + +/* V2+ coefficient block size exceeds length of containing block */ +static void wmfw_v2_coeff_block_size_exceeds_block(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *wmfw; + struct wmfw_header *header; + struct wmfw_region *region; + __le32 *alg_data, *coeff; + + /* Create alg info block with a coefficient */ + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_wmfw_err_test_mock_algs[0].id, + "abc", "de"); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder); + + /* Sanity-check that the good wmfw loads ok */ + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"), + 0); + cs_dsp_power_down(priv->dsp); + + header = (struct wmfw_header *)wmfw->data; + region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)]; + alg_data = (__force __le32 *)region->data; + + /* Sanity check we're pointing at the alg header */ + KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id); + + /* Sanity check we're pointing at the coeff block */ + coeff = (__force __le32 *)&alg_data[4]; + KUNIT_ASSERT_EQ(test, le32_to_cpu(coeff[0]), mock_coeff_template.mem_type << 16); + + /* Add one to the block size */ + coeff[1] = cpu_to_le32(le32_to_cpu(coeff[1]) + 1); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); + + /* Make the block size garbage */ + 
coeff[1] = cpu_to_le32(0xffffffff); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); + + coeff[1] = cpu_to_le32(0x7fffffff); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); + + coeff[1] = cpu_to_le32(0x80000000); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); +} + +/* V1 coeff name does not have NUL terminator */ +static void wmfw_v1_coeff_name_unterminated(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *wmfw; + struct wmfw_header *header; + struct wmfw_region *region; + struct wmfw_adsp_alg_data *alg_data; + struct wmfw_adsp_coeff_data *coeff; + struct cs_dsp_coeff_ctl *ctl; + + /* Create alg info block with a coefficient */ + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_wmfw_err_test_mock_algs[0].id, + "abc", "de"); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder); + + header = (struct wmfw_header *)wmfw->data; + region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)]; + alg_data = (struct wmfw_adsp_alg_data *)region->data; + + /* Sanity check we're pointing at the alg header */ + KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data->id), cs_dsp_wmfw_err_test_mock_algs[0].id); + KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data->ncoeff), 1); + + coeff = (void *)alg_data->data; + + /* Write a string to the coeff name that overflows the array */ + memset(coeff->descr, 0, sizeof(coeff->descr)); + memset(coeff->name, 'A', sizeof(coeff->name)); + memset(coeff->descr, 'A', sizeof(coeff->descr) - 1); + + /* + * Sanity-check that a strlen would overflow coeff->name. + * FORTIFY_STRING obstructs testing what strlen() would actually + * return, so instead verify that a strnlen() returns + * sizeof(coeff->name[]), therefore it doesn't have a NUL. + */ + KUNIT_ASSERT_EQ(test, strnlen(coeff->name, sizeof(coeff->name)), + sizeof(coeff->name)); + + /* + * V1 controls do not have names, but cs_dsp parses the name + * field. It should load the file successfully and create the + * control. + * If FORTIFY_STRING is enabled it will detect a buffer overflow + * if cs_dsp string length walks past end of coeff name array. 
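+ *
+ * As with the unterminated alg name test above, the pass criteria here
+ * are a clean load and a created control with subname_len == 0.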
+ */ + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + 0); + ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list); + KUNIT_ASSERT_NOT_NULL(test, ctl); + KUNIT_EXPECT_EQ(test, ctl->subname_len, 0); +} + +/* V2+ coefficient shortname exceeds length of coeff block */ +static void wmfw_v2_coeff_shortname_exceeds_block(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *wmfw; + struct wmfw_header *header; + struct wmfw_region *region; + __le32 *alg_data, *coeff; + + /* Create alg info block with a coefficient */ + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_wmfw_err_test_mock_algs[0].id, + "abc", "de"); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder); + + /* Sanity-check that the good wmfw loads ok */ + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"), + 0); + cs_dsp_power_down(priv->dsp); + + header = (struct wmfw_header *)wmfw->data; + region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)]; + alg_data = (__force __le32 *)region->data; + + /* Sanity check we're pointing at the alg header */ + KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id); + + /* Sanity check we're pointing at the coeff block */ + coeff = (__force __le32 *)&alg_data[4]; + KUNIT_ASSERT_EQ(test, le32_to_cpu(coeff[0]), mock_coeff_template.mem_type << 16); + + /* Add one to the shortname length */ + coeff[2] = cpu_to_le32(le32_to_cpu(coeff[2]) + 1); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); + + /* Maximum shortname length */ + coeff[2] = cpu_to_le32(255); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); +} + +/* V2+ coefficient fullname exceeds length of coeff block */ +static void wmfw_v2_coeff_fullname_exceeds_block(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *wmfw; + struct wmfw_header *header; + struct wmfw_region *region; + __le32 *alg_data, *coeff, *fullname; + size_t shortlen; + + /* Create alg info block with a coefficient */ + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_wmfw_err_test_mock_algs[0].id, + "abc", "de"); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder); + + /* Sanity-check that the good wmfw loads ok */ + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"), + 0); + cs_dsp_power_down(priv->dsp); + + header = (struct wmfw_header *)wmfw->data; + region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)]; + alg_data = (__force __le32 *)region->data; + + /* Sanity check we're pointing at the alg header */ + KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id); + + /* Sanity check we're pointing at the coeff block */ + coeff = (__force __le32 *)&alg_data[4]; + KUNIT_ASSERT_EQ(test, le32_to_cpu(coeff[0]), mock_coeff_template.mem_type << 16); + + /* Fullname follows the shortname rounded up to a __le32 boundary */ + shortlen = 
round_up(le32_to_cpu(coeff[2]) & 0xff, sizeof(__le32)); + fullname = &coeff[2] + (shortlen / sizeof(*coeff)); + + /* Fullname increases in blocks of __le32 so increase past the current __le32 */ + fullname[0] = cpu_to_le32(round_up(le32_to_cpu(fullname[0]) + 1, sizeof(__le32))); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); + + /* Maximum fullname length */ + fullname[0] = cpu_to_le32(255); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); +} + +/* V2+ coefficient description exceeds length of coeff block */ +static void wmfw_v2_coeff_description_exceeds_block(struct kunit *test) +{ + struct cs_dsp_test *priv = test->priv; + struct cs_dsp_test_local *local = priv->local; + struct firmware *wmfw; + struct wmfw_header *header; + struct wmfw_region *region; + __le32 *alg_data, *coeff, *fullname, *description; + size_t namelen; + + /* Create alg info block with a coefficient */ + cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder, + cs_dsp_wmfw_err_test_mock_algs[0].id, + "abc", "de"); + cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template); + cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder); + + wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder); + + /* Sanity-check that the good wmfw loads ok */ + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"), + 0); + cs_dsp_power_down(priv->dsp); + + header = (struct wmfw_header *)wmfw->data; + region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)]; + alg_data = (__force __le32 *)region->data; + + /* Sanity check we're pointing at the alg header */ + KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id); + + /* Sanity check we're pointing at the coeff block */ + coeff = (__force __le32 *)&alg_data[4]; + KUNIT_ASSERT_EQ(test, le32_to_cpu(coeff[0]), mock_coeff_template.mem_type << 16); + + /* Description follows the shortname and fullname rounded up to __le32 boundaries */ + namelen = round_up(le32_to_cpu(coeff[2]) & 0xff, sizeof(__le32)); + fullname = &coeff[2] + (namelen / sizeof(*coeff)); + namelen = round_up(le32_to_cpu(fullname[0]) & 0xff, sizeof(__le32)); + description = fullname + (namelen / sizeof(*fullname)); + + /* Description increases in blocks of __le32 so increase past the current __le32 */ + description[0] = cpu_to_le32(round_up(le32_to_cpu(fullname[0]) + 1, sizeof(__le32))); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); + + /* Maximum description length */ + fullname[0] = cpu_to_le32(0xffff); + KUNIT_EXPECT_EQ(test, + cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"), + -EOVERFLOW); +} + +static void cs_dsp_wmfw_err_test_exit(struct kunit *test) +{ + /* + * Testing error conditions can produce a lot of log output + * from cs_dsp error messages, so rate limit the test cases. 
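+ *
+ * (The short usleep_range() below is a crude throttle between cases;
+ * it is assumed to be enough to avoid flooding the console on slower
+ * targets.)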
+ */ + usleep_range(200, 500); +} + +static int cs_dsp_wmfw_err_test_common_init(struct kunit *test, struct cs_dsp *dsp, + int wmfw_version) +{ + struct cs_dsp_test *priv; + struct cs_dsp_test_local *local; + struct device *test_dev; + int ret; + + priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL); + if (!local) + return -ENOMEM; + + priv->test = test; + priv->dsp = dsp; + test->priv = priv; + priv->local = local; + local->wmfw_version = wmfw_version; + + /* Create dummy struct device */ + test_dev = kunit_device_register(test, "cs_dsp_test_drv"); + if (IS_ERR(test_dev)) + return PTR_ERR(test_dev); + + dsp->dev = get_device(test_dev); + if (!dsp->dev) + return -ENODEV; + + ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev); + if (ret) + return ret; + + dev_set_drvdata(dsp->dev, priv); + + /* Allocate regmap */ + ret = cs_dsp_mock_regmap_init(priv); + if (ret) + return ret; + + /* + * There must always be a XM header with at least 1 algorithm, + * so create a dummy one and pre-populate XM so the wmfw doesn't + * have to contain an XM blob. + */ + local->xm_header = cs_dsp_create_mock_xm_header(priv, + cs_dsp_wmfw_err_test_mock_algs, + ARRAY_SIZE(cs_dsp_wmfw_err_test_mock_algs)); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header); + cs_dsp_mock_xm_header_write_to_regmap(local->xm_header); + + local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, local->wmfw_version); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->wmfw_builder); + + /* Init cs_dsp */ + dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops); + + switch (dsp->type) { + case WMFW_ADSP2: + ret = cs_dsp_adsp2_init(dsp); + break; + case WMFW_HALO: + ret = cs_dsp_halo_init(dsp); + break; + default: + KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type); + return -EINVAL; + } + + if (ret) + return ret; + + /* Automatically call cs_dsp_remove() when test case ends */ + return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp); +} + +static int cs_dsp_wmfw_err_test_halo_init(struct kunit *test) +{ + struct cs_dsp *dsp; + + /* Fill in cs_dsp and initialize */ + dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL); + if (!dsp) + return -ENOMEM; + + dsp->num = 1; + dsp->type = WMFW_HALO; + dsp->mem = cs_dsp_mock_halo_dsp1_regions; + dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes); + dsp->base = cs_dsp_mock_halo_core_base; + dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base; + + return cs_dsp_wmfw_err_test_common_init(test, dsp, 3); +} + +static int cs_dsp_wmfw_err_test_adsp2_32bit_init(struct kunit *test, int wmfw_ver) +{ + struct cs_dsp *dsp; + + /* Fill in cs_dsp and initialize */ + dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL); + if (!dsp) + return -ENOMEM; + + dsp->num = 1; + dsp->type = WMFW_ADSP2; + dsp->rev = 1; + dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions; + dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes); + dsp->base = cs_dsp_mock_adsp2_32bit_sysbase; + + return cs_dsp_wmfw_err_test_common_init(test, dsp, wmfw_ver); +} + +static int cs_dsp_wmfw_err_test_adsp2_32bit_wmfw0_init(struct kunit *test) +{ + return cs_dsp_wmfw_err_test_adsp2_32bit_init(test, 0); +} + +static int cs_dsp_wmfw_err_test_adsp2_32bit_wmfw1_init(struct kunit *test) +{ + return cs_dsp_wmfw_err_test_adsp2_32bit_init(test, 1); +} + +static int 
cs_dsp_wmfw_err_test_adsp2_32bit_wmfw2_init(struct kunit *test) +{ + return cs_dsp_wmfw_err_test_adsp2_32bit_init(test, 2); +} + +static int cs_dsp_wmfw_err_test_adsp2_16bit_init(struct kunit *test, int wmfw_ver) +{ + struct cs_dsp *dsp; + + /* Fill in cs_dsp and initialize */ + dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL); + if (!dsp) + return -ENOMEM; + + dsp->num = 1; + dsp->type = WMFW_ADSP2; + dsp->rev = 0; + dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions; + dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes); + dsp->base = cs_dsp_mock_adsp2_16bit_sysbase; + + return cs_dsp_wmfw_err_test_common_init(test, dsp, wmfw_ver); +} + +static int cs_dsp_wmfw_err_test_adsp2_16bit_wmfw0_init(struct kunit *test) +{ + return cs_dsp_wmfw_err_test_adsp2_16bit_init(test, 0); +} + +static int cs_dsp_wmfw_err_test_adsp2_16bit_wmfw1_init(struct kunit *test) +{ + return cs_dsp_wmfw_err_test_adsp2_16bit_init(test, 1); +} + +static int cs_dsp_wmfw_err_test_adsp2_16bit_wmfw2_init(struct kunit *test) +{ + return cs_dsp_wmfw_err_test_adsp2_16bit_init(test, 2); +} + +static void cs_dsp_wmfw_err_block_types_desc(const struct cs_dsp_wmfw_test_param *param, + char *desc) +{ + snprintf(desc, KUNIT_PARAM_DESC_SIZE, "block_type:%#x", param->block_type); +} + +static const struct cs_dsp_wmfw_test_param wmfw_valid_block_types_adsp2_cases[] = { + { .block_type = WMFW_INFO_TEXT }, + { .block_type = WMFW_ADSP2_PM }, + { .block_type = WMFW_ADSP2_YM }, +}; + +KUNIT_ARRAY_PARAM(wmfw_valid_block_types_adsp2, + wmfw_valid_block_types_adsp2_cases, + cs_dsp_wmfw_err_block_types_desc); + +static const struct cs_dsp_wmfw_test_param wmfw_valid_block_types_halo_cases[] = { + { .block_type = WMFW_INFO_TEXT }, + { .block_type = WMFW_HALO_PM_PACKED }, + { .block_type = WMFW_ADSP2_YM }, +}; + +KUNIT_ARRAY_PARAM(wmfw_valid_block_types_halo, + wmfw_valid_block_types_halo_cases, + cs_dsp_wmfw_err_block_types_desc); + +static const struct cs_dsp_wmfw_test_param wmfw_invalid_block_types_cases[] = { + { .block_type = 0x33 }, + { .block_type = 0xf5 }, + { .block_type = 0xc0 }, +}; + +KUNIT_ARRAY_PARAM(wmfw_invalid_block_types, + wmfw_invalid_block_types_cases, + cs_dsp_wmfw_err_block_types_desc); + +static struct kunit_case cs_dsp_wmfw_err_test_cases_v0[] = { + KUNIT_CASE(wmfw_load_with_unknown_blocks), + KUNIT_CASE(wmfw_err_wrong_magic), + KUNIT_CASE(wmfw_err_too_short_for_header), + KUNIT_CASE(wmfw_err_bad_header_length), + KUNIT_CASE(wmfw_err_bad_core_type), + + KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_valid_block_types_adsp2_gen_params), + KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_invalid_block_types_gen_params), + KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_valid_block_types_adsp2_gen_params), + KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_invalid_block_types_gen_params), + KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_valid_block_types_adsp2_gen_params), + KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_invalid_block_types_gen_params), + + { } /* terminator */ +}; + +static struct kunit_case cs_dsp_wmfw_err_test_cases_v1[] = { + KUNIT_CASE(wmfw_load_with_unknown_blocks), + KUNIT_CASE(wmfw_err_wrong_magic), + KUNIT_CASE(wmfw_err_too_short_for_header), + KUNIT_CASE(wmfw_err_bad_header_length), + KUNIT_CASE(wmfw_err_bad_core_type), + + KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_valid_block_types_adsp2_gen_params), + KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_invalid_block_types_gen_params), + 
KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_valid_block_types_adsp2_gen_params), + KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_invalid_block_types_gen_params), + KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_valid_block_types_adsp2_gen_params), + KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_invalid_block_types_gen_params), + + KUNIT_CASE(wmfw_too_short_for_alg_header), + KUNIT_CASE(wmfw_v1_alg_name_unterminated), + KUNIT_CASE(wmfw_v1_coeff_count_exceeds_block), + KUNIT_CASE(wmfw_v1_coeff_name_unterminated), + + { } /* terminator */ +}; + +static struct kunit_case cs_dsp_wmfw_err_test_cases_v2[] = { + KUNIT_CASE(wmfw_load_with_unknown_blocks), + KUNIT_CASE(wmfw_err_wrong_magic), + KUNIT_CASE(wmfw_err_too_short_for_header), + KUNIT_CASE(wmfw_err_bad_header_length), + KUNIT_CASE(wmfw_err_bad_core_type), + + KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_valid_block_types_adsp2_gen_params), + KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_invalid_block_types_gen_params), + KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_valid_block_types_adsp2_gen_params), + KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_invalid_block_types_gen_params), + KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_valid_block_types_adsp2_gen_params), + KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_invalid_block_types_gen_params), + + KUNIT_CASE(wmfw_too_short_for_alg_header), + KUNIT_CASE(wmfw_v2_alg_name_exceeds_block), + KUNIT_CASE(wmfw_v2_alg_description_exceeds_block), + KUNIT_CASE(wmfw_v2_coeff_count_exceeds_block), + KUNIT_CASE(wmfw_v2_coeff_block_size_exceeds_block), + KUNIT_CASE(wmfw_v2_coeff_shortname_exceeds_block), + KUNIT_CASE(wmfw_v2_coeff_fullname_exceeds_block), + KUNIT_CASE(wmfw_v2_coeff_description_exceeds_block), + + { } /* terminator */ +}; + +static struct kunit_case cs_dsp_wmfw_err_test_cases_v3[] = { + KUNIT_CASE(wmfw_load_with_unknown_blocks), + KUNIT_CASE(wmfw_err_wrong_magic), + KUNIT_CASE(wmfw_err_too_short_for_header), + KUNIT_CASE(wmfw_err_bad_header_length), + KUNIT_CASE(wmfw_err_bad_core_type), + + KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_valid_block_types_halo_gen_params), + KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_invalid_block_types_gen_params), + KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_valid_block_types_halo_gen_params), + KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_invalid_block_types_gen_params), + KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_valid_block_types_halo_gen_params), + KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_invalid_block_types_gen_params), + + KUNIT_CASE(wmfw_too_short_for_alg_header), + KUNIT_CASE(wmfw_v2_alg_name_exceeds_block), + KUNIT_CASE(wmfw_v2_alg_description_exceeds_block), + KUNIT_CASE(wmfw_v2_coeff_count_exceeds_block), + KUNIT_CASE(wmfw_v2_coeff_block_size_exceeds_block), + KUNIT_CASE(wmfw_v2_coeff_shortname_exceeds_block), + KUNIT_CASE(wmfw_v2_coeff_fullname_exceeds_block), + KUNIT_CASE(wmfw_v2_coeff_description_exceeds_block), + + { } /* terminator */ +}; + +static struct kunit_suite cs_dsp_wmfw_err_test_halo = { + .name = "cs_dsp_wmfwV3_err_halo", + .init = cs_dsp_wmfw_err_test_halo_init, + .exit = cs_dsp_wmfw_err_test_exit, + .test_cases = cs_dsp_wmfw_err_test_cases_v3, +}; + +static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_32bit_wmfw0 = { + .name = "cs_dsp_wmfwV0_err_adsp2_32bit", + .init = cs_dsp_wmfw_err_test_adsp2_32bit_wmfw0_init, + .exit = cs_dsp_wmfw_err_test_exit, + .test_cases = 
cs_dsp_wmfw_err_test_cases_v0, +}; + +static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_32bit_wmfw1 = { + .name = "cs_dsp_wmfwV1_err_adsp2_32bit", + .init = cs_dsp_wmfw_err_test_adsp2_32bit_wmfw1_init, + .exit = cs_dsp_wmfw_err_test_exit, + .test_cases = cs_dsp_wmfw_err_test_cases_v1, +}; + +static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_32bit_wmfw2 = { + .name = "cs_dsp_wmfwV2_err_adsp2_32bit", + .init = cs_dsp_wmfw_err_test_adsp2_32bit_wmfw2_init, + .exit = cs_dsp_wmfw_err_test_exit, + .test_cases = cs_dsp_wmfw_err_test_cases_v2, +}; + +static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_16bit_wmfw0 = { + .name = "cs_dsp_wmfwV0_err_adsp2_16bit", + .init = cs_dsp_wmfw_err_test_adsp2_16bit_wmfw0_init, + .exit = cs_dsp_wmfw_err_test_exit, + .test_cases = cs_dsp_wmfw_err_test_cases_v0, +}; + +static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_16bit_wmfw1 = { + .name = "cs_dsp_wmfwV1_err_adsp2_16bit", + .init = cs_dsp_wmfw_err_test_adsp2_16bit_wmfw1_init, + .exit = cs_dsp_wmfw_err_test_exit, + .test_cases = cs_dsp_wmfw_err_test_cases_v1, +}; + +static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_16bit_wmfw2 = { + .name = "cs_dsp_wmfwV2_err_adsp2_16bit", + .init = cs_dsp_wmfw_err_test_adsp2_16bit_wmfw2_init, + .exit = cs_dsp_wmfw_err_test_exit, + .test_cases = cs_dsp_wmfw_err_test_cases_v2, +}; + +kunit_test_suites(&cs_dsp_wmfw_err_test_halo, + &cs_dsp_wmfw_err_test_adsp2_32bit_wmfw0, + &cs_dsp_wmfw_err_test_adsp2_32bit_wmfw1, + &cs_dsp_wmfw_err_test_adsp2_32bit_wmfw2, + &cs_dsp_wmfw_err_test_adsp2_16bit_wmfw0, + &cs_dsp_wmfw_err_test_adsp2_16bit_wmfw1, + &cs_dsp_wmfw_err_test_adsp2_16bit_wmfw2); diff --git a/drivers/firmware/cirrus/test/cs_dsp_tests.c b/drivers/firmware/cirrus/test/cs_dsp_tests.c new file mode 100644 index 000000000000..7b829a03ca52 --- /dev/null +++ b/drivers/firmware/cirrus/test/cs_dsp_tests.c @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Utility module for cs_dsp KUnit testing. +// +// Copyright (C) 2024 Cirrus Logic, Inc. and +// Cirrus Logic International Semiconductor Ltd. 
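The suites above run the same error-path test functions across several DSP cores and wmfw format versions by pairing KUNIT_ARRAY_PARAM() (which generates a <name>_gen_params iterator from an array) with KUNIT_CASE_PARAM(). A minimal sketch of that pattern, with hypothetical names (my_param, my_case, my_suite); only the standard KUnit macros from <kunit/test.h> are assumed:

#include <kunit/test.h>

struct my_param {
	unsigned int block_type;
};

static const struct my_param my_param_cases[] = {
	{ .block_type = 0x33 },
	{ .block_type = 0xf5 },
};

/* Produces a human-readable description for each parameter value */
static void my_param_desc(const struct my_param *param, char *desc)
{
	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "block_type:%#x", param->block_type);
}

/* Defines my_param_gen_params() for use with KUNIT_CASE_PARAM() */
KUNIT_ARRAY_PARAM(my_param, my_param_cases, my_param_desc);

static void my_case(struct kunit *test)
{
	const struct my_param *param = test->param_value;

	/* The same test body runs once per entry in my_param_cases[] */
	KUNIT_EXPECT_NE(test, param->block_type, 0);
}

static struct kunit_case my_cases[] = {
	KUNIT_CASE_PARAM(my_case, my_param_gen_params),
	{ } /* terminator */
};

static struct kunit_suite my_suite = {
	.name = "my_suite",
	.test_cases = my_cases,
};

kunit_test_suites(&my_suite);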
+ +#include <linux/module.h> + +MODULE_DESCRIPTION("KUnit tests for Cirrus Logic DSP driver"); +MODULE_AUTHOR("Richard Fitzgerald <rf@opensource.cirrus.com>"); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS("FW_CS_DSP"); +MODULE_IMPORT_NS("FW_CS_DSP_KUNIT_TEST_UTILS"); diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c index 5f3a3e913d28..d19c78a78ae3 100644 --- a/drivers/firmware/dmi-id.c +++ b/drivers/firmware/dmi-id.c @@ -169,9 +169,14 @@ static int dmi_dev_uevent(const struct device *dev, struct kobj_uevent_env *env) return 0; } +static void dmi_dev_release(struct device *dev) +{ + kfree(dev); +} + static struct class dmi_class = { .name = "dmi", - .dev_release = (void(*)(struct device *)) kfree, + .dev_release = dmi_dev_release, .dev_uevent = dmi_dev_uevent, }; diff --git a/drivers/firmware/dmi-sysfs.c b/drivers/firmware/dmi-sysfs.c index 8d91997036e4..9cc963b2edc0 100644 --- a/drivers/firmware/dmi-sysfs.c +++ b/drivers/firmware/dmi-sysfs.c @@ -431,9 +431,9 @@ static ssize_t dmi_sel_raw_read_helper(struct dmi_sysfs_entry *entry, } } -static ssize_t dmi_sel_raw_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, - char *buf, loff_t pos, size_t count) +static ssize_t raw_event_log_read(struct file *filp, struct kobject *kobj, + const struct bin_attribute *bin_attr, + char *buf, loff_t pos, size_t count) { struct dmi_sysfs_entry *entry = to_entry(kobj->parent); struct dmi_read_state state = { @@ -445,10 +445,7 @@ static ssize_t dmi_sel_raw_read(struct file *filp, struct kobject *kobj, return find_dmi_entry(entry, dmi_sel_raw_read_helper, &state); } -static struct bin_attribute dmi_sel_raw_attr = { - .attr = {.name = "raw_event_log", .mode = 0400}, - .read = dmi_sel_raw_read, -}; +static const BIN_ATTR_ADMIN_RO(raw_event_log, 0); static int dmi_system_event_log(struct dmi_sysfs_entry *entry) { @@ -464,7 +461,7 @@ static int dmi_system_event_log(struct dmi_sysfs_entry *entry) if (ret) goto out_free; - ret = sysfs_create_bin_file(entry->child, &dmi_sel_raw_attr); + ret = sysfs_create_bin_file(entry->child, &bin_attr_raw_event_log); if (ret) goto out_del; @@ -537,10 +534,10 @@ static ssize_t dmi_entry_raw_read_helper(struct dmi_sysfs_entry *entry, &state->pos, dh, entry_length); } -static ssize_t dmi_entry_raw_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *bin_attr, - char *buf, loff_t pos, size_t count) +static ssize_t raw_read(struct file *filp, + struct kobject *kobj, + const struct bin_attribute *bin_attr, + char *buf, loff_t pos, size_t count) { struct dmi_sysfs_entry *entry = to_entry(kobj); struct dmi_read_state state = { @@ -552,10 +549,7 @@ static ssize_t dmi_entry_raw_read(struct file *filp, return find_dmi_entry(entry, dmi_entry_raw_read_helper, &state); } -static const struct bin_attribute dmi_entry_raw_attr = { - .attr = {.name = "raw", .mode = 0400}, - .read = dmi_entry_raw_read, -}; +static const BIN_ATTR_ADMIN_RO(raw, 0); static void dmi_sysfs_entry_release(struct kobject *kobj) { @@ -630,7 +624,7 @@ static void __init dmi_sysfs_register_handle(const struct dmi_header *dh, goto out_err; /* Create the raw binary file to access the entry */ - *ret = sysfs_create_bin_file(&entry->kobj, &dmi_entry_raw_attr); + *ret = sysfs_create_bin_file(&entry->kobj, &bin_attr_raw); if (*ret) goto out_err; diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index 015c95a825d3..70d39adf50dc 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c @@ -9,7 +9,7 @@ #include <linux/memblock.h> 
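The dmi-sysfs conversions above depend on the read callback being named after the attribute: BIN_ATTR_ADMIN_RO(<name>, <size>) declares bin_attr_<name> and wires its .read to <name>_read, which is why dmi_sel_raw_read becomes raw_event_log_read and dmi_entry_raw_read becomes raw_read. An approximate expansion is sketched below; the real macro lives in include/linux/sysfs.h and the exact struct bin_attribute layout varies between kernel versions, so treat this purely as an illustration:

/*
 * Rough equivalent of: static const BIN_ATTR_ADMIN_RO(raw_event_log, 0);
 * raw_event_log_read() is the callback defined in the hunk above.
 */
static const struct bin_attribute bin_attr_raw_event_log = {
	.attr = { .name = "raw_event_log", .mode = 0400 },
	.read = raw_event_log_read,	/* name derived from the attribute */
	.size = 0,
};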
#include <linux/random.h> #include <asm/dmi.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #ifndef SMBIOS_ENTRY_POINT_SCAN_START #define SMBIOS_ENTRY_POINT_SCAN_START 0xF0000 @@ -42,6 +42,7 @@ static struct dmi_memdev_info { u8 type; /* DDR2, DDR3, DDR4 etc */ } *dmi_memdev; static int dmi_memdev_nr; +static int dmi_memdev_populated_nr __initdata; static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s) { @@ -102,6 +103,17 @@ static void dmi_decode_table(u8 *buf, const struct dmi_header *dm = (const struct dmi_header *)data; /* + * If a short entry is found (less than 4 bytes), not only it + * is invalid, but we cannot reliably locate the next entry. + */ + if (dm->length < sizeof(struct dmi_header)) { + pr_warn(FW_BUG + "Corrupted DMI table, offset %zd (only %d entries processed)\n", + data - buf, i); + break; + } + + /* * We want to know the total length (formatted area and * strings) before decoding to make sure we won't run off the * table in dmi_decode or dmi_string @@ -448,6 +460,9 @@ static void __init save_mem_devices(const struct dmi_header *dm, void *v) else bytes = (u64)get_unaligned((u32 *)&d[0x1C]) << 20; + if (bytes) + dmi_memdev_populated_nr++; + dmi_memdev[nr].size = bytes; nr++; } @@ -746,16 +761,8 @@ static void __init dmi_scan_machine(void) pr_info("DMI not present or invalid.\n"); } -static ssize_t raw_table_read(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, char *buf, - loff_t pos, size_t count) -{ - memcpy(buf, attr->private + pos, count); - return count; -} - -static BIN_ATTR(smbios_entry_point, S_IRUSR, raw_table_read, NULL, 0); -static BIN_ATTR(DMI, S_IRUSR, raw_table_read, NULL, 0); +static __ro_after_init BIN_ATTR_SIMPLE_ADMIN_RO(smbios_entry_point); +static __ro_after_init BIN_ATTR_SIMPLE_ADMIN_RO(DMI); static int __init dmi_init(void) { @@ -824,6 +831,8 @@ void __init dmi_setup(void) return; dmi_memdev_walk(); + pr_info("DMI: Memory slots populated: %d/%d\n", + dmi_memdev_populated_nr, dmi_memdev_nr); dump_stack_set_arch_desc("%s", dmi_ids_string); } diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index 72f2537d90ca..db8c5c03d3a2 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -76,20 +76,14 @@ config EFI_ZBOOT bool "Enable the generic EFI decompressor" depends on EFI_GENERIC_STUB && !ARM select HAVE_KERNEL_GZIP - select HAVE_KERNEL_LZ4 - select HAVE_KERNEL_LZMA - select HAVE_KERNEL_LZO - select HAVE_KERNEL_XZ select HAVE_KERNEL_ZSTD help Create the bootable image as an EFI application that carries the actual kernel image in compressed form, and decompresses it into - memory before executing it via LoadImage/StartImage EFI boot service - calls. For compatibility with non-EFI loaders, the payload can be - decompressed and executed by the loader as well, provided that the - loader implements the decompression algorithm and that non-EFI boot - is supported by the encapsulated image. (The compression algorithm - used is described in the zboot image header) + memory before executing it. For compatibility with non-EFI loaders, + the payload can be decompressed and executed by the loader as well, + provided that the loader implements the decompression algorithm. 
+ (The compression algorithm used is described in the zboot header) config EFI_ARMSTUB_DTB_LOADER bool "Enable the DTB loader" @@ -287,6 +281,30 @@ config EFI_EMBEDDED_FIRMWARE bool select CRYPTO_LIB_SHA256 +config EFI_SBAT + def_bool y if EFI_SBAT_FILE!="" + +config EFI_SBAT_FILE + string "Embedded SBAT section file path" + depends on EFI_ZBOOT + help + SBAT section provides a way to improve SecureBoot revocations of UEFI + binaries by introducing a generation-based mechanism. With SBAT, older + UEFI binaries can be prevented from booting by bumping the minimal + required generation for the specific component in the bootloader. + + Note: SBAT information is distribution specific, i.e. the owner of the + signing SecureBoot certificate must define the SBAT policy. Linux + kernel upstream does not define SBAT components and their generations. + + See https://github.com/rhboot/shim/blob/main/SBAT.md for the additional + details. + + Specify a file with SBAT data which is going to be embedded as '.sbat' + section into the kernel. + + If unsure, leave blank. + endmenu config UEFI_CPER diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c index 97bafb5f7038..0c17bdd388e1 100644 --- a/drivers/firmware/efi/capsule-loader.c +++ b/drivers/firmware/efi/capsule-loader.c @@ -309,7 +309,6 @@ static const struct file_operations efi_capsule_fops = { .open = efi_capsule_open, .write = efi_capsule_write, .release = efi_capsule_release, - .llseek = no_llseek, }; static struct miscdevice efi_capsule_misc = { diff --git a/drivers/firmware/efi/cper-arm.c b/drivers/firmware/efi/cper-arm.c index fa9c1c3bf168..f0a63d09d3c4 100644 --- a/drivers/firmware/efi/cper-arm.c +++ b/drivers/firmware/efi/cper-arm.c @@ -311,7 +311,7 @@ void cper_print_proc_arm(const char *pfx, ctx_info = (struct cper_arm_ctx_info *)err_info; max_ctx_type = ARRAY_SIZE(arm_reg_ctx_strs) - 1; for (i = 0; i < proc->context_info_num; i++) { - int size = sizeof(*ctx_info) + ctx_info->size; + int size = ALIGN(sizeof(*ctx_info) + ctx_info->size, 16); printk("%sContext info structure %d:\n", pfx, i); if (len < size) { diff --git a/drivers/firmware/efi/cper-x86.c b/drivers/firmware/efi/cper-x86.c index 438ed9eff6d0..3949d7b5e808 100644 --- a/drivers/firmware/efi/cper-x86.c +++ b/drivers/firmware/efi/cper-x86.c @@ -325,7 +325,7 @@ void cper_print_proc_ia(const char *pfx, const struct cper_sec_proc_ia *proc) ctx_info = (struct cper_ia_proc_ctx *)err_info; for (i = 0; i < VALID_PROC_CXT_INFO_NUM(proc->validation_bits); i++) { - int size = sizeof(*ctx_info) + ctx_info->reg_arr_size; + int size = ALIGN(sizeof(*ctx_info) + ctx_info->reg_arr_size, 16); int groupsize = 4; printk("%sContext Information Structure %d:\n", pfx, i); diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index 7d2cdd9e2227..928409199a1a 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -24,7 +24,7 @@ #include <linux/bcd.h> #include <acpi/ghes.h> #include <ras/ras_event.h> -#include "cper_cxl.h" +#include <cxl/event.h> /* * CPER record ID need to be unique even after reboot, because record @@ -434,12 +434,17 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie, "%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n", pfx, pcie->bridge.secondary_status, pcie->bridge.control); - /* Fatal errors call __ghes_panic() before AER handler prints this */ - if ((pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) && - (gdata->error_severity & CPER_SEV_FATAL)) { + /* + * Print all valid 
AER info. Record may be from BERT (boot-time) or GHES (run-time). + * + * Fatal errors call __ghes_panic() before AER handler prints this. + */ + if (pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) { struct aer_capability_regs *aer; aer = (struct aer_capability_regs *)pcie->aer_info; + printk("%saer_cor_status: 0x%08x, aer_cor_mask: 0x%08x\n", + pfx, aer->cor_status, aer->cor_mask); printk("%saer_uncor_status: 0x%08x, aer_uncor_mask: 0x%08x\n", pfx, aer->uncor_status, aer->uncor_mask); printk("%saer_uncor_severity: 0x%08x\n", @@ -619,11 +624,11 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata else goto err_section_too_small; } else if (guid_equal(sec_type, &CPER_SEC_CXL_PROT_ERR)) { - struct cper_sec_prot_err *prot_err = acpi_hest_get_payload(gdata); + struct cxl_cper_sec_prot_err *prot_err = acpi_hest_get_payload(gdata); printk("%ssection_type: CXL Protocol Error\n", newpfx); if (gdata->error_data_length >= sizeof(*prot_err)) - cper_print_prot_err(newpfx, prot_err); + cxl_cper_print_prot_err(newpfx, prot_err); else goto err_section_too_small; } else { diff --git a/drivers/firmware/efi/cper_cxl.c b/drivers/firmware/efi/cper_cxl.c index a55771b99a97..8a7667faf953 100644 --- a/drivers/firmware/efi/cper_cxl.c +++ b/drivers/firmware/efi/cper_cxl.c @@ -8,26 +8,7 @@ */ #include <linux/cper.h> -#include "cper_cxl.h" - -#define PROT_ERR_VALID_AGENT_TYPE BIT_ULL(0) -#define PROT_ERR_VALID_AGENT_ADDRESS BIT_ULL(1) -#define PROT_ERR_VALID_DEVICE_ID BIT_ULL(2) -#define PROT_ERR_VALID_SERIAL_NUMBER BIT_ULL(3) -#define PROT_ERR_VALID_CAPABILITY BIT_ULL(4) -#define PROT_ERR_VALID_DVSEC BIT_ULL(5) -#define PROT_ERR_VALID_ERROR_LOG BIT_ULL(6) - -/* CXL RAS Capability Structure, CXL v3.0 sec 8.2.4.16 */ -struct cxl_ras_capability_regs { - u32 uncor_status; - u32 uncor_mask; - u32 uncor_severity; - u32 cor_status; - u32 cor_mask; - u32 cap_control; - u32 header_log[16]; -}; +#include <cxl/event.h> static const char * const prot_err_agent_type_strs[] = { "Restricted CXL Device", @@ -40,22 +21,8 @@ static const char * const prot_err_agent_type_strs[] = { "CXL Upstream Switch Port", }; -/* - * The layout of the enumeration and the values matches CXL Agent Type - * field in the UEFI 2.10 Section N.2.13, - */ -enum { - RCD, /* Restricted CXL Device */ - RCH_DP, /* Restricted CXL Host Downstream Port */ - DEVICE, /* CXL Device */ - LD, /* CXL Logical Device */ - FMLD, /* CXL Fabric Manager managed Logical Device */ - RP, /* CXL Root Port */ - DSP, /* CXL Downstream Switch Port */ - USP, /* CXL Upstream Switch Port */ -}; - -void cper_print_prot_err(const char *pfx, const struct cper_sec_prot_err *prot_err) +void cxl_cper_print_prot_err(const char *pfx, + const struct cxl_cper_sec_prot_err *prot_err) { if (prot_err->valid_bits & PROT_ERR_VALID_AGENT_TYPE) pr_info("%s agent_type: %d, %s\n", pfx, prot_err->agent_type, diff --git a/drivers/firmware/efi/cper_cxl.h b/drivers/firmware/efi/cper_cxl.h deleted file mode 100644 index 86bfcf7909ec..000000000000 --- a/drivers/firmware/efi/cper_cxl.h +++ /dev/null @@ -1,66 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * UEFI Common Platform Error Record (CPER) support for CXL Section. - * - * Copyright (C) 2022 Advanced Micro Devices, Inc. 
- * - * Author: Smita Koralahalli <Smita.KoralahalliChannabasappa@amd.com> - */ - -#ifndef LINUX_CPER_CXL_H -#define LINUX_CPER_CXL_H - -/* CXL Protocol Error Section */ -#define CPER_SEC_CXL_PROT_ERR \ - GUID_INIT(0x80B9EFB4, 0x52B5, 0x4DE3, 0xA7, 0x77, 0x68, 0x78, \ - 0x4B, 0x77, 0x10, 0x48) - -#pragma pack(1) - -/* Compute Express Link Protocol Error Section, UEFI v2.10 sec N.2.13 */ -struct cper_sec_prot_err { - u64 valid_bits; - u8 agent_type; - u8 reserved[7]; - - /* - * Except for RCH Downstream Port, all the remaining CXL Agent - * types are uniquely identified by the PCIe compatible SBDF number. - */ - union { - u64 rcrb_base_addr; - struct { - u8 function; - u8 device; - u8 bus; - u16 segment; - u8 reserved_1[3]; - }; - } agent_addr; - - struct { - u16 vendor_id; - u16 device_id; - u16 subsystem_vendor_id; - u16 subsystem_id; - u8 class_code[2]; - u16 slot; - u8 reserved_1[4]; - } device_id; - - struct { - u32 lower_dw; - u32 upper_dw; - } dev_serial_num; - - u8 capability[60]; - u16 dvsec_len; - u16 err_len; - u8 reserved_2[4]; -}; - -#pragma pack() - -void cper_print_prot_err(const char *pfx, const struct cper_sec_prot_err *prot_err); - -#endif //__CPER_CXL_ diff --git a/drivers/firmware/efi/dev-path-parser.c b/drivers/firmware/efi/dev-path-parser.c index 937be269fee8..13ea141c0def 100644 --- a/drivers/firmware/efi/dev-path-parser.c +++ b/drivers/firmware/efi/dev-path-parser.c @@ -47,9 +47,9 @@ static long __init parse_acpi_path(const struct efi_dev_path *node, return 0; } -static int __init match_pci_dev(struct device *dev, void *data) +static int __init match_pci_dev(struct device *dev, const void *data) { - unsigned int devfn = *(unsigned int *)data; + unsigned int devfn = *(const unsigned int *)data; return dev_is_pci(dev) && to_pci_dev(dev)->devfn == devfn; } diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c index 833cbb995dd3..a253b6144945 100644 --- a/drivers/firmware/efi/efi-pstore.c +++ b/drivers/firmware/efi/efi-pstore.c @@ -6,7 +6,7 @@ #include <linux/slab.h> #include <linux/ucs2_string.h> -MODULE_IMPORT_NS(EFIVAR); +MODULE_IMPORT_NS("EFIVAR"); #define DUMP_NAME_LEN 66 @@ -136,7 +136,7 @@ static int efi_pstore_read_func(struct pstore_record *record, &size, record->buf); if (status != EFI_SUCCESS) { kfree(record->buf); - return -EIO; + return efi_status_to_err(status); } /* @@ -162,7 +162,15 @@ static ssize_t efi_pstore_read(struct pstore_record *record) efi_status_t status; for (;;) { - varname_size = 1024; + /* + * A small set of old UEFI implementations reject sizes + * above a certain threshold, the lowest seen in the wild + * is 512. + * + * TODO: Commonize with the iteration implementation in + * fs/efivarfs to keep all the quirks in one place. + */ + varname_size = 512; /* * If this is the first read() call in the pstore enumeration, @@ -181,7 +189,7 @@ static ssize_t efi_pstore_read(struct pstore_record *record) return 0; if (status != EFI_SUCCESS) - return -EIO; + return efi_status_to_err(status); /* skip variables that don't concern us */ if (efi_guidcmp(guid, LINUX_EFI_CRASH_GUID)) @@ -219,7 +227,7 @@ static int efi_pstore_write(struct pstore_record *record) record->size, record->psi->buf, true); efivar_unlock(); - return status == EFI_SUCCESS ? 
0 : -EIO; + return efi_status_to_err(status); }; static int efi_pstore_erase(struct pstore_record *record) @@ -230,7 +238,7 @@ static int efi_pstore_erase(struct pstore_record *record) PSTORE_EFI_ATTRIBUTES, 0, NULL); if (status != EFI_SUCCESS && status != EFI_NOT_FOUND) - return -EIO; + return efi_status_to_err(status); return 0; } diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index fdf07dd6f459..e57bff702b5f 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -148,9 +148,6 @@ static ssize_t systab_show(struct kobject *kobj, if (efi.smbios != EFI_INVALID_TABLE_ADDR) str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios); - if (IS_ENABLED(CONFIG_X86)) - str = efi_systab_show_arch(str); - return str - buf; } @@ -273,6 +270,7 @@ static __init int efivar_ssdt_load(void) efi_char16_t *name = NULL; efi_status_t status; efi_guid_t guid; + int ret = 0; if (!efivar_ssdt[0]) return 0; @@ -294,8 +292,8 @@ static __init int efivar_ssdt_load(void) efi_char16_t *name_tmp = krealloc(name, name_size, GFP_KERNEL); if (!name_tmp) { - kfree(name); - return -ENOMEM; + ret = -ENOMEM; + goto out; } name = name_tmp; continue; @@ -309,26 +307,38 @@ static __init int efivar_ssdt_load(void) pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt, &guid); status = efi.get_variable(name, &guid, NULL, &data_size, NULL); - if (status != EFI_BUFFER_TOO_SMALL || !data_size) - return -EIO; + if (status != EFI_BUFFER_TOO_SMALL || !data_size) { + ret = -EIO; + goto out; + } data = kmalloc(data_size, GFP_KERNEL); - if (!data) - return -ENOMEM; + if (!data) { + ret = -ENOMEM; + goto out; + } status = efi.get_variable(name, &guid, NULL, &data_size, data); if (status == EFI_SUCCESS) { - acpi_status ret = acpi_load_table(data, NULL); - if (ret) - pr_err("failed to load table: %u\n", ret); - else + acpi_status acpi_ret = acpi_load_table(data, NULL); + if (ACPI_FAILURE(acpi_ret)) { + pr_err("efivar_ssdt: failed to load table: %u\n", + acpi_ret); + } else { + /* + * The @data will be in use by ACPI engine, + * do not free it! + */ continue; + } } else { - pr_err("failed to get var data: 0x%lx\n", status); + pr_err("efivar_ssdt: failed to get var data: 0x%lx\n", status); } kfree(data); } - return 0; +out: + kfree(name); + return ret; } #else static inline int efivar_ssdt_load(void) { return 0; } @@ -349,7 +359,7 @@ static void __init efi_debugfs_init(void) int i = 0; efi_debugfs = debugfs_create_dir("efi", NULL); - if (IS_ERR_OR_NULL(efi_debugfs)) + if (IS_ERR(efi_debugfs)) return; for_each_efi_memory_desc(md) { @@ -433,7 +443,9 @@ static int __init efisubsys_init(void) error = generic_ops_register(); if (error) goto err_put; - efivar_ssdt_load(); + error = efivar_ssdt_load(); + if (error) + pr_err("efi: failed to load SSDT, error %d.\n", error); platform_device_register_simple("efivars", 0, NULL, 0); } @@ -546,6 +558,7 @@ int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md) extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md) __weak __alias(__efi_mem_desc_lookup); +EXPORT_SYMBOL_GPL(efi_mem_desc_lookup); /* * Calculate the highest address of an efi memory descriptor. 
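The efi-pstore changes above consistently replace hard-coded -EIO with efi_status_to_err(), so callers can tell, for example, a missing variable from a genuine firmware failure. A small sketch of the call-site pattern is below; read_one_variable() and its arguments are hypothetical, and the exact status-to-errno mapping is a kernel-internal detail:

/*
 * Convert the EFI status at the boundary instead of collapsing
 * every failure to -EIO.
 */
static int read_one_variable(efi_char16_t *name, efi_guid_t *guid,
			     unsigned long *size, void *buf)
{
	efi_status_t status;

	status = efivar_get_variable(name, guid, NULL, size, buf);
	if (status != EFI_SUCCESS)
		return efi_status_to_err(status); /* e.g. EFI_NOT_FOUND -> -ENOENT */

	return 0;
}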
@@ -922,13 +935,15 @@ char * __init efi_md_typeattr_format(char *buf, size_t size, EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO | EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP | EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO | - EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE)) + EFI_MEMORY_MORE_RELIABLE | EFI_MEMORY_HOT_PLUGGABLE | + EFI_MEMORY_RUNTIME)) snprintf(pos, size, "|attr=0x%016llx]", (unsigned long long)attr); else snprintf(pos, size, - "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]", + "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]", attr & EFI_MEMORY_RUNTIME ? "RUN" : "", + attr & EFI_MEMORY_HOT_PLUGGABLE ? "HP" : "", attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "", attr & EFI_MEMORY_CPU_CRYPTO ? "CC" : "", attr & EFI_MEMORY_SP ? "SP" : "", diff --git a/drivers/firmware/efi/efibc.c b/drivers/firmware/efi/efibc.c index 4f9fb086eab7..0a7c764dcc61 100644 --- a/drivers/firmware/efi/efibc.c +++ b/drivers/firmware/efi/efibc.c @@ -47,7 +47,7 @@ static int efibc_reboot_notifier_call(struct notifier_block *notifier, if (ret || !data) return NOTIFY_DONE; - wdata = kmalloc(MAX_DATA_LEN * sizeof(efi_char16_t), GFP_KERNEL); + wdata = kmalloc_array(MAX_DATA_LEN, sizeof(efi_char16_t), GFP_KERNEL); if (!wdata) return NOTIFY_DONE; diff --git a/drivers/firmware/efi/embedded-firmware.c b/drivers/firmware/efi/embedded-firmware.c index f5be8e22305b..b49a09d7e665 100644 --- a/drivers/firmware/efi/embedded-firmware.c +++ b/drivers/firmware/efi/embedded-firmware.c @@ -16,9 +16,9 @@ /* Exported for use by lib/test_firmware.c only */ LIST_HEAD(efi_embedded_fw_list); -EXPORT_SYMBOL_NS_GPL(efi_embedded_fw_list, TEST_FIRMWARE); +EXPORT_SYMBOL_NS_GPL(efi_embedded_fw_list, "TEST_FIRMWARE"); bool efi_embedded_fw_checked; -EXPORT_SYMBOL_NS_GPL(efi_embedded_fw_checked, TEST_FIRMWARE); +EXPORT_SYMBOL_NS_GPL(efi_embedded_fw_checked, "TEST_FIRMWARE"); static const struct dmi_system_id * const embedded_fw_table[] = { #ifdef CONFIG_TOUCHSCREEN_DMI diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c index 7a81c0ce4780..4bb7b0584bc9 100644 --- a/drivers/firmware/efi/esrt.c +++ b/drivers/firmware/efi/esrt.c @@ -75,8 +75,6 @@ static LIST_HEAD(entry_list); struct esre_attribute { struct attribute attr; ssize_t (*show)(struct esre_entry *entry, char *buf); - ssize_t (*store)(struct esre_entry *entry, - const char *buf, size_t count); }; static struct esre_entry *to_entry(struct kobject *kobj) diff --git a/drivers/firmware/efi/fdtparams.c b/drivers/firmware/efi/fdtparams.c index 0ec83ba58097..b815d2a754ee 100644 --- a/drivers/firmware/efi/fdtparams.c +++ b/drivers/firmware/efi/fdtparams.c @@ -8,7 +8,7 @@ #include <linux/libfdt.h> #include <linux/of_fdt.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> enum { SYSTAB, diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index 31eb1e287ce1..2f173391b63d 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -11,7 +11,7 @@ cflags-y := $(KBUILD_CFLAGS) cflags-$(CONFIG_X86_32) := -march=i386 cflags-$(CONFIG_X86_64) := -mcmodel=small -cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ \ +cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -std=gnu11 \ -fPIC -fno-strict-aliasing -mno-red-zone \ -mno-mmx -mno-sse -fshort-wchar \ -Wno-pointer-sign \ @@ -27,8 +27,10 @@ cflags-$(CONFIG_ARM64) += -fpie $(DISABLE_STACKLEAK_PLUGIN) \ cflags-$(CONFIG_ARM) += -DEFI_HAVE_STRLEN -DEFI_HAVE_STRNLEN \ -DEFI_HAVE_MEMCHR -DEFI_HAVE_STRRCHR \ -DEFI_HAVE_STRCMP 
-fno-builtin -fpic \ - $(call cc-option,-mno-single-pic-base) -cflags-$(CONFIG_RISCV) += -fpic -DNO_ALTERNATIVE -mno-relax + $(call cc-option,-mno-single-pic-base) \ + $(DISABLE_STACKLEAK_PLUGIN) +cflags-$(CONFIG_RISCV) += -fpic -DNO_ALTERNATIVE -mno-relax \ + $(DISABLE_STACKLEAK_PLUGIN) cflags-$(CONFIG_LOONGARCH) += -fpie cflags-$(CONFIG_EFI_PARAMS_FROM_FDT) += -I$(srctree)/scripts/dtc/libfdt @@ -56,16 +58,11 @@ KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_CFI), $(KBUILD_CFLAGS)) # disable LTO KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO), $(KBUILD_CFLAGS)) -GCOV_PROFILE := n -# Sanitizer runtimes are unavailable and cannot be linked here. -KASAN_SANITIZE := n -KCSAN_SANITIZE := n -KMSAN_SANITIZE := n -UBSAN_SANITIZE := n -OBJECT_FILES_NON_STANDARD := y +# The .data section would be renamed to .data.efistub, therefore, remove +# `-fdata-sections` flag from KBUILD_CFLAGS_KERNEL +KBUILD_CFLAGS_KERNEL := $(filter-out -fdata-sections, $(KBUILD_CFLAGS_KERNEL)) -# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in. -KCOV_INSTRUMENT := n +KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ lib-y := efi-stub-helper.o gop.o secureboot.o tpm.o \ file.o mem.o random.o randomalloc.o pci.o \ @@ -87,14 +84,19 @@ lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o string.o intrinsics.o systable.o \ lib-$(CONFIG_ARM) += arm32-stub.o lib-$(CONFIG_ARM64) += kaslr.o arm64.o arm64-stub.o smbios.o -lib-$(CONFIG_X86) += x86-stub.o +lib-$(CONFIG_X86) += x86-stub.o smbios.o lib-$(CONFIG_X86_64) += x86-5lvl.o lib-$(CONFIG_RISCV) += kaslr.o riscv.o riscv-stub.o lib-$(CONFIG_LOONGARCH) += loongarch.o loongarch-stub.o CFLAGS_arm32-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET) -zboot-obj-$(CONFIG_RISCV) := lib-clz_ctz.o lib-ashldi3.o +zboot-obj-y := zboot-decompress-gzip.o +CFLAGS_zboot-decompress-gzip.o += -I$(srctree)/lib/zlib_inflate +zboot-obj-$(CONFIG_KERNEL_ZSTD) := zboot-decompress-zstd.o lib-xxhash.o +CFLAGS_zboot-decompress-zstd.o += -I$(srctree)/lib/zstd + +zboot-obj-$(CONFIG_RISCV) += lib-clz_ctz.o lib-ashldi3.o lib-$(CONFIG_EFI_ZBOOT) += zboot.o $(zboot-obj-y) lib-$(CONFIG_UNACCEPTED_MEMORY) += unaccepted_memory.o bitmap.o find.o diff --git a/drivers/firmware/efi/libstub/Makefile.zboot b/drivers/firmware/efi/libstub/Makefile.zboot index 65ffd0b760b2..92e3c73502ba 100644 --- a/drivers/firmware/efi/libstub/Makefile.zboot +++ b/drivers/firmware/efi/libstub/Makefile.zboot @@ -12,22 +12,16 @@ quiet_cmd_copy_and_pad = PAD $@ $(obj)/vmlinux.bin: $(obj)/$(EFI_ZBOOT_PAYLOAD) FORCE $(call if_changed,copy_and_pad) -comp-type-$(CONFIG_KERNEL_GZIP) := gzip -comp-type-$(CONFIG_KERNEL_LZ4) := lz4 -comp-type-$(CONFIG_KERNEL_LZMA) := lzma -comp-type-$(CONFIG_KERNEL_LZO) := lzo -comp-type-$(CONFIG_KERNEL_XZ) := xzkern -comp-type-$(CONFIG_KERNEL_ZSTD) := zstd22 - # in GZIP, the appended le32 carrying the uncompressed size is part of the # format, but in other cases, we just append it at the end for convenience, # causing the original tools to complain when checking image integrity. -# So disregard it when calculating the payload size in the zimage header. 
-zboot-method-y := $(comp-type-y)_with_size -zboot-size-len-y := 4 +comp-type-y := gzip +zboot-method-y := gzip +zboot-size-len-y := 0 -zboot-method-$(CONFIG_KERNEL_GZIP) := gzip -zboot-size-len-$(CONFIG_KERNEL_GZIP) := 0 +comp-type-$(CONFIG_KERNEL_ZSTD) := zstd +zboot-method-$(CONFIG_KERNEL_ZSTD) := zstd22_with_size +zboot-size-len-$(CONFIG_KERNEL_ZSTD) := 4 $(obj)/vmlinuz: $(obj)/vmlinux.bin FORCE $(call if_changed,$(zboot-method-y)) @@ -50,6 +44,10 @@ AFLAGS_zboot-header.o += -DMACHINE_TYPE=IMAGE_FILE_MACHINE_$(EFI_ZBOOT_MACH_TYPE $(obj)/zboot-header.o: $(srctree)/drivers/firmware/efi/libstub/zboot-header.S FORCE $(call if_changed_rule,as_o_S) +ifneq ($(CONFIG_EFI_SBAT_FILE),) +$(obj)/zboot-header.o: $(CONFIG_EFI_SBAT_FILE) +endif + ZBOOT_DEPS := $(obj)/zboot-header.o $(objtree)/drivers/firmware/efi/libstub/lib.a LDFLAGS_vmlinuz.efi.elf := -T $(srctree)/drivers/firmware/efi/libstub/zboot.lds diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c index 452b7ccd330e..2c3869356147 100644 --- a/drivers/firmware/efi/libstub/arm64-stub.c +++ b/drivers/firmware/efi/libstub/arm64-stub.c @@ -21,7 +21,6 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, efi_loaded_image_t *image, efi_handle_t image_handle) { - efi_status_t status; unsigned long kernel_size, kernel_codesize, kernel_memsize; if (image->image_base != _text) { @@ -39,15 +38,9 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, *reserve_size = kernel_memsize; *image_addr = (unsigned long)_text; - status = efi_kaslr_relocate_kernel(image_addr, - reserve_addr, reserve_size, - kernel_size, kernel_codesize, - kernel_memsize, - efi_kaslr_get_phys_seed(image_handle)); - if (status != EFI_SUCCESS) - return status; - - return EFI_SUCCESS; + return efi_kaslr_relocate_kernel(image_addr, reserve_addr, reserve_size, + kernel_size, kernel_codesize, kernel_memsize, + efi_kaslr_get_phys_seed(image_handle)); } asmlinkage void primary_entry(void); diff --git a/drivers/firmware/efi/libstub/arm64.c b/drivers/firmware/efi/libstub/arm64.c index 446e35eaf3d9..e57cd3de0a00 100644 --- a/drivers/firmware/efi/libstub/arm64.c +++ b/drivers/firmware/efi/libstub/arm64.c @@ -39,8 +39,7 @@ static bool system_needs_vamap(void) static char const emag[] = "eMAG"; default: - version = efi_get_smbios_string(&record->header, 4, - processor_version); + version = efi_get_smbios_string(record, processor_version); if (!version || (strncmp(version, altra, sizeof(altra) - 1) && strncmp(version, emag, sizeof(emag) - 1))) break; diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c index de659f6a815f..7aa2f9ad2935 100644 --- a/drivers/firmware/efi/libstub/efi-stub-helper.c +++ b/drivers/firmware/efi/libstub/efi-stub-helper.c @@ -47,9 +47,10 @@ bool __pure __efi_soft_reserve_enabled(void) */ efi_status_t efi_parse_options(char const *cmdline) { - size_t len; + char *buf __free(efi_pool) = NULL; efi_status_t status; - char *str, *buf; + size_t len; + char *str; if (!cmdline) return EFI_SUCCESS; @@ -102,7 +103,6 @@ efi_status_t efi_parse_options(char const *cmdline) efi_parse_option_graphics(val + strlen("efifb:")); } } - efi_bs_call(free_pool, buf); return EFI_SUCCESS; } @@ -250,7 +250,7 @@ static efi_status_t efi_measure_tagged_event(unsigned long load_addr, u64, const union efistub_event *); struct { u32 hash_log_extend_event; } mixed_mode; } method; - struct efistub_measured_event *evt; + struct efistub_measured_event *evt __free(efi_pool) = NULL; int size = 
struct_size(evt, tagged_event.tagged_event_data, events[event].event_data_len); efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID; @@ -312,7 +312,6 @@ static efi_status_t efi_measure_tagged_event(unsigned long load_addr, status = efi_fn_call(&method, hash_log_extend_event, protocol, 0, load_addr, load_size, &evt->event_data); - efi_bs_call(free_pool, evt); if (status == EFI_SUCCESS) return EFI_SUCCESS; @@ -327,7 +326,7 @@ fail: * Size of memory allocated return in *cmd_line_len. * Returns NULL on error. */ -char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len) +char *efi_convert_cmdline(efi_loaded_image_t *image) { const efi_char16_t *options = efi_table_attr(image, load_options); u32 options_size = efi_table_attr(image, load_options_size); @@ -405,7 +404,6 @@ char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len) snprintf((char *)cmdline_addr, options_bytes, "%.*ls", options_bytes - 1, options); - *cmd_line_len = options_bytes; return (char *)cmdline_addr; } @@ -603,6 +601,7 @@ efi_status_t efi_load_initrd_cmdline(efi_loaded_image_t *image, * @image: EFI loaded image protocol * @soft_limit: preferred address for loading the initrd * @hard_limit: upper limit address for loading the initrd + * @out: pointer to store the address of the initrd table * * Return: status code */ @@ -621,10 +620,6 @@ efi_status_t efi_load_initrd(efi_loaded_image_t *image, status = efi_load_initrd_dev_path(&initrd, hard_limit); if (status == EFI_SUCCESS) { efi_info("Loaded initrd from LINUX_EFI_INITRD_MEDIA_GUID device path\n"); - if (initrd.size > 0 && - efi_measure_tagged_event(initrd.base, initrd.size, - EFISTUB_EVT_INITRD) == EFI_SUCCESS) - efi_info("Measured initrd data into PCR 9\n"); } else if (status == EFI_NOT_FOUND) { status = efi_load_initrd_cmdline(image, &initrd, soft_limit, hard_limit); @@ -637,6 +632,11 @@ efi_status_t efi_load_initrd(efi_loaded_image_t *image, if (status != EFI_SUCCESS) goto failed; + if (initrd.size > 0 && + efi_measure_tagged_event(initrd.base, initrd.size, + EFISTUB_EVT_INITRD) == EFI_SUCCESS) + efi_info("Measured initrd data into PCR 9\n"); + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, sizeof(initrd), (void **)&tbl); if (status != EFI_SUCCESS) diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c index 958a680e0660..874f63b4a383 100644 --- a/drivers/firmware/efi/libstub/efi-stub.c +++ b/drivers/firmware/efi/libstub/efi-stub.c @@ -10,6 +10,7 @@ */ #include <linux/efi.h> +#include <linux/screen_info.h> #include <asm/efi.h> #include "efistub.h" @@ -53,25 +54,16 @@ void __weak free_screen_info(struct screen_info *si) static struct screen_info *setup_graphics(void) { - efi_guid_t gop_proto = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID; - efi_status_t status; - unsigned long size; - void **gop_handle = NULL; - struct screen_info *si = NULL; + struct screen_info *si, tmp = {}; - size = 0; - status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, - &gop_proto, NULL, &size, gop_handle); - if (status == EFI_BUFFER_TOO_SMALL) { - si = alloc_screen_info(); - if (!si) - return NULL; - status = efi_setup_gop(si, &gop_proto, size); - if (status != EFI_SUCCESS) { - free_screen_info(si); - return NULL; - } - } + if (efi_setup_gop(&tmp) != EFI_SUCCESS) + return NULL; + + si = alloc_screen_info(); + if (!si) + return NULL; + + *si = tmp; return si; } @@ -112,45 +104,40 @@ static u32 get_supported_rt_services(void) efi_status_t efi_handle_cmdline(efi_loaded_image_t *image, char **cmdline_ptr) { - int cmdline_size = 0; + 
char *cmdline __free(efi_pool) = NULL; efi_status_t status; - char *cmdline; /* * Get the command line from EFI, using the LOADED_IMAGE * protocol. We are going to copy the command line into the * device tree, so this can be allocated anywhere. */ - cmdline = efi_convert_cmdline(image, &cmdline_size); + cmdline = efi_convert_cmdline(image); if (!cmdline) { efi_err("getting command line via LOADED_IMAGE_PROTOCOL\n"); return EFI_OUT_OF_RESOURCES; } - if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) || - IS_ENABLED(CONFIG_CMDLINE_FORCE) || - cmdline_size == 0) { - status = efi_parse_options(CONFIG_CMDLINE); + if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) { + status = efi_parse_options(cmdline); if (status != EFI_SUCCESS) { - efi_err("Failed to parse options\n"); - goto fail_free_cmdline; + efi_err("Failed to parse EFI load options\n"); + return status; } } - if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && cmdline_size > 0) { - status = efi_parse_options(cmdline); + if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) || + IS_ENABLED(CONFIG_CMDLINE_FORCE) || + cmdline[0] == 0) { + status = efi_parse_options(CONFIG_CMDLINE); if (status != EFI_SUCCESS) { - efi_err("Failed to parse options\n"); - goto fail_free_cmdline; + efi_err("Failed to parse built-in command line\n"); + return status; } } - *cmdline_ptr = cmdline; + *cmdline_ptr = no_free_ptr(cmdline); return EFI_SUCCESS; - -fail_free_cmdline: - efi_bs_call(free_pool, cmdline_ptr); - return status; } efi_status_t efi_stub_common(efi_handle_t handle, diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h index 27abb4ce0291..f5ba032863a9 100644 --- a/drivers/firmware/efi/libstub/efistub.h +++ b/drivers/firmware/efi/libstub/efistub.h @@ -4,6 +4,7 @@ #define _DRIVERS_FIRMWARE_EFI_EFISTUB_H #include <linux/compiler.h> +#include <linux/cleanup.h> #include <linux/efi.h> #include <linux/kernel.h> #include <linux/kern_levels.h> @@ -122,11 +123,10 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, #define efi_get_handle_num(size) \ ((size) / (efi_is_native() ? sizeof(efi_handle_t) : sizeof(u32))) -#define for_each_efi_handle(handle, array, size, i) \ - for (i = 0; \ - i < efi_get_handle_num(size) && \ - ((handle = efi_get_handle_at((array), i)) || true); \ - i++) +#define for_each_efi_handle(handle, array, num) \ + for (int __i = 0; __i < (num) && \ + ((handle = efi_get_handle_at((array), __i)) || true); \ + __i++) static inline void efi_set_u64_split(u64 data, u32 *lo, u32 *hi) @@ -171,7 +171,7 @@ void efi_set_u64_split(u64 data, u32 *lo, u32 *hi) * the EFI memory map. Other related structures, e.g. x86 e820ext, need * to factor in this headroom requirement as well. 
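The reworked efi_handle_cmdline() above inverts the previous ordering: the firmware-provided load options are parsed first (unless CMDLINE_FORCE), and the built-in CONFIG_CMDLINE is parsed afterwards when CMDLINE_EXTEND or CMDLINE_FORCE is set or the load options are empty. A condensed sketch of that precedence, with error handling omitted; parse_cmdlines() is a hypothetical wrapper and cmdline is the string returned by efi_convert_cmdline():

static void parse_cmdlines(const char *cmdline)
{
	if (!IS_ENABLED(CONFIG_CMDLINE_FORCE))
		efi_parse_options(cmdline);		/* EFI load options first */

	if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) ||
	    IS_ENABLED(CONFIG_CMDLINE_FORCE) ||
	    cmdline[0] == 0)
		efi_parse_options(CONFIG_CMDLINE);	/* built-in command line second */
}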
*/ -#define EFI_MMAP_NR_SLACK_SLOTS 8 +#define EFI_MMAP_NR_SLACK_SLOTS 32 typedef struct efi_generic_dev_path efi_device_path_protocol_t; @@ -314,7 +314,9 @@ union efi_boot_services { void *close_protocol; void *open_protocol_information; void *protocols_per_handle; - void *locate_handle_buffer; + efi_status_t (__efiapi *locate_handle_buffer)(int, efi_guid_t *, + void *, unsigned long *, + efi_handle_t **); efi_status_t (__efiapi *locate_protocol)(efi_guid_t *, void *, void **); efi_status_t (__efiapi *install_multiple_protocol_interfaces)(efi_handle_t *, ...); @@ -1053,10 +1055,11 @@ void efi_puts(const char *str); __printf(1, 2) int efi_printk(char const *fmt, ...); void efi_free(unsigned long size, unsigned long addr); +DEFINE_FREE(efi_pool, void *, if (_T) efi_bs_call(free_pool, _T)); void efi_apply_loadoptions_quirk(const void **load_options, u32 *load_options_size); -char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len); +char *efi_convert_cmdline(efi_loaded_image_t *image); efi_status_t efi_get_memory_map(struct efi_boot_memmap **map, bool install_cfg_tbl); @@ -1082,8 +1085,7 @@ efi_status_t efi_parse_options(char const *cmdline); void efi_parse_option_graphics(char *option); -efi_status_t efi_setup_gop(struct screen_info *si, efi_guid_t *proto, - unsigned long size); +efi_status_t efi_setup_gop(struct screen_info *si); efi_status_t handle_cmdline_files(efi_loaded_image_t *image, const efi_char16_t *optstr, @@ -1204,14 +1206,13 @@ struct efi_smbios_type4_record { u16 thread_enabled; }; -#define efi_get_smbios_string(__record, __type, __name) ({ \ - int off = offsetof(struct efi_smbios_type ## __type ## _record, \ - __name); \ - __efi_get_smbios_string((__record), __type, off); \ +#define efi_get_smbios_string(__record, __field) ({ \ + __typeof__(__record) __rec = __record; \ + __efi_get_smbios_string(&__rec->header, &__rec->__field); \ }) const u8 *__efi_get_smbios_string(const struct efi_smbios_record *record, - u8 type, int offset); + const u8 *offset); void efi_remap_image(unsigned long image_base, unsigned alloc_size, unsigned long code_size); @@ -1230,7 +1231,10 @@ efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab); efi_status_t allocate_unaccepted_bitmap(__u32 nr_desc, struct efi_boot_memmap *map); void process_unaccepted_memory(u64 start, u64 end); -void accept_memory(phys_addr_t start, phys_addr_t end); +void accept_memory(phys_addr_t start, unsigned long size); void arch_accept_memory(phys_addr_t start, phys_addr_t end); +efi_status_t efi_zboot_decompress_init(unsigned long *alloc_size); +efi_status_t efi_zboot_decompress(u8 *out, unsigned long outlen); + #endif diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c index 70e9789ff9de..6a337f1f8787 100644 --- a/drivers/firmware/efi/libstub/fdt.c +++ b/drivers/firmware/efi/libstub/fdt.c @@ -335,8 +335,8 @@ fail_free_new_fdt: fail: efi_free(fdt_size, fdt_addr); - - efi_bs_call(free_pool, priv.runtime_map); + if (!efi_novamap) + efi_bs_call(free_pool, priv.runtime_map); return EFI_LOAD_ERROR; } diff --git a/drivers/firmware/efi/libstub/file.c b/drivers/firmware/efi/libstub/file.c index d6a025df07dc..bd626d55dcbc 100644 --- a/drivers/firmware/efi/libstub/file.c +++ b/drivers/firmware/efi/libstub/file.c @@ -175,6 +175,12 @@ static efi_status_t efi_open_device_path(efi_file_protocol_t **volume, return status; } +#ifndef CONFIG_CMDLINE +#define CONFIG_CMDLINE +#endif + +static const efi_char16_t builtin_cmdline[] = L"" CONFIG_CMDLINE; + /* * Check the cmdline for a 
LILO-style file= arguments. * @@ -189,6 +195,8 @@ efi_status_t handle_cmdline_files(efi_loaded_image_t *image, unsigned long *load_addr, unsigned long *load_size) { + const bool ignore_load_options = IS_ENABLED(CONFIG_CMDLINE_OVERRIDE) || + IS_ENABLED(CONFIG_CMDLINE_FORCE); const efi_char16_t *cmdline = efi_table_attr(image, load_options); u32 cmdline_len = efi_table_attr(image, load_options_size); unsigned long efi_chunk_size = ULONG_MAX; @@ -197,6 +205,7 @@ efi_status_t handle_cmdline_files(efi_loaded_image_t *image, unsigned long alloc_addr; unsigned long alloc_size; efi_status_t status; + bool twopass; int offset; if (!load_addr || !load_size) @@ -209,6 +218,16 @@ efi_status_t handle_cmdline_files(efi_loaded_image_t *image, efi_chunk_size = EFI_READ_CHUNK_SIZE; alloc_addr = alloc_size = 0; + + if (!ignore_load_options && cmdline_len > 0) { + twopass = IS_ENABLED(CONFIG_CMDLINE_BOOL) || + IS_ENABLED(CONFIG_CMDLINE_EXTEND); + } else { +do_builtin: cmdline = builtin_cmdline; + cmdline_len = ARRAY_SIZE(builtin_cmdline) - 1; + twopass = false; + } + do { struct finfo fi; unsigned long size; @@ -290,6 +309,9 @@ efi_status_t handle_cmdline_files(efi_loaded_image_t *image, efi_call_proto(volume, close); } while (offset > 0); + if (twopass) + goto do_builtin; + *load_addr = alloc_addr; *load_size = alloc_size; diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c index ea5da307d542..3785fb4986b4 100644 --- a/drivers/firmware/efi/libstub/gop.c +++ b/drivers/firmware/efi/libstub/gop.c @@ -133,13 +133,11 @@ void efi_parse_option_graphics(char *option) static u32 choose_mode_modenum(efi_graphics_output_protocol_t *gop) { - efi_status_t status; - + efi_graphics_output_mode_info_t *info __free(efi_pool) = NULL; efi_graphics_output_protocol_mode_t *mode; - efi_graphics_output_mode_info_t *info; unsigned long info_size; - u32 max_mode, cur_mode; + efi_status_t status; int pf; mode = efi_table_attr(gop, mode); @@ -154,17 +152,13 @@ static u32 choose_mode_modenum(efi_graphics_output_protocol_t *gop) return cur_mode; } - status = efi_call_proto(gop, query_mode, cmdline.mode, - &info_size, &info); + status = efi_call_proto(gop, query_mode, cmdline.mode, &info_size, &info); if (status != EFI_SUCCESS) { efi_err("Couldn't get mode information\n"); return cur_mode; } pf = info->pixel_format; - - efi_bs_call(free_pool, info); - if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX) { efi_err("Invalid PixelFormat\n"); return cur_mode; @@ -173,6 +167,28 @@ static u32 choose_mode_modenum(efi_graphics_output_protocol_t *gop) return cmdline.mode; } +static u32 choose_mode(efi_graphics_output_protocol_t *gop, + bool (*match)(const efi_graphics_output_mode_info_t *, u32, void *), + void *ctx) +{ + efi_graphics_output_protocol_mode_t *mode = efi_table_attr(gop, mode); + u32 max_mode = efi_table_attr(mode, max_mode); + + for (u32 m = 0; m < max_mode; m++) { + efi_graphics_output_mode_info_t *info __free(efi_pool) = NULL; + unsigned long info_size; + efi_status_t status; + + status = efi_call_proto(gop, query_mode, m, &info_size, &info); + if (status != EFI_SUCCESS) + continue; + + if (match(info, m, ctx)) + return m; + } + return (unsigned long)ctx; +} + static u8 pixel_bpp(int pixel_format, efi_pixel_bitmask_t pixel_info) { if (pixel_format == PIXEL_BIT_MASK) { @@ -185,192 +201,117 @@ static u8 pixel_bpp(int pixel_format, efi_pixel_bitmask_t pixel_info) return 32; } -static u32 choose_mode_res(efi_graphics_output_protocol_t *gop) +static bool match_res(const efi_graphics_output_mode_info_t 
*info, u32 mode, void *ctx) { - efi_status_t status; + efi_pixel_bitmask_t pi = info->pixel_information; + int pf = info->pixel_format; - efi_graphics_output_protocol_mode_t *mode; - efi_graphics_output_mode_info_t *info; - unsigned long info_size; - - u32 max_mode, cur_mode; - int pf; - efi_pixel_bitmask_t pi; - u32 m, w, h; + if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX) + return false; - mode = efi_table_attr(gop, mode); + return cmdline.res.width == info->horizontal_resolution && + cmdline.res.height == info->vertical_resolution && + (cmdline.res.format < 0 || cmdline.res.format == pf) && + (!cmdline.res.depth || cmdline.res.depth == pixel_bpp(pf, pi)); +} - cur_mode = efi_table_attr(mode, mode); - info = efi_table_attr(mode, info); - pf = info->pixel_format; - pi = info->pixel_information; - w = info->horizontal_resolution; - h = info->vertical_resolution; +static u32 choose_mode_res(efi_graphics_output_protocol_t *gop) +{ + efi_graphics_output_protocol_mode_t *mode = efi_table_attr(gop, mode); + unsigned long cur_mode = efi_table_attr(mode, mode); - if (w == cmdline.res.width && h == cmdline.res.height && - (cmdline.res.format < 0 || cmdline.res.format == pf) && - (!cmdline.res.depth || cmdline.res.depth == pixel_bpp(pf, pi))) + if (match_res(efi_table_attr(mode, info), cur_mode, NULL)) return cur_mode; - max_mode = efi_table_attr(mode, max_mode); - - for (m = 0; m < max_mode; m++) { - if (m == cur_mode) - continue; - - status = efi_call_proto(gop, query_mode, m, - &info_size, &info); - if (status != EFI_SUCCESS) - continue; + return choose_mode(gop, match_res, (void *)cur_mode); +} - pf = info->pixel_format; - pi = info->pixel_information; - w = info->horizontal_resolution; - h = info->vertical_resolution; +struct match { + u32 mode; + u32 area; + u8 depth; +}; - efi_bs_call(free_pool, info); +static bool match_auto(const efi_graphics_output_mode_info_t *info, u32 mode, void *ctx) +{ + u32 area = info->horizontal_resolution * info->vertical_resolution; + efi_pixel_bitmask_t pi = info->pixel_information; + int pf = info->pixel_format; + u8 depth = pixel_bpp(pf, pi); + struct match *m = ctx; - if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX) - continue; - if (w == cmdline.res.width && h == cmdline.res.height && - (cmdline.res.format < 0 || cmdline.res.format == pf) && - (!cmdline.res.depth || cmdline.res.depth == pixel_bpp(pf, pi))) - return m; - } + if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX) + return false; - efi_err("Couldn't find requested mode\n"); + if (area > m->area || (area == m->area && depth > m->depth)) + *m = (struct match){ mode, area, depth }; - return cur_mode; + return false; } static u32 choose_mode_auto(efi_graphics_output_protocol_t *gop) { - efi_status_t status; - - efi_graphics_output_protocol_mode_t *mode; - efi_graphics_output_mode_info_t *info; - unsigned long info_size; - - u32 max_mode, cur_mode, best_mode, area; - u8 depth; - int pf; - efi_pixel_bitmask_t pi; - u32 m, w, h, a; - u8 d; - - mode = efi_table_attr(gop, mode); - - cur_mode = efi_table_attr(mode, mode); - max_mode = efi_table_attr(mode, max_mode); + struct match match = {}; - info = efi_table_attr(mode, info); - - pf = info->pixel_format; - pi = info->pixel_information; - w = info->horizontal_resolution; - h = info->vertical_resolution; - - best_mode = cur_mode; - area = w * h; - depth = pixel_bpp(pf, pi); + choose_mode(gop, match_auto, &match); - for (m = 0; m < max_mode; m++) { - if (m == cur_mode) - continue; - - status = efi_call_proto(gop, query_mode, m, - &info_size, 
&info); - if (status != EFI_SUCCESS) - continue; + return match.mode; +} - pf = info->pixel_format; - pi = info->pixel_information; - w = info->horizontal_resolution; - h = info->vertical_resolution; +static bool match_list(const efi_graphics_output_mode_info_t *info, u32 mode, void *ctx) +{ + efi_pixel_bitmask_t pi = info->pixel_information; + u32 cur_mode = (unsigned long)ctx; + int pf = info->pixel_format; + const char *dstr; + u8 depth = 0; + bool valid; - efi_bs_call(free_pool, info); + valid = !(pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX); - if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX) - continue; - a = w * h; - if (a < area) - continue; - d = pixel_bpp(pf, pi); - if (a > area || d > depth) { - best_mode = m; - area = a; - depth = d; - } + switch (pf) { + case PIXEL_RGB_RESERVED_8BIT_PER_COLOR: + dstr = "rgb"; + break; + case PIXEL_BGR_RESERVED_8BIT_PER_COLOR: + dstr = "bgr"; + break; + case PIXEL_BIT_MASK: + dstr = ""; + depth = pixel_bpp(pf, pi); + break; + case PIXEL_BLT_ONLY: + dstr = "blt"; + break; + default: + dstr = "xxx"; + break; } - return best_mode; + efi_printk("Mode %3u %c%c: Resolution %ux%u-%s%.0hhu\n", + mode, + (mode == cur_mode) ? '*' : ' ', + !valid ? '-' : ' ', + info->horizontal_resolution, + info->vertical_resolution, + dstr, depth); + + return false; } static u32 choose_mode_list(efi_graphics_output_protocol_t *gop) { - efi_status_t status; - - efi_graphics_output_protocol_mode_t *mode; - efi_graphics_output_mode_info_t *info; - unsigned long info_size; - - u32 max_mode, cur_mode; - int pf; - efi_pixel_bitmask_t pi; - u32 m, w, h; - u8 d; - const char *dstr; - bool valid; + efi_graphics_output_protocol_mode_t *mode = efi_table_attr(gop, mode); + unsigned long cur_mode = efi_table_attr(mode, mode); + u32 max_mode = efi_table_attr(mode, max_mode); efi_input_key_t key; - - mode = efi_table_attr(gop, mode); - - cur_mode = efi_table_attr(mode, mode); - max_mode = efi_table_attr(mode, max_mode); + efi_status_t status; efi_printk("Available graphics modes are 0-%u\n", max_mode-1); efi_puts(" * = current mode\n" " - = unusable mode\n"); - for (m = 0; m < max_mode; m++) { - status = efi_call_proto(gop, query_mode, m, - &info_size, &info); - if (status != EFI_SUCCESS) - continue; - pf = info->pixel_format; - pi = info->pixel_information; - w = info->horizontal_resolution; - h = info->vertical_resolution; - - efi_bs_call(free_pool, info); - - valid = !(pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX); - d = 0; - switch (pf) { - case PIXEL_RGB_RESERVED_8BIT_PER_COLOR: - dstr = "rgb"; - break; - case PIXEL_BGR_RESERVED_8BIT_PER_COLOR: - dstr = "bgr"; - break; - case PIXEL_BIT_MASK: - dstr = ""; - d = pixel_bpp(pf, pi); - break; - case PIXEL_BLT_ONLY: - dstr = "blt"; - break; - default: - dstr = "xxx"; - break; - } - - efi_printk("Mode %3u %c%c: Resolution %ux%u-%s%.0hhu\n", - m, - m == cur_mode ? '*' : ' ', - !valid ? 
'-' : ' ', - w, h, dstr, d); - } + choose_mode(gop, match_list, (void *)cur_mode); efi_puts("\nPress any key to continue (or wait 10 seconds)\n"); status = efi_wait_for_key(10 * EFI_USEC_PER_SEC, &key); @@ -461,26 +402,25 @@ setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line, } } -static efi_graphics_output_protocol_t * -find_gop(efi_guid_t *proto, unsigned long size, void **handles) +static efi_graphics_output_protocol_t *find_gop(unsigned long num, + const efi_handle_t handles[]) { efi_graphics_output_protocol_t *first_gop; efi_handle_t h; - int i; first_gop = NULL; - for_each_efi_handle(h, handles, size, i) { + for_each_efi_handle(h, handles, num) { efi_status_t status; efi_graphics_output_protocol_t *gop; efi_graphics_output_protocol_mode_t *mode; efi_graphics_output_mode_info_t *info; - - efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID; void *dummy = NULL; - status = efi_bs_call(handle_protocol, h, proto, (void **)&gop); + status = efi_bs_call(handle_protocol, h, + &EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID, + (void **)&gop); if (status != EFI_SUCCESS) continue; @@ -500,7 +440,8 @@ find_gop(efi_guid_t *proto, unsigned long size, void **handles) * Once we've found a GOP supporting ConOut, * don't bother looking any further. */ - status = efi_bs_call(handle_protocol, h, &conout_proto, &dummy); + status = efi_bs_call(handle_protocol, h, + &EFI_CONSOLE_OUT_DEVICE_GUID, &dummy); if (status == EFI_SUCCESS) return gop; @@ -511,16 +452,22 @@ find_gop(efi_guid_t *proto, unsigned long size, void **handles) return first_gop; } -static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto, - unsigned long size, void **handles) +efi_status_t efi_setup_gop(struct screen_info *si) { - efi_graphics_output_protocol_t *gop; + efi_handle_t *handles __free(efi_pool) = NULL; efi_graphics_output_protocol_mode_t *mode; efi_graphics_output_mode_info_t *info; + efi_graphics_output_protocol_t *gop; + efi_status_t status; + unsigned long num; - gop = find_gop(proto, size, handles); + status = efi_bs_call(locate_handle_buffer, EFI_LOCATE_BY_PROTOCOL, + &EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID, NULL, &num, + &handles); + if (status != EFI_SUCCESS) + return status; - /* Did we find any GOPs? 
*/ + gop = find_gop(num, handles); if (!gop) return EFI_NOT_FOUND; @@ -552,29 +499,3 @@ static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto, return EFI_SUCCESS; } - -/* - * See if we have Graphics Output Protocol - */ -efi_status_t efi_setup_gop(struct screen_info *si, efi_guid_t *proto, - unsigned long size) -{ - efi_status_t status; - void **gop_handle = NULL; - - status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size, - (void **)&gop_handle); - if (status != EFI_SUCCESS) - return status; - - status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, proto, NULL, - &size, gop_handle); - if (status != EFI_SUCCESS) - goto free_handle; - - status = setup_gop(si, proto, size, gop_handle); - -free_handle: - efi_bs_call(free_pool, gop_handle); - return status; -} diff --git a/drivers/firmware/efi/libstub/intrinsics.c b/drivers/firmware/efi/libstub/intrinsics.c index 965e734f6f98..418cd2e6dccc 100644 --- a/drivers/firmware/efi/libstub/intrinsics.c +++ b/drivers/firmware/efi/libstub/intrinsics.c @@ -15,8 +15,31 @@ void *__memmove(void *__dest, const void *__src, size_t count) __alias(memmove); void *__memset(void *s, int c, size_t count) __alias(memset); #endif +static void *efistub_memmove(u8 *dst, const u8 *src, size_t len) +{ + if (src > dst || dst >= (src + len)) + for (size_t i = 0; i < len; i++) + dst[i] = src[i]; + else + for (ssize_t i = len - 1; i >= 0; i--) + dst[i] = src[i]; + + return dst; +} + +static void *efistub_memset(void *dst, int c, size_t len) +{ + for (u8 *d = dst; len--; d++) + *d = c; + + return dst; +} + void *memcpy(void *dst, const void *src, size_t len) { + if (efi_table_attr(efi_system_table, boottime) == NULL) + return efistub_memmove(dst, src, len); + efi_bs_call(copy_mem, dst, src, len); return dst; } @@ -25,6 +48,9 @@ extern void *memmove(void *dst, const void *src, size_t len) __alias(memcpy); void *memset(void *dst, int c, size_t len) { + if (efi_table_attr(efi_system_table, boottime) == NULL) + return efistub_memset(dst, c, len); + efi_bs_call(set_mem, dst, len, c & U8_MAX); return dst; } diff --git a/drivers/firmware/efi/libstub/kaslr.c b/drivers/firmware/efi/libstub/kaslr.c index 1a9808012abd..4bc963e999eb 100644 --- a/drivers/firmware/efi/libstub/kaslr.c +++ b/drivers/firmware/efi/libstub/kaslr.c @@ -18,8 +18,6 @@ */ u32 efi_kaslr_get_phys_seed(efi_handle_t image_handle) { - efi_status_t status; - u32 phys_seed; efi_guid_t li_fixed_proto = LINUX_EFI_LOADED_IMAGE_FIXED_GUID; void *p; @@ -32,18 +30,20 @@ u32 efi_kaslr_get_phys_seed(efi_handle_t image_handle) &li_fixed_proto, &p) == EFI_SUCCESS) { efi_info("Image placement fixed by loader\n"); } else { + efi_status_t status; + u32 phys_seed; + status = efi_get_random_bytes(sizeof(phys_seed), (u8 *)&phys_seed); - if (status == EFI_SUCCESS) { + if (status == EFI_SUCCESS) return phys_seed; - } else if (status == EFI_NOT_FOUND) { + + if (status == EFI_NOT_FOUND) efi_info("EFI_RNG_PROTOCOL unavailable\n"); - efi_nokaslr = true; - } else if (status != EFI_SUCCESS) { - efi_err("efi_get_random_bytes() failed (0x%lx)\n", - status); - efi_nokaslr = true; - } + else + efi_err("efi_get_random_bytes() failed (0x%lx)\n", status); + + efi_nokaslr = true; } return 0; @@ -57,7 +57,7 @@ u32 efi_kaslr_get_phys_seed(efi_handle_t image_handle) */ static bool check_image_region(u64 base, u64 size) { - struct efi_boot_memmap *map; + struct efi_boot_memmap *map __free(efi_pool) = NULL; efi_status_t status; bool ret = false; int map_offset; @@ -80,8 +80,6 @@ static bool check_image_region(u64 base, u64 size) } 
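A recurring theme from here on (kaslr.c above, and mem.c, pci.c, randomalloc.c, relocate.c and x86-stub.c further down) is replacing manual free_pool error paths with scope-based cleanup: the buffer is declared as `map __free(efi_pool) = NULL`, and `no_free_ptr()` hands ownership to the caller on the success path. A minimal userspace sketch of the same idea using the compiler's cleanup attribute; all demo_* names are illustrative and free() stands in for the boot-service pool free:

    #include <stdio.h>
    #include <stdlib.h>

    /* Cleanup helper: receives a pointer to the annotated variable. */
    static void demo_free(void **p)
    {
        free(*p);
    }

    #define demo_autofree __attribute__((cleanup(demo_free)))

    /* Analogue of no_free_ptr(): take ownership, disarm the cleanup. */
    static void *demo_take(void **p)
    {
        void *v = *p;

        *p = NULL;          /* cleanup now frees NULL, i.e. does nothing */
        return v;
    }

    static int demo_get_buffer(size_t size, void **out)
    {
        demo_autofree void *buf = malloc(size);

        if (!buf)
            return -1;
        if (size < 16)      /* early error path: buf is freed automatically */
            return -1;

        *out = demo_take(&buf);
        return 0;
    }

    int main(void)
    {
        void *p = NULL;

        if (demo_get_buffer(64, &p) == 0) {
            printf("got buffer %p\n", p);
            free(p);
        }
        return 0;
    }

The payoff is the same as in the hunks below: every early return frees the allocation without a goto ladder, and only an explicit ownership transfer keeps it alive.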
} - efi_bs_call(free_pool, map); - return ret; } diff --git a/drivers/firmware/efi/libstub/loongarch.c b/drivers/firmware/efi/libstub/loongarch.c index 684c9354637c..3782d0a187d1 100644 --- a/drivers/firmware/efi/libstub/loongarch.c +++ b/drivers/firmware/efi/libstub/loongarch.c @@ -41,7 +41,7 @@ static efi_status_t exit_boot_func(struct efi_boot_memmap *map, void *priv) unsigned long __weak kernel_entry_address(unsigned long kernel_addr, efi_loaded_image_t *image) { - return *(unsigned long *)(kernel_addr + 8) - VMLINUX_LOAD_ADDRESS + kernel_addr; + return *(unsigned long *)(kernel_addr + 8) - PHYSADDR(VMLINUX_LOAD_ADDRESS) + kernel_addr; } efi_status_t efi_boot_kernel(void *handle, efi_loaded_image_t *image, @@ -74,6 +74,8 @@ efi_status_t efi_boot_kernel(void *handle, efi_loaded_image_t *image, /* Config Direct Mapping */ csr_write64(CSR_DMW0_INIT, LOONGARCH_CSR_DMWIN0); csr_write64(CSR_DMW1_INIT, LOONGARCH_CSR_DMWIN1); + csr_write64(CSR_DMW2_INIT, LOONGARCH_CSR_DMWIN2); + csr_write64(CSR_DMW3_INIT, LOONGARCH_CSR_DMWIN3); real_kernel_entry = (void *)kernel_entry_address(kernel_addr, image); diff --git a/drivers/firmware/efi/libstub/mem.c b/drivers/firmware/efi/libstub/mem.c index 4f1fa302234d..9c82259eea81 100644 --- a/drivers/firmware/efi/libstub/mem.c +++ b/drivers/firmware/efi/libstub/mem.c @@ -20,10 +20,10 @@ efi_status_t efi_get_memory_map(struct efi_boot_memmap **map, bool install_cfg_tbl) { + struct efi_boot_memmap tmp, *m __free(efi_pool) = NULL; int memtype = install_cfg_tbl ? EFI_ACPI_RECLAIM_MEMORY : EFI_LOADER_DATA; efi_guid_t tbl_guid = LINUX_EFI_BOOT_MEMMAP_GUID; - struct efi_boot_memmap *m, tmp; efi_status_t status; unsigned long size; @@ -48,24 +48,20 @@ efi_status_t efi_get_memory_map(struct efi_boot_memmap **map, */ status = efi_bs_call(install_configuration_table, &tbl_guid, m); if (status != EFI_SUCCESS) - goto free_map; + return status; } m->buff_size = m->map_size = size; status = efi_bs_call(get_memory_map, &m->map_size, m->map, &m->map_key, &m->desc_size, &m->desc_ver); - if (status != EFI_SUCCESS) - goto uninstall_table; + if (status != EFI_SUCCESS) { + if (install_cfg_tbl) + efi_bs_call(install_configuration_table, &tbl_guid, NULL); + return status; + } - *map = m; + *map = no_free_ptr(m); return EFI_SUCCESS; - -uninstall_table: - if (install_cfg_tbl) - efi_bs_call(install_configuration_table, &tbl_guid, NULL); -free_map: - efi_bs_call(free_pool, m); - return status; } /** diff --git a/drivers/firmware/efi/libstub/pci.c b/drivers/firmware/efi/libstub/pci.c index 99fb25d2bcf5..1dccf77958d3 100644 --- a/drivers/firmware/efi/libstub/pci.c +++ b/drivers/firmware/efi/libstub/pci.c @@ -16,37 +16,20 @@ void efi_pci_disable_bridge_busmaster(void) { efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID; - unsigned long pci_handle_size = 0; - efi_handle_t *pci_handle = NULL; + efi_handle_t *pci_handle __free(efi_pool) = NULL; + unsigned long pci_handle_num; efi_handle_t handle; efi_status_t status; u16 class, command; - int i; - status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, &pci_proto, - NULL, &pci_handle_size, NULL); - - if (status != EFI_BUFFER_TOO_SMALL) { - if (status != EFI_SUCCESS && status != EFI_NOT_FOUND) - efi_err("Failed to locate PCI I/O handles'\n"); - return; - } - - status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, pci_handle_size, - (void **)&pci_handle); + status = efi_bs_call(locate_handle_buffer, EFI_LOCATE_BY_PROTOCOL, + &pci_proto, NULL, &pci_handle_num, &pci_handle); if (status != EFI_SUCCESS) { - efi_err("Failed to allocate memory for 
'pci_handle'\n"); + efi_err("Failed to locate PCI I/O handles\n"); return; } - status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, &pci_proto, - NULL, &pci_handle_size, pci_handle); - if (status != EFI_SUCCESS) { - efi_err("Failed to locate PCI I/O handles'\n"); - goto free_handle; - } - - for_each_efi_handle(handle, pci_handle, pci_handle_size, i) { + for_each_efi_handle(handle, pci_handle, pci_handle_num) { efi_pci_io_protocol_t *pci; unsigned long segment_nr, bus_nr, device_nr, func_nr; @@ -82,7 +65,7 @@ void efi_pci_disable_bridge_busmaster(void) efi_bs_call(disconnect_controller, handle, NULL, NULL); } - for_each_efi_handle(handle, pci_handle, pci_handle_size, i) { + for_each_efi_handle(handle, pci_handle, pci_handle_num) { efi_pci_io_protocol_t *pci; status = efi_bs_call(handle_protocol, handle, &pci_proto, @@ -108,7 +91,4 @@ void efi_pci_disable_bridge_busmaster(void) if (status != EFI_SUCCESS) efi_err("Failed to disable PCI busmastering\n"); } - -free_handle: - efi_bs_call(free_pool, pci_handle); } diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c index c41e7b2091cd..fd80b2f3233a 100644 --- a/drivers/firmware/efi/libstub/randomalloc.c +++ b/drivers/firmware/efi/libstub/randomalloc.c @@ -25,6 +25,9 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md, if (md->type != EFI_CONVENTIONAL_MEMORY) return 0; + if (md->attribute & EFI_MEMORY_HOT_PLUGGABLE) + return 0; + if (efi_soft_reserve_enabled() && (md->attribute & EFI_MEMORY_SP)) return 0; @@ -59,9 +62,9 @@ efi_status_t efi_random_alloc(unsigned long size, unsigned long alloc_min, unsigned long alloc_max) { + struct efi_boot_memmap *map __free(efi_pool) = NULL; unsigned long total_slots = 0, target_slot; unsigned long total_mirrored_slots = 0; - struct efi_boot_memmap *map; efi_status_t status; int map_offset; @@ -72,6 +75,10 @@ efi_status_t efi_random_alloc(unsigned long size, if (align < EFI_ALLOC_ALIGN) align = EFI_ALLOC_ALIGN; + /* Avoid address 0x0, as it can be mistaken for NULL */ + if (alloc_min == 0) + alloc_min = align; + size = round_up(size, EFI_ALLOC_ALIGN); /* count the suitable slots in each memory map entry */ @@ -130,7 +137,5 @@ efi_status_t efi_random_alloc(unsigned long size, break; } - efi_bs_call(free_pool, map); - return status; } diff --git a/drivers/firmware/efi/libstub/relocate.c b/drivers/firmware/efi/libstub/relocate.c index bf6fbd5d22a1..d4264bfb6dc1 100644 --- a/drivers/firmware/efi/libstub/relocate.c +++ b/drivers/firmware/efi/libstub/relocate.c @@ -23,14 +23,14 @@ efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align, unsigned long *addr, unsigned long min) { - struct efi_boot_memmap *map; + struct efi_boot_memmap *map __free(efi_pool) = NULL; efi_status_t status; unsigned long nr_pages; int i; status = efi_get_memory_map(&map, false); if (status != EFI_SUCCESS) - goto fail; + return status; /* * Enforce minimum alignment that EFI or Linux requires when @@ -48,11 +48,14 @@ efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align, unsigned long m = (unsigned long)map->map; u64 start, end; - desc = efi_early_memdesc_ptr(m, map->desc_size, i); + desc = efi_memdesc_ptr(m, map->desc_size, i); if (desc->type != EFI_CONVENTIONAL_MEMORY) continue; + if (desc->attribute & EFI_MEMORY_HOT_PLUGGABLE) + continue; + if (efi_soft_reserve_enabled() && (desc->attribute & EFI_MEMORY_SP)) continue; @@ -79,11 +82,9 @@ efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align, } if (i == map->map_size / 
map->desc_size) - status = EFI_NOT_FOUND; + return EFI_NOT_FOUND; - efi_bs_call(free_pool, map); -fail: - return status; + return EFI_SUCCESS; } /** diff --git a/drivers/firmware/efi/libstub/riscv-stub.c b/drivers/firmware/efi/libstub/riscv-stub.c index c96d6dcee86c..e7d9204baee3 100644 --- a/drivers/firmware/efi/libstub/riscv-stub.c +++ b/drivers/firmware/efi/libstub/riscv-stub.c @@ -7,7 +7,7 @@ #include <asm/efi.h> #include <asm/sections.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include "efistub.h" diff --git a/drivers/firmware/efi/libstub/riscv.c b/drivers/firmware/efi/libstub/riscv.c index 8022b104c3e6..f66f33ceb99e 100644 --- a/drivers/firmware/efi/libstub/riscv.c +++ b/drivers/firmware/efi/libstub/riscv.c @@ -7,7 +7,7 @@ #include <linux/libfdt.h> #include <asm/efi.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include "efistub.h" diff --git a/drivers/firmware/efi/libstub/screen_info.c b/drivers/firmware/efi/libstub/screen_info.c index a51ec201ca3c..5d3a1e32d177 100644 --- a/drivers/firmware/efi/libstub/screen_info.c +++ b/drivers/firmware/efi/libstub/screen_info.c @@ -32,6 +32,8 @@ struct screen_info *__alloc_screen_info(void) if (status != EFI_SUCCESS) return NULL; + memset(si, 0, sizeof(*si)); + status = efi_bs_call(install_configuration_table, &screen_info_guid, si); if (status == EFI_SUCCESS) diff --git a/drivers/firmware/efi/libstub/smbios.c b/drivers/firmware/efi/libstub/smbios.c index c217de2cc8d5..f31410d7e7e1 100644 --- a/drivers/firmware/efi/libstub/smbios.c +++ b/drivers/firmware/efi/libstub/smbios.c @@ -6,20 +6,31 @@ #include "efistub.h" -typedef struct efi_smbios_protocol efi_smbios_protocol_t; - -struct efi_smbios_protocol { - efi_status_t (__efiapi *add)(efi_smbios_protocol_t *, efi_handle_t, - u16 *, struct efi_smbios_record *); - efi_status_t (__efiapi *update_string)(efi_smbios_protocol_t *, u16 *, - unsigned long *, u8 *); - efi_status_t (__efiapi *remove)(efi_smbios_protocol_t *, u16); - efi_status_t (__efiapi *get_next)(efi_smbios_protocol_t *, u16 *, u8 *, - struct efi_smbios_record **, - efi_handle_t *); - - u8 major_version; - u8 minor_version; +typedef union efi_smbios_protocol efi_smbios_protocol_t; + +union efi_smbios_protocol { + struct { + efi_status_t (__efiapi *add)(efi_smbios_protocol_t *, efi_handle_t, + u16 *, struct efi_smbios_record *); + efi_status_t (__efiapi *update_string)(efi_smbios_protocol_t *, u16 *, + unsigned long *, u8 *); + efi_status_t (__efiapi *remove)(efi_smbios_protocol_t *, u16); + efi_status_t (__efiapi *get_next)(efi_smbios_protocol_t *, u16 *, u8 *, + struct efi_smbios_record **, + efi_handle_t *); + + u8 major_version; + u8 minor_version; + }; + struct { + u32 add; + u32 update_string; + u32 remove; + u32 get_next; + + u8 major_version; + u8 minor_version; + } mixed_mode; }; const struct efi_smbios_record *efi_get_smbios_record(u8 type) @@ -38,7 +49,7 @@ const struct efi_smbios_record *efi_get_smbios_record(u8 type) } const u8 *__efi_get_smbios_string(const struct efi_smbios_record *record, - u8 type, int offset) + const u8 *offset) { const u8 *strtable; @@ -46,7 +57,7 @@ const u8 *__efi_get_smbios_string(const struct efi_smbios_record *record, return NULL; strtable = (u8 *)record + record->length; - for (int i = 1; i < ((u8 *)record)[offset]; i++) { + for (int i = 1; i < *offset; i++) { int len = strlen(strtable); if (!len) diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c index df3182f2e63a..a5c6c4f163fc 100644 --- a/drivers/firmware/efi/libstub/tpm.c +++ 
b/drivers/firmware/efi/libstub/tpm.c @@ -57,7 +57,7 @@ static void efi_retrieve_tcg2_eventlog(int version, efi_physical_addr_t log_loca struct linux_efi_tpm_eventlog *log_tbl = NULL; unsigned long first_entry_addr, last_entry_addr; size_t log_size, last_entry_size; - int final_events_size = 0; + u32 final_events_size = 0; first_entry_addr = (unsigned long) log_location; @@ -96,7 +96,7 @@ static void efi_retrieve_tcg2_eventlog(int version, efi_physical_addr_t log_loca } /* Allocate space for the logs and copy them. */ - status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, + status = efi_bs_call(allocate_pool, EFI_ACPI_RECLAIM_MEMORY, sizeof(*log_tbl) + log_size, (void **)&log_tbl); if (status != EFI_SUCCESS) { @@ -110,9 +110,9 @@ static void efi_retrieve_tcg2_eventlog(int version, efi_physical_addr_t log_loca */ if (final_events_table && final_events_table->nr_events) { struct tcg_pcr_event2_head *header; - int offset; + u32 offset; void *data; - int event_size; + u32 event_size; int i = final_events_table->nr_events; data = (void *)final_events_table; @@ -124,6 +124,9 @@ static void efi_retrieve_tcg2_eventlog(int version, efi_physical_addr_t log_loca event_size = __calc_tpm2_event_size(header, (void *)(long)log_location, false); + /* If calc fails this is a malformed log */ + if (!event_size) + break; final_events_size += event_size; i--; } diff --git a/drivers/firmware/efi/libstub/unaccepted_memory.c b/drivers/firmware/efi/libstub/unaccepted_memory.c index 9a655f30ba47..757dbe734a47 100644 --- a/drivers/firmware/efi/libstub/unaccepted_memory.c +++ b/drivers/firmware/efi/libstub/unaccepted_memory.c @@ -29,7 +29,7 @@ efi_status_t allocate_unaccepted_bitmap(__u32 nr_desc, efi_memory_desc_t *d; unsigned long m = (unsigned long)map->map; - d = efi_early_memdesc_ptr(m, map->desc_size, i); + d = efi_memdesc_ptr(m, map->desc_size, i); if (d->type != EFI_UNACCEPTED_MEMORY) continue; @@ -177,9 +177,10 @@ void process_unaccepted_memory(u64 start, u64 end) start / unit_size, (end - start) / unit_size); } -void accept_memory(phys_addr_t start, phys_addr_t end) +void accept_memory(phys_addr_t start, unsigned long size) { unsigned long range_start, range_end; + phys_addr_t end = start + size; unsigned long bitmap_size; u64 unit_size; diff --git a/drivers/firmware/efi/libstub/x86-5lvl.c b/drivers/firmware/efi/libstub/x86-5lvl.c index 77359e802181..f1c5fb45d5f7 100644 --- a/drivers/firmware/efi/libstub/x86-5lvl.c +++ b/drivers/firmware/efi/libstub/x86-5lvl.c @@ -62,7 +62,7 @@ efi_status_t efi_setup_5level_paging(void) void efi_5level_switch(void) { - bool want_la57 = IS_ENABLED(CONFIG_X86_5LEVEL) && !efi_no5lvl; + bool want_la57 = !efi_no5lvl; bool have_la57 = native_read_cr4() & X86_CR4_LA57; bool need_toggle = want_la57 ^ have_la57; u64 *pgt = (void *)la57_toggle + PAGE_SIZE; diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c index d5a8182cf2e1..cafc90d4caaf 100644 --- a/drivers/firmware/efi/libstub/x86-stub.c +++ b/drivers/firmware/efi/libstub/x86-stub.c @@ -42,7 +42,7 @@ union sev_memory_acceptance_protocol { static efi_status_t preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom) { - struct pci_setup_rom *rom = NULL; + struct pci_setup_rom *rom __free(efi_pool) = NULL; efi_status_t status; unsigned long size; uint64_t romsize; @@ -75,14 +75,13 @@ preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom) rom->data.len = size - sizeof(struct setup_data); rom->data.next = 0; rom->pcilen = romsize; - *__rom = 
rom; status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16, PCI_VENDOR_ID, 1, &rom->vendor); if (status != EFI_SUCCESS) { efi_err("Failed to read rom->vendor\n"); - goto free_struct; + return status; } status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16, @@ -90,21 +89,18 @@ preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom) if (status != EFI_SUCCESS) { efi_err("Failed to read rom->devid\n"); - goto free_struct; + return status; } status = efi_call_proto(pci, get_location, &rom->segment, &rom->bus, &rom->device, &rom->function); if (status != EFI_SUCCESS) - goto free_struct; + return status; memcpy(rom->romdata, romimage, romsize); - return status; - -free_struct: - efi_bs_call(free_pool, rom); - return status; + *__rom = no_free_ptr(rom); + return EFI_SUCCESS; } /* @@ -119,38 +115,23 @@ free_struct: static void setup_efi_pci(struct boot_params *params) { efi_status_t status; - void **pci_handle = NULL; + efi_handle_t *pci_handle __free(efi_pool) = NULL; efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID; - unsigned long size = 0; struct setup_data *data; + unsigned long num; efi_handle_t h; - int i; - - status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, - &pci_proto, NULL, &size, pci_handle); - - if (status == EFI_BUFFER_TOO_SMALL) { - status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size, - (void **)&pci_handle); - - if (status != EFI_SUCCESS) { - efi_err("Failed to allocate memory for 'pci_handle'\n"); - return; - } - - status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, - &pci_proto, NULL, &size, pci_handle); - } + status = efi_bs_call(locate_handle_buffer, EFI_LOCATE_BY_PROTOCOL, + &pci_proto, NULL, &num, &pci_handle); if (status != EFI_SUCCESS) - goto free_handle; + return; data = (struct setup_data *)(unsigned long)params->hdr.setup_data; while (data && data->next) data = (struct setup_data *)(unsigned long)data->next; - for_each_efi_handle(h, pci_handle, size, i) { + for_each_efi_handle(h, pci_handle, num) { efi_pci_io_protocol_t *pci = NULL; struct pci_setup_rom *rom; @@ -170,9 +151,6 @@ static void setup_efi_pci(struct boot_params *params) data = (struct setup_data *)rom; } - -free_handle: - efi_bs_call(free_pool, pci_handle); } static void retrieve_apple_device_properties(struct boot_params *boot_params) @@ -225,6 +203,68 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params) } } +static bool apple_match_product_name(void) +{ + static const char type1_product_matches[][15] = { + "MacBookPro11,3", + "MacBookPro11,5", + "MacBookPro13,3", + "MacBookPro14,3", + "MacBookPro15,1", + "MacBookPro15,3", + "MacBookPro16,1", + "MacBookPro16,4", + }; + const struct efi_smbios_type1_record *record; + const u8 *product; + + record = (struct efi_smbios_type1_record *)efi_get_smbios_record(1); + if (!record) + return false; + + product = efi_get_smbios_string(record, product_name); + if (!product) + return false; + + for (int i = 0; i < ARRAY_SIZE(type1_product_matches); i++) { + if (!strcmp(product, type1_product_matches[i])) + return true; + } + + return false; +} + +static void apple_set_os(void) +{ + struct { + unsigned long version; + efi_status_t (__efiapi *set_os_version)(const char *); + efi_status_t (__efiapi *set_os_vendor)(const char *); + } *set_os; + efi_status_t status; + + if (!efi_is_64bit() || !apple_match_product_name()) + return; + + status = efi_bs_call(locate_protocol, &APPLE_SET_OS_PROTOCOL_GUID, NULL, + (void **)&set_os); + if (status != EFI_SUCCESS) + return; + + if (set_os->version >= 
2) { + status = set_os->set_os_vendor("Apple Inc."); + if (status != EFI_SUCCESS) + efi_err("Failed to set OS vendor via apple_set_os\n"); + } + + if (set_os->version > 0) { + /* The version being set doesn't seem to matter */ + status = set_os->set_os_version("Mac OS X 10.9"); + if (status != EFI_SUCCESS) + efi_err("Failed to set OS version via apple_set_os\n"); + } +} + efi_status_t efi_adjust_memory_range_protection(unsigned long start, unsigned long size) { @@ -335,121 +375,21 @@ static const efi_char16_t apple[] = L"Apple"; static void setup_quirks(struct boot_params *boot_params) { - if (IS_ENABLED(CONFIG_APPLE_PROPERTIES) && - !memcmp(efistub_fw_vendor(), apple, sizeof(apple))) - retrieve_apple_device_properties(boot_params); -} + if (!memcmp(efistub_fw_vendor(), apple, sizeof(apple))) { + if (IS_ENABLED(CONFIG_APPLE_PROPERTIES)) + retrieve_apple_device_properties(boot_params); -/* - * See if we have Universal Graphics Adapter (UGA) protocol - */ -static efi_status_t -setup_uga(struct screen_info *si, efi_guid_t *uga_proto, unsigned long size) -{ - efi_status_t status; - u32 width, height; - void **uga_handle = NULL; - efi_uga_draw_protocol_t *uga = NULL, *first_uga; - efi_handle_t handle; - int i; - - status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size, - (void **)&uga_handle); - if (status != EFI_SUCCESS) - return status; - - status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, - uga_proto, NULL, &size, uga_handle); - if (status != EFI_SUCCESS) - goto free_handle; - - height = 0; - width = 0; - - first_uga = NULL; - for_each_efi_handle(handle, uga_handle, size, i) { - efi_guid_t pciio_proto = EFI_PCI_IO_PROTOCOL_GUID; - u32 w, h, depth, refresh; - void *pciio; - - status = efi_bs_call(handle_protocol, handle, uga_proto, - (void **)&uga); - if (status != EFI_SUCCESS) - continue; - - pciio = NULL; - efi_bs_call(handle_protocol, handle, &pciio_proto, &pciio); - - status = efi_call_proto(uga, get_mode, &w, &h, &depth, &refresh); - if (status == EFI_SUCCESS && (!first_uga || pciio)) { - width = w; - height = h; - - /* - * Once we've found a UGA supporting PCIIO, - * don't bother looking any further. 
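The apple_set_os() helper added above checks the protocol's advertised version before invoking either callback. A rough userspace model of that version-gated vtable call; the demo_* names are stand-ins and plain function pointers replace the __efiapi calling convention:

    #include <stdio.h>

    struct demo_set_os {
        unsigned long version;
        int (*set_os_version)(const char *);
        int (*set_os_vendor)(const char *);
    };

    static int demo_report(const char *s)
    {
        printf("reported to firmware: %s\n", s);
        return 0;
    }

    /* Mirror of the gating in apple_set_os(): vendor needs v2+, version needs v1+. */
    static void demo_handshake(const struct demo_set_os *set_os)
    {
        if (set_os->version >= 2)
            set_os->set_os_vendor("Apple Inc.");
        if (set_os->version > 0)
            set_os->set_os_version("Mac OS X 10.9");
    }

    int main(void)
    {
        struct demo_set_os proto = { 2, demo_report, demo_report };

        demo_handshake(&proto);
        return 0;
    }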
- */ - if (pciio) - break; - - first_uga = uga; - } + apple_set_os(); } - - if (!width && !height) - goto free_handle; - - /* EFI framebuffer */ - si->orig_video_isVGA = VIDEO_TYPE_EFI; - - si->lfb_depth = 32; - si->lfb_width = width; - si->lfb_height = height; - - si->red_size = 8; - si->red_pos = 16; - si->green_size = 8; - si->green_pos = 8; - si->blue_size = 8; - si->blue_pos = 0; - si->rsvd_size = 8; - si->rsvd_pos = 24; - -free_handle: - efi_bs_call(free_pool, uga_handle); - - return status; } static void setup_graphics(struct boot_params *boot_params) { - efi_guid_t graphics_proto = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID; - struct screen_info *si; - efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID; - efi_status_t status; - unsigned long size; - void **gop_handle = NULL; - void **uga_handle = NULL; + struct screen_info *si = memset(&boot_params->screen_info, 0, sizeof(*si)); - si = &boot_params->screen_info; - memset(si, 0, sizeof(*si)); - - size = 0; - status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, - &graphics_proto, NULL, &size, gop_handle); - if (status == EFI_BUFFER_TOO_SMALL) - status = efi_setup_gop(si, &graphics_proto, size); - - if (status != EFI_SUCCESS) { - size = 0; - status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, - &uga_proto, NULL, &size, uga_handle); - if (status == EFI_BUFFER_TOO_SMALL) - setup_uga(si, &uga_proto, size); - } + efi_setup_gop(si); } - static void __noreturn efi_exit(efi_handle_t handle, efi_status_t status) { efi_bs_call(exit, handle, status, 0, NULL); @@ -457,40 +397,34 @@ static void __noreturn efi_exit(efi_handle_t handle, efi_status_t status) asm("hlt"); } -void __noreturn efi_stub_entry(efi_handle_t handle, - efi_system_table_t *sys_table_arg, - struct boot_params *boot_params); - /* * Because the x86 boot code expects to be passed a boot_params we * need to create one ourselves (usually the bootloader would create * one for us). 
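The boot_params the stub now allocates below stores the command-line pointer as two 32-bit halves via efi_set_u64_split(), one in the legacy setup_header field and one in the ext_* field. A standalone sketch of that split, with demo_set_u64_split standing in for the stub helper:

    #include <stdint.h>
    #include <stdio.h>
    #include <inttypes.h>

    /* Split a 64-bit address into low/high 32-bit halves. */
    static void demo_set_u64_split(uint64_t value, uint32_t *lo, uint32_t *hi)
    {
        *lo = (uint32_t)value;
        *hi = (uint32_t)(value >> 32);
    }

    int main(void)
    {
        uint32_t cmd_line_ptr, ext_cmd_line_ptr;
        uint64_t addr = 0x123456789abcULL;

        demo_set_u64_split(addr, &cmd_line_ptr, &ext_cmd_line_ptr);
        printf("lo=0x%" PRIx32 " hi=0x%" PRIx32 "\n", cmd_line_ptr, ext_cmd_line_ptr);
        return 0;
    }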
*/ -efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, - efi_system_table_t *sys_table_arg) +static efi_status_t efi_allocate_bootparams(efi_handle_t handle, + struct boot_params **bp) { - static struct boot_params boot_params __page_aligned_bss; - struct setup_header *hdr = &boot_params.hdr; efi_guid_t proto = LOADED_IMAGE_PROTOCOL_GUID; - int options_size = 0; + struct boot_params *boot_params; + struct setup_header *hdr; efi_status_t status; + unsigned long alloc; char *cmdline_ptr; - if (efi_is_native()) - memset(_bss, 0, _ebss - _bss); - - efi_system_table = sys_table_arg; - - /* Check if we were booted by the EFI firmware */ - if (efi_system_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) - efi_exit(handle, EFI_INVALID_PARAMETER); - status = efi_bs_call(handle_protocol, handle, &proto, (void **)&image); if (status != EFI_SUCCESS) { efi_err("Failed to get handle for LOADED_IMAGE_PROTOCOL\n"); - efi_exit(handle, status); + return status; } + status = efi_allocate_pages(PARAM_SIZE, &alloc, ULONG_MAX); + if (status != EFI_SUCCESS) + return status; + + boot_params = memset((void *)alloc, 0x0, PARAM_SIZE); + hdr = &boot_params->hdr; + /* Assign the setup_header fields that the kernel actually cares about */ hdr->root_flags = 1; hdr->vid_mode = 0xffff; @@ -499,18 +433,17 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, hdr->initrd_addr_max = INT_MAX; /* Convert unicode cmdline to ascii */ - cmdline_ptr = efi_convert_cmdline(image, &options_size); - if (!cmdline_ptr) - goto fail; + cmdline_ptr = efi_convert_cmdline(image); + if (!cmdline_ptr) { + efi_free(PARAM_SIZE, alloc); + return EFI_OUT_OF_RESOURCES; + } efi_set_u64_split((unsigned long)cmdline_ptr, &hdr->cmd_line_ptr, - &boot_params.ext_cmd_line_ptr); - - efi_stub_entry(handle, sys_table_arg, &boot_params); - /* not reached */ + &boot_params->ext_cmd_line_ptr); -fail: - efi_exit(handle, status); + *bp = boot_params; + return EFI_SUCCESS; } static void add_e820ext(struct boot_params *params, @@ -555,7 +488,7 @@ setup_e820(struct boot_params *params, struct setup_data *e820ext, u32 e820ext_s m |= (u64)efi->efi_memmap_hi << 32; #endif - d = efi_early_memdesc_ptr(m, efi->efi_memdesc_size, i); + d = efi_memdesc_ptr(m, efi->efi_memdesc_size, i); switch (d->type) { case EFI_RESERVED_TYPE: case EFI_RUNTIME_SERVICES_CODE: @@ -669,7 +602,7 @@ static efi_status_t allocate_e820(struct boot_params *params, struct setup_data **e820ext, u32 *e820ext_size) { - struct efi_boot_memmap *map; + struct efi_boot_memmap *map __free(efi_pool) = NULL; efi_status_t status; __u32 nr_desc; @@ -683,13 +616,14 @@ static efi_status_t allocate_e820(struct boot_params *params, EFI_MMAP_NR_SLACK_SLOTS; status = alloc_e820ext(nr_e820ext, e820ext, e820ext_size); + if (status != EFI_SUCCESS) + return status; } - if (IS_ENABLED(CONFIG_UNACCEPTED_MEMORY) && status == EFI_SUCCESS) - status = allocate_unaccepted_bitmap(nr_desc, map); + if (IS_ENABLED(CONFIG_UNACCEPTED_MEMORY)) + return allocate_unaccepted_bitmap(nr_desc, map); - efi_bs_call(free_pool, map); - return status; + return EFI_SUCCESS; } struct exit_boot_struct { @@ -776,13 +710,36 @@ static void error(char *str) efi_warn("Decompression failed: %s\n", str); } -static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry) +static const char *cmdline_memmap_override; + +static efi_status_t parse_options(const char *cmdline) +{ + static const char opts[][14] = { + "mem=", "memmap=", "hugepages=" + }; + + for (int i = 0; i < ARRAY_SIZE(opts); i++) { + const char *p = strstr(cmdline, opts[i]); 
+ + if (p == cmdline || (p > cmdline && isspace(p[-1]))) { + cmdline_memmap_override = opts[i]; + break; + } + } + + return efi_parse_options(cmdline); +} + +static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry, + struct boot_params *boot_params) { unsigned long virt_addr = LOAD_PHYSICAL_ADDR; unsigned long addr, alloc_size, entry; efi_status_t status; u32 seed[2] = {}; + boot_params_ptr = boot_params; + /* determine the required size of the allocation */ alloc_size = ALIGN(max_t(unsigned long, output_len, kernel_total_size), MIN_KERNEL_ALIGN); @@ -807,9 +764,13 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry) !memcmp(efistub_fw_vendor(), ami, sizeof(ami))) { efi_debug("AMI firmware v2.0 or older detected - disabling physical KASLR\n"); seed[0] = 0; + } else if (cmdline_memmap_override) { + efi_info("%s detected on the kernel command line - disabling physical KASLR\n", + cmdline_memmap_override); + seed[0] = 0; } - boot_params_ptr->hdr.loadflags |= KASLR_FLAG; + boot_params->hdr.loadflags |= KASLR_FLAG; } status = efi_random_alloc(alloc_size, CONFIG_PHYSICAL_ALIGN, &addr, @@ -847,20 +808,27 @@ static void __noreturn enter_kernel(unsigned long kernel_addr, void __noreturn efi_stub_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg, struct boot_params *boot_params) + { efi_guid_t guid = EFI_MEMORY_ATTRIBUTE_PROTOCOL_GUID; - struct setup_header *hdr = &boot_params->hdr; const struct linux_efi_initrd *initrd = NULL; unsigned long kernel_entry; + struct setup_header *hdr; efi_status_t status; - boot_params_ptr = boot_params; - efi_system_table = sys_table_arg; /* Check if we were booted by the EFI firmware */ if (efi_system_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) efi_exit(handle, EFI_INVALID_PARAMETER); + if (!IS_ENABLED(CONFIG_EFI_HANDOVER_PROTOCOL) || !boot_params) { + status = efi_allocate_bootparams(handle, &boot_params); + if (status != EFI_SUCCESS) + efi_exit(handle, status); + } + + hdr = &boot_params->hdr; + if (have_unsupported_snp_features()) efi_exit(handle, EFI_UNSUPPORTED); @@ -883,7 +851,7 @@ void __noreturn efi_stub_entry(efi_handle_t handle, } #ifdef CONFIG_CMDLINE_BOOL - status = efi_parse_options(CONFIG_CMDLINE); + status = parse_options(CONFIG_CMDLINE); if (status != EFI_SUCCESS) { efi_err("Failed to parse options\n"); goto fail; @@ -892,7 +860,7 @@ void __noreturn efi_stub_entry(efi_handle_t handle, if (!IS_ENABLED(CONFIG_CMDLINE_OVERRIDE)) { unsigned long cmdline_paddr = ((u64)hdr->cmd_line_ptr | ((u64)boot_params->ext_cmd_line_ptr << 32)); - status = efi_parse_options((char *)cmdline_paddr); + status = parse_options((char *)cmdline_paddr); if (status != EFI_SUCCESS) { efi_err("Failed to parse options\n"); goto fail; @@ -902,7 +870,7 @@ void __noreturn efi_stub_entry(efi_handle_t handle, if (efi_mem_encrypt > 0) hdr->xloadflags |= XLF_MEM_ENCRYPTION; - status = efi_decompress_kernel(&kernel_entry); + status = efi_decompress_kernel(&kernel_entry, boot_params); if (status != EFI_SUCCESS) { efi_err("Failed to decompress kernel\n"); goto fail; @@ -972,6 +940,12 @@ fail: efi_exit(handle, status); } +efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, + efi_system_table_t *sys_table_arg) +{ + efi_stub_entry(handle, sys_table_arg, NULL); +} + #ifdef CONFIG_EFI_HANDOVER_PROTOCOL void efi_handover_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg, struct boot_params *boot_params) diff --git a/drivers/firmware/efi/libstub/zboot-decompress-gzip.c b/drivers/firmware/efi/libstub/zboot-decompress-gzip.c new 
file mode 100644 index 000000000000..e97a7e9d3c98 --- /dev/null +++ b/drivers/firmware/efi/libstub/zboot-decompress-gzip.c @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/efi.h> +#include <linux/zlib.h> + +#include <asm/efi.h> + +#include "efistub.h" + +#include "inftrees.c" +#include "inffast.c" +#include "inflate.c" + +extern unsigned char _gzdata_start[], _gzdata_end[]; +extern u32 __aligned(1) payload_size; + +static struct z_stream_s stream; + +efi_status_t efi_zboot_decompress_init(unsigned long *alloc_size) +{ + efi_status_t status; + int rc; + + /* skip the 10 byte header, assume no recorded filename */ + stream.next_in = _gzdata_start + 10; + stream.avail_in = _gzdata_end - stream.next_in; + + status = efi_allocate_pages(zlib_inflate_workspacesize(), + (unsigned long *)&stream.workspace, + ULONG_MAX); + if (status != EFI_SUCCESS) + return status; + + rc = zlib_inflateInit2(&stream, -MAX_WBITS); + if (rc != Z_OK) { + efi_err("failed to initialize GZIP decompressor: %d\n", rc); + status = EFI_LOAD_ERROR; + goto out; + } + + *alloc_size = payload_size; + return EFI_SUCCESS; +out: + efi_free(zlib_inflate_workspacesize(), (unsigned long)stream.workspace); + return status; +} + +efi_status_t efi_zboot_decompress(u8 *out, unsigned long outlen) +{ + int rc; + + stream.next_out = out; + stream.avail_out = outlen; + + rc = zlib_inflate(&stream, 0); + zlib_inflateEnd(&stream); + + efi_free(zlib_inflate_workspacesize(), (unsigned long)stream.workspace); + + if (rc != Z_STREAM_END) { + efi_err("GZIP decompression failed with status %d\n", rc); + return EFI_LOAD_ERROR; + } + + efi_cache_sync_image((unsigned long)out, outlen); + + return EFI_SUCCESS; +} diff --git a/drivers/firmware/efi/libstub/zboot-decompress-zstd.c b/drivers/firmware/efi/libstub/zboot-decompress-zstd.c new file mode 100644 index 000000000000..bde9d94dd2e3 --- /dev/null +++ b/drivers/firmware/efi/libstub/zboot-decompress-zstd.c @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/efi.h> +#include <linux/zstd.h> + +#include <asm/efi.h> + +#include "decompress_sources.h" +#include "efistub.h" + +extern unsigned char _gzdata_start[], _gzdata_end[]; +extern u32 __aligned(1) payload_size; + +static size_t wksp_size; +static void *wksp; + +efi_status_t efi_zboot_decompress_init(unsigned long *alloc_size) +{ + efi_status_t status; + + wksp_size = zstd_dctx_workspace_bound(); + status = efi_allocate_pages(wksp_size, (unsigned long *)&wksp, ULONG_MAX); + if (status != EFI_SUCCESS) + return status; + + *alloc_size = payload_size; + return EFI_SUCCESS; +} + +efi_status_t efi_zboot_decompress(u8 *out, unsigned long outlen) +{ + zstd_dctx *dctx = zstd_init_dctx(wksp, wksp_size); + size_t ret; + int retval; + + ret = zstd_decompress_dctx(dctx, out, outlen, _gzdata_start, + _gzdata_end - _gzdata_start - 4); + efi_free(wksp_size, (unsigned long)wksp); + + retval = zstd_get_error_code(ret); + if (retval) { + efi_err("ZSTD-decompression failed with status %d\n", retval); + return EFI_LOAD_ERROR; + } + + efi_cache_sync_image((unsigned long)out, outlen); + + return EFI_SUCCESS; +} diff --git a/drivers/firmware/efi/libstub/zboot-header.S b/drivers/firmware/efi/libstub/zboot-header.S index fb676ded47fa..b6431edd0fc9 100644 --- a/drivers/firmware/efi/libstub/zboot-header.S +++ b/drivers/firmware/efi/libstub/zboot-header.S @@ -4,17 +4,17 @@ #ifdef CONFIG_64BIT .set .Lextra_characteristics, 0x0 - .set .Lpe_opt_magic, PE_OPT_MAGIC_PE32PLUS + .set .Lpe_opt_magic, IMAGE_NT_OPTIONAL_HDR64_MAGIC #else .set 
.Lextra_characteristics, IMAGE_FILE_32BIT_MACHINE - .set .Lpe_opt_magic, PE_OPT_MAGIC_PE32 + .set .Lpe_opt_magic, IMAGE_NT_OPTIONAL_HDR32_MAGIC #endif .section ".head", "a" .globl __efistub_efi_zboot_header __efistub_efi_zboot_header: .Ldoshdr: - .long MZ_MAGIC + .long IMAGE_DOS_SIGNATURE .ascii "zimg" // image type .long __efistub__gzdata_start - .Ldoshdr // payload offset .long __efistub__gzdata_size - ZBOOT_SIZE_LEN // payload size @@ -25,7 +25,7 @@ __efistub_efi_zboot_header: .long .Lpehdr - .Ldoshdr // PE header offset .Lpehdr: - .long PE_MAGIC + .long IMAGE_NT_SIGNATURE .short MACHINE_TYPE .short .Lsection_count .long 0 @@ -63,7 +63,7 @@ __efistub_efi_zboot_header: .long .Lefi_header_end - .Ldoshdr .long 0 .short IMAGE_SUBSYSTEM_EFI_APPLICATION - .short IMAGE_DLL_CHARACTERISTICS_NX_COMPAT + .short IMAGE_DLLCHARACTERISTICS_NX_COMPAT #ifdef CONFIG_64BIT .quad 0, 0, 0, 0 #else @@ -123,11 +123,29 @@ __efistub_efi_zboot_header: IMAGE_SCN_MEM_READ | \ IMAGE_SCN_MEM_EXECUTE +#ifdef CONFIG_EFI_SBAT + .ascii ".sbat\0\0\0" + .long __sbat_size + .long _sbat - .Ldoshdr + .long __sbat_size + .long _sbat - .Ldoshdr + + .long 0, 0 + .short 0, 0 + .long IMAGE_SCN_CNT_INITIALIZED_DATA | \ + IMAGE_SCN_MEM_READ | \ + IMAGE_SCN_MEM_DISCARDABLE + + .pushsection ".sbat", "a", @progbits + .incbin CONFIG_EFI_SBAT_FILE + .popsection +#endif + .ascii ".data\0\0\0" .long __data_size - .long _etext - .Ldoshdr + .long _data - .Ldoshdr .long __data_rawsize - .long _etext - .Ldoshdr + .long _data - .Ldoshdr .long 0, 0 .short 0, 0 diff --git a/drivers/firmware/efi/libstub/zboot.c b/drivers/firmware/efi/libstub/zboot.c index 1ceace956758..c47ace06f010 100644 --- a/drivers/firmware/efi/libstub/zboot.c +++ b/drivers/firmware/efi/libstub/zboot.c @@ -3,40 +3,10 @@ #include <linux/efi.h> #include <linux/pe.h> #include <asm/efi.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include "efistub.h" -static unsigned char zboot_heap[SZ_256K] __aligned(64); -static unsigned long free_mem_ptr, free_mem_end_ptr; - -#define STATIC static -#if defined(CONFIG_KERNEL_GZIP) -#include "../../../../lib/decompress_inflate.c" -#elif defined(CONFIG_KERNEL_LZ4) -#include "../../../../lib/decompress_unlz4.c" -#elif defined(CONFIG_KERNEL_LZMA) -#include "../../../../lib/decompress_unlzma.c" -#elif defined(CONFIG_KERNEL_LZO) -#include "../../../../lib/decompress_unlzo.c" -#elif defined(CONFIG_KERNEL_XZ) -#undef memcpy -#define memcpy memcpy -#undef memmove -#define memmove memmove -#include "../../../../lib/decompress_unxz.c" -#elif defined(CONFIG_KERNEL_ZSTD) -#include "../../../../lib/decompress_unzstd.c" -#endif - -extern char efi_zboot_header[]; -extern char _gzdata_start[], _gzdata_end[]; - -static void error(char *x) -{ - efi_err("EFI decompressor: %s\n", x); -} - static unsigned long alloc_preferred_address(unsigned long alloc_size) { #ifdef EFI_KIMG_PREFERRED_ADDRESS @@ -64,22 +34,17 @@ struct screen_info *alloc_screen_info(void) asmlinkage efi_status_t __efiapi efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab) { - unsigned long compressed_size = _gzdata_end - _gzdata_start; + char *cmdline_ptr __free(efi_pool) = NULL; unsigned long image_base, alloc_size; efi_loaded_image_t *image; efi_status_t status; - char *cmdline_ptr; - int ret; WRITE_ONCE(efi_system_table, systab); - free_mem_ptr = (unsigned long)&zboot_heap; - free_mem_end_ptr = free_mem_ptr + sizeof(zboot_heap); - status = efi_bs_call(handle_protocol, handle, &LOADED_IMAGE_PROTOCOL_GUID, (void **)&image); if (status != EFI_SUCCESS) { - error("Failed 
to locate parent's loaded image protocol"); + efi_err("Failed to locate parent's loaded image protocol\n"); return status; } @@ -89,9 +54,9 @@ efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab) efi_info("Decompressing Linux Kernel...\n"); - // SizeOfImage from the compressee's PE/COFF header - alloc_size = round_up(get_unaligned_le32(_gzdata_end - 4), - EFI_ALLOC_ALIGN); + status = efi_zboot_decompress_init(&alloc_size); + if (status != EFI_SUCCESS) + return status; // If the architecture has a preferred address for the image, // try that first. @@ -122,26 +87,14 @@ efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab) seed, EFI_LOADER_CODE, 0, EFI_ALLOC_LIMIT); if (status != EFI_SUCCESS) { efi_err("Failed to allocate memory\n"); - goto free_cmdline; + return status; } } - // Decompress the payload into the newly allocated buffer. - ret = __decompress(_gzdata_start, compressed_size, NULL, NULL, - (void *)image_base, alloc_size, NULL, error); - if (ret < 0) { - error("Decompression failed"); - status = EFI_DEVICE_ERROR; - goto free_image; - } - - efi_cache_sync_image(image_base, alloc_size); - - status = efi_stub_common(handle, image, image_base, cmdline_ptr); + // Decompress the payload into the newly allocated buffer + status = efi_zboot_decompress((void *)image_base, alloc_size) ?: + efi_stub_common(handle, image, image_base, cmdline_ptr); -free_image: efi_free(alloc_size, image_base); -free_cmdline: - efi_bs_call(free_pool, cmdline_ptr); return status; } diff --git a/drivers/firmware/efi/libstub/zboot.lds b/drivers/firmware/efi/libstub/zboot.lds index ac8c0ef85158..c3a166675450 100644 --- a/drivers/firmware/efi/libstub/zboot.lds +++ b/drivers/firmware/efi/libstub/zboot.lds @@ -17,6 +17,7 @@ SECTIONS .rodata : ALIGN(8) { __efistub__gzdata_start = .; *(.gzdata) + __efistub_payload_size = . - 4; __efistub__gzdata_end = .; *(.rodata* .init.rodata* .srodata*) @@ -28,7 +29,17 @@ SECTIONS . = _etext; } +#ifdef CONFIG_EFI_SBAT + .sbat : ALIGN(4096) { + _sbat = .; + *(.sbat) + _esbat = ALIGN(4096); + . = _esbat; + } +#endif + .data : ALIGN(4096) { + _data = .; *(.data* .init.data*) _edata = ALIGN(512); . = _edata; @@ -41,6 +52,7 @@ SECTIONS } /DISCARD/ : { + *(.discard .discard.*) *(.modinfo .init.modinfo) } } @@ -50,3 +62,4 @@ PROVIDE(__efistub__gzdata_size = PROVIDE(__data_rawsize = ABSOLUTE(_edata - _etext)); PROVIDE(__data_size = ABSOLUTE(_end - _etext)); +PROVIDE(__sbat_size = ABSOLUTE(_esbat - _sbat)); diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c index ab85bf8e165a..c38b1a335590 100644 --- a/drivers/firmware/efi/memattr.c +++ b/drivers/firmware/efi/memattr.c @@ -22,6 +22,7 @@ unsigned long __ro_after_init efi_mem_attr_table = EFI_INVALID_TABLE_ADDR; int __init efi_memattr_init(void) { efi_memory_attributes_table_t *tbl; + unsigned long size; if (efi_mem_attr_table == EFI_INVALID_TABLE_ADDR) return 0; @@ -39,7 +40,22 @@ int __init efi_memattr_init(void) goto unmap; } - tbl_size = sizeof(*tbl) + tbl->num_entries * tbl->desc_size; + + /* + * Sanity check: the Memory Attributes Table contains up to 3 entries + * for each entry of type EfiRuntimeServicesCode in the EFI memory map. + * So if the size of the table exceeds 3x the size of the entire EFI + * memory map, there is clearly something wrong, and the table should + * just be ignored altogether. + */ + size = tbl->num_entries * tbl->desc_size; + if (size > 3 * efi.memmap.nr_map * efi.memmap.desc_size) { + pr_warn(FW_BUG "Corrupted EFI Memory Attributes Table detected! 
(version == %u, desc_size == %u, num_entries == %u)\n", + tbl->version, tbl->desc_size, tbl->num_entries); + goto unmap; + } + + tbl_size = sizeof(*tbl) + size; memblock_reserve(efi_mem_attr_table, tbl_size); set_bit(EFI_MEM_ATTR, &efi.flags); @@ -164,7 +180,7 @@ int __init efi_memattr_apply_permissions(struct mm_struct *mm, bool valid; char buf[64]; - valid = entry_is_valid((void *)tbl->entry + i * tbl->desc_size, + valid = entry_is_valid(efi_memdesc_ptr(tbl->entry, tbl->desc_size, i), &md); size = md.num_pages << EFI_PAGE_SHIFT; if (efi_enabled(EFI_DBG) || !valid) diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c index 3365944f7965..f1c04d7cfd71 100644 --- a/drivers/firmware/efi/memmap.c +++ b/drivers/firmware/efi/memmap.c @@ -15,10 +15,6 @@ #include <asm/early_ioremap.h> #include <asm/efi.h> -#ifndef __efi_memmap_free -#define __efi_memmap_free(phys, size, flags) do { } while (0) -#endif - /** * __efi_memmap_init - Common code for mapping the EFI memory map * @data: EFI memory map data @@ -47,15 +43,11 @@ int __init __efi_memmap_init(struct efi_memory_map_data *data) map.map = early_memremap(phys_map, data->size); if (!map.map) { - pr_err("Could not map the memory map!\n"); + pr_err("Could not map the memory map! phys_map=%pa, size=0x%lx\n", + &phys_map, data->size); return -ENOMEM; } - if (efi.memmap.flags & (EFI_MEMMAP_MEMBLOCK | EFI_MEMMAP_SLAB)) - __efi_memmap_free(efi.memmap.phys_map, - efi.memmap.desc_size * efi.memmap.nr_map, - efi.memmap.flags); - map.phys_map = data->phys_map; map.nr_map = data->size / data->desc_size; map.map_end = map.map + data->size; diff --git a/drivers/firmware/efi/mokvar-table.c b/drivers/firmware/efi/mokvar-table.c index 5ed0602c2f75..0a856c3f69a3 100644 --- a/drivers/firmware/efi/mokvar-table.c +++ b/drivers/firmware/efi/mokvar-table.c @@ -99,14 +99,13 @@ static struct kobject *mokvar_kobj; */ void __init efi_mokvar_table_init(void) { + struct efi_mokvar_table_entry __aligned(1) *mokvar_entry, *next_entry; efi_memory_desc_t md; void *va = NULL; unsigned long cur_offset = 0; unsigned long offset_limit; - unsigned long map_size = 0; unsigned long map_size_needed = 0; unsigned long size; - struct efi_mokvar_table_entry *mokvar_entry; int err; if (!efi_enabled(EFI_MEMMAP)) @@ -134,48 +133,46 @@ void __init efi_mokvar_table_init(void) */ err = -EINVAL; while (cur_offset + sizeof(*mokvar_entry) <= offset_limit) { - mokvar_entry = va + cur_offset; - map_size_needed = cur_offset + sizeof(*mokvar_entry); - if (map_size_needed > map_size) { - if (va) - early_memunmap(va, map_size); - /* - * Map a little more than the fixed size entry - * header, anticipating some data. It's safe to - * do so as long as we stay within current memory - * descriptor. 
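The efi_memattr_init() hunk above caps the Memory Attributes Table at roughly three entries per EFI memory map entry and ignores anything larger as corrupt. A standalone version of that plausibility check; the demo_* name and the sample sizes are illustrative only:

    #include <stdbool.h>
    #include <stdio.h>

    /* Accept the table only if it is no larger than 3x the whole memory map. */
    static bool demo_memattr_size_ok(unsigned int num_entries, unsigned int desc_size,
                                     unsigned long memmap_nr, unsigned long memmap_desc_size)
    {
        unsigned long size = (unsigned long)num_entries * desc_size;

        return size <= 3UL * memmap_nr * memmap_desc_size;
    }

    int main(void)
    {
        /* 200 memory map entries of 48 bytes; a 10000-entry attributes table is bogus */
        printf("sane:    %d\n", demo_memattr_size_ok(300, 48, 200, 48));
        printf("corrupt: %d\n", demo_memattr_size_ok(10000, 48, 200, 48));
        return 0;
    }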
- */ - map_size = min(map_size_needed + 2*EFI_PAGE_SIZE, - offset_limit); - va = early_memremap(efi.mokvar_table, map_size); - if (!va) { - pr_err("Failed to map EFI MOKvar config table pa=0x%lx, size=%lu.\n", - efi.mokvar_table, map_size); - return; - } - mokvar_entry = va + cur_offset; + if (va) + early_memunmap(va, sizeof(*mokvar_entry)); + va = early_memremap(efi.mokvar_table + cur_offset, sizeof(*mokvar_entry)); + if (!va) { + pr_err("Failed to map EFI MOKvar config table pa=0x%lx, size=%zu.\n", + efi.mokvar_table + cur_offset, sizeof(*mokvar_entry)); + return; } - + mokvar_entry = va; +next: /* Check for last sentinel entry */ if (mokvar_entry->name[0] == '\0') { if (mokvar_entry->data_size != 0) break; err = 0; + map_size_needed = cur_offset + sizeof(*mokvar_entry); break; } - /* Sanity check that the name is null terminated */ - size = strnlen(mokvar_entry->name, - sizeof(mokvar_entry->name)); - if (size >= sizeof(mokvar_entry->name)) - break; + /* Enforce that the name is NUL terminated */ + mokvar_entry->name[sizeof(mokvar_entry->name) - 1] = '\0'; /* Advance to the next entry */ - cur_offset = map_size_needed + mokvar_entry->data_size; + size = sizeof(*mokvar_entry) + mokvar_entry->data_size; + cur_offset += size; + + /* + * Don't bother remapping if the current entry header and the + * next one end on the same page. + */ + next_entry = (void *)((unsigned long)mokvar_entry + size); + if (((((unsigned long)(mokvar_entry + 1) - 1) ^ + ((unsigned long)(next_entry + 1) - 1)) & PAGE_MASK) == 0) { + mokvar_entry = next_entry; + goto next; + } } if (va) - early_memunmap(va, map_size); + early_memunmap(va, sizeof(*mokvar_entry)); if (err) { pr_err("EFI MOKvar config table is not valid\n"); return; @@ -266,7 +263,7 @@ struct efi_mokvar_table_entry *efi_mokvar_entry_find(const char *name) * amount of data in this mokvar config table entry. 
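The reworked MOK variable table walk above only calls early_memremap() again when the next entry header would end on a different page; otherwise it keeps using the current mapping. A toy version of that same-page test, assuming 4 KiB pages and taking one-past-the-end addresses of the two headers:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_PAGE_MASK (~0xfffUL)   /* 4 KiB pages assumed for the illustration */

    /* True if the last bytes of both headers fall on the same page. */
    static bool demo_same_page(uintptr_t end_a, uintptr_t end_b)
    {
        return (((end_a - 1) ^ (end_b - 1)) & DEMO_PAGE_MASK) == 0;
    }

    int main(void)
    {
        printf("%d\n", demo_same_page(0x1100, 0x1f00)); /* both inside page 0x1000: 1 */
        printf("%d\n", demo_same_page(0x1f00, 0x2100)); /* crosses into page 0x2000: 0 */
        return 0;
    }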
*/ static ssize_t efi_mokvar_sysfs_read(struct file *file, struct kobject *kobj, - struct bin_attribute *bin_attr, char *buf, + const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct efi_mokvar_table_entry *mokvar_entry = bin_attr->private; @@ -343,7 +340,7 @@ static int __init efi_mokvar_sysfs_init(void) mokvar_sysfs->bin_attr.attr.name = mokvar_entry->name; mokvar_sysfs->bin_attr.attr.mode = 0400; mokvar_sysfs->bin_attr.size = mokvar_entry->data_size; - mokvar_sysfs->bin_attr.read = efi_mokvar_sysfs_read; + mokvar_sysfs->bin_attr.read_new = efi_mokvar_sysfs_read; err = sysfs_create_bin_file(mokvar_kobj, &mokvar_sysfs->bin_attr); diff --git a/drivers/firmware/efi/rci2-table.c b/drivers/firmware/efi/rci2-table.c index de1a9a1f9f14..c1bedd244817 100644 --- a/drivers/firmware/efi/rci2-table.c +++ b/drivers/firmware/efi/rci2-table.c @@ -40,15 +40,7 @@ static u8 *rci2_base; static u32 rci2_table_len; unsigned long rci2_table_phys __ro_after_init = EFI_INVALID_TABLE_ADDR; -static ssize_t raw_table_read(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, char *buf, - loff_t pos, size_t count) -{ - memcpy(buf, attr->private + pos, count); - return count; -} - -static BIN_ATTR(rci2, S_IRUSR, raw_table_read, NULL, 0); +static __ro_after_init BIN_ATTR_SIMPLE_ADMIN_RO(rci2); static u16 checksum(void) { diff --git a/drivers/firmware/efi/riscv-runtime.c b/drivers/firmware/efi/riscv-runtime.c index 01f0f90ea418..fa71cd898120 100644 --- a/drivers/firmware/efi/riscv-runtime.c +++ b/drivers/firmware/efi/riscv-runtime.c @@ -152,3 +152,16 @@ void arch_efi_call_virt_teardown(void) { efi_virtmap_unload(); } + +static int __init riscv_dmi_init(void) +{ + /* + * On riscv, DMI depends on UEFI, and dmi_setup() needs to + * be called early because dmi_id_init(), which is an arch_initcall + * itself, depends on dmi_scan_machine() having been called already. + */ + dmi_setup(); + + return 0; +} +core_initcall(riscv_dmi_init); diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c index 5d56bc40a79d..708b777857d3 100644 --- a/drivers/firmware/efi/runtime-wrappers.c +++ b/drivers/firmware/efi/runtime-wrappers.c @@ -213,7 +213,7 @@ extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock); * Calls the appropriate efi_runtime_service() with the appropriate * arguments. 
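The runtime wrappers below all serialize on efi_runtime_lock; the non-blocking variants (the *_nb setters and reset_system) take it with down_trylock() and report failure instead of sleeping. A rough pthread-based userspace model of that pattern, with demo_* names as stand-ins (build with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Non-blocking variant: bail out with "busy" rather than wait for the lock. */
    static int demo_set_variable_nb(const char *name)
    {
        if (pthread_mutex_trylock(&demo_lock) != 0) {
            fprintf(stderr, "busy: cannot set %s without blocking\n", name);
            return -1;
        }
        printf("set %s\n", name);
        pthread_mutex_unlock(&demo_lock);
        return 0;
    }

    int main(void)
    {
        return demo_set_variable_nb("Boot0000") ? 1 : 0;
    }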
*/ -static void efi_call_rts(struct work_struct *work) +static void __nocfi efi_call_rts(struct work_struct *work) { const union efi_rts_args *args = efi_rts_work.args; efi_status_t status = EFI_NOT_FOUND; @@ -435,7 +435,7 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name, return status; } -static efi_status_t +static efi_status_t __nocfi virt_efi_set_variable_nb(efi_char16_t *name, efi_guid_t *vendor, u32 attr, unsigned long data_size, void *data) { @@ -469,7 +469,7 @@ static efi_status_t virt_efi_query_variable_info(u32 attr, return status; } -static efi_status_t +static efi_status_t __nocfi virt_efi_query_variable_info_nb(u32 attr, u64 *storage_space, u64 *remaining_space, u64 *max_variable_size) { @@ -499,10 +499,9 @@ static efi_status_t virt_efi_get_next_high_mono_count(u32 *count) return status; } -static void virt_efi_reset_system(int reset_type, - efi_status_t status, - unsigned long data_size, - efi_char16_t *data) +static void __nocfi +virt_efi_reset_system(int reset_type, efi_status_t status, + unsigned long data_size, efi_char16_t *data) { if (down_trylock(&efi_runtime_lock)) { pr_warn("failed to invoke the reset_system() runtime service:\n" diff --git a/drivers/firmware/efi/sysfb_efi.c b/drivers/firmware/efi/sysfb_efi.c index cc807ed35aed..1e509595ac03 100644 --- a/drivers/firmware/efi/sysfb_efi.c +++ b/drivers/firmware/efi/sysfb_efi.c @@ -91,6 +91,7 @@ void efifb_setup_from_dmi(struct screen_info *si, const char *opt) _ret_; \ }) +#ifdef CONFIG_EFI static int __init efifb_set_system(const struct dmi_system_id *id) { struct efifb_dmi_info *info = id->driver_data; @@ -346,7 +347,6 @@ static const struct fwnode_operations efifb_fwnode_ops = { .add_links = efifb_add_links, }; -#ifdef CONFIG_EFI static struct fwnode_handle efifb_fwnode; __init void sysfb_apply_efi_quirks(void) diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c index 47d67bb0a516..77b5f7ac3e20 100644 --- a/drivers/firmware/efi/test/efi_test.c +++ b/drivers/firmware/efi/test/efi_test.c @@ -361,6 +361,10 @@ static long efi_runtime_get_waketime(unsigned long arg) getwakeuptime.enabled)) return -EFAULT; + if (getwakeuptime.pending && put_user(pending, + getwakeuptime.pending)) + return -EFAULT; + if (getwakeuptime.time) { if (copy_to_user(getwakeuptime.time, &efi_time, sizeof(efi_time_t))) @@ -750,7 +754,6 @@ static const struct file_operations efi_test_fops = { .unlocked_ioctl = efi_test_ioctl, .open = efi_test_open, .release = efi_test_close, - .llseek = no_llseek, }; static struct miscdevice efi_test_dev = { diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c index e8d69bd548f3..cdd431027065 100644 --- a/drivers/firmware/efi/tpm.c +++ b/drivers/firmware/efi/tpm.c @@ -19,7 +19,7 @@ EXPORT_SYMBOL(efi_tpm_final_log_size); static int __init tpm2_calc_event_log_size(void *data, int count, void *size_info) { struct tcg_pcr_event2_head *header; - int event_size, size = 0; + u32 event_size, size = 0; while (count > 0) { header = data + size; @@ -40,7 +40,8 @@ int __init efi_tpm_eventlog_init(void) { struct linux_efi_tpm_eventlog *log_tbl; struct efi_tcg2_final_events_table *final_tbl; - int tbl_size; + unsigned int tbl_size; + int final_tbl_size; int ret = 0; if (efi.tpm_log == EFI_INVALID_TABLE_ADDR) { @@ -60,7 +61,12 @@ int __init efi_tpm_eventlog_init(void) } tbl_size = sizeof(*log_tbl) + log_tbl->size; - memblock_reserve(efi.tpm_log, tbl_size); + if (memblock_reserve(efi.tpm_log, tbl_size)) { + pr_err("TPM Event Log memblock reserve fails (0x%lx, 
0x%x)\n", + efi.tpm_log, tbl_size); + ret = -ENOMEM; + goto out; + } if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR) { pr_info("TPM Final Events table not present\n"); @@ -80,26 +86,26 @@ int __init efi_tpm_eventlog_init(void) goto out; } - tbl_size = 0; + final_tbl_size = 0; if (final_tbl->nr_events != 0) { void *events = (void *)efi.tpm_final_log + sizeof(final_tbl->version) + sizeof(final_tbl->nr_events); - tbl_size = tpm2_calc_event_log_size(events, - final_tbl->nr_events, - log_tbl->log); + final_tbl_size = tpm2_calc_event_log_size(events, + final_tbl->nr_events, + log_tbl->log); } - if (tbl_size < 0) { + if (final_tbl_size < 0) { pr_err(FW_BUG "Failed to parse event in TPM Final Events Log\n"); ret = -EINVAL; goto out_calc; } memblock_reserve(efi.tpm_final_log, - tbl_size + sizeof(*final_tbl)); - efi_tpm_final_log_size = tbl_size; + final_tbl_size + sizeof(*final_tbl)); + efi_tpm_final_log_size = final_tbl_size; out_calc: early_memunmap(final_tbl, sizeof(*final_tbl)); diff --git a/drivers/firmware/efi/unaccepted_memory.c b/drivers/firmware/efi/unaccepted_memory.c index 5b439d04079c..c2c067eff634 100644 --- a/drivers/firmware/efi/unaccepted_memory.c +++ b/drivers/firmware/efi/unaccepted_memory.c @@ -4,6 +4,7 @@ #include <linux/memblock.h> #include <linux/spinlock.h> #include <linux/crash_dump.h> +#include <linux/nmi.h> #include <asm/unaccepted_memory.h> /* Protects unaccepted memory bitmap and accepting_list */ @@ -29,11 +30,12 @@ static LIST_HEAD(accepting_list); * - memory that is below phys_base; * - memory that is above the memory that addressable by the bitmap; */ -void accept_memory(phys_addr_t start, phys_addr_t end) +void accept_memory(phys_addr_t start, unsigned long size) { struct efi_unaccepted_memory *unaccepted; unsigned long range_start, range_end; struct accept_range range, *entry; + phys_addr_t end = start + size; unsigned long flags; u64 unit_size; @@ -73,13 +75,13 @@ void accept_memory(phys_addr_t start, phys_addr_t end) * "guard" page is accepted in addition to the memory that needs to be * used: * - * 1. Implicitly extend the range_contains_unaccepted_memory(start, end) - * checks up to end+unit_size if 'end' is aligned on a unit_size - * boundary. + * 1. Implicitly extend the range_contains_unaccepted_memory(start, size) + * checks up to the next unit_size if 'start+size' is aligned on a + * unit_size boundary. * - * 2. Implicitly extend accept_memory(start, end) to end+unit_size if - * 'end' is aligned on a unit_size boundary. (immediately following - * this comment) + * 2. Implicitly extend accept_memory(start, size) to the next unit_size + * if 'size+end' is aligned on a unit_size boundary. 
(immediately + * following this comment) */ if (!(end % unit_size)) end += unit_size; @@ -149,12 +151,16 @@ retry: } list_del(&range.list); + + touch_softlockup_watchdog(); + spin_unlock_irqrestore(&unaccepted_memory_lock, flags); } -bool range_contains_unaccepted_memory(phys_addr_t start, phys_addr_t end) +bool range_contains_unaccepted_memory(phys_addr_t start, unsigned long size) { struct efi_unaccepted_memory *unaccepted; + phys_addr_t end = start + size; unsigned long flags; bool ret = false; u64 unit_size; diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c index f654e6f6af87..3700e9869767 100644 --- a/drivers/firmware/efi/vars.c +++ b/drivers/firmware/efi/vars.c @@ -149,7 +149,7 @@ int efivar_lock(void) } return 0; } -EXPORT_SYMBOL_NS_GPL(efivar_lock, EFIVAR); +EXPORT_SYMBOL_NS_GPL(efivar_lock, "EFIVAR"); /* * efivar_lock() - obtain the efivar lock if it is free @@ -165,7 +165,7 @@ int efivar_trylock(void) } return 0; } -EXPORT_SYMBOL_NS_GPL(efivar_trylock, EFIVAR); +EXPORT_SYMBOL_NS_GPL(efivar_trylock, "EFIVAR"); /* * efivar_unlock() - release the efivar lock @@ -174,7 +174,7 @@ void efivar_unlock(void) { up(&efivars_lock); } -EXPORT_SYMBOL_NS_GPL(efivar_unlock, EFIVAR); +EXPORT_SYMBOL_NS_GPL(efivar_unlock, "EFIVAR"); /* * efivar_get_variable() - retrieve a variable identified by name/vendor @@ -186,7 +186,7 @@ efi_status_t efivar_get_variable(efi_char16_t *name, efi_guid_t *vendor, { return __efivars->ops->get_variable(name, vendor, attr, size, data); } -EXPORT_SYMBOL_NS_GPL(efivar_get_variable, EFIVAR); +EXPORT_SYMBOL_NS_GPL(efivar_get_variable, "EFIVAR"); /* * efivar_get_next_variable() - enumerate the next name/vendor pair @@ -198,7 +198,7 @@ efi_status_t efivar_get_next_variable(unsigned long *name_size, { return __efivars->ops->get_next_variable(name_size, name, vendor); } -EXPORT_SYMBOL_NS_GPL(efivar_get_next_variable, EFIVAR); +EXPORT_SYMBOL_NS_GPL(efivar_get_next_variable, "EFIVAR"); /* * efivar_set_variable_locked() - set a variable identified by name/vendor @@ -215,7 +215,7 @@ efi_status_t efivar_set_variable_locked(efi_char16_t *name, efi_guid_t *vendor, if (data_size > 0) { status = check_var_size(nonblocking, attr, - data_size + ucs2_strsize(name, 1024)); + data_size + ucs2_strsize(name, EFI_VAR_NAME_LEN)); if (status != EFI_SUCCESS) return status; } @@ -230,7 +230,7 @@ efi_status_t efivar_set_variable_locked(efi_char16_t *name, efi_guid_t *vendor, return setvar(name, vendor, attr, data_size, data); } -EXPORT_SYMBOL_NS_GPL(efivar_set_variable_locked, EFIVAR); +EXPORT_SYMBOL_NS_GPL(efivar_set_variable_locked, "EFIVAR"); /* * efivar_set_variable() - set a variable identified by name/vendor @@ -252,7 +252,7 @@ efi_status_t efivar_set_variable(efi_char16_t *name, efi_guid_t *vendor, efivar_unlock(); return status; } -EXPORT_SYMBOL_NS_GPL(efivar_set_variable, EFIVAR); +EXPORT_SYMBOL_NS_GPL(efivar_set_variable, "EFIVAR"); efi_status_t efivar_query_variable_info(u32 attr, u64 *storage_space, @@ -264,4 +264,4 @@ efi_status_t efivar_query_variable_info(u32 attr, return __efivars->ops->query_variable_info(attr, storage_space, remaining_space, max_variable_size); } -EXPORT_SYMBOL_NS_GPL(efivar_query_variable_info, EFIVAR); +EXPORT_SYMBOL_NS_GPL(efivar_query_variable_info, "EFIVAR"); diff --git a/drivers/firmware/google/cbmem.c b/drivers/firmware/google/cbmem.c index c2bffdc352a3..773d05078e0a 100644 --- a/drivers/firmware/google/cbmem.c +++ b/drivers/firmware/google/cbmem.c @@ -30,7 +30,7 @@ static struct cbmem_entry *to_cbmem_entry(struct kobject *kobj) } 
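The efivar hunk above moves every EXPORT_SYMBOL_NS_GPL() to the quoted-string namespace form, so consumers import the namespace as a string literal too. A minimal sketch of such a consumer, assuming a hypothetical demo module; the efivar_lock()/efivar_unlock()/efivar_query_variable_info() signatures follow the declarations visible in the hunk:

        #include <linux/efi.h>
        #include <linux/errno.h>
        #include <linux/module.h>

        /* hypothetical consumer of the "EFIVAR" symbol namespace */
        static int __init efivar_demo_init(void)
        {
                efi_status_t status;
                u64 storage, remaining, max_var;

                if (efivar_lock())
                        return -EINTR;

                status = efivar_query_variable_info(EFI_VARIABLE_NON_VOLATILE |
                                                    EFI_VARIABLE_BOOTSERVICE_ACCESS |
                                                    EFI_VARIABLE_RUNTIME_ACCESS,
                                                    &storage, &remaining, &max_var);
                efivar_unlock();

                return status == EFI_SUCCESS ? 0 : -ENODEV;
        }
        module_init(efivar_demo_init);

        /* quoted form matches the new export style above */
        MODULE_IMPORT_NS("EFIVAR");
        MODULE_DESCRIPTION("EFIVAR namespace usage sketch");
        MODULE_LICENSE("GPL");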
static ssize_t mem_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, char *buf, loff_t pos, + const struct bin_attribute *bin_attr, char *buf, loff_t pos, size_t count) { struct cbmem_entry *entry = to_cbmem_entry(kobj); @@ -40,7 +40,7 @@ static ssize_t mem_read(struct file *filp, struct kobject *kobj, } static ssize_t mem_write(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, char *buf, loff_t pos, + const struct bin_attribute *bin_attr, char *buf, loff_t pos, size_t count) { struct cbmem_entry *entry = to_cbmem_entry(kobj); @@ -53,7 +53,7 @@ static ssize_t mem_write(struct file *filp, struct kobject *kobj, memcpy(entry->mem_file_buf + pos, buf, count); return count; } -static BIN_ATTR_ADMIN_RW(mem, 0); +static const BIN_ATTR_ADMIN_RW(mem, 0); static ssize_t address_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -79,14 +79,14 @@ static struct attribute *attrs[] = { NULL, }; -static struct bin_attribute *bin_attrs[] = { +static const struct bin_attribute *const bin_attrs[] = { &bin_attr_mem, NULL, }; static const struct attribute_group cbmem_entry_group = { .attrs = attrs, - .bin_attrs = bin_attrs, + .bin_attrs_new = bin_attrs, }; static const struct attribute_group *dev_groups[] = { @@ -124,7 +124,6 @@ static struct coreboot_driver cbmem_entry_driver = { .probe = cbmem_entry_probe, .drv = { .name = "cbmem", - .owner = THIS_MODULE, .dev_groups = dev_groups, }, .id_table = cbmem_ids, @@ -132,4 +131,5 @@ static struct coreboot_driver cbmem_entry_driver = { module_coreboot_driver(cbmem_entry_driver); MODULE_AUTHOR("Jack Rosenthal <jrosenth@chromium.org>"); +MODULE_DESCRIPTION("Driver for exporting CBMEM entries in sysfs"); MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c index d4b6e581a6c6..882db32e51be 100644 --- a/drivers/firmware/google/coreboot_table.c +++ b/drivers/firmware/google/coreboot_table.c @@ -22,12 +22,12 @@ #include "coreboot_table.h" #define CB_DEV(d) container_of(d, struct coreboot_device, dev) -#define CB_DRV(d) container_of(d, struct coreboot_driver, drv) +#define CB_DRV(d) container_of_const(d, struct coreboot_driver, drv) -static int coreboot_bus_match(struct device *dev, struct device_driver *drv) +static int coreboot_bus_match(struct device *dev, const struct device_driver *drv) { struct coreboot_device *device = CB_DEV(dev); - struct coreboot_driver *driver = CB_DRV(drv); + const struct coreboot_driver *driver = CB_DRV(drv); const struct coreboot_device_id *id; if (!driver->id_table) @@ -85,13 +85,15 @@ static void coreboot_device_release(struct device *dev) kfree(device); } -int coreboot_driver_register(struct coreboot_driver *driver) +int __coreboot_driver_register(struct coreboot_driver *driver, + struct module *owner) { driver->drv.bus = &coreboot_bus_type; + driver->drv.owner = owner; return driver_register(&driver->drv); } -EXPORT_SYMBOL(coreboot_driver_register); +EXPORT_SYMBOL(__coreboot_driver_register); void coreboot_driver_unregister(struct coreboot_driver *driver) { @@ -218,7 +220,7 @@ MODULE_DEVICE_TABLE(of, coreboot_of_match); static struct platform_driver coreboot_table_driver = { .probe = coreboot_table_probe, - .remove_new = coreboot_table_remove, + .remove = coreboot_table_remove, .driver = { .name = "coreboot_table", .acpi_match_table = ACPI_PTR(cros_coreboot_acpi_match), @@ -253,4 +255,5 @@ module_init(coreboot_table_driver_init); module_exit(coreboot_table_driver_exit); MODULE_AUTHOR("Google, Inc."); 
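The cbmem, gsmi, memconsole and vpd hunks in this range all convert to the const struct bin_attribute callbacks (.read_new/.write_new) and to .bin_attrs_new in the attribute group. A minimal sketch of the target pattern with a hypothetical read-only "log" attribute; the callback signature mirrors the converted ones above:

        #include <linux/string.h>
        #include <linux/sysfs.h>

        static char demo_log[4096];

        static ssize_t demo_log_read(struct file *filp, struct kobject *kobj,
                                     const struct bin_attribute *bin_attr,
                                     char *buf, loff_t pos, size_t count)
        {
                /* sysfs clamps pos/count against bin_attr->size when size is set */
                memcpy(buf, demo_log + pos, count);
                return count;
        }

        static const struct bin_attribute demo_log_attr = {
                .attr = { .name = "log", .mode = 0444 },
                .size = sizeof(demo_log),
                .read_new = demo_log_read,
        };

        static const struct bin_attribute *const demo_bin_attrs[] = {
                &demo_log_attr,
                NULL,
        };

        static const struct attribute_group demo_group = {
                .bin_attrs_new = demo_bin_attrs,
        };

The group would then be attached with sysfs_create_group() or through a driver's dev_groups list, as the cbmem driver above does.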
+MODULE_DESCRIPTION("Module providing coreboot table access"); MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/google/coreboot_table.h b/drivers/firmware/google/coreboot_table.h index 86427989c57f..bb6f0f7299b4 100644 --- a/drivers/firmware/google/coreboot_table.h +++ b/drivers/firmware/google/coreboot_table.h @@ -97,8 +97,12 @@ struct coreboot_driver { const struct coreboot_device_id *id_table; }; +/* use a macro to avoid include chaining to get THIS_MODULE */ +#define coreboot_driver_register(driver) \ + __coreboot_driver_register(driver, THIS_MODULE) /* Register a driver that uses the data from a coreboot table. */ -int coreboot_driver_register(struct coreboot_driver *driver); +int __coreboot_driver_register(struct coreboot_driver *driver, + struct module *owner); /* Unregister a driver that uses the data from a coreboot table. */ void coreboot_driver_unregister(struct coreboot_driver *driver); diff --git a/drivers/firmware/google/framebuffer-coreboot.c b/drivers/firmware/google/framebuffer-coreboot.c index 07c458bf64ec..c68c9f56370f 100644 --- a/drivers/firmware/google/framebuffer-coreboot.c +++ b/drivers/firmware/google/framebuffer-coreboot.c @@ -15,6 +15,7 @@ #include <linux/module.h> #include <linux/platform_data/simplefb.h> #include <linux/platform_device.h> +#include <linux/sysfb.h> #include "coreboot_table.h" @@ -36,6 +37,19 @@ static int framebuffer_probe(struct coreboot_device *dev) .format = NULL, }; + /* + * On coreboot systems, the advertised LB_TAG_FRAMEBUFFER entry + * in the coreboot table should only be used if the payload did + * not pass a framebuffer information to the Linux kernel. + * + * If the global screen_info data has been filled, the Generic + * System Framebuffers (sysfb) will already register a platform + * device and pass that screen_info as platform_data to a driver + * that can scan-out using the system provided framebuffer. 
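With coreboot_driver_register() now a macro that passes THIS_MODULE to __coreboot_driver_register(), individual drivers no longer set .drv.owner by hand (the cbmem hunk above drops it). A sketch of what a registrant looks like after this change; the tag value and names are illustrative only:

        #include "coreboot_table.h"      /* struct coreboot_driver, module_coreboot_driver() */

        static int demo_cb_probe(struct coreboot_device *dev)
        {
                /* device data would be interpreted according to the matched tag */
                return 0;
        }

        static const struct coreboot_device_id demo_cb_ids[] = {
                { .tag = 0x00c0 },      /* hypothetical LB_TAG_* value */
                { /* sentinel */ }
        };

        static struct coreboot_driver demo_cb_driver = {
                .probe = demo_cb_probe,
                .drv = {
                        .name = "demo-coreboot",
                        /* no .owner: the registration macro supplies THIS_MODULE */
                },
                .id_table = demo_cb_ids,
        };
        module_coreboot_driver(demo_cb_driver);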
+ */ + if (sysfb_handles_screen_info()) + return -ENODEV; + if (!fb->physical_address) return -ENODEV; @@ -97,4 +111,5 @@ static struct coreboot_driver framebuffer_driver = { module_coreboot_driver(framebuffer_driver); MODULE_AUTHOR("Samuel Holland <samuel@sholland.org>"); +MODULE_DESCRIPTION("Memory based framebuffer accessed through coreboot table"); MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c index 96ea1fa76d35..e8fb00dcaf65 100644 --- a/drivers/firmware/google/gsmi.c +++ b/drivers/firmware/google/gsmi.c @@ -488,7 +488,7 @@ static const struct efivar_operations efivar_ops = { #endif /* CONFIG_EFI */ static ssize_t eventlog_write(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t pos, size_t count) { struct gsmi_set_eventlog_param param = { @@ -528,9 +528,9 @@ static ssize_t eventlog_write(struct file *filp, struct kobject *kobj, } -static struct bin_attribute eventlog_bin_attr = { +static const struct bin_attribute eventlog_bin_attr = { .attr = {.name = "append_to_eventlog", .mode = 0200}, - .write = eventlog_write, + .write_new = eventlog_write, }; static ssize_t gsmi_clear_eventlog_store(struct kobject *kobj, @@ -918,7 +918,8 @@ static __init int gsmi_init(void) gsmi_dev.pdev = platform_device_register_full(&gsmi_dev_info); if (IS_ERR(gsmi_dev.pdev)) { printk(KERN_ERR "gsmi: unable to register platform device\n"); - return PTR_ERR(gsmi_dev.pdev); + ret = PTR_ERR(gsmi_dev.pdev); + goto out_unregister; } /* SMI access needs to be serialized */ @@ -1056,10 +1057,11 @@ out_err: gsmi_buf_free(gsmi_dev.name_buf); kmem_cache_destroy(gsmi_dev.mem_pool); platform_device_unregister(gsmi_dev.pdev); - pr_info("gsmi: failed to load: %d\n", ret); +out_unregister: #ifdef CONFIG_PM platform_driver_unregister(&gsmi_driver_info); #endif + pr_info("gsmi: failed to load: %d\n", ret); return ret; } @@ -1090,4 +1092,5 @@ module_init(gsmi_init); module_exit(gsmi_exit); MODULE_AUTHOR("Google, Inc."); +MODULE_DESCRIPTION("EFI SMI interface for Google platforms"); MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/google/memconsole-coreboot.c b/drivers/firmware/google/memconsole-coreboot.c index 24c97a70aa80..c5f08617aa8d 100644 --- a/drivers/firmware/google/memconsole-coreboot.c +++ b/drivers/firmware/google/memconsole-coreboot.c @@ -113,4 +113,5 @@ static struct coreboot_driver memconsole_driver = { module_coreboot_driver(memconsole_driver); MODULE_AUTHOR("Google, Inc."); +MODULE_DESCRIPTION("Memory based BIOS console accessed through coreboot table"); MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/google/memconsole-x86-legacy.c b/drivers/firmware/google/memconsole-x86-legacy.c index 3d3c4f6b8194..a0974c376985 100644 --- a/drivers/firmware/google/memconsole-x86-legacy.c +++ b/drivers/firmware/google/memconsole-x86-legacy.c @@ -154,4 +154,5 @@ module_init(memconsole_x86_init); module_exit(memconsole_x86_exit); MODULE_AUTHOR("Google, Inc."); +MODULE_DESCRIPTION("EBDA specific parts of the memory based BIOS console."); MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c index 44d314ad69e4..d957af6f9349 100644 --- a/drivers/firmware/google/memconsole.c +++ b/drivers/firmware/google/memconsole.c @@ -14,7 +14,7 @@ #include "memconsole.h" static ssize_t memconsole_read(struct file *filp, struct kobject *kobp, - struct bin_attribute *bin_attr, char *buf, + const struct bin_attribute *bin_attr, char *buf, loff_t 
pos, size_t count) { ssize_t (*memconsole_read_func)(char *, loff_t, size_t); @@ -28,7 +28,7 @@ static ssize_t memconsole_read(struct file *filp, struct kobject *kobp, static struct bin_attribute memconsole_bin_attr = { .attr = {.name = "log", .mode = 0444}, - .read = memconsole_read, + .read_new = memconsole_read, }; void memconsole_setup(ssize_t (*read_func)(char *, loff_t, size_t)) @@ -50,4 +50,5 @@ void memconsole_exit(void) EXPORT_SYMBOL(memconsole_exit); MODULE_AUTHOR("Google, Inc."); +MODULE_DESCRIPTION("Architecture-independent parts of the memory based BIOS console"); MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c index 8e4216714b29..254ac6545d68 100644 --- a/drivers/firmware/google/vpd.c +++ b/drivers/firmware/google/vpd.c @@ -56,7 +56,7 @@ static struct vpd_section ro_vpd; static struct vpd_section rw_vpd; static ssize_t vpd_attrib_read(struct file *filp, struct kobject *kobp, - struct bin_attribute *bin_attr, char *buf, + const struct bin_attribute *bin_attr, char *buf, loff_t pos, size_t count) { struct vpd_attrib_info *info = bin_attr->private; @@ -121,7 +121,7 @@ static int vpd_section_attrib_add(const u8 *key, u32 key_len, info->bin_attr.attr.name = info->key; info->bin_attr.attr.mode = 0444; info->bin_attr.size = value_len; - info->bin_attr.read = vpd_attrib_read; + info->bin_attr.read_new = vpd_attrib_read; info->bin_attr.private = info; info->value = value; @@ -156,7 +156,7 @@ static void vpd_section_attrib_destroy(struct vpd_section *sec) } static ssize_t vpd_section_read(struct file *filp, struct kobject *kobp, - struct bin_attribute *bin_attr, char *buf, + const struct bin_attribute *bin_attr, char *buf, loff_t pos, size_t count) { struct vpd_section *sec = bin_attr->private; @@ -201,7 +201,7 @@ static int vpd_section_init(const char *name, struct vpd_section *sec, sec->bin_attr.attr.name = sec->raw_name; sec->bin_attr.attr.mode = 0444; sec->bin_attr.size = size; - sec->bin_attr.read = vpd_section_read; + sec->bin_attr.read_new = vpd_section_read; sec->bin_attr.private = sec; err = sysfs_create_bin_file(vpd_kobj, &sec->bin_attr); @@ -323,4 +323,5 @@ static struct coreboot_driver vpd_driver = { module_coreboot_driver(vpd_driver); MODULE_AUTHOR("Google, Inc."); +MODULE_DESCRIPTION("Driver for exporting Vital Product Data content to sysfs"); MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig index 183613f82a11..127ad752acf8 100644 --- a/drivers/firmware/imx/Kconfig +++ b/drivers/firmware/imx/Kconfig @@ -22,3 +22,36 @@ config IMX_SCU This driver manages the IPC interface between host CPU and the SCU firmware running on M4. + +config IMX_SCMI_CPU_DRV + tristate "IMX SCMI CPU Protocol driver" + depends on ARCH_MXC || COMPILE_TEST + default y if ARCH_MXC + help + The System Controller Management Interface firmware (SCMI FW) is + a low-level system function which runs on a dedicated Cortex-M + core that could provide cpu management features. + + This driver can also be built as a module. + +config IMX_SCMI_LMM_DRV + tristate "IMX SCMI LMM Protocol driver" + depends on ARCH_MXC || COMPILE_TEST + default y if ARCH_MXC + help + The System Controller Management Interface firmware (SCMI FW) is + a low-level system function which runs on a dedicated Cortex-M + core that could provide Logical Machine management features. + + This driver can also be built as a module. 
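The new IMX_SCMI_* options gate thin SCMI vendor-protocol drivers (sm-cpu.c, sm-lmm.c and sm-misc.c, added below) that export helpers to other kernel code. A hedged sketch of how a consumer might start an auxiliary core with the CPU helpers defined below; the cpuid, entry vector and flag values are illustrative, and the prototypes are assumed to come from the sm.h header the new drivers include:

        #include <linux/errno.h>
        #include <linux/firmware/imx/sm.h>

        static int demo_boot_aux_core(u32 cpuid, u64 entry)
        {
                int ret;

                /* both helpers return -EPROBE_DEFER until the SCMI driver has bound */
                ret = scmi_imx_cpu_reset_vector_set(cpuid, entry,
                                                    false /* start */,
                                                    true  /* boot */,
                                                    false /* resume */);
                if (ret)
                        return ret;

                return scmi_imx_cpu_start(cpuid, true);
        }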
+ +config IMX_SCMI_MISC_DRV + tristate "IMX SCMI MISC Protocol driver" + depends on ARCH_MXC || COMPILE_TEST + default y if ARCH_MXC + help + The System Controller Management Interface firmware (SCMI FW) is + a low-level system function which runs on a dedicated Cortex-M + core that could provide misc functions such as board control. + + This driver can also be built as a module. diff --git a/drivers/firmware/imx/Makefile b/drivers/firmware/imx/Makefile index 8f9f04a513a8..3bbaffa6e347 100644 --- a/drivers/firmware/imx/Makefile +++ b/drivers/firmware/imx/Makefile @@ -1,3 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_IMX_DSP) += imx-dsp.o obj-$(CONFIG_IMX_SCU) += imx-scu.o misc.o imx-scu-irq.o rm.o imx-scu-soc.o +obj-${CONFIG_IMX_SCMI_CPU_DRV} += sm-cpu.o +obj-${CONFIG_IMX_SCMI_MISC_DRV} += sm-misc.o +obj-${CONFIG_IMX_SCMI_LMM_DRV} += sm-lmm.o diff --git a/drivers/firmware/imx/imx-dsp.c b/drivers/firmware/imx/imx-dsp.c index 01c8ef14eaec..ed79e823157a 100644 --- a/drivers/firmware/imx/imx-dsp.c +++ b/drivers/firmware/imx/imx-dsp.c @@ -180,7 +180,7 @@ static struct platform_driver imx_dsp_driver = { .name = "imx-dsp", }, .probe = imx_dsp_probe, - .remove_new = imx_dsp_remove, + .remove = imx_dsp_remove, }; builtin_platform_driver(imx_dsp_driver); diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c index 1dd4362ef9a3..8c28e25ddc8a 100644 --- a/drivers/firmware/imx/imx-scu.c +++ b/drivers/firmware/imx/imx-scu.c @@ -280,6 +280,7 @@ static int imx_scu_probe(struct platform_device *pdev) return ret; sc_ipc->fast_ipc = of_device_is_compatible(args.np, "fsl,imx8-mu-scu"); + of_node_put(args.np); num_channel = sc_ipc->fast_ipc ? 2 : SCU_MU_CHAN_NUM; for (i = 0; i < num_channel; i++) { diff --git a/drivers/firmware/imx/sm-cpu.c b/drivers/firmware/imx/sm-cpu.c new file mode 100644 index 000000000000..091b014f739f --- /dev/null +++ b/drivers/firmware/imx/sm-cpu.c @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2025 NXP + */ + +#include <linux/firmware/imx/sm.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/scmi_protocol.h> +#include <linux/scmi_imx_protocol.h> + +static const struct scmi_imx_cpu_proto_ops *imx_cpu_ops; +static struct scmi_protocol_handle *ph; + +int scmi_imx_cpu_reset_vector_set(u32 cpuid, u64 vector, bool start, bool boot, + bool resume) +{ + if (!ph) + return -EPROBE_DEFER; + + return imx_cpu_ops->cpu_reset_vector_set(ph, cpuid, vector, start, + boot, resume); +} +EXPORT_SYMBOL(scmi_imx_cpu_reset_vector_set); + +int scmi_imx_cpu_start(u32 cpuid, bool start) +{ + if (!ph) + return -EPROBE_DEFER; + + if (start) + return imx_cpu_ops->cpu_start(ph, cpuid, true); + + return imx_cpu_ops->cpu_start(ph, cpuid, false); +}; +EXPORT_SYMBOL(scmi_imx_cpu_start); + +int scmi_imx_cpu_started(u32 cpuid, bool *started) +{ + if (!ph) + return -EPROBE_DEFER; + + if (!started) + return -EINVAL; + + return imx_cpu_ops->cpu_started(ph, cpuid, started); +}; +EXPORT_SYMBOL(scmi_imx_cpu_started); + +static int scmi_imx_cpu_probe(struct scmi_device *sdev) +{ + const struct scmi_handle *handle = sdev->handle; + + if (!handle) + return -ENODEV; + + if (imx_cpu_ops) { + dev_err(&sdev->dev, "sm cpu already initialized\n"); + return -EEXIST; + } + + imx_cpu_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_IMX_CPU, &ph); + if (IS_ERR(imx_cpu_ops)) + return PTR_ERR(imx_cpu_ops); + + return 0; +} + +static const struct scmi_device_id scmi_id_table[] = { + { SCMI_PROTOCOL_IMX_CPU, "imx-cpu" }, + { }, 
+}; +MODULE_DEVICE_TABLE(scmi, scmi_id_table); + +static struct scmi_driver scmi_imx_cpu_driver = { + .name = "scmi-imx-cpu", + .probe = scmi_imx_cpu_probe, + .id_table = scmi_id_table, +}; +module_scmi_driver(scmi_imx_cpu_driver); + +MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>"); +MODULE_DESCRIPTION("IMX SM CPU driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/imx/sm-lmm.c b/drivers/firmware/imx/sm-lmm.c new file mode 100644 index 000000000000..6807bf563c03 --- /dev/null +++ b/drivers/firmware/imx/sm-lmm.c @@ -0,0 +1,91 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2025 NXP + */ + +#include <linux/firmware/imx/sm.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/scmi_protocol.h> +#include <linux/scmi_imx_protocol.h> + +static const struct scmi_imx_lmm_proto_ops *imx_lmm_ops; +static struct scmi_protocol_handle *ph; + +int scmi_imx_lmm_info(u32 lmid, struct scmi_imx_lmm_info *info) +{ + if (!ph) + return -EPROBE_DEFER; + + if (!info) + return -EINVAL; + + return imx_lmm_ops->lmm_info(ph, lmid, info); +}; +EXPORT_SYMBOL(scmi_imx_lmm_info); + +int scmi_imx_lmm_reset_vector_set(u32 lmid, u32 cpuid, u32 flags, u64 vector) +{ + if (!ph) + return -EPROBE_DEFER; + + return imx_lmm_ops->lmm_reset_vector_set(ph, lmid, cpuid, flags, vector); +} +EXPORT_SYMBOL(scmi_imx_lmm_reset_vector_set); + +int scmi_imx_lmm_operation(u32 lmid, enum scmi_imx_lmm_op op, u32 flags) +{ + if (!ph) + return -EPROBE_DEFER; + + switch (op) { + case SCMI_IMX_LMM_BOOT: + return imx_lmm_ops->lmm_power_boot(ph, lmid, true); + case SCMI_IMX_LMM_POWER_ON: + return imx_lmm_ops->lmm_power_boot(ph, lmid, false); + case SCMI_IMX_LMM_SHUTDOWN: + return imx_lmm_ops->lmm_shutdown(ph, lmid, flags); + default: + break; + } + + return -EINVAL; +} +EXPORT_SYMBOL(scmi_imx_lmm_operation); + +static int scmi_imx_lmm_probe(struct scmi_device *sdev) +{ + const struct scmi_handle *handle = sdev->handle; + + if (!handle) + return -ENODEV; + + if (imx_lmm_ops) { + dev_err(&sdev->dev, "lmm already initialized\n"); + return -EEXIST; + } + + imx_lmm_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_IMX_LMM, &ph); + if (IS_ERR(imx_lmm_ops)) + return PTR_ERR(imx_lmm_ops); + + return 0; +} + +static const struct scmi_device_id scmi_id_table[] = { + { SCMI_PROTOCOL_IMX_LMM, "imx-lmm" }, + { }, +}; +MODULE_DEVICE_TABLE(scmi, scmi_id_table); + +static struct scmi_driver scmi_imx_lmm_driver = { + .name = "scmi-imx-lmm", + .probe = scmi_imx_lmm_probe, + .id_table = scmi_id_table, +}; +module_scmi_driver(scmi_imx_lmm_driver); + +MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>"); +MODULE_DESCRIPTION("IMX SM LMM driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/imx/sm-misc.c b/drivers/firmware/imx/sm-misc.c new file mode 100644 index 000000000000..fc3ee12c2be8 --- /dev/null +++ b/drivers/firmware/imx/sm-misc.c @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2024 NXP + */ + +#include <linux/firmware/imx/sm.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/scmi_protocol.h> +#include <linux/scmi_imx_protocol.h> + +static const struct scmi_imx_misc_proto_ops *imx_misc_ctrl_ops; +static struct scmi_protocol_handle *ph; +struct notifier_block scmi_imx_misc_ctrl_nb; + +int scmi_imx_misc_ctrl_set(u32 id, u32 val) +{ + if (!ph) + return -EPROBE_DEFER; + + return imx_misc_ctrl_ops->misc_ctrl_set(ph, id, 1, &val); +}; +EXPORT_SYMBOL(scmi_imx_misc_ctrl_set); + +int scmi_imx_misc_ctrl_get(u32 id, u32 
*num, u32 *val) +{ + if (!ph) + return -EPROBE_DEFER; + + return imx_misc_ctrl_ops->misc_ctrl_get(ph, id, num, val); +} +EXPORT_SYMBOL(scmi_imx_misc_ctrl_get); + +static int scmi_imx_misc_ctrl_notifier(struct notifier_block *nb, + unsigned long event, void *data) +{ + /* + * notifier_chain_register requires a valid notifier_block and + * valid notifier_call. SCMI_EVENT_IMX_MISC_CONTROL is needed + * to let SCMI firmware enable control events, but the hook here + * is just a dummy function to avoid kernel panic as of now. + */ + return 0; +} + +static int scmi_imx_misc_ctrl_probe(struct scmi_device *sdev) +{ + const struct scmi_handle *handle = sdev->handle; + struct device_node *np = sdev->dev.of_node; + u32 src_id, flags; + int ret, i, num; + + if (!handle) + return -ENODEV; + + if (imx_misc_ctrl_ops) { + dev_err(&sdev->dev, "misc ctrl already initialized\n"); + return -EEXIST; + } + + imx_misc_ctrl_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_IMX_MISC, &ph); + if (IS_ERR(imx_misc_ctrl_ops)) + return PTR_ERR(imx_misc_ctrl_ops); + + num = of_property_count_u32_elems(np, "nxp,ctrl-ids"); + if (num % 2) { + dev_err(&sdev->dev, "Invalid wakeup-sources\n"); + return -EINVAL; + } + + scmi_imx_misc_ctrl_nb.notifier_call = &scmi_imx_misc_ctrl_notifier; + for (i = 0; i < num; i += 2) { + ret = of_property_read_u32_index(np, "nxp,ctrl-ids", i, &src_id); + if (ret) { + dev_err(&sdev->dev, "Failed to read ctrl-id: %i\n", i); + continue; + } + + ret = of_property_read_u32_index(np, "nxp,ctrl-ids", i + 1, &flags); + if (ret) { + dev_err(&sdev->dev, "Failed to read ctrl-id value: %d\n", i + 1); + continue; + } + + ret = handle->notify_ops->devm_event_notifier_register(sdev, SCMI_PROTOCOL_IMX_MISC, + SCMI_EVENT_IMX_MISC_CONTROL, + &src_id, + &scmi_imx_misc_ctrl_nb); + if (ret) { + dev_err(&sdev->dev, "Failed to register scmi misc event: %d\n", src_id); + } else { + ret = imx_misc_ctrl_ops->misc_ctrl_req_notify(ph, src_id, + SCMI_EVENT_IMX_MISC_CONTROL, + flags); + if (ret) + dev_err(&sdev->dev, "Failed to req notify: %d\n", src_id); + } + } + + return 0; +} + +static const struct scmi_device_id scmi_id_table[] = { + { SCMI_PROTOCOL_IMX_MISC, "imx-misc-ctrl" }, + { }, +}; +MODULE_DEVICE_TABLE(scmi, scmi_id_table); + +static struct scmi_driver scmi_imx_misc_ctrl_driver = { + .name = "scmi-imx-misc-ctrl", + .probe = scmi_imx_misc_ctrl_probe, + .id_table = scmi_id_table, +}; +module_scmi_driver(scmi_imx_misc_ctrl_driver); + +MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>"); +MODULE_DESCRIPTION("IMX SM MISC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c index 6e9788324fea..371f24569b3b 100644 --- a/drivers/firmware/iscsi_ibft.c +++ b/drivers/firmware/iscsi_ibft.c @@ -310,7 +310,10 @@ static ssize_t ibft_attr_show_nic(void *data, int type, char *buf) str += sprintf_ipaddr(str, nic->ip_addr); break; case ISCSI_BOOT_ETH_SUBNET_MASK: - val = cpu_to_be32(~((1 << (32-nic->subnet_mask_prefix))-1)); + if (nic->subnet_mask_prefix > 32) + val = cpu_to_be32(~0); + else + val = cpu_to_be32(~((1 << (32-nic->subnet_mask_prefix))-1)); str += sprintf(str, "%pI4", &val); break; case ISCSI_BOOT_ETH_PREFIX_LEN: diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c index 8e59be3782cb..55b9cfad8a04 100644 --- a/drivers/firmware/memmap.c +++ b/drivers/firmware/memmap.c @@ -116,7 +116,7 @@ static void __meminit release_firmware_map_entry(struct kobject *kobj) kfree(entry); } -static struct kobj_type __refdata memmap_ktype = { +static const struct 
kobj_type memmap_ktype = { .release = release_firmware_map_entry, .sysfs_ops = &memmap_attr_ops, .default_groups = def_groups, diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c index 5d7f62fe1d5f..f25a9746249b 100644 --- a/drivers/firmware/meson/meson_sm.c +++ b/drivers/firmware/meson/meson_sm.c @@ -340,4 +340,5 @@ static struct platform_driver meson_sm_driver = { }, }; module_platform_driver_probe(meson_sm_driver, meson_sm_probe); +MODULE_DESCRIPTION("Amlogic Secure Monitor driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/firmware/microchip/mpfs-auto-update.c b/drivers/firmware/microchip/mpfs-auto-update.c index fbeeaee4ac85..e194f7acb2a9 100644 --- a/drivers/firmware/microchip/mpfs-auto-update.c +++ b/drivers/firmware/microchip/mpfs-auto-update.c @@ -9,6 +9,7 @@ * * Author: Conor Dooley <conor.dooley@microchip.com> */ +#include <linux/cleanup.h> #include <linux/debugfs.h> #include <linux/firmware.h> #include <linux/math.h> @@ -71,21 +72,30 @@ #define AUTO_UPDATE_UPGRADE_DIRECTORY (AUTO_UPDATE_DIRECTORY_WIDTH * AUTO_UPDATE_UPGRADE_INDEX) #define AUTO_UPDATE_BLANK_DIRECTORY (AUTO_UPDATE_DIRECTORY_WIDTH * AUTO_UPDATE_BLANK_INDEX) #define AUTO_UPDATE_DIRECTORY_SIZE SZ_1K -#define AUTO_UPDATE_RESERVED_SIZE SZ_1M -#define AUTO_UPDATE_BITSTREAM_BASE (AUTO_UPDATE_DIRECTORY_SIZE + AUTO_UPDATE_RESERVED_SIZE) - -#define AUTO_UPDATE_TIMEOUT_MS 60000 +#define AUTO_UPDATE_INFO_BASE AUTO_UPDATE_DIRECTORY_SIZE +#define AUTO_UPDATE_INFO_SIZE SZ_1M +#define AUTO_UPDATE_BITSTREAM_BASE (AUTO_UPDATE_DIRECTORY_SIZE + AUTO_UPDATE_INFO_SIZE) struct mpfs_auto_update_priv { struct mpfs_sys_controller *sys_controller; struct device *dev; struct mtd_info *flash; struct fw_upload *fw_uploader; - struct completion programming_complete; size_t size_per_bitstream; bool cancel_request; }; +static bool mpfs_auto_update_is_bitstream_info(const u8 *data, u32 size) +{ + if (size < 4) + return false; + + if (data[0] == 0x4d && data[1] == 0x43 && data[2] == 0x48 && data[3] == 0x50) + return true; + + return false; +} + static enum fw_upload_err mpfs_auto_update_prepare(struct fw_upload *fw_uploader, const u8 *data, u32 size) { @@ -143,47 +153,23 @@ static void mpfs_auto_update_cancel(struct fw_upload *fw_uploader) static enum fw_upload_err mpfs_auto_update_poll_complete(struct fw_upload *fw_uploader) { - struct mpfs_auto_update_priv *priv = fw_uploader->dd_handle; - int ret; - - /* - * There is no meaningful way to get the status of the programming while - * it is in progress, so attempting anything other than waiting for it - * to complete would be misplaced. 
- */ - ret = wait_for_completion_timeout(&priv->programming_complete, - msecs_to_jiffies(AUTO_UPDATE_TIMEOUT_MS)); - if (ret) - return FW_UPLOAD_ERR_TIMEOUT; - return FW_UPLOAD_ERR_NONE; } static int mpfs_auto_update_verify_image(struct fw_upload *fw_uploader) { struct mpfs_auto_update_priv *priv = fw_uploader->dd_handle; - struct mpfs_mss_response *response; - struct mpfs_mss_msg *message; - u32 *response_msg; + u32 *response_msg __free(kfree) = + kzalloc(AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(*response_msg), GFP_KERNEL); + struct mpfs_mss_response *response __free(kfree) = + kzalloc(sizeof(struct mpfs_mss_response), GFP_KERNEL); + struct mpfs_mss_msg *message __free(kfree) = + kzalloc(sizeof(struct mpfs_mss_msg), GFP_KERNEL); int ret; - response_msg = devm_kzalloc(priv->dev, AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(*response_msg), - GFP_KERNEL); - if (!response_msg) + if (!response_msg || !response || !message) return -ENOMEM; - response = devm_kzalloc(priv->dev, sizeof(struct mpfs_mss_response), GFP_KERNEL); - if (!response) { - ret = -ENOMEM; - goto free_response_msg; - } - - message = devm_kzalloc(priv->dev, sizeof(struct mpfs_mss_msg), GFP_KERNEL); - if (!message) { - ret = -ENOMEM; - goto free_response; - } - /* * The system controller can verify that an image in the flash is valid. * Rather than duplicate the check in this driver, call the relevant @@ -205,29 +191,25 @@ static int mpfs_auto_update_verify_image(struct fw_upload *fw_uploader) ret = mpfs_blocking_transaction(priv->sys_controller, message); if (ret | response->resp_status) { dev_warn(priv->dev, "Verification of Upgrade Image failed!\n"); - ret = ret ? ret : -EBADMSG; + return ret ? ret : -EBADMSG; } dev_info(priv->dev, "Verification of Upgrade Image passed!\n"); - devm_kfree(priv->dev, message); -free_response: - devm_kfree(priv->dev, response); -free_response_msg: - devm_kfree(priv->dev, response_msg); - - return ret; + return 0; } -static int mpfs_auto_update_set_image_address(struct mpfs_auto_update_priv *priv, char *buffer, +static int mpfs_auto_update_set_image_address(struct mpfs_auto_update_priv *priv, u32 image_address, loff_t directory_address) { struct erase_info erase; - size_t erase_size = AUTO_UPDATE_DIRECTORY_SIZE; + size_t erase_size = round_up(AUTO_UPDATE_DIRECTORY_SIZE, (u64)priv->flash->erasesize); size_t bytes_written = 0, bytes_read = 0; + char *buffer __free(kfree) = kzalloc(erase_size, GFP_KERNEL); int ret; - erase_size = round_up(erase_size, (u64)priv->flash->erasesize); + if (!buffer) + return -ENOMEM; erase.addr = AUTO_UPDATE_DIRECTORY_BASE; erase.len = erase_size; @@ -265,7 +247,7 @@ static int mpfs_auto_update_set_image_address(struct mpfs_auto_update_priv *priv AUTO_UPDATE_DIRECTORY_WIDTH); memset(buffer + AUTO_UPDATE_BLANK_DIRECTORY, 0x0, AUTO_UPDATE_DIRECTORY_WIDTH); - dev_info(priv->dev, "Writing the image address (%x) to the flash directory (%llx)\n", + dev_info(priv->dev, "Writing the image address (0x%x) to the flash directory (0x%llx)\n", image_address, directory_address); ret = mtd_write(priv->flash, 0x0, erase_size, &bytes_written, (u_char *)buffer); @@ -273,7 +255,7 @@ static int mpfs_auto_update_set_image_address(struct mpfs_auto_update_priv *priv return ret; if (bytes_written != erase_size) - return ret; + return -EIO; return 0; } @@ -283,26 +265,36 @@ static int mpfs_auto_update_write_bitstream(struct fw_upload *fw_uploader, const { struct mpfs_auto_update_priv *priv = fw_uploader->dd_handle; struct erase_info erase; - char *buffer; loff_t directory_address = 
AUTO_UPDATE_UPGRADE_DIRECTORY; size_t erase_size = AUTO_UPDATE_DIRECTORY_SIZE; size_t bytes_written = 0; + bool is_info = mpfs_auto_update_is_bitstream_info(data, size); u32 image_address; int ret; erase_size = round_up(erase_size, (u64)priv->flash->erasesize); - image_address = AUTO_UPDATE_BITSTREAM_BASE + - AUTO_UPDATE_UPGRADE_INDEX * priv->size_per_bitstream; - - buffer = devm_kzalloc(priv->dev, erase_size, GFP_KERNEL); - if (!buffer) - return -ENOMEM; + if (is_info) + image_address = AUTO_UPDATE_INFO_BASE; + else + image_address = AUTO_UPDATE_BITSTREAM_BASE + + AUTO_UPDATE_UPGRADE_INDEX * priv->size_per_bitstream; - ret = mpfs_auto_update_set_image_address(priv, buffer, image_address, directory_address); - if (ret) { - dev_err(priv->dev, "failed to set image address in the SPI directory: %d\n", ret); - goto out; + /* + * For bitstream info, the descriptor is written to a fixed offset, + * so there is no need to set the image address. + */ + if (!is_info) { + ret = mpfs_auto_update_set_image_address(priv, image_address, directory_address); + if (ret) { + dev_err(priv->dev, "failed to set image address in the SPI directory: %d\n", ret); + return ret; + } + } else { + if (size > AUTO_UPDATE_INFO_SIZE) { + dev_err(priv->dev, "bitstream info exceeds permitted size\n"); + return -ENOSPC; + } } /* @@ -313,61 +305,51 @@ static int mpfs_auto_update_write_bitstream(struct fw_upload *fw_uploader, const erase.len = round_up(size, (size_t)priv->flash->erasesize); erase.addr = image_address; - dev_info(priv->dev, "Erasing the flash at address (%x)\n", image_address); + dev_info(priv->dev, "Erasing the flash at address (0x%x)\n", image_address); ret = mtd_erase(priv->flash, &erase); if (ret) - goto out; + return ret; /* * No parsing etc of the bitstream is required. The system controller * will do all of that itself - including verifying that the bitstream * is valid. 
*/ - dev_info(priv->dev, "Writing the image to the flash at address (%x)\n", image_address); + dev_info(priv->dev, "Writing the image to the flash at address (0x%x)\n", image_address); ret = mtd_write(priv->flash, (loff_t)image_address, size, &bytes_written, data); if (ret) - goto out; + return ret; - if (bytes_written != size) { - ret = -EIO; - goto out; - } + if (bytes_written != size) + return -EIO; *written = bytes_written; + dev_info(priv->dev, "Wrote 0x%zx bytes to the flash\n", bytes_written); -out: - devm_kfree(priv->dev, buffer); - return ret; + return 0; } static enum fw_upload_err mpfs_auto_update_write(struct fw_upload *fw_uploader, const u8 *data, u32 offset, u32 size, u32 *written) { struct mpfs_auto_update_priv *priv = fw_uploader->dd_handle; - enum fw_upload_err err = FW_UPLOAD_ERR_NONE; int ret; - reinit_completion(&priv->programming_complete); - ret = mpfs_auto_update_write_bitstream(fw_uploader, data, offset, size, written); - if (ret) { - err = FW_UPLOAD_ERR_RW_ERROR; - goto out; - } + if (ret) + return FW_UPLOAD_ERR_RW_ERROR; - if (priv->cancel_request) { - err = FW_UPLOAD_ERR_CANCELED; - goto out; - } + if (priv->cancel_request) + return FW_UPLOAD_ERR_CANCELED; + + if (mpfs_auto_update_is_bitstream_info(data, size)) + return FW_UPLOAD_ERR_NONE; ret = mpfs_auto_update_verify_image(fw_uploader); if (ret) - err = FW_UPLOAD_ERR_FW_INVALID; - -out: - complete(&priv->programming_complete); + return FW_UPLOAD_ERR_FW_INVALID; - return err; + return FW_UPLOAD_ERR_NONE; } static const struct fw_upload_ops mpfs_auto_update_ops = { @@ -379,23 +361,15 @@ static const struct fw_upload_ops mpfs_auto_update_ops = { static int mpfs_auto_update_available(struct mpfs_auto_update_priv *priv) { - struct mpfs_mss_response *response; - struct mpfs_mss_msg *message; - u32 *response_msg; + u32 *response_msg __free(kfree) = + kzalloc(AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(*response_msg), GFP_KERNEL); + struct mpfs_mss_response *response __free(kfree) = + kzalloc(sizeof(struct mpfs_mss_response), GFP_KERNEL); + struct mpfs_mss_msg *message __free(kfree) = + kzalloc(sizeof(struct mpfs_mss_msg), GFP_KERNEL); int ret; - response_msg = devm_kzalloc(priv->dev, - AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(*response_msg), - GFP_KERNEL); - if (!response_msg) - return -ENOMEM; - - response = devm_kzalloc(priv->dev, sizeof(struct mpfs_mss_response), GFP_KERNEL); - if (!response) - return -ENOMEM; - - message = devm_kzalloc(priv->dev, sizeof(struct mpfs_mss_msg), GFP_KERNEL); - if (!message) + if (!response_msg || !response || !message) return -ENOMEM; /* @@ -428,10 +402,10 @@ static int mpfs_auto_update_available(struct mpfs_auto_update_priv *priv) return -EIO; /* - * Bit 5 of byte 1 is "UL_Auto Update" & if it is set, Auto Update is + * Bit 5 of byte 1 is "UL_IAP" & if it is set, Auto Update is * not possible. 
*/ - if (response_msg[1] & AUTO_UPDATE_FEATURE_ENABLED) + if ((((u8 *)response_msg)[1] & AUTO_UPDATE_FEATURE_ENABLED)) return -EPERM; return 0; @@ -461,8 +435,6 @@ static int mpfs_auto_update_probe(struct platform_device *pdev) return dev_err_probe(dev, ret, "The current bitstream does not support auto-update\n"); - init_completion(&priv->programming_complete); - fw_uploader = firmware_upload_register(THIS_MODULE, dev, "mpfs-auto-update", &mpfs_auto_update_ops, priv); if (IS_ERR(fw_uploader)) @@ -486,7 +458,7 @@ static struct platform_driver mpfs_auto_update_driver = { .name = "mpfs-auto-update", }, .probe = mpfs_auto_update_probe, - .remove_new = mpfs_auto_update_remove, + .remove = mpfs_auto_update_remove, }; module_platform_driver(mpfs_auto_update_driver); diff --git a/drivers/firmware/mtk-adsp-ipc.c b/drivers/firmware/mtk-adsp-ipc.c index a762302978de..2b79371c61c9 100644 --- a/drivers/firmware/mtk-adsp-ipc.c +++ b/drivers/firmware/mtk-adsp-ipc.c @@ -95,10 +95,9 @@ static int mtk_adsp_ipc_probe(struct platform_device *pdev) adsp_chan->idx = i; adsp_chan->ch = mbox_request_channel_byname(cl, adsp_mbox_ch_names[i]); if (IS_ERR(adsp_chan->ch)) { - ret = PTR_ERR(adsp_chan->ch); - if (ret != -EPROBE_DEFER) - dev_err(dev, "Failed to request mbox chan %s ret %d\n", - adsp_mbox_ch_names[i], ret); + ret = dev_err_probe(dev, PTR_ERR(adsp_chan->ch), + "Failed to request mbox channel %s\n", + adsp_mbox_ch_names[i]); for (j = 0; j < i; j++) { adsp_chan = &adsp_ipc->chans[j]; @@ -133,7 +132,7 @@ static struct platform_driver mtk_adsp_ipc_driver = { .name = "mtk-adsp-ipc", }, .probe = mtk_adsp_ipc_probe, - .remove_new = mtk_adsp_ipc_remove, + .remove = mtk_adsp_ipc_remove, }; builtin_platform_driver(mtk_adsp_ipc_driver); diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c index d9629ff87861..38ca190d4a22 100644 --- a/drivers/firmware/psci/psci.c +++ b/drivers/firmware/psci/psci.c @@ -78,6 +78,7 @@ struct psci_0_1_function_ids get_psci_0_1_function_ids(void) static u32 psci_cpu_suspend_feature; static bool psci_system_reset2_supported; +static bool psci_system_off2_hibernate_supported; static inline bool psci_has_ext_power_state(void) { @@ -333,6 +334,36 @@ static void psci_sys_poweroff(void) invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0); } +#ifdef CONFIG_HIBERNATION +static int psci_sys_hibernate(struct sys_off_data *data) +{ + /* + * If no hibernate type is specified SYSTEM_OFF2 defaults to selecting + * HIBERNATE_OFF. + * + * There are hypervisors in the wild that do not align with the spec and + * reject calls that explicitly provide a hibernate type. For + * compatibility with these nonstandard implementations, pass 0 as the + * type. 
+ */ + if (system_entering_hibernation()) + invoke_psci_fn(PSCI_FN_NATIVE(1_3, SYSTEM_OFF2), 0, 0, 0); + return NOTIFY_DONE; +} + +static int __init psci_hibernate_init(void) +{ + if (psci_system_off2_hibernate_supported) { + /* Higher priority than EFI shutdown, but only for hibernate */ + register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, + SYS_OFF_PRIO_FIRMWARE + 2, + psci_sys_hibernate, NULL); + } + return 0; +} +subsys_initcall(psci_hibernate_init); +#endif + static int psci_features(u32 psci_func_id) { return invoke_psci_fn(PSCI_1_0_FN_PSCI_FEATURES, @@ -364,6 +395,7 @@ static const struct { PSCI_ID_NATIVE(1_1, SYSTEM_RESET2), PSCI_ID(1_1, MEM_PROTECT), PSCI_ID_NATIVE(1_1, MEM_PROTECT_CHECK_RANGE), + PSCI_ID_NATIVE(1_3, SYSTEM_OFF2), }; static int psci_debugfs_read(struct seq_file *s, void *data) @@ -497,10 +529,12 @@ int psci_cpu_suspend_enter(u32 state) static int psci_system_suspend(unsigned long unused) { + int err; phys_addr_t pa_cpu_resume = __pa_symbol(cpu_resume); - return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND), + err = invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND), pa_cpu_resume, 0, 0); + return psci_to_linux_errno(err); } static int psci_system_suspend_enter(suspend_state_t state) @@ -523,6 +557,18 @@ static void __init psci_init_system_reset2(void) psci_system_reset2_supported = true; } +static void __init psci_init_system_off2(void) +{ + int ret; + + ret = psci_features(PSCI_FN_NATIVE(1_3, SYSTEM_OFF2)); + if (ret < 0) + return; + + if (ret & PSCI_1_3_OFF_TYPE_HIBERNATE_OFF) + psci_system_off2_hibernate_supported = true; +} + static void __init psci_init_system_suspend(void) { int ret; @@ -653,6 +699,7 @@ static int __init psci_probe(void) psci_init_cpu_suspend(); psci_init_system_suspend(); psci_init_system_reset2(); + psci_init_system_off2(); kvm_init_hyp_services(); } @@ -757,8 +804,10 @@ int __init psci_dt_init(void) np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np); - if (!np || !of_device_is_available(np)) + if (!np || !of_device_is_available(np)) { + of_node_put(np); return -ENODEV; + } init_fn = (psci_initcall_t)matched_np->data; ret = init_fn(np); diff --git a/drivers/firmware/psci/psci_checker.c b/drivers/firmware/psci/psci_checker.c index 116eb465cdb4..df02a4ec3398 100644 --- a/drivers/firmware/psci/psci_checker.c +++ b/drivers/firmware/psci/psci_checker.c @@ -342,8 +342,8 @@ static int suspend_test_thread(void *arg) * Disable the timer to make sure that the timer will not trigger * later. */ - del_timer(&wakeup_timer); - destroy_timer_on_stack(&wakeup_timer); + timer_delete(&wakeup_timer); + timer_destroy_on_stack(&wakeup_timer); if (atomic_dec_return_relaxed(&nb_active_threads) == 0) complete(&suspend_threads_done); diff --git a/drivers/firmware/qcom/Kconfig b/drivers/firmware/qcom/Kconfig index 3f05d9854ddf..b477d54b495a 100644 --- a/drivers/firmware/qcom/Kconfig +++ b/drivers/firmware/qcom/Kconfig @@ -7,18 +7,39 @@ menu "Qualcomm firmware drivers" config QCOM_SCM + select QCOM_TZMEM tristate -config QCOM_SCM_DOWNLOAD_MODE_DEFAULT - bool "Qualcomm download mode enabled by default" - depends on QCOM_SCM +config QCOM_TZMEM + tristate + select GENERIC_ALLOCATOR + +choice + prompt "TrustZone interface memory allocator mode" + depends on QCOM_TZMEM + default QCOM_TZMEM_MODE_GENERIC + help + Selects the mode of the memory allocator providing memory buffers of + suitable format for sharing with the TrustZone. If in doubt, select + 'Generic'. 
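The TZMem allocator introduced by these options replaces the ad-hoc dma_map_single()/qseecom_dma_alloc() buffers removed further down. A minimal sketch of the consumer-side API exactly as it appears in the uefisecapp and qcom_scm hunks below (pool configuration, scoped free, physical-address lookup); the device pointer and sizes are placeholders:

        #include <linux/cleanup.h>
        #include <linux/device.h>
        #include <linux/err.h>
        #include <linux/firmware/qcom/qcom_tzmem.h>
        #include <linux/gfp.h>
        #include <linux/sizes.h>

        static int demo_share_with_tz(struct device *dev)
        {
                struct qcom_tzmem_pool_config config = {
                        .initial_size = SZ_4K,
                        .policy = QCOM_TZMEM_POLICY_MULTIPLIER,
                        .increment = 2,
                        .max_size = SZ_256K,
                };
                struct qcom_tzmem_pool *pool;
                void *buf __free(qcom_tzmem) = NULL;

                pool = devm_qcom_tzmem_pool_new(dev, &config);
                if (IS_ERR(pool))
                        return PTR_ERR(pool);

                buf = qcom_tzmem_alloc(pool, SZ_4K, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;

                /* hand qcom_tzmem_to_phys(buf) to the SCM/QSEECOM call here */

                return 0;       /* buf is released automatically on return */
        }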
+ +config QCOM_TZMEM_MODE_GENERIC + bool "Generic" help - A device with "download mode" enabled will upon an unexpected - warm-restart enter a special debug mode that allows the user to - "download" memory content over USB for offline postmortem analysis. - The feature can be enabled/disabled on the kernel command line. + Use the generic allocator mode. The memory is page-aligned, non-cachable + and physically contiguous. + +config QCOM_TZMEM_MODE_SHMBRIDGE + bool "SHM Bridge" + help + Use Qualcomm Shared Memory Bridge. The memory has the same alignment as + in the 'Generic' allocator but is also explicitly marked as an SHM Bridge + buffer. + + With this selected, all buffers passed to the TrustZone must be allocated + using the TZMem allocator or else the TrustZone will refuse to use them. - Say Y here to enable "download mode" by default. +endchoice config QCOM_QSEECOM bool "Qualcomm QSEECOM interface driver" diff --git a/drivers/firmware/qcom/Makefile b/drivers/firmware/qcom/Makefile index c9f12ee8224a..0be40a1abc13 100644 --- a/drivers/firmware/qcom/Makefile +++ b/drivers/firmware/qcom/Makefile @@ -5,5 +5,6 @@ obj-$(CONFIG_QCOM_SCM) += qcom-scm.o qcom-scm-objs += qcom_scm.o qcom_scm-smc.o qcom_scm-legacy.o +obj-$(CONFIG_QCOM_TZMEM) += qcom_tzmem.o obj-$(CONFIG_QCOM_QSEECOM) += qcom_qseecom.o obj-$(CONFIG_QCOM_QSEECOM_UEFISECAPP) += qcom_qseecom_uefisecapp.o diff --git a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c index bc550ad0dbe0..98a463e9774b 100644 --- a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c +++ b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c @@ -13,11 +13,14 @@ #include <linux/mutex.h> #include <linux/of.h> #include <linux/platform_device.h> +#include <linux/sizes.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/ucs2_string.h> #include <linux/firmware/qcom/qcom_qseecom.h> +#include <linux/firmware/qcom/qcom_scm.h> +#include <linux/firmware/qcom/qcom_tzmem.h> /* -- Qualcomm "uefisecapp" interface definitions. 
-------------------------- */ @@ -272,6 +275,7 @@ struct qsee_rsp_uefi_query_variable_info { struct qcuefi_client { struct qseecom_client *client; struct efivars efivars; + struct qcom_tzmem_pool *mempool; }; static struct device *qcuefi_dev(struct qcuefi_client *qcuefi) @@ -293,12 +297,11 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e { struct qsee_req_uefi_get_variable *req_data; struct qsee_rsp_uefi_get_variable *rsp_data; + void *cmd_buf __free(qcom_tzmem) = NULL; unsigned long buffer_size = *data_size; - efi_status_t efi_status = EFI_SUCCESS; unsigned long name_length; - dma_addr_t cmd_buf_dma; + efi_status_t efi_status; size_t cmd_buf_size; - void *cmd_buf; size_t guid_offs; size_t name_offs; size_t req_size; @@ -333,11 +336,9 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e __reqdata_offs(rsp_size, &rsp_offs) ); - cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL); - if (!cmd_buf) { - efi_status = EFI_OUT_OF_RESOURCES; - goto out; - } + cmd_buf = qcom_tzmem_alloc(qcuefi->mempool, cmd_buf_size, GFP_KERNEL); + if (!cmd_buf) + return EFI_OUT_OF_RESOURCES; req_data = cmd_buf + req_offs; rsp_data = cmd_buf + rsp_offs; @@ -351,30 +352,22 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e req_data->length = req_size; status = ucs2_strscpy(((void *)req_data) + req_data->name_offset, name, name_length); - if (status < 0) { - efi_status = EFI_INVALID_PARAMETER; - goto out_free; - } + if (status < 0) + return EFI_INVALID_PARAMETER; memcpy(((void *)req_data) + req_data->guid_offset, guid, req_data->guid_size); status = qcom_qseecom_app_send(qcuefi->client, - cmd_buf_dma + req_offs, req_size, - cmd_buf_dma + rsp_offs, rsp_size); - if (status) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + cmd_buf + req_offs, req_size, + cmd_buf + rsp_offs, rsp_size); + if (status) + return EFI_DEVICE_ERROR; - if (rsp_data->command_id != QSEE_CMD_UEFI_GET_VARIABLE) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->command_id != QSEE_CMD_UEFI_GET_VARIABLE) + return EFI_DEVICE_ERROR; - if (rsp_data->length < sizeof(*rsp_data)) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->length < sizeof(*rsp_data)) + return EFI_DEVICE_ERROR; if (rsp_data->status) { dev_dbg(qcuefi_dev(qcuefi), "%s: uefisecapp error: 0x%x\n", @@ -388,18 +381,14 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e *attributes = rsp_data->attributes; } - goto out_free; + return qsee_uefi_status_to_efi(rsp_data->status); } - if (rsp_data->length > rsp_size) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->length > rsp_size) + return EFI_DEVICE_ERROR; - if (rsp_data->data_offset + rsp_data->data_size > rsp_data->length) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->data_offset + rsp_data->data_size > rsp_data->length) + return EFI_DEVICE_ERROR; /* * Note: We need to set attributes and data size even if the buffer is @@ -422,22 +411,15 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e if (attributes) *attributes = rsp_data->attributes; - if (buffer_size == 0 && !data) { - efi_status = EFI_SUCCESS; - goto out_free; - } + if (buffer_size == 0 && !data) + return EFI_SUCCESS; - if (buffer_size < rsp_data->data_size) { - efi_status = EFI_BUFFER_TOO_SMALL; - goto out_free; - } + if (buffer_size < rsp_data->data_size) + return EFI_BUFFER_TOO_SMALL; 
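The uefisecapp conversion in this range, like the mpfs-auto-update and qcom_scm-smc hunks elsewhere in the series, leans on the linux/cleanup.h scoped-free annotations to drop the out_free:/goto error paths and return status codes directly. A minimal, generic sketch of that pattern using the stock kfree cleanup:

        #include <linux/cleanup.h>
        #include <linux/slab.h>

        static int demo_no_goto(size_t len)
        {
                void *buf __free(kfree) = kzalloc(len, GFP_KERNEL);

                if (!buf)
                        return -ENOMEM;

                /*
                 * Every return below frees buf automatically, which is what
                 * lets the converted functions above return EFI status codes
                 * without an unwind label.
                 */
                return 0;
        }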
memcpy(data, ((void *)rsp_data) + rsp_data->data_offset, rsp_data->data_size); -out_free: - qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma); -out: - return efi_status; + return EFI_SUCCESS; } static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const efi_char16_t *name, @@ -446,11 +428,9 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e { struct qsee_req_uefi_set_variable *req_data; struct qsee_rsp_uefi_set_variable *rsp_data; - efi_status_t efi_status = EFI_SUCCESS; + void *cmd_buf __free(qcom_tzmem) = NULL; unsigned long name_length; - dma_addr_t cmd_buf_dma; size_t cmd_buf_size; - void *cmd_buf; size_t name_offs; size_t guid_offs; size_t data_offs; @@ -486,11 +466,9 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e __reqdata_offs(sizeof(*rsp_data), &rsp_offs) ); - cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL); - if (!cmd_buf) { - efi_status = EFI_OUT_OF_RESOURCES; - goto out; - } + cmd_buf = qcom_tzmem_alloc(qcuefi->mempool, cmd_buf_size, GFP_KERNEL); + if (!cmd_buf) + return EFI_OUT_OF_RESOURCES; req_data = cmd_buf + req_offs; rsp_data = cmd_buf + rsp_offs; @@ -506,10 +484,8 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e req_data->length = req_size; status = ucs2_strscpy(((void *)req_data) + req_data->name_offset, name, name_length); - if (status < 0) { - efi_status = EFI_INVALID_PARAMETER; - goto out_free; - } + if (status < 0) + return EFI_INVALID_PARAMETER; memcpy(((void *)req_data) + req_data->guid_offset, guid, req_data->guid_size); @@ -517,33 +493,24 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e memcpy(((void *)req_data) + req_data->data_offset, data, req_data->data_size); status = qcom_qseecom_app_send(qcuefi->client, - cmd_buf_dma + req_offs, req_size, - cmd_buf_dma + rsp_offs, sizeof(*rsp_data)); - if (status) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + cmd_buf + req_offs, req_size, + cmd_buf + rsp_offs, sizeof(*rsp_data)); + if (status) + return EFI_DEVICE_ERROR; - if (rsp_data->command_id != QSEE_CMD_UEFI_SET_VARIABLE) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->command_id != QSEE_CMD_UEFI_SET_VARIABLE) + return EFI_DEVICE_ERROR; - if (rsp_data->length != sizeof(*rsp_data)) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->length != sizeof(*rsp_data)) + return EFI_DEVICE_ERROR; if (rsp_data->status) { dev_dbg(qcuefi_dev(qcuefi), "%s: uefisecapp error: 0x%x\n", __func__, rsp_data->status); - efi_status = qsee_uefi_status_to_efi(rsp_data->status); + return qsee_uefi_status_to_efi(rsp_data->status); } -out_free: - qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma); -out: - return efi_status; + return EFI_SUCCESS; } static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi, @@ -552,10 +519,9 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi, { struct qsee_req_uefi_get_next_variable *req_data; struct qsee_rsp_uefi_get_next_variable *rsp_data; - efi_status_t efi_status = EFI_SUCCESS; - dma_addr_t cmd_buf_dma; + void *cmd_buf __free(qcom_tzmem) = NULL; + efi_status_t efi_status; size_t cmd_buf_size; - void *cmd_buf; size_t guid_offs; size_t name_offs; size_t req_size; @@ -587,11 +553,9 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi, __reqdata_offs(rsp_size, &rsp_offs) ); - cmd_buf = 
qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL); - if (!cmd_buf) { - efi_status = EFI_OUT_OF_RESOURCES; - goto out; - } + cmd_buf = qcom_tzmem_alloc(qcuefi->mempool, cmd_buf_size, GFP_KERNEL); + if (!cmd_buf) + return EFI_OUT_OF_RESOURCES; req_data = cmd_buf + req_offs; rsp_data = cmd_buf + rsp_offs; @@ -606,28 +570,20 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi, memcpy(((void *)req_data) + req_data->guid_offset, guid, req_data->guid_size); status = ucs2_strscpy(((void *)req_data) + req_data->name_offset, name, *name_size / sizeof(*name)); - if (status < 0) { - efi_status = EFI_INVALID_PARAMETER; - goto out_free; - } + if (status < 0) + return EFI_INVALID_PARAMETER; status = qcom_qseecom_app_send(qcuefi->client, - cmd_buf_dma + req_offs, req_size, - cmd_buf_dma + rsp_offs, rsp_size); - if (status) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + cmd_buf + req_offs, req_size, + cmd_buf + rsp_offs, rsp_size); + if (status) + return EFI_DEVICE_ERROR; - if (rsp_data->command_id != QSEE_CMD_UEFI_GET_NEXT_VARIABLE) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->command_id != QSEE_CMD_UEFI_GET_NEXT_VARIABLE) + return EFI_DEVICE_ERROR; - if (rsp_data->length < sizeof(*rsp_data)) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->length < sizeof(*rsp_data)) + return EFI_DEVICE_ERROR; if (rsp_data->status) { dev_dbg(qcuefi_dev(qcuefi), "%s: uefisecapp error: 0x%x\n", @@ -642,53 +598,40 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi, if (efi_status == EFI_BUFFER_TOO_SMALL) *name_size = rsp_data->name_size; - goto out_free; + return efi_status; } - if (rsp_data->length > rsp_size) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->length > rsp_size) + return EFI_DEVICE_ERROR; - if (rsp_data->name_offset + rsp_data->name_size > rsp_data->length) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->name_offset + rsp_data->name_size > rsp_data->length) + return EFI_DEVICE_ERROR; - if (rsp_data->guid_offset + rsp_data->guid_size > rsp_data->length) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->guid_offset + rsp_data->guid_size > rsp_data->length) + return EFI_DEVICE_ERROR; if (rsp_data->name_size > *name_size) { *name_size = rsp_data->name_size; - efi_status = EFI_BUFFER_TOO_SMALL; - goto out_free; + return EFI_BUFFER_TOO_SMALL; } - if (rsp_data->guid_size != sizeof(*guid)) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->guid_size != sizeof(*guid)) + return EFI_DEVICE_ERROR; memcpy(guid, ((void *)rsp_data) + rsp_data->guid_offset, rsp_data->guid_size); status = ucs2_strscpy(name, ((void *)rsp_data) + rsp_data->name_offset, rsp_data->name_size / sizeof(*name)); *name_size = rsp_data->name_size; - if (status < 0) { + if (status < 0) /* * Return EFI_DEVICE_ERROR here because the buffer size should * have already been validated above, causing this function to * bail with EFI_BUFFER_TOO_SMALL. 
*/ - efi_status = EFI_DEVICE_ERROR; - } + return EFI_DEVICE_ERROR; -out_free: - qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma); -out: - return efi_status; + return EFI_SUCCESS; } static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi, u32 attr, @@ -697,10 +640,8 @@ static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi, { struct qsee_req_uefi_query_variable_info *req_data; struct qsee_rsp_uefi_query_variable_info *rsp_data; - efi_status_t efi_status = EFI_SUCCESS; - dma_addr_t cmd_buf_dma; + void *cmd_buf __free(qcom_tzmem) = NULL; size_t cmd_buf_size; - void *cmd_buf; size_t req_offs; size_t rsp_offs; int status; @@ -710,11 +651,9 @@ static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi, __reqdata_offs(sizeof(*rsp_data), &rsp_offs) ); - cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL); - if (!cmd_buf) { - efi_status = EFI_OUT_OF_RESOURCES; - goto out; - } + cmd_buf = qcom_tzmem_alloc(qcuefi->mempool, cmd_buf_size, GFP_KERNEL); + if (!cmd_buf) + return EFI_OUT_OF_RESOURCES; req_data = cmd_buf + req_offs; rsp_data = cmd_buf + rsp_offs; @@ -724,28 +663,21 @@ static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi, req_data->length = sizeof(*req_data); status = qcom_qseecom_app_send(qcuefi->client, - cmd_buf_dma + req_offs, sizeof(*req_data), - cmd_buf_dma + rsp_offs, sizeof(*rsp_data)); - if (status) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + cmd_buf + req_offs, sizeof(*req_data), + cmd_buf + rsp_offs, sizeof(*rsp_data)); + if (status) + return EFI_DEVICE_ERROR; - if (rsp_data->command_id != QSEE_CMD_UEFI_QUERY_VARIABLE_INFO) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->command_id != QSEE_CMD_UEFI_QUERY_VARIABLE_INFO) + return EFI_DEVICE_ERROR; - if (rsp_data->length != sizeof(*rsp_data)) { - efi_status = EFI_DEVICE_ERROR; - goto out_free; - } + if (rsp_data->length != sizeof(*rsp_data)) + return EFI_DEVICE_ERROR; if (rsp_data->status) { dev_dbg(qcuefi_dev(qcuefi), "%s: uefisecapp error: 0x%x\n", __func__, rsp_data->status); - efi_status = qsee_uefi_status_to_efi(rsp_data->status); - goto out_free; + return qsee_uefi_status_to_efi(rsp_data->status); } if (storage_space) @@ -757,10 +689,7 @@ static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi, if (max_variable_size) *max_variable_size = rsp_data->max_variable_size; -out_free: - qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma); -out: - return efi_status; + return EFI_SUCCESS; } /* -- Global efivar interface. 
---------------------------------------------- */ @@ -786,6 +715,10 @@ static int qcuefi_set_reference(struct qcuefi_client *qcuefi) static struct qcuefi_client *qcuefi_acquire(void) { mutex_lock(&__qcuefi_lock); + if (!__qcuefi) { + mutex_unlock(&__qcuefi_lock); + return NULL; + } return __qcuefi; } @@ -871,6 +804,7 @@ static const struct efivar_operations qcom_efivar_ops = { static int qcom_uefisecapp_probe(struct auxiliary_device *aux_dev, const struct auxiliary_device_id *aux_dev_id) { + struct qcom_tzmem_pool_config pool_config; struct qcuefi_client *qcuefi; int status; @@ -880,6 +814,16 @@ static int qcom_uefisecapp_probe(struct auxiliary_device *aux_dev, qcuefi->client = container_of(aux_dev, struct qseecom_client, aux_dev); + memset(&pool_config, 0, sizeof(pool_config)); + pool_config.initial_size = SZ_4K; + pool_config.policy = QCOM_TZMEM_POLICY_MULTIPLIER; + pool_config.increment = 2; + pool_config.max_size = SZ_256K; + + qcuefi->mempool = devm_qcom_tzmem_pool_new(&aux_dev->dev, &pool_config); + if (IS_ERR(qcuefi->mempool)) + return PTR_ERR(qcuefi->mempool); + auxiliary_set_drvdata(aux_dev, qcuefi); status = qcuefi_set_reference(qcuefi); if (status) diff --git a/drivers/firmware/qcom/qcom_scm-smc.c b/drivers/firmware/qcom/qcom_scm-smc.c index 16cf88acfa8e..574930729ddd 100644 --- a/drivers/firmware/qcom/qcom_scm-smc.c +++ b/drivers/firmware/qcom/qcom_scm-smc.c @@ -2,6 +2,7 @@ /* Copyright (c) 2015,2019 The Linux Foundation. All rights reserved. */ +#include <linux/cleanup.h> #include <linux/io.h> #include <linux/errno.h> #include <linux/delay.h> @@ -9,6 +10,7 @@ #include <linux/slab.h> #include <linux/types.h> #include <linux/firmware/qcom/qcom_scm.h> +#include <linux/firmware/qcom/qcom_tzmem.h> #include <linux/arm-smccc.h> #include <linux/dma-mapping.h> @@ -71,7 +73,7 @@ int scm_get_wq_ctx(u32 *wq_ctx, u32 *flags, u32 *more_pending) struct arm_smccc_res get_wq_res; struct arm_smccc_args get_wq_ctx = {0}; - get_wq_ctx.args[0] = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, + get_wq_ctx.args[0] = ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64, ARM_SMCCC_OWNER_SIP, SCM_SMC_FNID(QCOM_SCM_SVC_WAITQ, QCOM_SCM_WAITQ_GET_WQ_CTX)); @@ -152,9 +154,7 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc, { int arglen = desc->arginfo & 0xf; int i, ret; - dma_addr_t args_phys = 0; - void *args_virt = NULL; - size_t alloc_len; + void *args_virt __free(qcom_tzmem) = NULL; gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL; u32 smccc_call_type = atomic ? ARM_SMCCC_FAST_CALL : ARM_SMCCC_STD_CALL; u32 qcom_smccc_convention = (qcom_convention == SMC_CONVENTION_ARM_32) ? 
@@ -172,9 +172,14 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc, smc.args[i + SCM_SMC_FIRST_REG_IDX] = desc->args[i]; if (unlikely(arglen > SCM_SMC_N_REG_ARGS)) { - alloc_len = SCM_SMC_N_EXT_ARGS * sizeof(u64); - args_virt = kzalloc(PAGE_ALIGN(alloc_len), flag); + struct qcom_tzmem_pool *mempool = qcom_scm_get_tzmem_pool(); + if (!mempool) + return -EINVAL; + + args_virt = qcom_tzmem_alloc(mempool, + SCM_SMC_N_EXT_ARGS * sizeof(u64), + flag); if (!args_virt) return -ENOMEM; @@ -192,25 +197,10 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc, SCM_SMC_FIRST_EXT_IDX]); } - args_phys = dma_map_single(dev, args_virt, alloc_len, - DMA_TO_DEVICE); - - if (dma_mapping_error(dev, args_phys)) { - kfree(args_virt); - return -ENOMEM; - } - - smc.args[SCM_SMC_LAST_REG_IDX] = args_phys; + smc.args[SCM_SMC_LAST_REG_IDX] = qcom_tzmem_to_phys(args_virt); } - /* ret error check follows after args_virt cleanup*/ ret = __scm_smc_do(dev, &smc, &smc_res, atomic); - - if (args_virt) { - dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE); - kfree(args_virt); - } - if (ret) return ret; diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c index 90283f160a22..f63b716be5b0 100644 --- a/drivers/firmware/qcom/qcom_scm.c +++ b/drivers/firmware/qcom/qcom_scm.c @@ -4,28 +4,36 @@ */ #include <linux/arm-smccc.h> +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/cleanup.h> #include <linux/clk.h> #include <linux/completion.h> #include <linux/cpumask.h> #include <linux/dma-mapping.h> +#include <linux/err.h> #include <linux/export.h> #include <linux/firmware/qcom/qcom_scm.h> +#include <linux/firmware/qcom/qcom_tzmem.h> #include <linux/init.h> #include <linux/interconnect.h> #include <linux/interrupt.h> +#include <linux/kstrtox.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_platform.h> +#include <linux/of_reserved_mem.h> #include <linux/platform_device.h> #include <linux/reset-controller.h> +#include <linux/sizes.h> #include <linux/types.h> #include "qcom_scm.h" +#include "qcom_tzmem.h" -static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT); -module_param(download_mode, bool, 0); +static u32 download_mode; struct qcom_scm { struct device *dev; @@ -41,6 +49,8 @@ struct qcom_scm { int scm_vote_count; u64 dload_mode_addr; + + struct qcom_tzmem_pool *mempool; }; struct qcom_scm_current_perm_info { @@ -102,6 +112,7 @@ enum qcom_scm_qseecom_tz_cmd_info { }; #define QSEECOM_MAX_APP_NAME_SIZE 64 +#define SHMBRIDGE_RESULT_NOTSUPP 4 /* Each bit configures cold/warm boot address for one of the 4 CPUs */ static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = { @@ -112,7 +123,12 @@ static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = { }; #define QCOM_SMC_WAITQ_FLAG_WAKE_ONE BIT(0) -#define QCOM_SMC_WAITQ_FLAG_WAKE_ALL BIT(1) + +#define QCOM_DLOAD_MASK GENMASK(5, 4) +#define QCOM_DLOAD_NODUMP 0 +#define QCOM_DLOAD_FULLDUMP 1 +#define QCOM_DLOAD_MINIDUMP 2 +#define QCOM_DLOAD_BOTHDUMP 3 static const char * const qcom_scm_convention_names[] = { [SMC_CONVENTION_UNKNOWN] = "unknown", @@ -121,6 +137,13 @@ static const char * const qcom_scm_convention_names[] = { [SMC_CONVENTION_LEGACY] = "smc legacy", }; +static const char * const download_mode_name[] = { + [QCOM_DLOAD_NODUMP] = "off", + [QCOM_DLOAD_FULLDUMP] = "full", + [QCOM_DLOAD_MINIDUMP] = "mini", + [QCOM_DLOAD_BOTHDUMP] = "full,mini", +}; + static struct 
qcom_scm *__scm; static int qcom_scm_clk_enable(void) @@ -163,9 +186,6 @@ static int qcom_scm_bw_enable(void) if (!__scm->path) return 0; - if (IS_ERR(__scm->path)) - return -EINVAL; - mutex_lock(&__scm->scm_bw_lock); if (!__scm->scm_vote_count) { ret = icc_set_bw(__scm->path, 0, UINT_MAX); @@ -183,7 +203,7 @@ err_bw: static void qcom_scm_bw_disable(void) { - if (IS_ERR_OR_NULL(__scm->path)) + if (!__scm->path) return; mutex_lock(&__scm->scm_bw_lock); @@ -195,6 +215,14 @@ static void qcom_scm_bw_disable(void) enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN; static DEFINE_SPINLOCK(scm_query_lock); +struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void) +{ + if (!qcom_scm_is_available()) + return NULL; + + return __scm->mempool; +} + static enum qcom_scm_convention __get_convention(void) { unsigned long flags; @@ -496,20 +524,32 @@ static int __qcom_scm_set_dload_mode(struct device *dev, bool enable) return qcom_scm_call_atomic(__scm->dev, &desc, NULL); } -static void qcom_scm_set_download_mode(bool enable) +static int qcom_scm_io_rmw(phys_addr_t addr, unsigned int mask, unsigned int val) +{ + unsigned int old; + unsigned int new; + int ret; + + ret = qcom_scm_io_readl(addr, &old); + if (ret) + return ret; + + new = (old & ~mask) | (val & mask); + + return qcom_scm_io_writel(addr, new); +} + +static void qcom_scm_set_download_mode(u32 dload_mode) { - bool avail; int ret = 0; - avail = __qcom_scm_is_call_available(__scm->dev, - QCOM_SCM_SVC_BOOT, - QCOM_SCM_BOOT_SET_DLOAD_MODE); - if (avail) { - ret = __qcom_scm_set_dload_mode(__scm->dev, enable); - } else if (__scm->dload_mode_addr) { - ret = qcom_scm_io_writel(__scm->dload_mode_addr, - enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0); - } else { + if (__scm->dload_mode_addr) { + ret = qcom_scm_io_rmw(__scm->dload_mode_addr, QCOM_DLOAD_MASK, + FIELD_PREP(QCOM_DLOAD_MASK, dload_mode)); + } else if (__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT, + QCOM_SCM_BOOT_SET_DLOAD_MODE)) { + ret = __qcom_scm_set_dload_mode(__scm->dev, !!dload_mode); + } else if (dload_mode) { dev_err(__scm->dev, "No available mechanism for setting download mode\n"); } @@ -554,13 +594,19 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size, * During the scm call memory protection will be enabled for the meta * data blob, so make sure it's physically contiguous, 4K aligned and * non-cachable to avoid XPU violations. + * + * For PIL calls the hypervisor creates SHM Bridges for the blob + * buffers on behalf of Linux so we must not do it ourselves hence + * not using the TZMem allocator here. + * + * If we pass a buffer that is already part of an SHM Bridge to this + * call, it will fail. 
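+	 * That is why this allocation deliberately stays on dma_alloc_coherent()
+	 * rather than being converted to qcom_tzmem_alloc() like the other
+	 * buffers in this file.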
*/ mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys, GFP_KERNEL); - if (!mdata_buf) { - dev_err(__scm->dev, "Allocation of metadata buffer failed.\n"); + if (!mdata_buf) return -ENOMEM; - } + memcpy(mdata_buf, metadata, size); ret = qcom_scm_clk_enable(); @@ -569,13 +615,14 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size, ret = qcom_scm_bw_enable(); if (ret) - return ret; + goto disable_clk; desc.args[1] = mdata_phys; ret = qcom_scm_call(__scm->dev, &desc, &res); - qcom_scm_bw_disable(); + +disable_clk: qcom_scm_clk_disable(); out: @@ -637,10 +684,12 @@ int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size) ret = qcom_scm_bw_enable(); if (ret) - return ret; + goto disable_clk; ret = qcom_scm_call(__scm->dev, &desc, &res); qcom_scm_bw_disable(); + +disable_clk: qcom_scm_clk_disable(); return ret ? : res.result[0]; @@ -672,10 +721,12 @@ int qcom_scm_pas_auth_and_reset(u32 peripheral) ret = qcom_scm_bw_enable(); if (ret) - return ret; + goto disable_clk; ret = qcom_scm_call(__scm->dev, &desc, &res); qcom_scm_bw_disable(); + +disable_clk: qcom_scm_clk_disable(); return ret ? : res.result[0]; @@ -706,11 +757,12 @@ int qcom_scm_pas_shutdown(u32 peripheral) ret = qcom_scm_bw_enable(); if (ret) - return ret; + goto disable_clk; ret = qcom_scm_call(__scm->dev, &desc, &res); - qcom_scm_bw_disable(); + +disable_clk: qcom_scm_clk_disable(); return ret ? : res.result[0]; @@ -855,6 +907,32 @@ int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare) } EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg); +#define QCOM_SCM_CP_APERTURE_CONTEXT_MASK GENMASK(7, 0) + +bool qcom_scm_set_gpu_smmu_aperture_is_available(void) +{ + return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP, + QCOM_SCM_MP_CP_SMMU_APERTURE_ID); +} +EXPORT_SYMBOL_GPL(qcom_scm_set_gpu_smmu_aperture_is_available); + +int qcom_scm_set_gpu_smmu_aperture(unsigned int context_bank) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_MP, + .cmd = QCOM_SCM_MP_CP_SMMU_APERTURE_ID, + .arginfo = QCOM_SCM_ARGS(4), + .args[0] = 0xffff0000 | FIELD_PREP(QCOM_SCM_CP_APERTURE_CONTEXT_MASK, context_bank), + .args[1] = 0xffffffff, + .args[2] = 0xffffffff, + .args[3] = 0xffffffff, + .owner = ARM_SMCCC_OWNER_SIP + }; + + return qcom_scm_call(__scm->dev, &desc, NULL); +} +EXPORT_SYMBOL_GPL(qcom_scm_set_gpu_smmu_aperture); + int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size) { struct qcom_scm_desc desc = { @@ -987,14 +1065,13 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, struct qcom_scm_mem_map_info *mem_to_map; phys_addr_t mem_to_map_phys; phys_addr_t dest_phys; - dma_addr_t ptr_phys; + phys_addr_t ptr_phys; size_t mem_to_map_sz; size_t dest_sz; size_t src_sz; size_t ptr_sz; int next_vm; __le32 *src; - void *ptr; int ret, i, b; u64 srcvm_bits = *srcvm; @@ -1004,10 +1081,13 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(dest_sz, SZ_64); - ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL); + void *ptr __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, + ptr_sz, GFP_KERNEL); if (!ptr) return -ENOMEM; + ptr_phys = qcom_tzmem_to_phys(ptr); + /* Fill source vmid detail */ src = ptr; i = 0; @@ -1036,7 +1116,6 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz, ptr_phys, src_sz, dest_phys, dest_sz); - dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys); if (ret) { 
dev_err(__scm->dev, "Assign memory protection call failed %d\n", ret); @@ -1184,36 +1263,239 @@ int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size, .args[4] = data_unit_size, .owner = ARM_SMCCC_OWNER_SIP, }; - void *keybuf; - dma_addr_t key_phys; - int ret; - /* - * 'key' may point to vmalloc()'ed memory, but we need to pass a - * physical address that's been properly flushed. The sanctioned way to - * do this is by using the DMA API. But as is best practice for crypto - * keys, we also must wipe the key after use. This makes kmemdup() + - * dma_map_single() not clearly correct, since the DMA API can use - * bounce buffers. Instead, just use dma_alloc_coherent(). Programming - * keys is normally rare and thus not performance-critical. - */ + int ret; - keybuf = dma_alloc_coherent(__scm->dev, key_size, &key_phys, - GFP_KERNEL); + void *keybuf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, + key_size, + GFP_KERNEL); if (!keybuf) return -ENOMEM; memcpy(keybuf, key, key_size); - desc.args[1] = key_phys; + desc.args[1] = qcom_tzmem_to_phys(keybuf); ret = qcom_scm_call(__scm->dev, &desc, NULL); memzero_explicit(keybuf, key_size); - dma_free_coherent(__scm->dev, key_size, keybuf, key_phys); return ret; } EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key); +bool qcom_scm_has_wrapped_key_support(void) +{ + return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES, + QCOM_SCM_ES_DERIVE_SW_SECRET) && + __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES, + QCOM_SCM_ES_GENERATE_ICE_KEY) && + __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES, + QCOM_SCM_ES_PREPARE_ICE_KEY) && + __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES, + QCOM_SCM_ES_IMPORT_ICE_KEY); +} +EXPORT_SYMBOL_GPL(qcom_scm_has_wrapped_key_support); + +/** + * qcom_scm_derive_sw_secret() - Derive software secret from wrapped key + * @eph_key: an ephemerally-wrapped key + * @eph_key_size: size of @eph_key in bytes + * @sw_secret: output buffer for the software secret + * @sw_secret_size: size of the software secret to derive in bytes + * + * Derive a software secret from an ephemerally-wrapped key for software crypto + * operations. This is done by calling into the secure execution environment, + * which then calls into the hardware to unwrap and derive the secret. + * + * For more information on sw_secret, see the "Hardware-wrapped keys" section of + * Documentation/block/inline-encryption.rst. + * + * Return: 0 on success; -errno on failure. 
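+ *
+ * Illustrative sketch only (the 32-byte secret size is an assumption for the
+ * example, not mandated by this interface):
+ *
+ *	u8 sw_secret[32];
+ *	int err;
+ *
+ *	err = qcom_scm_derive_sw_secret(eph_key, eph_key_size,
+ *					sw_secret, sizeof(sw_secret));
+ *	if (err)
+ *		return err;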
+ */ +int qcom_scm_derive_sw_secret(const u8 *eph_key, size_t eph_key_size, + u8 *sw_secret, size_t sw_secret_size) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_ES, + .cmd = QCOM_SCM_ES_DERIVE_SW_SECRET, + .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RW, QCOM_SCM_VAL, + QCOM_SCM_RW, QCOM_SCM_VAL), + .owner = ARM_SMCCC_OWNER_SIP, + }; + int ret; + + void *eph_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, + eph_key_size, + GFP_KERNEL); + if (!eph_key_buf) + return -ENOMEM; + + void *sw_secret_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, + sw_secret_size, + GFP_KERNEL); + if (!sw_secret_buf) + return -ENOMEM; + + memcpy(eph_key_buf, eph_key, eph_key_size); + desc.args[0] = qcom_tzmem_to_phys(eph_key_buf); + desc.args[1] = eph_key_size; + desc.args[2] = qcom_tzmem_to_phys(sw_secret_buf); + desc.args[3] = sw_secret_size; + + ret = qcom_scm_call(__scm->dev, &desc, NULL); + if (!ret) + memcpy(sw_secret, sw_secret_buf, sw_secret_size); + + memzero_explicit(eph_key_buf, eph_key_size); + memzero_explicit(sw_secret_buf, sw_secret_size); + return ret; +} +EXPORT_SYMBOL_GPL(qcom_scm_derive_sw_secret); + +/** + * qcom_scm_generate_ice_key() - Generate a wrapped key for storage encryption + * @lt_key: output buffer for the long-term wrapped key + * @lt_key_size: size of @lt_key in bytes. Must be the exact wrapped key size + * used by the SoC. + * + * Generate a key using the built-in HW module in the SoC. The resulting key is + * returned wrapped with the platform-specific Key Encryption Key. + * + * Return: 0 on success; -errno on failure. + */ +int qcom_scm_generate_ice_key(u8 *lt_key, size_t lt_key_size) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_ES, + .cmd = QCOM_SCM_ES_GENERATE_ICE_KEY, + .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL), + .owner = ARM_SMCCC_OWNER_SIP, + }; + int ret; + + void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, + lt_key_size, + GFP_KERNEL); + if (!lt_key_buf) + return -ENOMEM; + + desc.args[0] = qcom_tzmem_to_phys(lt_key_buf); + desc.args[1] = lt_key_size; + + ret = qcom_scm_call(__scm->dev, &desc, NULL); + if (!ret) + memcpy(lt_key, lt_key_buf, lt_key_size); + + memzero_explicit(lt_key_buf, lt_key_size); + return ret; +} +EXPORT_SYMBOL_GPL(qcom_scm_generate_ice_key); + +/** + * qcom_scm_prepare_ice_key() - Re-wrap a key with the per-boot ephemeral key + * @lt_key: a long-term wrapped key + * @lt_key_size: size of @lt_key in bytes + * @eph_key: output buffer for the ephemerally-wrapped key + * @eph_key_size: size of @eph_key in bytes. Must be the exact wrapped key size + * used by the SoC. + * + * Given a long-term wrapped key, re-wrap it with the per-boot ephemeral key for + * added protection. The resulting key will only be valid for the current boot. + * + * Return: 0 on success; -errno on failure. 
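+ *
+ * Illustrative sketch of pairing this with qcom_scm_generate_ice_key() (the
+ * buffer sizes are assumptions for the example; real callers must use the
+ * SoC's exact wrapped key size):
+ *
+ *	u8 lt_key[64], eph_key[64];
+ *	int err;
+ *
+ *	err = qcom_scm_generate_ice_key(lt_key, sizeof(lt_key));
+ *	if (!err)
+ *		err = qcom_scm_prepare_ice_key(lt_key, sizeof(lt_key),
+ *					       eph_key, sizeof(eph_key));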
+ */ +int qcom_scm_prepare_ice_key(const u8 *lt_key, size_t lt_key_size, + u8 *eph_key, size_t eph_key_size) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_ES, + .cmd = QCOM_SCM_ES_PREPARE_ICE_KEY, + .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RO, QCOM_SCM_VAL, + QCOM_SCM_RW, QCOM_SCM_VAL), + .owner = ARM_SMCCC_OWNER_SIP, + }; + int ret; + + void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, + lt_key_size, + GFP_KERNEL); + if (!lt_key_buf) + return -ENOMEM; + + void *eph_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, + eph_key_size, + GFP_KERNEL); + if (!eph_key_buf) + return -ENOMEM; + + memcpy(lt_key_buf, lt_key, lt_key_size); + desc.args[0] = qcom_tzmem_to_phys(lt_key_buf); + desc.args[1] = lt_key_size; + desc.args[2] = qcom_tzmem_to_phys(eph_key_buf); + desc.args[3] = eph_key_size; + + ret = qcom_scm_call(__scm->dev, &desc, NULL); + if (!ret) + memcpy(eph_key, eph_key_buf, eph_key_size); + + memzero_explicit(lt_key_buf, lt_key_size); + memzero_explicit(eph_key_buf, eph_key_size); + return ret; +} +EXPORT_SYMBOL_GPL(qcom_scm_prepare_ice_key); + +/** + * qcom_scm_import_ice_key() - Import key for storage encryption + * @raw_key: the raw key to import + * @raw_key_size: size of @raw_key in bytes + * @lt_key: output buffer for the long-term wrapped key + * @lt_key_size: size of @lt_key in bytes. Must be the exact wrapped key size + * used by the SoC. + * + * Import a raw key and return a long-term wrapped key. Uses the SoC's HWKM to + * wrap the raw key using the platform-specific Key Encryption Key. + * + * Return: 0 on success; -errno on failure. + */ +int qcom_scm_import_ice_key(const u8 *raw_key, size_t raw_key_size, + u8 *lt_key, size_t lt_key_size) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_ES, + .cmd = QCOM_SCM_ES_IMPORT_ICE_KEY, + .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RO, QCOM_SCM_VAL, + QCOM_SCM_RW, QCOM_SCM_VAL), + .owner = ARM_SMCCC_OWNER_SIP, + }; + int ret; + + void *raw_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, + raw_key_size, + GFP_KERNEL); + if (!raw_key_buf) + return -ENOMEM; + + void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, + lt_key_size, + GFP_KERNEL); + if (!lt_key_buf) + return -ENOMEM; + + memcpy(raw_key_buf, raw_key, raw_key_size); + desc.args[0] = qcom_tzmem_to_phys(raw_key_buf); + desc.args[1] = raw_key_size; + desc.args[2] = qcom_tzmem_to_phys(lt_key_buf); + desc.args[3] = lt_key_size; + + ret = qcom_scm_call(__scm->dev, &desc, NULL); + if (!ret) + memcpy(lt_key, lt_key_buf, lt_key_size); + + memzero_explicit(raw_key_buf, raw_key_size); + memzero_explicit(lt_key_buf, lt_key_size); + return ret; +} +EXPORT_SYMBOL_GPL(qcom_scm_import_ice_key); + /** * qcom_scm_hdcp_available() - Check if secure environment supports HDCP. 
* @@ -1321,6 +1603,76 @@ bool qcom_scm_lmh_dcvsh_available(void) } EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available); +int qcom_scm_shm_bridge_enable(void) +{ + int ret; + + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_MP, + .cmd = QCOM_SCM_MP_SHM_BRIDGE_ENABLE, + .owner = ARM_SMCCC_OWNER_SIP + }; + + struct qcom_scm_res res; + + if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP, + QCOM_SCM_MP_SHM_BRIDGE_ENABLE)) + return -EOPNOTSUPP; + + ret = qcom_scm_call(__scm->dev, &desc, &res); + + if (ret) + return ret; + + if (res.result[0] == SHMBRIDGE_RESULT_NOTSUPP) + return -EOPNOTSUPP; + + return res.result[0]; +} +EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_enable); + +int qcom_scm_shm_bridge_create(struct device *dev, u64 pfn_and_ns_perm_flags, + u64 ipfn_and_s_perm_flags, u64 size_and_flags, + u64 ns_vmids, u64 *handle) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_MP, + .cmd = QCOM_SCM_MP_SHM_BRIDGE_CREATE, + .owner = ARM_SMCCC_OWNER_SIP, + .args[0] = pfn_and_ns_perm_flags, + .args[1] = ipfn_and_s_perm_flags, + .args[2] = size_and_flags, + .args[3] = ns_vmids, + .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL, + QCOM_SCM_VAL, QCOM_SCM_VAL), + }; + + struct qcom_scm_res res; + int ret; + + ret = qcom_scm_call(__scm->dev, &desc, &res); + + if (handle && !ret) + *handle = res.result[1]; + + return ret ?: res.result[0]; +} +EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_create); + +int qcom_scm_shm_bridge_delete(struct device *dev, u64 handle) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_MP, + .cmd = QCOM_SCM_MP_SHM_BRIDGE_DELETE, + .owner = ARM_SMCCC_OWNER_SIP, + .args[0] = handle, + .arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL), + }; + + return qcom_scm_call(__scm->dev, &desc, NULL); +} +EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_delete); + int qcom_scm_lmh_profile_change(u32 profile_id) { struct qcom_scm_desc desc = { @@ -1338,8 +1690,6 @@ EXPORT_SYMBOL_GPL(qcom_scm_lmh_profile_change); int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val, u64 limit_node, u32 node_id, u64 version) { - dma_addr_t payload_phys; - u32 *payload_buf; int ret, payload_size = 5 * sizeof(u32); struct qcom_scm_desc desc = { @@ -1354,7 +1704,9 @@ int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val, .owner = ARM_SMCCC_OWNER_SIP, }; - payload_buf = dma_alloc_coherent(__scm->dev, payload_size, &payload_phys, GFP_KERNEL); + u32 *payload_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, + payload_size, + GFP_KERNEL); if (!payload_buf) return -ENOMEM; @@ -1364,15 +1716,28 @@ int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val, payload_buf[3] = 1; payload_buf[4] = payload_val; - desc.args[0] = payload_phys; + desc.args[0] = qcom_tzmem_to_phys(payload_buf); ret = qcom_scm_call(__scm->dev, &desc, NULL); - dma_free_coherent(__scm->dev, payload_size, payload_buf, payload_phys); return ret; } EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh); +int qcom_scm_gpu_init_regs(u32 gpu_req) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_GPU, + .cmd = QCOM_SCM_SVC_GPU_INIT_REGS, + .arginfo = QCOM_SCM_ARGS(1), + .args[0] = gpu_req, + .owner = ARM_SMCCC_OWNER_SIP, + }; + + return qcom_scm_call(__scm->dev, &desc, NULL); +} +EXPORT_SYMBOL_GPL(qcom_scm_gpu_init_regs); + static int qcom_scm_find_dload_address(struct device *dev, u64 *addr) { struct device_node *tcsr; @@ -1524,37 +1889,27 @@ int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id) unsigned long app_name_len = strlen(app_name); struct qcom_scm_desc desc = {}; struct 
qcom_scm_qseecom_resp res = {}; - dma_addr_t name_buf_phys; - char *name_buf; int status; if (app_name_len >= name_buf_size) return -EINVAL; - name_buf = kzalloc(name_buf_size, GFP_KERNEL); + char *name_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool, + name_buf_size, + GFP_KERNEL); if (!name_buf) return -ENOMEM; memcpy(name_buf, app_name, app_name_len); - name_buf_phys = dma_map_single(__scm->dev, name_buf, name_buf_size, DMA_TO_DEVICE); - status = dma_mapping_error(__scm->dev, name_buf_phys); - if (status) { - kfree(name_buf); - dev_err(__scm->dev, "qseecom: failed to map dma address\n"); - return status; - } - desc.owner = QSEECOM_TZ_OWNER_QSEE_OS; desc.svc = QSEECOM_TZ_SVC_APP_MGR; desc.cmd = QSEECOM_TZ_CMD_APP_LOOKUP; desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL); - desc.args[0] = name_buf_phys; + desc.args[0] = qcom_tzmem_to_phys(name_buf); desc.args[1] = app_name_len; status = qcom_scm_qseecom_call(&desc, &res); - dma_unmap_single(__scm->dev, name_buf_phys, name_buf_size, DMA_TO_DEVICE); - kfree(name_buf); if (status) return status; @@ -1576,9 +1931,9 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id); /** * qcom_scm_qseecom_app_send() - Send to and receive data from a given QSEE app. * @app_id: The ID of the target app. - * @req: DMA address of the request buffer sent to the app. + * @req: Request buffer sent to the app (must be TZ memory) * @req_size: Size of the request buffer. - * @rsp: DMA address of the response buffer, written to by the app. + * @rsp: Response buffer, written to by the app (must be TZ memory) * @rsp_size: Size of the response buffer. * * Sends a request to the QSEE app associated with the given ID and read back @@ -1589,13 +1944,18 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id); * * Return: Zero on success, nonzero on failure. */ -int qcom_scm_qseecom_app_send(u32 app_id, dma_addr_t req, size_t req_size, - dma_addr_t rsp, size_t rsp_size) +int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, + void *rsp, size_t rsp_size) { struct qcom_scm_qseecom_resp res = {}; struct qcom_scm_desc desc = {}; + phys_addr_t req_phys; + phys_addr_t rsp_phys; int status; + req_phys = qcom_tzmem_to_phys(req); + rsp_phys = qcom_tzmem_to_phys(rsp); + desc.owner = QSEECOM_TZ_OWNER_TZ_APPS; desc.svc = QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER; desc.cmd = QSEECOM_TZ_CMD_APP_SEND; @@ -1603,9 +1963,9 @@ int qcom_scm_qseecom_app_send(u32 app_id, dma_addr_t req, size_t req_size, QCOM_SCM_RW, QCOM_SCM_VAL, QCOM_SCM_RW, QCOM_SCM_VAL); desc.args[0] = app_id; - desc.args[1] = req; + desc.args[1] = req_phys; desc.args[2] = req_size; - desc.args[3] = rsp; + desc.args[3] = rsp_phys; desc.args[4] = rsp_size; status = qcom_scm_qseecom_call(&desc, &res); @@ -1624,8 +1984,27 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send); * We do not yet support re-entrant calls via the qseecom interface. To prevent + any potential issues with this, only allow validated machines for now. 
*/ -static const struct of_device_id qcom_scm_qseecom_allowlist[] = { +static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = { + { .compatible = "asus,vivobook-s15" }, + { .compatible = "asus,zenbook-a14-ux3407qa" }, + { .compatible = "asus,zenbook-a14-ux3407ra" }, + { .compatible = "dell,xps13-9345" }, + { .compatible = "hp,elitebook-ultra-g1q" }, + { .compatible = "hp,omnibook-x14" }, + { .compatible = "huawei,gaokun3" }, + { .compatible = "lenovo,flex-5g" }, + { .compatible = "lenovo,thinkpad-t14s" }, { .compatible = "lenovo,thinkpad-x13s", }, + { .compatible = "lenovo,yoga-slim7x" }, + { .compatible = "microsoft,arcata", }, + { .compatible = "microsoft,blackrock" }, + { .compatible = "microsoft,romulus13", }, + { .compatible = "microsoft,romulus15", }, + { .compatible = "qcom,sc8180x-primus" }, + { .compatible = "qcom,x1e001de-devkit" }, + { .compatible = "qcom,x1e80100-crd" }, + { .compatible = "qcom,x1e80100-qcp" }, + { .compatible = "qcom,x1p42100-crd" }, { } }; @@ -1713,7 +2092,8 @@ static int qcom_scm_qseecom_init(struct qcom_scm *scm) */ bool qcom_scm_is_available(void) { - return !!__scm; + /* Paired with smp_store_release() in qcom_scm_probe */ + return !!smp_load_acquire(&__scm); } EXPORT_SYMBOL_GPL(qcom_scm_is_available); @@ -1744,7 +2124,7 @@ int qcom_scm_wait_for_wq_completion(u32 wq_ctx) return 0; } -static int qcom_scm_waitq_wakeup(struct qcom_scm *scm, unsigned int wq_ctx) +static int qcom_scm_waitq_wakeup(unsigned int wq_ctx) { int ret; @@ -1770,13 +2150,12 @@ static irqreturn_t qcom_scm_irq_handler(int irq, void *data) goto out; } - if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE && - flags != QCOM_SMC_WAITQ_FLAG_WAKE_ALL) { - dev_err(scm->dev, "Invalid flags found for wq_ctx: %u\n", flags); + if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE) { + dev_err(scm->dev, "Invalid flags received for wq_ctx: %u\n", flags); goto out; } - ret = qcom_scm_waitq_wakeup(scm, wq_ctx); + ret = qcom_scm_waitq_wakeup(wq_ctx); if (ret) goto out; } while (more_pending); @@ -1785,8 +2164,48 @@ out: return IRQ_HANDLED; } +static int get_download_mode(char *buffer, const struct kernel_param *kp) +{ + if (download_mode >= ARRAY_SIZE(download_mode_name)) + return sysfs_emit(buffer, "unknown mode\n"); + + return sysfs_emit(buffer, "%s\n", download_mode_name[download_mode]); +} + +static int set_download_mode(const char *val, const struct kernel_param *kp) +{ + bool tmp; + int ret; + + ret = sysfs_match_string(download_mode_name, val); + if (ret < 0) { + ret = kstrtobool(val, &tmp); + if (ret < 0) { + pr_err("qcom_scm: err: %d\n", ret); + return ret; + } + + ret = tmp ? 
1 : 0; + } + + download_mode = ret; + if (__scm) + qcom_scm_set_download_mode(download_mode); + + return 0; +} + +static const struct kernel_param_ops download_mode_param_ops = { + .get = get_download_mode, + .set = set_download_mode, +}; + +module_param_cb(download_mode, &download_mode_param_ops, NULL, 0644); +MODULE_PARM_DESC(download_mode, "download mode: off/0/N for no dump mode, full/on/1/Y for full dump mode, mini for minidump mode and full,mini for both full and minidump mode together are acceptable values"); + static int qcom_scm_probe(struct platform_device *pdev) { + struct qcom_tzmem_pool_config pool_config; struct qcom_scm *scm; int irq, ret; @@ -1794,10 +2213,12 @@ static int qcom_scm_probe(struct platform_device *pdev) if (!scm) return -ENOMEM; + scm->dev = &pdev->dev; ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr); if (ret < 0) return ret; + init_completion(&scm->waitq_comp); mutex_init(&scm->scm_bw_lock); scm->path = devm_of_icc_get(&pdev->dev, NULL); @@ -1829,39 +2250,65 @@ static int qcom_scm_probe(struct platform_device *pdev) if (ret) return ret; - __scm = scm; - __scm->dev = &pdev->dev; - - init_completion(&__scm->waitq_comp); + /* Paired with smp_load_acquire() in qcom_scm_is_available(). */ + smp_store_release(&__scm, scm); irq = platform_get_irq_optional(pdev, 0); if (irq < 0) { - if (irq != -ENXIO) - return irq; + if (irq != -ENXIO) { + ret = irq; + goto err; + } } else { ret = devm_request_threaded_irq(__scm->dev, irq, NULL, qcom_scm_irq_handler, IRQF_ONESHOT, "qcom-scm", __scm); - if (ret < 0) - return dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n"); + if (ret < 0) { + dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n"); + goto err; + } } __get_convention(); /* - * If requested enable "download mode", from this point on warmboot + * If "download mode" is requested, from this point on warmboot * will cause the boot stages to enter download mode, unless * disabled below by a clean shutdown/reboot. */ - if (download_mode) - qcom_scm_set_download_mode(true); - + qcom_scm_set_download_mode(download_mode); /* * Disable SDI if indicated by DT that it is enabled by default. */ - if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled")) + if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled") || !download_mode) qcom_scm_disable_sdi(); + ret = of_reserved_mem_device_init(__scm->dev); + if (ret && ret != -ENODEV) { + dev_err_probe(__scm->dev, ret, + "Failed to setup the reserved memory region for TZ mem\n"); + goto err; + } + + ret = qcom_tzmem_enable(__scm->dev); + if (ret) { + dev_err_probe(__scm->dev, ret, + "Failed to enable the TrustZone memory allocator\n"); + goto err; + } + + memset(&pool_config, 0, sizeof(pool_config)); + pool_config.initial_size = 0; + pool_config.policy = QCOM_TZMEM_POLICY_ON_DEMAND; + pool_config.max_size = SZ_256K; + + __scm->mempool = devm_qcom_tzmem_pool_new(__scm->dev, &pool_config); + if (IS_ERR(__scm->mempool)) { + ret = dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool), + "Failed to create the SCM memory pool\n"); + goto err; + } + /* * Initialize the QSEECOM interface. * @@ -1876,12 +2323,18 @@ static int qcom_scm_probe(struct platform_device *pdev) WARN(ret < 0, "failed to initialize qseecom: %d\n", ret); return 0; + +err: + /* Paired with smp_load_acquire() in qcom_scm_is_available(). 
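	 * Clearing the pointer makes qcom_scm_is_available() report false again
	 * if probing fails after the earlier smp_store_release(&__scm, scm).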
*/ + smp_store_release(&__scm, NULL); + + return ret; } static void qcom_scm_shutdown(struct platform_device *pdev) { /* Clean shutdown, disable download mode to allow normal restart */ - qcom_scm_set_download_mode(false); + qcom_scm_set_download_mode(QCOM_DLOAD_NODUMP); } static const struct of_device_id qcom_scm_dt_match[] = { diff --git a/drivers/firmware/qcom/qcom_scm.h b/drivers/firmware/qcom/qcom_scm.h index 4532907e8489..3133d826f5fa 100644 --- a/drivers/firmware/qcom/qcom_scm.h +++ b/drivers/firmware/qcom/qcom_scm.h @@ -5,6 +5,7 @@ #define __QCOM_SCM_INT_H struct device; +struct qcom_tzmem_pool; enum qcom_scm_convention { SMC_CONVENTION_UNKNOWN, @@ -43,8 +44,11 @@ enum qcom_scm_arg_types { /** * struct qcom_scm_desc + * @svc: Service identifier + * @cmd: Command identifier * @arginfo: Metadata describing the arguments in args[] * @args: The array of arguments for the secure syscall + * @owner: Owner identifier */ struct qcom_scm_desc { u32 svc; @@ -78,6 +82,8 @@ int scm_legacy_call_atomic(struct device *dev, const struct qcom_scm_desc *desc, int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc, struct qcom_scm_res *res); +struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void); + #define QCOM_SCM_SVC_BOOT 0x01 #define QCOM_SCM_BOOT_SET_ADDR 0x01 #define QCOM_SCM_BOOT_TERMINATE_PC 0x02 @@ -113,6 +119,10 @@ int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc, #define QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE 0x05 #define QCOM_SCM_MP_VIDEO_VAR 0x08 #define QCOM_SCM_MP_ASSIGN 0x16 +#define QCOM_SCM_MP_CP_SMMU_APERTURE_ID 0x1b +#define QCOM_SCM_MP_SHM_BRIDGE_ENABLE 0x1c +#define QCOM_SCM_MP_SHM_BRIDGE_DELETE 0x1d +#define QCOM_SCM_MP_SHM_BRIDGE_CREATE 0x1e #define QCOM_SCM_SVC_OCMEM 0x0f #define QCOM_SCM_OCMEM_LOCK_CMD 0x01 @@ -121,6 +131,10 @@ int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc, #define QCOM_SCM_SVC_ES 0x10 /* Enterprise Security */ #define QCOM_SCM_ES_INVALIDATE_ICE_KEY 0x03 #define QCOM_SCM_ES_CONFIG_SET_ICE_KEY 0x04 +#define QCOM_SCM_ES_DERIVE_SW_SECRET 0x07 +#define QCOM_SCM_ES_GENERATE_ICE_KEY 0x08 +#define QCOM_SCM_ES_PREPARE_ICE_KEY 0x09 +#define QCOM_SCM_ES_IMPORT_ICE_KEY 0x0a #define QCOM_SCM_SVC_HDCP 0x11 #define QCOM_SCM_HDCP_INVOKE 0x01 @@ -138,6 +152,9 @@ int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc, #define QCOM_SCM_WAITQ_RESUME 0x02 #define QCOM_SCM_WAITQ_GET_WQ_CTX 0x03 +#define QCOM_SCM_SVC_GPU 0x28 +#define QCOM_SCM_SVC_GPU_INIT_REGS 0x01 + /* common error codes */ #define QCOM_SCM_V2_EBUSY -12 #define QCOM_SCM_ENOMEM -5 diff --git a/drivers/firmware/qcom/qcom_tzmem.c b/drivers/firmware/qcom/qcom_tzmem.c new file mode 100644 index 000000000000..94196ad87105 --- /dev/null +++ b/drivers/firmware/qcom/qcom_tzmem.c @@ -0,0 +1,481 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Memory allocator for buffers shared with the TrustZone. + * + * Copyright (C) 2023-2024 Linaro Ltd. 
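+ *
+ * Typical usage, as an illustrative sketch only (simplified from the callers
+ * converted to this allocator elsewhere in this series):
+ *
+ *	struct qcom_tzmem_pool_config config = {
+ *		.policy = QCOM_TZMEM_POLICY_ON_DEMAND,
+ *		.max_size = SZ_256K,
+ *	};
+ *	struct qcom_tzmem_pool *pool;
+ *	void *buf;
+ *
+ *	pool = devm_qcom_tzmem_pool_new(dev, &config);
+ *	if (IS_ERR(pool))
+ *		return PTR_ERR(pool);
+ *
+ *	buf = qcom_tzmem_alloc(pool, SZ_4K, GFP_KERNEL);
+ *	if (!buf)
+ *		return -ENOMEM;
+ *
+ *	desc.args[0] = qcom_tzmem_to_phys(buf);
+ *	...
+ *	qcom_tzmem_free(buf);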
+ */ + +#include <linux/bug.h> +#include <linux/cleanup.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/firmware/qcom/qcom_tzmem.h> +#include <linux/genalloc.h> +#include <linux/gfp.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/mm.h> +#include <linux/radix-tree.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/types.h> + +#include "qcom_tzmem.h" + +struct qcom_tzmem_area { + struct list_head list; + void *vaddr; + dma_addr_t paddr; + size_t size; + void *priv; +}; + +struct qcom_tzmem_pool { + struct gen_pool *genpool; + struct list_head areas; + enum qcom_tzmem_policy policy; + size_t increment; + size_t max_size; + spinlock_t lock; +}; + +struct qcom_tzmem_chunk { + size_t size; + struct qcom_tzmem_pool *owner; +}; + +static struct device *qcom_tzmem_dev; +static RADIX_TREE(qcom_tzmem_chunks, GFP_ATOMIC); +static DEFINE_SPINLOCK(qcom_tzmem_chunks_lock); + +#if IS_ENABLED(CONFIG_QCOM_TZMEM_MODE_GENERIC) + +static int qcom_tzmem_init(void) +{ + return 0; +} + +static int qcom_tzmem_init_area(struct qcom_tzmem_area *area) +{ + return 0; +} + +static void qcom_tzmem_cleanup_area(struct qcom_tzmem_area *area) +{ + +} + +#elif IS_ENABLED(CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE) + +#include <linux/firmware/qcom/qcom_scm.h> +#include <linux/of.h> + +#define QCOM_SHM_BRIDGE_NUM_VM_SHIFT 9 + +static bool qcom_tzmem_using_shm_bridge; + +/* List of machines that are known to not support SHM bridge correctly. */ +static const char *const qcom_tzmem_blacklist[] = { + "qcom,sc8180x", + "qcom,sdm670", /* failure in GPU firmware loading */ + "qcom,sdm845", /* reset in rmtfs memory assignment */ + "qcom,sm7150", /* reset in rmtfs memory assignment */ + "qcom,sm8150", /* reset in rmtfs memory assignment */ + NULL +}; + +static int qcom_tzmem_init(void) +{ + const char *const *platform; + int ret; + + for (platform = qcom_tzmem_blacklist; *platform; platform++) { + if (of_machine_is_compatible(*platform)) + goto notsupp; + } + + ret = qcom_scm_shm_bridge_enable(); + if (ret == -EOPNOTSUPP) + goto notsupp; + + if (!ret) + qcom_tzmem_using_shm_bridge = true; + + return ret; + +notsupp: + dev_info(qcom_tzmem_dev, "SHM Bridge not supported\n"); + return 0; +} + +static int qcom_tzmem_init_area(struct qcom_tzmem_area *area) +{ + u64 pfn_and_ns_perm, ipfn_and_s_perm, size_and_flags; + int ret; + + if (!qcom_tzmem_using_shm_bridge) + return 0; + + pfn_and_ns_perm = (u64)area->paddr | QCOM_SCM_PERM_RW; + ipfn_and_s_perm = (u64)area->paddr | QCOM_SCM_PERM_RW; + size_and_flags = area->size | (1 << QCOM_SHM_BRIDGE_NUM_VM_SHIFT); + + u64 *handle __free(kfree) = kzalloc(sizeof(*handle), GFP_KERNEL); + if (!handle) + return -ENOMEM; + + ret = qcom_scm_shm_bridge_create(qcom_tzmem_dev, pfn_and_ns_perm, + ipfn_and_s_perm, size_and_flags, + QCOM_SCM_VMID_HLOS, handle); + if (ret) + return ret; + + area->priv = no_free_ptr(handle); + + return 0; +} + +static void qcom_tzmem_cleanup_area(struct qcom_tzmem_area *area) +{ + u64 *handle = area->priv; + + if (!qcom_tzmem_using_shm_bridge) + return; + + qcom_scm_shm_bridge_delete(qcom_tzmem_dev, *handle); + kfree(handle); +} + +#endif /* CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE */ + +static int qcom_tzmem_pool_add_memory(struct qcom_tzmem_pool *pool, + size_t size, gfp_t gfp) +{ + int ret; + + struct qcom_tzmem_area *area __free(kfree) = kzalloc(sizeof(*area), + gfp); + if (!area) + return -ENOMEM; + + area->size = PAGE_ALIGN(size); + + area->vaddr = dma_alloc_coherent(qcom_tzmem_dev, area->size, + &area->paddr, gfp); 
+ if (!area->vaddr) + return -ENOMEM; + + ret = qcom_tzmem_init_area(area); + if (ret) { + dma_free_coherent(qcom_tzmem_dev, area->size, + area->vaddr, area->paddr); + return ret; + } + + ret = gen_pool_add_virt(pool->genpool, (unsigned long)area->vaddr, + (phys_addr_t)area->paddr, size, -1); + if (ret) { + dma_free_coherent(qcom_tzmem_dev, area->size, + area->vaddr, area->paddr); + return ret; + } + + scoped_guard(spinlock_irqsave, &pool->lock) + list_add_tail(&area->list, &pool->areas); + + area = NULL; + return 0; +} + +/** + * qcom_tzmem_pool_new() - Create a new TZ memory pool. + * @config: Pool configuration. + * + * Create a new pool of memory suitable for sharing with the TrustZone. + * + * Must not be used in atomic context. + * + * Return: New memory pool address or ERR_PTR() on error. + */ +struct qcom_tzmem_pool * +qcom_tzmem_pool_new(const struct qcom_tzmem_pool_config *config) +{ + int ret = -ENOMEM; + + might_sleep(); + + switch (config->policy) { + case QCOM_TZMEM_POLICY_STATIC: + if (!config->initial_size) + return ERR_PTR(-EINVAL); + break; + case QCOM_TZMEM_POLICY_MULTIPLIER: + if (!config->increment) + return ERR_PTR(-EINVAL); + break; + case QCOM_TZMEM_POLICY_ON_DEMAND: + break; + default: + return ERR_PTR(-EINVAL); + } + + struct qcom_tzmem_pool *pool __free(kfree) = kzalloc(sizeof(*pool), + GFP_KERNEL); + if (!pool) + return ERR_PTR(-ENOMEM); + + pool->genpool = gen_pool_create(PAGE_SHIFT, -1); + if (!pool->genpool) + return ERR_PTR(-ENOMEM); + + gen_pool_set_algo(pool->genpool, gen_pool_best_fit, NULL); + + pool->policy = config->policy; + pool->increment = config->increment; + pool->max_size = config->max_size; + INIT_LIST_HEAD(&pool->areas); + spin_lock_init(&pool->lock); + + if (config->initial_size) { + ret = qcom_tzmem_pool_add_memory(pool, config->initial_size, + GFP_KERNEL); + if (ret) { + gen_pool_destroy(pool->genpool); + return ERR_PTR(ret); + } + } + + return_ptr(pool); +} +EXPORT_SYMBOL_GPL(qcom_tzmem_pool_new); + +/** + * qcom_tzmem_pool_free() - Destroy a TZ memory pool and free all resources. + * @pool: Memory pool to free. + * + * Must not be called if any of the allocated chunks has not been freed. + * Must not be used in atomic context. + */ +void qcom_tzmem_pool_free(struct qcom_tzmem_pool *pool) +{ + struct qcom_tzmem_area *area, *next; + struct qcom_tzmem_chunk *chunk; + struct radix_tree_iter iter; + bool non_empty = false; + void __rcu **slot; + + might_sleep(); + + if (!pool) + return; + + scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock) { + radix_tree_for_each_slot(slot, &qcom_tzmem_chunks, &iter, 0) { + chunk = radix_tree_deref_slot_protected(slot, + &qcom_tzmem_chunks_lock); + + if (chunk->owner == pool) + non_empty = true; + } + } + + WARN(non_empty, "Freeing TZ memory pool with memory still allocated"); + + list_for_each_entry_safe(area, next, &pool->areas, list) { + list_del(&area->list); + qcom_tzmem_cleanup_area(area); + dma_free_coherent(qcom_tzmem_dev, area->size, + area->vaddr, area->paddr); + kfree(area); + } + + gen_pool_destroy(pool->genpool); + kfree(pool); +} +EXPORT_SYMBOL_GPL(qcom_tzmem_pool_free); + +static void devm_qcom_tzmem_pool_free(void *data) +{ + struct qcom_tzmem_pool *pool = data; + + qcom_tzmem_pool_free(pool); +} + +/** + * devm_qcom_tzmem_pool_new() - Managed variant of qcom_tzmem_pool_new(). + * @dev: Device managing this resource. + * @config: Pool configuration. + * + * Must not be used in atomic context. + * + * Return: Address of the managed pool or ERR_PTR() on failure. 
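+ *
+ * Example configuration, as an illustrative sketch (the values mirror the
+ * uefisecapp conversion in this series):
+ *
+ *	struct qcom_tzmem_pool_config config = {
+ *		.initial_size = SZ_4K,
+ *		.policy = QCOM_TZMEM_POLICY_MULTIPLIER,
+ *		.increment = 2,
+ *		.max_size = SZ_256K,
+ *	};
+ *
+ *	pool = devm_qcom_tzmem_pool_new(dev, &config);
+ *	if (IS_ERR(pool))
+ *		return PTR_ERR(pool);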
+ */ +struct qcom_tzmem_pool * +devm_qcom_tzmem_pool_new(struct device *dev, + const struct qcom_tzmem_pool_config *config) +{ + struct qcom_tzmem_pool *pool; + int ret; + + pool = qcom_tzmem_pool_new(config); + if (IS_ERR(pool)) + return pool; + + ret = devm_add_action_or_reset(dev, devm_qcom_tzmem_pool_free, pool); + if (ret) + return ERR_PTR(ret); + + return pool; +} +EXPORT_SYMBOL_GPL(devm_qcom_tzmem_pool_new); + +static bool qcom_tzmem_try_grow_pool(struct qcom_tzmem_pool *pool, + size_t requested, gfp_t gfp) +{ + size_t current_size = gen_pool_size(pool->genpool); + + if (pool->max_size && (current_size + requested) > pool->max_size) + return false; + + switch (pool->policy) { + case QCOM_TZMEM_POLICY_STATIC: + return false; + case QCOM_TZMEM_POLICY_MULTIPLIER: + requested = current_size * pool->increment; + break; + case QCOM_TZMEM_POLICY_ON_DEMAND: + break; + } + + return !qcom_tzmem_pool_add_memory(pool, requested, gfp); +} + +/** + * qcom_tzmem_alloc() - Allocate a memory chunk suitable for sharing with TZ. + * @pool: TZ memory pool from which to allocate memory. + * @size: Number of bytes to allocate. + * @gfp: GFP flags. + * + * Can be used in any context. + * + * Return: + * Address of the allocated buffer or NULL if no more memory can be allocated. + * The buffer must be released using qcom_tzmem_free(). + */ +void *qcom_tzmem_alloc(struct qcom_tzmem_pool *pool, size_t size, gfp_t gfp) +{ + unsigned long vaddr; + int ret; + + if (!size) + return NULL; + + size = PAGE_ALIGN(size); + + struct qcom_tzmem_chunk *chunk __free(kfree) = kzalloc(sizeof(*chunk), + gfp); + if (!chunk) + return NULL; + +again: + vaddr = gen_pool_alloc(pool->genpool, size); + if (!vaddr) { + if (qcom_tzmem_try_grow_pool(pool, size, gfp)) + goto again; + + return NULL; + } + + chunk->size = size; + chunk->owner = pool; + + scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock) { + ret = radix_tree_insert(&qcom_tzmem_chunks, vaddr, chunk); + if (ret) { + gen_pool_free(pool->genpool, vaddr, size); + return NULL; + } + + chunk = NULL; + } + + return (void *)vaddr; +} +EXPORT_SYMBOL_GPL(qcom_tzmem_alloc); + +/** + * qcom_tzmem_free() - Release a buffer allocated from a TZ memory pool. + * @vaddr: Virtual address of the buffer. + * + * Can be used in any context. + */ +void qcom_tzmem_free(void *vaddr) +{ + struct qcom_tzmem_chunk *chunk; + + scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock) + chunk = radix_tree_delete_item(&qcom_tzmem_chunks, + (unsigned long)vaddr, NULL); + + if (!chunk) { + WARN(1, "Virtual address %p not owned by TZ memory allocator", + vaddr); + return; + } + + scoped_guard(spinlock_irqsave, &chunk->owner->lock) + gen_pool_free(chunk->owner->genpool, (unsigned long)vaddr, + chunk->size); + kfree(chunk); +} +EXPORT_SYMBOL_GPL(qcom_tzmem_free); + +/** + * qcom_tzmem_to_phys() - Map the virtual address of TZ memory to physical. + * @vaddr: Virtual address of memory allocated from a TZ memory pool. + * + * Can be used in any context. The address must point to memory allocated + * using qcom_tzmem_alloc(). + * + * Returns: + * Physical address mapped from the virtual or 0 if the mapping failed. 
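+ *
+ * Illustrative sketch: passing a TZ buffer's physical address to an SCM call
+ * descriptor (pool, size and desc are assumed to exist in the caller):
+ *
+ *	void *buf __free(qcom_tzmem) = qcom_tzmem_alloc(pool, size, GFP_KERNEL);
+ *
+ *	if (!buf)
+ *		return -ENOMEM;
+ *
+ *	desc.args[0] = qcom_tzmem_to_phys(buf);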
+ */ +phys_addr_t qcom_tzmem_to_phys(void *vaddr) +{ + struct qcom_tzmem_chunk *chunk; + struct radix_tree_iter iter; + void __rcu **slot; + phys_addr_t ret; + + guard(spinlock_irqsave)(&qcom_tzmem_chunks_lock); + + radix_tree_for_each_slot(slot, &qcom_tzmem_chunks, &iter, 0) { + chunk = radix_tree_deref_slot_protected(slot, + &qcom_tzmem_chunks_lock); + + ret = gen_pool_virt_to_phys(chunk->owner->genpool, + (unsigned long)vaddr); + if (ret == -1) + continue; + + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(qcom_tzmem_to_phys); + +int qcom_tzmem_enable(struct device *dev) +{ + if (qcom_tzmem_dev) + return -EBUSY; + + qcom_tzmem_dev = dev; + + return qcom_tzmem_init(); +} +EXPORT_SYMBOL_GPL(qcom_tzmem_enable); + +MODULE_DESCRIPTION("TrustZone memory allocator for Qualcomm firmware drivers"); +MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/qcom/qcom_tzmem.h b/drivers/firmware/qcom/qcom_tzmem.h new file mode 100644 index 000000000000..8fa8a3eb940e --- /dev/null +++ b/drivers/firmware/qcom/qcom_tzmem.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2023-2024 Linaro Ltd. + */ + +#ifndef __QCOM_TZMEM_PRIV_H +#define __QCOM_TZMEM_PRIV_H + +struct device; + +int qcom_tzmem_enable(struct device *dev); + +#endif /* __QCOM_TZMEM_PRIV_H */ diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c index 5f43dfa22f79..2615fb780e3c 100644 --- a/drivers/firmware/qemu_fw_cfg.c +++ b/drivers/firmware/qemu_fw_cfg.c @@ -452,7 +452,7 @@ static void fw_cfg_sysfs_release_entry(struct kobject *kobj) } /* kobj_type: ties together all properties required to register an entry */ -static struct kobj_type fw_cfg_sysfs_entry_ktype = { +static const struct kobj_type fw_cfg_sysfs_entry_ktype = { .default_groups = fw_cfg_sysfs_entry_groups, .sysfs_ops = &fw_cfg_sysfs_attr_ops, .release = fw_cfg_sysfs_release_entry, @@ -460,7 +460,7 @@ static struct kobj_type fw_cfg_sysfs_entry_ktype = { /* raw-read method and attribute */ static ssize_t fw_cfg_sysfs_read_raw(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t pos, size_t count) { struct fw_cfg_sysfs_entry *entry = to_entry(kobj); @@ -474,9 +474,9 @@ static ssize_t fw_cfg_sysfs_read_raw(struct file *filp, struct kobject *kobj, return fw_cfg_read_blob(entry->select, buf, pos, count); } -static struct bin_attribute fw_cfg_sysfs_attr_raw = { +static const struct bin_attribute fw_cfg_sysfs_attr_raw = { .attr = { .name = "raw", .mode = S_IRUSR }, - .read = fw_cfg_sysfs_read_raw, + .read_new = fw_cfg_sysfs_read_raw, }; /* @@ -757,7 +757,7 @@ MODULE_DEVICE_TABLE(acpi, fw_cfg_sysfs_acpi_match); static struct platform_driver fw_cfg_sysfs_driver = { .probe = fw_cfg_sysfs_probe, - .remove_new = fw_cfg_sysfs_remove, + .remove = fw_cfg_sysfs_remove, .driver = { .name = "fw_cfg", .of_match_table = fw_cfg_sysfs_mmio_match, diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c index 322aada20f74..7ecde6921a0a 100644 --- a/drivers/firmware/raspberrypi.c +++ b/drivers/firmware/raspberrypi.c @@ -9,6 +9,7 @@ #include <linux/dma-mapping.h> #include <linux/kref.h> #include <linux/mailbox_client.h> +#include <linux/mailbox_controller.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_platform.h> @@ -61,7 +62,6 @@ rpi_firmware_transaction(struct rpi_firmware *fw, u32 chan, u32 data) ret = 0; } else { ret = -ETIMEDOUT; - WARN_ONCE(1, 
"Firmware transaction timeout"); } } else { dev_err(fw->cl.dev, "mbox_send_message returned %d\n", ret); @@ -97,8 +97,8 @@ int rpi_firmware_property_list(struct rpi_firmware *fw, if (size & 3) return -EINVAL; - buf = dma_alloc_coherent(fw->cl.dev, PAGE_ALIGN(size), &bus_addr, - GFP_ATOMIC); + buf = dma_alloc_coherent(fw->chan->mbox->dev, PAGE_ALIGN(size), + &bus_addr, GFP_ATOMIC); if (!buf) return -ENOMEM; @@ -124,9 +124,11 @@ int rpi_firmware_property_list(struct rpi_firmware *fw, dev_err(fw->cl.dev, "Request 0x%08x returned status 0x%08x\n", buf[2], buf[1]); ret = -EINVAL; + } else if (ret == -ETIMEDOUT) { + WARN_ONCE(1, "Firmware transaction 0x%08x timeout", buf[2]); } - dma_free_coherent(fw->cl.dev, PAGE_ALIGN(size), buf, bus_addr); + dma_free_coherent(fw->chan->mbox->dev, PAGE_ALIGN(size), buf, bus_addr); return ret; } @@ -404,7 +406,7 @@ static struct platform_driver rpi_firmware_driver = { }, .probe = rpi_firmware_probe, .shutdown = rpi_firmware_shutdown, - .remove_new = rpi_firmware_remove, + .remove = rpi_firmware_remove, }; module_platform_driver(rpi_firmware_driver); diff --git a/drivers/firmware/samsung/Kconfig b/drivers/firmware/samsung/Kconfig new file mode 100644 index 000000000000..16d81aeb1d41 --- /dev/null +++ b/drivers/firmware/samsung/Kconfig @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0-only + +config EXYNOS_ACPM_PROTOCOL + tristate "Exynos Alive Clock and Power Manager (ACPM) Message Protocol" + depends on ARCH_EXYNOS || COMPILE_TEST + depends on MAILBOX + help + Alive Clock and Power Manager (ACPM) Message Protocol is defined for + the purpose of communication between the ACPM firmware and masters + (AP, AOC, ...). ACPM firmware operates on the Active Power Management + (APM) module that handles overall power activities. + + This protocol driver provides interface for all the client drivers + making use of the features offered by the APM. diff --git a/drivers/firmware/samsung/Makefile b/drivers/firmware/samsung/Makefile new file mode 100644 index 000000000000..7b4c9f6f34f5 --- /dev/null +++ b/drivers/firmware/samsung/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only + +acpm-protocol-objs := exynos-acpm.o exynos-acpm-pmic.o +obj-$(CONFIG_EXYNOS_ACPM_PROTOCOL) += acpm-protocol.o diff --git a/drivers/firmware/samsung/exynos-acpm-pmic.c b/drivers/firmware/samsung/exynos-acpm-pmic.c new file mode 100644 index 000000000000..39b33a356ebd --- /dev/null +++ b/drivers/firmware/samsung/exynos-acpm-pmic.c @@ -0,0 +1,224 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2020 Samsung Electronics Co., Ltd. + * Copyright 2020 Google LLC. + * Copyright 2024 Linaro Ltd. 
+ */ +#include <linux/bitfield.h> +#include <linux/firmware/samsung/exynos-acpm-protocol.h> +#include <linux/ktime.h> +#include <linux/types.h> + +#include "exynos-acpm.h" +#include "exynos-acpm-pmic.h" + +#define ACPM_PMIC_CHANNEL GENMASK(15, 12) +#define ACPM_PMIC_TYPE GENMASK(11, 8) +#define ACPM_PMIC_REG GENMASK(7, 0) + +#define ACPM_PMIC_RETURN GENMASK(31, 24) +#define ACPM_PMIC_MASK GENMASK(23, 16) +#define ACPM_PMIC_VALUE GENMASK(15, 8) +#define ACPM_PMIC_FUNC GENMASK(7, 0) + +#define ACPM_PMIC_BULK_SHIFT 8 +#define ACPM_PMIC_BULK_MASK GENMASK(7, 0) +#define ACPM_PMIC_BULK_MAX_COUNT 8 + +enum exynos_acpm_pmic_func { + ACPM_PMIC_READ, + ACPM_PMIC_WRITE, + ACPM_PMIC_UPDATE, + ACPM_PMIC_BULK_READ, + ACPM_PMIC_BULK_WRITE, +}; + +static inline u32 acpm_pmic_set_bulk(u32 data, unsigned int i) +{ + return (data & ACPM_PMIC_BULK_MASK) << (ACPM_PMIC_BULK_SHIFT * i); +} + +static inline u32 acpm_pmic_get_bulk(u32 data, unsigned int i) +{ + return (data >> (ACPM_PMIC_BULK_SHIFT * i)) & ACPM_PMIC_BULK_MASK; +} + +static void acpm_pmic_set_xfer(struct acpm_xfer *xfer, u32 *cmd, size_t cmdlen, + unsigned int acpm_chan_id) +{ + xfer->txd = cmd; + xfer->rxd = cmd; + xfer->txlen = cmdlen; + xfer->rxlen = cmdlen; + xfer->acpm_chan_id = acpm_chan_id; +} + +static void acpm_pmic_init_read_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan) +{ + cmd[0] = FIELD_PREP(ACPM_PMIC_TYPE, type) | + FIELD_PREP(ACPM_PMIC_REG, reg) | + FIELD_PREP(ACPM_PMIC_CHANNEL, chan); + cmd[1] = FIELD_PREP(ACPM_PMIC_FUNC, ACPM_PMIC_READ); + cmd[3] = ktime_to_ms(ktime_get()); +} + +int acpm_pmic_read_reg(const struct acpm_handle *handle, + unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan, + u8 *buf) +{ + struct acpm_xfer xfer; + u32 cmd[4] = {0}; + int ret; + + acpm_pmic_init_read_cmd(cmd, type, reg, chan); + acpm_pmic_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id); + + ret = acpm_do_xfer(handle, &xfer); + if (ret) + return ret; + + *buf = FIELD_GET(ACPM_PMIC_VALUE, xfer.rxd[1]); + + return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]); +} + +static void acpm_pmic_init_bulk_read_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan, + u8 count) +{ + cmd[0] = FIELD_PREP(ACPM_PMIC_TYPE, type) | + FIELD_PREP(ACPM_PMIC_REG, reg) | + FIELD_PREP(ACPM_PMIC_CHANNEL, chan); + cmd[1] = FIELD_PREP(ACPM_PMIC_FUNC, ACPM_PMIC_BULK_READ) | + FIELD_PREP(ACPM_PMIC_VALUE, count); +} + +int acpm_pmic_bulk_read(const struct acpm_handle *handle, + unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan, + u8 count, u8 *buf) +{ + struct acpm_xfer xfer; + u32 cmd[4] = {0}; + int i, ret; + + if (count > ACPM_PMIC_BULK_MAX_COUNT) + return -EINVAL; + + acpm_pmic_init_bulk_read_cmd(cmd, type, reg, chan, count); + acpm_pmic_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id); + + ret = acpm_do_xfer(handle, &xfer); + if (ret) + return ret; + + ret = FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]); + if (ret) + return ret; + + for (i = 0; i < count; i++) { + if (i < 4) + buf[i] = acpm_pmic_get_bulk(xfer.rxd[2], i); + else + buf[i] = acpm_pmic_get_bulk(xfer.rxd[3], i - 4); + } + + return 0; +} + +static void acpm_pmic_init_write_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan, + u8 value) +{ + cmd[0] = FIELD_PREP(ACPM_PMIC_TYPE, type) | + FIELD_PREP(ACPM_PMIC_REG, reg) | + FIELD_PREP(ACPM_PMIC_CHANNEL, chan); + cmd[1] = FIELD_PREP(ACPM_PMIC_FUNC, ACPM_PMIC_WRITE) | + FIELD_PREP(ACPM_PMIC_VALUE, value); + cmd[3] = ktime_to_ms(ktime_get()); +} + +int acpm_pmic_write_reg(const struct acpm_handle *handle, + unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan, + u8 value) +{ + struct acpm_xfer xfer; + u32 
cmd[4] = {0}; + int ret; + + acpm_pmic_init_write_cmd(cmd, type, reg, chan, value); + acpm_pmic_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id); + + ret = acpm_do_xfer(handle, &xfer); + if (ret) + return ret; + + return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]); +} + +static void acpm_pmic_init_bulk_write_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan, + u8 count, const u8 *buf) +{ + int i; + + cmd[0] = FIELD_PREP(ACPM_PMIC_TYPE, type) | + FIELD_PREP(ACPM_PMIC_REG, reg) | + FIELD_PREP(ACPM_PMIC_CHANNEL, chan); + cmd[1] = FIELD_PREP(ACPM_PMIC_FUNC, ACPM_PMIC_BULK_WRITE) | + FIELD_PREP(ACPM_PMIC_VALUE, count); + + for (i = 0; i < count; i++) { + if (i < 4) + cmd[2] |= acpm_pmic_set_bulk(buf[i], i); + else + cmd[3] |= acpm_pmic_set_bulk(buf[i], i - 4); + } +} + +int acpm_pmic_bulk_write(const struct acpm_handle *handle, + unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan, + u8 count, const u8 *buf) +{ + struct acpm_xfer xfer; + u32 cmd[4] = {0}; + int ret; + + if (count > ACPM_PMIC_BULK_MAX_COUNT) + return -EINVAL; + + acpm_pmic_init_bulk_write_cmd(cmd, type, reg, chan, count, buf); + acpm_pmic_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id); + + ret = acpm_do_xfer(handle, &xfer); + if (ret) + return ret; + + return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]); +} + +static void acpm_pmic_init_update_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan, + u8 value, u8 mask) +{ + cmd[0] = FIELD_PREP(ACPM_PMIC_TYPE, type) | + FIELD_PREP(ACPM_PMIC_REG, reg) | + FIELD_PREP(ACPM_PMIC_CHANNEL, chan); + cmd[1] = FIELD_PREP(ACPM_PMIC_FUNC, ACPM_PMIC_UPDATE) | + FIELD_PREP(ACPM_PMIC_VALUE, value) | + FIELD_PREP(ACPM_PMIC_MASK, mask); + cmd[3] = ktime_to_ms(ktime_get()); +} + +int acpm_pmic_update_reg(const struct acpm_handle *handle, + unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan, + u8 value, u8 mask) +{ + struct acpm_xfer xfer; + u32 cmd[4] = {0}; + int ret; + + acpm_pmic_init_update_cmd(cmd, type, reg, chan, value, mask); + acpm_pmic_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id); + + ret = acpm_do_xfer(handle, &xfer); + if (ret) + return ret; + + return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]); +} diff --git a/drivers/firmware/samsung/exynos-acpm-pmic.h b/drivers/firmware/samsung/exynos-acpm-pmic.h new file mode 100644 index 000000000000..078421888a14 --- /dev/null +++ b/drivers/firmware/samsung/exynos-acpm-pmic.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2020 Samsung Electronics Co., Ltd. + * Copyright 2020 Google LLC. + * Copyright 2024 Linaro Ltd. 
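+ *
+ * Illustrative sketch of a call through this interface (the handle, channel
+ * id and register arguments are provided by the ACPM core and are assumptions
+ * here):
+ *
+ *	u8 val;
+ *	int ret;
+ *
+ *	ret = acpm_pmic_read_reg(handle, acpm_chan_id, type, reg, chan, &val);
+ *	if (ret)
+ *		return ret;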
+ */ +#ifndef __EXYNOS_ACPM_PMIC_H__ +#define __EXYNOS_ACPM_PMIC_H__ + +#include <linux/types.h> + +struct acpm_handle; + +int acpm_pmic_read_reg(const struct acpm_handle *handle, + unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan, + u8 *buf); +int acpm_pmic_bulk_read(const struct acpm_handle *handle, + unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan, + u8 count, u8 *buf); +int acpm_pmic_write_reg(const struct acpm_handle *handle, + unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan, + u8 value); +int acpm_pmic_bulk_write(const struct acpm_handle *handle, + unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan, + u8 count, const u8 *buf); +int acpm_pmic_update_reg(const struct acpm_handle *handle, + unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan, + u8 value, u8 mask); +#endif /* __EXYNOS_ACPM_PMIC_H__ */ diff --git a/drivers/firmware/samsung/exynos-acpm.c b/drivers/firmware/samsung/exynos-acpm.c new file mode 100644 index 000000000000..e02f14f4bd7c --- /dev/null +++ b/drivers/firmware/samsung/exynos-acpm.c @@ -0,0 +1,773 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2020 Samsung Electronics Co., Ltd. + * Copyright 2020 Google LLC. + * Copyright 2024 Linaro Ltd. + */ + +#include <linux/bitfield.h> +#include <linux/bitmap.h> +#include <linux/bits.h> +#include <linux/cleanup.h> +#include <linux/container_of.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/firmware/samsung/exynos-acpm-protocol.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/ktime.h> +#include <linux/mailbox/exynos-message.h> +#include <linux/mailbox_client.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/math.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/types.h> + +#include "exynos-acpm.h" +#include "exynos-acpm-pmic.h" + +#define ACPM_PROTOCOL_SEQNUM GENMASK(21, 16) + +#define ACPM_POLL_TIMEOUT_US (100 * USEC_PER_MSEC) +#define ACPM_TX_TIMEOUT_US 500000 + +#define ACPM_GS101_INITDATA_BASE 0xa000 + +/** + * struct acpm_shmem - shared memory configuration information. + * @reserved: unused fields. + * @chans: offset to array of struct acpm_chan_shmem. + * @reserved1: unused fields. + * @num_chans: number of channels. + */ +struct acpm_shmem { + u32 reserved[2]; + u32 chans; + u32 reserved1[3]; + u32 num_chans; +}; + +/** + * struct acpm_chan_shmem - descriptor of a shared memory channel. + * + * @id: channel ID. + * @reserved: unused fields. + * @rx_rear: rear pointer of APM RX queue (TX for AP). + * @rx_front: front pointer of APM RX queue (TX for AP). + * @rx_base: base address of APM RX queue (TX for AP). + * @reserved1: unused fields. + * @tx_rear: rear pointer of APM TX queue (RX for AP). + * @tx_front: front pointer of APM TX queue (RX for AP). + * @tx_base: base address of APM TX queue (RX for AP). + * @qlen: queue length. Applies to both TX/RX queues. + * @mlen: message length. Applies to both TX/RX queues. + * @reserved2: unused fields. + * @poll_completion: true when the channel works on polling. + */ +struct acpm_chan_shmem { + u32 id; + u32 reserved[3]; + u32 rx_rear; + u32 rx_front; + u32 rx_base; + u32 reserved1[3]; + u32 tx_rear; + u32 tx_front; + u32 tx_base; + u32 qlen; + u32 mlen; + u32 reserved2[2]; + u32 poll_completion; +}; + +/** + * struct acpm_queue - exynos acpm queue. + * + * @rear: rear address of the queue. + * @front: front address of the queue. + * @base: base address of the queue. 
+ */ +struct acpm_queue { + void __iomem *rear; + void __iomem *front; + void __iomem *base; +}; + +/** + * struct acpm_rx_data - RX queue data. + * + * @cmd: pointer to where the data shall be saved. + * @n_cmd: number of 32-bit commands. + * @response: true if the client expects the RX data. + */ +struct acpm_rx_data { + u32 *cmd; + size_t n_cmd; + bool response; +}; + +#define ACPM_SEQNUM_MAX 64 + +/** + * struct acpm_chan - driver internal representation of a channel. + * @cl: mailbox client. + * @chan: mailbox channel. + * @acpm: pointer to driver private data. + * @tx: TX queue. The enqueue is done by the host. + * - front index is written by the host. + * - rear index is written by the firmware. + * + * @rx: RX queue. The enqueue is done by the firmware. + * - front index is written by the firmware. + * - rear index is written by the host. + * @tx_lock: protects TX queue. + * @rx_lock: protects RX queue. + * @qlen: queue length. Applies to both TX/RX queues. + * @mlen: message length. Applies to both TX/RX queues. + * @seqnum: sequence number of the last message enqueued on TX queue. + * @id: channel ID. + * @poll_completion: indicates if the transfer needs to be polled for + * completion or interrupt mode is used. + * @bitmap_seqnum: bitmap that tracks the messages on the TX/RX queues. + * @rx_data: internal buffer used to drain the RX queue. + */ +struct acpm_chan { + struct mbox_client cl; + struct mbox_chan *chan; + struct acpm_info *acpm; + struct acpm_queue tx; + struct acpm_queue rx; + struct mutex tx_lock; + struct mutex rx_lock; + + unsigned int qlen; + unsigned int mlen; + u8 seqnum; + u8 id; + bool poll_completion; + + DECLARE_BITMAP(bitmap_seqnum, ACPM_SEQNUM_MAX - 1); + struct acpm_rx_data rx_data[ACPM_SEQNUM_MAX]; +}; + +/** + * struct acpm_info - driver's private data. + * @shmem: pointer to the SRAM configuration data. + * @sram_base: base address of SRAM. + * @chans: pointer to the ACPM channel parameters retrieved from SRAM. + * @dev: pointer to the exynos-acpm device. + * @handle: instance of acpm_handle to send to clients. + * @num_chans: number of channels available for this controller. + */ +struct acpm_info { + struct acpm_shmem __iomem *shmem; + void __iomem *sram_base; + struct acpm_chan *chans; + struct device *dev; + struct acpm_handle handle; + u32 num_chans; +}; + +/** + * struct acpm_match_data - of_device_id data. + * @initdata_base: offset in SRAM where the channels configuration resides. + */ +struct acpm_match_data { + loff_t initdata_base; +}; + +#define client_to_acpm_chan(c) container_of(c, struct acpm_chan, cl) +#define handle_to_acpm_info(h) container_of(h, struct acpm_info, handle) + +/** + * acpm_get_saved_rx() - get the response if it was already saved. + * @achan: ACPM channel info. + * @xfer: reference to the transfer to get response for. + * @tx_seqnum: xfer TX sequence number. + */ +static void acpm_get_saved_rx(struct acpm_chan *achan, + const struct acpm_xfer *xfer, u32 tx_seqnum) +{ + const struct acpm_rx_data *rx_data = &achan->rx_data[tx_seqnum - 1]; + u32 rx_seqnum; + + if (!rx_data->response) + return; + + rx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, rx_data->cmd[0]); + + if (rx_seqnum == tx_seqnum) { + memcpy(xfer->rxd, rx_data->cmd, xfer->rxlen); + clear_bit(rx_seqnum - 1, achan->bitmap_seqnum); + } +} + +/** + * acpm_get_rx() - get response from RX queue. + * @achan: ACPM channel info. + * @xfer: reference to the transfer to get response for. + * + * Return: 0 on success, -errno otherwise. 
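+ *
+ * The RX queue is fully drained on each call: a response matching @xfer is
+ * copied straight into @xfer->rxd, while responses expected by other
+ * outstanding requests are stashed in the per-seqnum rx_data[] buffers so
+ * the hardware rear pointer can be advanced and the queue marked empty.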
+ */ +static int acpm_get_rx(struct acpm_chan *achan, const struct acpm_xfer *xfer) +{ + u32 rx_front, rx_seqnum, tx_seqnum, seqnum; + const void __iomem *base, *addr; + struct acpm_rx_data *rx_data; + u32 i, val, mlen; + bool rx_set = false; + + guard(mutex)(&achan->rx_lock); + + rx_front = readl(achan->rx.front); + i = readl(achan->rx.rear); + + tx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]); + + if (i == rx_front) { + acpm_get_saved_rx(achan, xfer, tx_seqnum); + return 0; + } + + base = achan->rx.base; + mlen = achan->mlen; + + /* Drain RX queue. */ + do { + /* Read RX seqnum. */ + addr = base + mlen * i; + val = readl(addr); + + rx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, val); + if (!rx_seqnum) + return -EIO; + /* + * mssg seqnum starts with value 1, whereas the driver considers + * the first mssg at index 0. + */ + seqnum = rx_seqnum - 1; + rx_data = &achan->rx_data[seqnum]; + + if (rx_data->response) { + if (rx_seqnum == tx_seqnum) { + __ioread32_copy(xfer->rxd, addr, + xfer->rxlen / 4); + rx_set = true; + clear_bit(seqnum, achan->bitmap_seqnum); + } else { + /* + * The RX data corresponds to another request. + * Save the data to drain the queue, but don't + * clear yet the bitmap. It will be cleared + * after the response is copied to the request. + */ + __ioread32_copy(rx_data->cmd, addr, + xfer->rxlen / 4); + } + } else { + clear_bit(seqnum, achan->bitmap_seqnum); + } + + i = (i + 1) % achan->qlen; + } while (i != rx_front); + + /* We saved all responses, mark RX empty. */ + writel(rx_front, achan->rx.rear); + + /* + * If the response was not in this iteration of the queue, check if the + * RX data was previously saved. + */ + if (!rx_set) + acpm_get_saved_rx(achan, xfer, tx_seqnum); + + return 0; +} + +/** + * acpm_dequeue_by_polling() - RX dequeue by polling. + * @achan: ACPM channel info. + * @xfer: reference to the transfer being waited for. + * + * Return: 0 on success, -errno otherwise. + */ +static int acpm_dequeue_by_polling(struct acpm_chan *achan, + const struct acpm_xfer *xfer) +{ + struct device *dev = achan->acpm->dev; + ktime_t timeout; + u32 seqnum; + int ret; + + seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]); + + timeout = ktime_add_us(ktime_get(), ACPM_POLL_TIMEOUT_US); + do { + ret = acpm_get_rx(achan, xfer); + if (ret) + return ret; + + if (!test_bit(seqnum - 1, achan->bitmap_seqnum)) + return 0; + + /* Determined experimentally. */ + udelay(20); + } while (ktime_before(ktime_get(), timeout)); + + dev_err(dev, "Timeout! ch:%u s:%u bitmap:%lx.\n", + achan->id, seqnum, achan->bitmap_seqnum[0]); + + return -ETIME; +} + +/** + * acpm_wait_for_queue_slots() - wait for queue slots. + * + * @achan: ACPM channel info. + * @next_tx_front: next front index of the TX queue. + * + * Return: 0 on success, -errno otherwise. + */ +static int acpm_wait_for_queue_slots(struct acpm_chan *achan, u32 next_tx_front) +{ + u32 val, ret; + + /* + * Wait for RX front to keep up with TX front. Make sure there's at + * least one element between them. + */ + ret = readl_poll_timeout(achan->rx.front, val, next_tx_front != val, 0, + ACPM_TX_TIMEOUT_US); + if (ret) { + dev_err(achan->acpm->dev, "RX front can not keep up with TX front.\n"); + return ret; + } + + ret = readl_poll_timeout(achan->tx.rear, val, next_tx_front != val, 0, + ACPM_TX_TIMEOUT_US); + if (ret) + dev_err(achan->acpm->dev, "TX queue is full.\n"); + + return ret; +} + +/** + * acpm_prepare_xfer() - prepare a transfer before writing the message to the + * TX queue. + * @achan: ACPM channel info. 
+ * @xfer: reference to the transfer being prepared. + */ +static void acpm_prepare_xfer(struct acpm_chan *achan, + const struct acpm_xfer *xfer) +{ + struct acpm_rx_data *rx_data; + u32 *txd = (u32 *)xfer->txd; + + /* Prevent chan->seqnum from being re-used */ + do { + if (++achan->seqnum == ACPM_SEQNUM_MAX) + achan->seqnum = 1; + } while (test_bit(achan->seqnum - 1, achan->bitmap_seqnum)); + + txd[0] |= FIELD_PREP(ACPM_PROTOCOL_SEQNUM, achan->seqnum); + + /* Clear data for upcoming responses */ + rx_data = &achan->rx_data[achan->seqnum - 1]; + memset(rx_data->cmd, 0, sizeof(*rx_data->cmd) * rx_data->n_cmd); + if (xfer->rxd) + rx_data->response = true; + + /* Flag the index based on seqnum. (seqnum: 1~63, bitmap: 0~62) */ + set_bit(achan->seqnum - 1, achan->bitmap_seqnum); +} + +/** + * acpm_wait_for_message_response - an helper to group all possible ways of + * waiting for a synchronous message response. + * + * @achan: ACPM channel info. + * @xfer: reference to the transfer being waited for. + * + * Return: 0 on success, -errno otherwise. + */ +static int acpm_wait_for_message_response(struct acpm_chan *achan, + const struct acpm_xfer *xfer) +{ + /* Just polling mode supported for now. */ + return acpm_dequeue_by_polling(achan, xfer); +} + +/** + * acpm_do_xfer() - do one transfer. + * @handle: pointer to the acpm handle. + * @xfer: transfer to initiate and wait for response. + * + * Return: 0 on success, -errno otherwise. + */ +int acpm_do_xfer(const struct acpm_handle *handle, const struct acpm_xfer *xfer) +{ + struct acpm_info *acpm = handle_to_acpm_info(handle); + struct exynos_mbox_msg msg; + struct acpm_chan *achan; + u32 idx, tx_front; + int ret; + + if (xfer->acpm_chan_id >= acpm->num_chans) + return -EINVAL; + + achan = &acpm->chans[xfer->acpm_chan_id]; + + if (!xfer->txd || xfer->txlen > achan->mlen || xfer->rxlen > achan->mlen) + return -EINVAL; + + if (!achan->poll_completion) { + dev_err(achan->acpm->dev, "Interrupt mode not supported\n"); + return -EOPNOTSUPP; + } + + scoped_guard(mutex, &achan->tx_lock) { + tx_front = readl(achan->tx.front); + idx = (tx_front + 1) % achan->qlen; + + ret = acpm_wait_for_queue_slots(achan, idx); + if (ret) + return ret; + + acpm_prepare_xfer(achan, xfer); + + /* Write TX command. */ + __iowrite32_copy(achan->tx.base + achan->mlen * tx_front, + xfer->txd, xfer->txlen / 4); + + /* Advance TX front. */ + writel(idx, achan->tx.front); + } + + msg.chan_id = xfer->acpm_chan_id; + msg.chan_type = EXYNOS_MBOX_CHAN_TYPE_DOORBELL; + ret = mbox_send_message(achan->chan, (void *)&msg); + if (ret < 0) + return ret; + + ret = acpm_wait_for_message_response(achan, xfer); + + /* + * NOTE: we might prefer not to need the mailbox ticker to manage the + * transfer queueing since the protocol layer queues things by itself. + * Unfortunately, we have to kick the mailbox framework after we have + * received our message. + */ + mbox_client_txdone(achan->chan, ret); + + return ret; +} + +/** + * acpm_chan_shmem_get_params() - get channel parameters and addresses of the + * TX/RX queues. + * @achan: ACPM channel info. + * @chan_shmem: __iomem pointer to a channel described in shared memory. 
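+ *
+ * The rx_*/tx_* fields of the shared memory descriptor are named from the
+ * firmware's point of view, so the AP-side TX queue is populated from the
+ * rx_* offsets and the AP-side RX queue from the tx_* offsets.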
+ */ +static void acpm_chan_shmem_get_params(struct acpm_chan *achan, + struct acpm_chan_shmem __iomem *chan_shmem) +{ + void __iomem *base = achan->acpm->sram_base; + struct acpm_queue *rx = &achan->rx; + struct acpm_queue *tx = &achan->tx; + + achan->mlen = readl(&chan_shmem->mlen); + achan->poll_completion = readl(&chan_shmem->poll_completion); + achan->id = readl(&chan_shmem->id); + achan->qlen = readl(&chan_shmem->qlen); + + tx->base = base + readl(&chan_shmem->rx_base); + tx->rear = base + readl(&chan_shmem->rx_rear); + tx->front = base + readl(&chan_shmem->rx_front); + + rx->base = base + readl(&chan_shmem->tx_base); + rx->rear = base + readl(&chan_shmem->tx_rear); + rx->front = base + readl(&chan_shmem->tx_front); + + dev_vdbg(achan->acpm->dev, "ID = %d poll = %d, mlen = %d, qlen = %d\n", + achan->id, achan->poll_completion, achan->mlen, achan->qlen); +} + +/** + * acpm_achan_alloc_cmds() - allocate buffers for retrieving data from the ACPM + * firmware. + * @achan: ACPM channel info. + * + * Return: 0 on success, -errno otherwise. + */ +static int acpm_achan_alloc_cmds(struct acpm_chan *achan) +{ + struct device *dev = achan->acpm->dev; + struct acpm_rx_data *rx_data; + size_t cmd_size, n_cmd; + int i; + + if (achan->mlen == 0) + return 0; + + cmd_size = sizeof(*(achan->rx_data[0].cmd)); + n_cmd = DIV_ROUND_UP_ULL(achan->mlen, cmd_size); + + for (i = 0; i < ACPM_SEQNUM_MAX; i++) { + rx_data = &achan->rx_data[i]; + rx_data->n_cmd = n_cmd; + rx_data->cmd = devm_kcalloc(dev, n_cmd, cmd_size, GFP_KERNEL); + if (!rx_data->cmd) + return -ENOMEM; + } + + return 0; +} + +/** + * acpm_free_mbox_chans() - free mailbox channels. + * @acpm: pointer to driver data. + */ +static void acpm_free_mbox_chans(struct acpm_info *acpm) +{ + int i; + + for (i = 0; i < acpm->num_chans; i++) + if (!IS_ERR_OR_NULL(acpm->chans[i].chan)) + mbox_free_channel(acpm->chans[i].chan); +} + +/** + * acpm_channels_init() - initialize channels based on the configuration data in + * the shared memory. + * @acpm: pointer to driver data. + * + * Return: 0 on success, -errno otherwise. + */ +static int acpm_channels_init(struct acpm_info *acpm) +{ + struct acpm_shmem __iomem *shmem = acpm->shmem; + struct acpm_chan_shmem __iomem *chans_shmem; + struct device *dev = acpm->dev; + int i, ret; + + acpm->num_chans = readl(&shmem->num_chans); + acpm->chans = devm_kcalloc(dev, acpm->num_chans, sizeof(*acpm->chans), + GFP_KERNEL); + if (!acpm->chans) + return -ENOMEM; + + chans_shmem = acpm->sram_base + readl(&shmem->chans); + + for (i = 0; i < acpm->num_chans; i++) { + struct acpm_chan_shmem __iomem *chan_shmem = &chans_shmem[i]; + struct acpm_chan *achan = &acpm->chans[i]; + struct mbox_client *cl = &achan->cl; + + achan->acpm = acpm; + + acpm_chan_shmem_get_params(achan, chan_shmem); + + ret = acpm_achan_alloc_cmds(achan); + if (ret) + return ret; + + mutex_init(&achan->rx_lock); + mutex_init(&achan->tx_lock); + + cl->dev = dev; + + achan->chan = mbox_request_channel(cl, 0); + if (IS_ERR(achan->chan)) { + acpm_free_mbox_chans(acpm); + return PTR_ERR(achan->chan); + } + } + + return 0; +} + +/** + * acpm_setup_ops() - setup the operations structures. + * @acpm: pointer to the driver data. 
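+ *
+ * Only the PMIC register accessors are wired up here; clients reach them
+ * through handle->ops.pmic_ops once they hold an ACPM handle.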
+ */ +static void acpm_setup_ops(struct acpm_info *acpm) +{ + struct acpm_pmic_ops *pmic_ops = &acpm->handle.ops.pmic_ops; + + pmic_ops->read_reg = acpm_pmic_read_reg; + pmic_ops->bulk_read = acpm_pmic_bulk_read; + pmic_ops->write_reg = acpm_pmic_write_reg; + pmic_ops->bulk_write = acpm_pmic_bulk_write; + pmic_ops->update_reg = acpm_pmic_update_reg; +} + +static int acpm_probe(struct platform_device *pdev) +{ + const struct acpm_match_data *match_data; + struct device *dev = &pdev->dev; + struct device_node *shmem; + struct acpm_info *acpm; + resource_size_t size; + struct resource res; + int ret; + + acpm = devm_kzalloc(dev, sizeof(*acpm), GFP_KERNEL); + if (!acpm) + return -ENOMEM; + + shmem = of_parse_phandle(dev->of_node, "shmem", 0); + ret = of_address_to_resource(shmem, 0, &res); + of_node_put(shmem); + if (ret) + return dev_err_probe(dev, ret, + "Failed to get shared memory.\n"); + + size = resource_size(&res); + acpm->sram_base = devm_ioremap(dev, res.start, size); + if (!acpm->sram_base) + return dev_err_probe(dev, -ENOMEM, + "Failed to ioremap shared memory.\n"); + + match_data = of_device_get_match_data(dev); + if (!match_data) + return dev_err_probe(dev, -EINVAL, + "Failed to get match data.\n"); + + acpm->shmem = acpm->sram_base + match_data->initdata_base; + acpm->dev = dev; + + ret = acpm_channels_init(acpm); + if (ret) + return ret; + + acpm_setup_ops(acpm); + + platform_set_drvdata(pdev, acpm); + + return devm_of_platform_populate(dev); +} + +/** + * acpm_handle_put() - release the handle acquired by acpm_get_by_phandle. + * @handle: Handle acquired by acpm_get_by_phandle. + */ +static void acpm_handle_put(const struct acpm_handle *handle) +{ + struct acpm_info *acpm = handle_to_acpm_info(handle); + struct device *dev = acpm->dev; + + module_put(dev->driver->owner); + /* Drop reference taken with of_find_device_by_node(). */ + put_device(dev); +} + +/** + * devm_acpm_release() - devres release method. + * @dev: pointer to device. + * @res: pointer to resource. + */ +static void devm_acpm_release(struct device *dev, void *res) +{ + acpm_handle_put(*(struct acpm_handle **)res); +} + +/** + * acpm_get_by_node() - get the ACPM handle using node pointer. + * @dev: device pointer requesting ACPM handle. + * @np: ACPM device tree node. + * + * Return: pointer to handle on success, ERR_PTR(-errno) otherwise. + */ +static const struct acpm_handle *acpm_get_by_node(struct device *dev, + struct device_node *np) +{ + struct platform_device *pdev; + struct device_link *link; + struct acpm_info *acpm; + + pdev = of_find_device_by_node(np); + if (!pdev) + return ERR_PTR(-EPROBE_DEFER); + + acpm = platform_get_drvdata(pdev); + if (!acpm) { + platform_device_put(pdev); + return ERR_PTR(-EPROBE_DEFER); + } + + if (!try_module_get(pdev->dev.driver->owner)) { + platform_device_put(pdev); + return ERR_PTR(-EPROBE_DEFER); + } + + link = device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER); + if (!link) { + dev_err(&pdev->dev, + "Failed to create device link to consumer %s.\n", + dev_name(dev)); + platform_device_put(pdev); + module_put(pdev->dev.driver->owner); + return ERR_PTR(-EINVAL); + } + + return &acpm->handle; +} + +/** + * devm_acpm_get_by_node() - managed get handle using node pointer. + * @dev: device pointer requesting ACPM handle. + * @np: ACPM device tree node. + * + * Return: pointer to handle on success, ERR_PTR(-errno) otherwise. 
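+ *
+ * A minimal consumer sketch (the "exynos,acpm-ipc" phandle name and the
+ * channel/type/register values are only placeholders; the op is assumed to
+ * follow the acpm_pmic_read_reg() signature it is wired to in
+ * acpm_setup_ops()):
+ *
+ *   struct device_node *np = of_parse_phandle(dev->of_node,
+ *                                             "exynos,acpm-ipc", 0);
+ *   const struct acpm_handle *acpm;
+ *   u8 val;
+ *   int ret;
+ *
+ *   if (!np)
+ *           return -ENODEV;
+ *   acpm = devm_acpm_get_by_node(dev, np);
+ *   of_node_put(np);
+ *   if (IS_ERR(acpm))
+ *           return PTR_ERR(acpm);
+ *   ret = acpm->ops.pmic_ops.read_reg(acpm, 0, 0x1, 0x23, 0x0, &val);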
+ */ +const struct acpm_handle *devm_acpm_get_by_node(struct device *dev, + struct device_node *np) +{ + const struct acpm_handle **ptr, *handle; + + ptr = devres_alloc(devm_acpm_release, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return ERR_PTR(-ENOMEM); + + handle = acpm_get_by_node(dev, np); + if (!IS_ERR(handle)) { + *ptr = handle; + devres_add(dev, ptr); + } else { + devres_free(ptr); + } + + return handle; +} +EXPORT_SYMBOL_GPL(devm_acpm_get_by_node); + +static const struct acpm_match_data acpm_gs101 = { + .initdata_base = ACPM_GS101_INITDATA_BASE, +}; + +static const struct of_device_id acpm_match[] = { + { + .compatible = "google,gs101-acpm-ipc", + .data = &acpm_gs101, + }, + {}, +}; +MODULE_DEVICE_TABLE(of, acpm_match); + +static struct platform_driver acpm_driver = { + .probe = acpm_probe, + .driver = { + .name = "exynos-acpm-protocol", + .of_match_table = acpm_match, + }, +}; +module_platform_driver(acpm_driver); + +MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@linaro.org>"); +MODULE_DESCRIPTION("Samsung Exynos ACPM mailbox protocol driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/samsung/exynos-acpm.h b/drivers/firmware/samsung/exynos-acpm.h new file mode 100644 index 000000000000..2d14cb58f98c --- /dev/null +++ b/drivers/firmware/samsung/exynos-acpm.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2020 Samsung Electronics Co., Ltd. + * Copyright 2020 Google LLC. + * Copyright 2024 Linaro Ltd. + */ +#ifndef __EXYNOS_ACPM_H__ +#define __EXYNOS_ACPM_H__ + +struct acpm_xfer { + const u32 *txd; + u32 *rxd; + size_t txlen; + size_t rxlen; + unsigned int acpm_chan_id; +}; + +struct acpm_handle; + +int acpm_do_xfer(const struct acpm_handle *handle, + const struct acpm_xfer *xfer); + +#endif /* __EXYNOS_ACPM_H__ */ diff --git a/drivers/firmware/smccc/kvm_guest.c b/drivers/firmware/smccc/kvm_guest.c index 89a68e7eeaa6..a123c05cbc9e 100644 --- a/drivers/firmware/smccc/kvm_guest.c +++ b/drivers/firmware/smccc/kvm_guest.c @@ -6,8 +6,11 @@ #include <linux/bitmap.h> #include <linux/cache.h> #include <linux/kernel.h> +#include <linux/memblock.h> #include <linux/string.h> +#include <uapi/linux/psci.h> + #include <asm/hypervisor.h> static DECLARE_BITMAP(__kvm_arm_hyp_services, ARM_SMCCC_KVM_NUM_FUNCS) __ro_after_init = { }; @@ -39,6 +42,8 @@ void __init kvm_init_hyp_services(void) pr_info("hypervisor services detected (0x%08lx 0x%08lx 0x%08lx 0x%08lx)\n", res.a3, res.a2, res.a1, res.a0); + + kvm_arch_init_hyp_services(); } bool kvm_arm_hyp_service_available(u32 func_id) @@ -49,3 +54,66 @@ bool kvm_arm_hyp_service_available(u32 func_id) return test_bit(func_id, __kvm_arm_hyp_services); } EXPORT_SYMBOL_GPL(kvm_arm_hyp_service_available); + +#ifdef CONFIG_ARM64 +void __init kvm_arm_target_impl_cpu_init(void) +{ + int i; + u32 ver; + u64 max_cpus; + struct arm_smccc_res res; + struct target_impl_cpu *target; + + if (!kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_VER) || + !kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_CPUS)) + return; + + arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_VER_FUNC_ID, + 0, &res); + if (res.a0 != SMCCC_RET_SUCCESS) + return; + + /* Version info is in lower 32 bits and is in SMMCCC_VERSION format */ + ver = lower_32_bits(res.a1); + if (PSCI_VERSION_MAJOR(ver) != 1) { + pr_warn("Unsupported target CPU implementation version v%d.%d\n", + PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver)); + return; + } + + if (!res.a2) { + pr_warn("No target implementation CPUs specified\n"); + 
return; + } + + max_cpus = res.a2; + target = memblock_alloc(sizeof(*target) * max_cpus, __alignof__(*target)); + if (!target) { + pr_warn("Not enough memory for struct target_impl_cpu\n"); + return; + } + + for (i = 0; i < max_cpus; i++) { + arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_CPUS_FUNC_ID, + i, 0, 0, &res); + if (res.a0 != SMCCC_RET_SUCCESS) { + pr_warn("Discovering target implementation CPUs failed\n"); + goto mem_free; + } + target[i].midr = res.a1; + target[i].revidr = res.a2; + target[i].aidr = res.a3; + } + + if (!cpu_errata_set_target_impl(max_cpus, target)) { + pr_warn("Failed to set target implementation CPUs\n"); + goto mem_free; + } + + pr_info("Number of target implementation CPUs is %lld\n", max_cpus); + return; + +mem_free: + memblock_free(target, sizeof(*target) * max_cpus); +} +#endif diff --git a/drivers/firmware/smccc/smccc.c b/drivers/firmware/smccc/smccc.c index db818f9dcb8e..a74600d9f2d7 100644 --- a/drivers/firmware/smccc/smccc.c +++ b/drivers/firmware/smccc/smccc.c @@ -16,7 +16,6 @@ static u32 smccc_version = ARM_SMCCC_VERSION_1_0; static enum arm_smccc_conduit smccc_conduit = SMCCC_CONDUIT_NONE; bool __ro_after_init smccc_trng_available = false; -u64 __ro_after_init smccc_has_sve_hint = false; s32 __ro_after_init smccc_soc_id_version = SMCCC_RET_NOT_SUPPORTED; s32 __ro_after_init smccc_soc_id_revision = SMCCC_RET_NOT_SUPPORTED; @@ -28,9 +27,6 @@ void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit) smccc_conduit = conduit; smccc_trng_available = smccc_probe_trng(); - if (IS_ENABLED(CONFIG_ARM64_SVE) && - smccc_version >= ARM_SMCCC_VERSION_1_3) - smccc_has_sve_hint = true; if ((smccc_version >= ARM_SMCCC_VERSION_1_2) && (smccc_conduit != SMCCC_CONDUIT_NONE)) { @@ -69,6 +65,7 @@ s32 arm_smccc_get_soc_id_revision(void) { return smccc_soc_id_revision; } +EXPORT_SYMBOL_GPL(arm_smccc_get_soc_id_revision); static int __init smccc_devices_init(void) { diff --git a/drivers/firmware/smccc/soc_id.c b/drivers/firmware/smccc/soc_id.c index 1990263fbba0..c24b3fca1cfe 100644 --- a/drivers/firmware/smccc/soc_id.c +++ b/drivers/firmware/smccc/soc_id.c @@ -32,6 +32,85 @@ static struct soc_device *soc_dev; static struct soc_device_attribute *soc_dev_attr; +#ifdef CONFIG_ARM64 + +static char __ro_after_init smccc_soc_id_name[136] = ""; + +static inline void str_fragment_from_reg(char *dst, unsigned long reg) +{ + dst[0] = (reg >> 0) & 0xff; + dst[1] = (reg >> 8) & 0xff; + dst[2] = (reg >> 16) & 0xff; + dst[3] = (reg >> 24) & 0xff; + dst[4] = (reg >> 32) & 0xff; + dst[5] = (reg >> 40) & 0xff; + dst[6] = (reg >> 48) & 0xff; + dst[7] = (reg >> 56) & 0xff; +} + +static char __init *smccc_soc_name_init(void) +{ + struct arm_smccc_1_2_regs args; + struct arm_smccc_1_2_regs res; + size_t len; + + /* + * Issue Number 1.6 of the Arm SMC Calling Convention + * specification introduces an optional "name" string + * to the ARM_SMCCC_ARCH_SOC_ID function. Fetch it if + * available. + */ + args.a0 = ARM_SMCCC_ARCH_SOC_ID; + args.a1 = 2; /* SOC_ID name */ + arm_smccc_1_2_invoke(&args, &res); + + if ((u32)res.a0 == 0) { + /* + * Copy res.a1..res.a17 to the smccc_soc_id_name string + * 8 bytes at a time. As per Issue 1.6 of the Arm SMC + * Calling Convention, the string will be NUL terminated + * and padded, from the end of the string to the end of the + * 136 byte buffer, with NULs. 
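+ * Seventeen 64-bit result registers (a1..a17) provide 17 * 8 = 136
+ * bytes, matching the size of the smccc_soc_id_name buffer above.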
+ */ + str_fragment_from_reg(smccc_soc_id_name + 8 * 0, res.a1); + str_fragment_from_reg(smccc_soc_id_name + 8 * 1, res.a2); + str_fragment_from_reg(smccc_soc_id_name + 8 * 2, res.a3); + str_fragment_from_reg(smccc_soc_id_name + 8 * 3, res.a4); + str_fragment_from_reg(smccc_soc_id_name + 8 * 4, res.a5); + str_fragment_from_reg(smccc_soc_id_name + 8 * 5, res.a6); + str_fragment_from_reg(smccc_soc_id_name + 8 * 6, res.a7); + str_fragment_from_reg(smccc_soc_id_name + 8 * 7, res.a8); + str_fragment_from_reg(smccc_soc_id_name + 8 * 8, res.a9); + str_fragment_from_reg(smccc_soc_id_name + 8 * 9, res.a10); + str_fragment_from_reg(smccc_soc_id_name + 8 * 10, res.a11); + str_fragment_from_reg(smccc_soc_id_name + 8 * 11, res.a12); + str_fragment_from_reg(smccc_soc_id_name + 8 * 12, res.a13); + str_fragment_from_reg(smccc_soc_id_name + 8 * 13, res.a14); + str_fragment_from_reg(smccc_soc_id_name + 8 * 14, res.a15); + str_fragment_from_reg(smccc_soc_id_name + 8 * 15, res.a16); + str_fragment_from_reg(smccc_soc_id_name + 8 * 16, res.a17); + + len = strnlen(smccc_soc_id_name, sizeof(smccc_soc_id_name)); + if (len) { + if (len == sizeof(smccc_soc_id_name)) + pr_warn(FW_BUG "Ignoring improperly formatted name\n"); + else + return smccc_soc_id_name; + } + } + + return NULL; +} + +#else + +static char __init *smccc_soc_name_init(void) +{ + return NULL; +} + +#endif + static int __init smccc_soc_init(void) { int soc_id_rev, soc_id_version; @@ -72,6 +151,7 @@ static int __init smccc_soc_init(void) soc_dev_attr->soc_id = soc_id_str; soc_dev_attr->revision = soc_id_rev_str; soc_dev_attr->family = soc_id_jep106_id_str; + soc_dev_attr->machine = smccc_soc_name_init(); soc_dev = soc_device_register(soc_dev_attr); if (IS_ERR(soc_dev)) { diff --git a/drivers/firmware/stratix10-rsu.c b/drivers/firmware/stratix10-rsu.c index e20cee9c2d32..1ea39a0a76c7 100644 --- a/drivers/firmware/stratix10-rsu.c +++ b/drivers/firmware/stratix10-rsu.c @@ -802,7 +802,7 @@ static void stratix10_rsu_remove(struct platform_device *pdev) static struct platform_driver stratix10_rsu_driver = { .probe = stratix10_rsu_probe, - .remove_new = stratix10_rsu_remove, + .remove = stratix10_rsu_remove, .driver = { .name = "stratix10-rsu", .dev_groups = rsu_groups, diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c index 528f37417aea..e3f990d888d7 100644 --- a/drivers/firmware/stratix10-svc.c +++ b/drivers/firmware/stratix10-svc.c @@ -967,18 +967,15 @@ int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg) /* first client will create kernel thread */ if (!chan->ctrl->task) { chan->ctrl->task = - kthread_create_on_node(svc_normal_to_secure_thread, - (void *)chan->ctrl, - cpu_to_node(cpu), - "svc_smc_hvc_thread"); + kthread_run_on_cpu(svc_normal_to_secure_thread, + (void *)chan->ctrl, + cpu, "svc_smc_hvc_thread"); if (IS_ERR(chan->ctrl->task)) { dev_err(chan->ctrl->dev, "failed to create svc_smc_hvc_thread\n"); kfree(p_data); return -EINVAL; } - kthread_bind(chan->ctrl->task, cpu); - wake_up_process(chan->ctrl->task); } pr_debug("%s: sent P-va=%p, P-com=%x, P-size=%u\n", __func__, @@ -1227,22 +1224,28 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev) if (!svc->intel_svc_fcs) { dev_err(dev, "failed to allocate %s device\n", INTEL_FCS); ret = -ENOMEM; - goto err_unregister_dev; + goto err_unregister_rsu_dev; } ret = platform_device_add(svc->intel_svc_fcs); if (ret) { platform_device_put(svc->intel_svc_fcs); - goto err_unregister_dev; + goto err_unregister_rsu_dev; } + ret = 
of_platform_default_populate(dev_of_node(dev), NULL, dev); + if (ret) + goto err_unregister_fcs_dev; + dev_set_drvdata(dev, svc); pr_info("Intel Service Layer Driver Initialized\n"); return 0; -err_unregister_dev: +err_unregister_fcs_dev: + platform_device_unregister(svc->intel_svc_fcs); +err_unregister_rsu_dev: platform_device_unregister(svc->stratix10_svc_rsu); err_free_kfifo: kfifo_free(&controller->svc_fifo); @@ -1256,6 +1259,8 @@ static void stratix10_svc_drv_remove(struct platform_device *pdev) struct stratix10_svc *svc = dev_get_drvdata(&pdev->dev); struct stratix10_svc_controller *ctrl = platform_get_drvdata(pdev); + of_platform_depopulate(ctrl->dev); + platform_device_unregister(svc->intel_svc_fcs); platform_device_unregister(svc->stratix10_svc_rsu); @@ -1271,7 +1276,7 @@ static void stratix10_svc_drv_remove(struct platform_device *pdev) static struct platform_driver stratix10_svc_driver = { .probe = stratix10_svc_drv_probe, - .remove_new = stratix10_svc_drv_remove, + .remove = stratix10_svc_drv_remove, .driver = { .name = "stratix10-svc", .of_match_table = stratix10_svc_drv_match, diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c index 880ffcb50088..7c5c03f274b9 100644 --- a/drivers/firmware/sysfb.c +++ b/drivers/firmware/sysfb.c @@ -39,6 +39,8 @@ static struct platform_device *pd; static DEFINE_MUTEX(disable_lock); static bool disabled; +static struct device *sysfb_parent_dev(const struct screen_info *si); + static bool sysfb_unregister(void) { if (IS_ERR_OR_NULL(pd)) @@ -52,6 +54,7 @@ static bool sysfb_unregister(void) /** * sysfb_disable() - disable the Generic System Framebuffers support + * @dev: the device to check if non-NULL * * This disables the registration of system framebuffer devices that match the * generic drivers that make use of the system framebuffer set up by firmware. @@ -61,17 +64,42 @@ static bool sysfb_unregister(void) * Context: The function can sleep. A @disable_lock mutex is acquired to serialize * against sysfb_init(), that registers a system framebuffer device. */ -void sysfb_disable(void) +void sysfb_disable(struct device *dev) { + struct screen_info *si = &screen_info; + struct device *parent; + mutex_lock(&disable_lock); - sysfb_unregister(); - disabled = true; + parent = sysfb_parent_dev(si); + if (!dev || !parent || dev == parent) { + sysfb_unregister(); + disabled = true; + } mutex_unlock(&disable_lock); } EXPORT_SYMBOL_GPL(sysfb_disable); +/** + * sysfb_handles_screen_info() - reports if sysfb handles the global screen_info + * + * Callers can use sysfb_handles_screen_info() to determine whether the Generic + * System Framebuffers (sysfb) can handle the global screen_info data structure + * or not. Drivers might need this information to know if they have to setup the + * system framebuffer, or if they have to delegate this action to sysfb instead. + * + * Returns: + * True if sysfb handles the global screen_info data structure. 
+ */ +bool sysfb_handles_screen_info(void) +{ + const struct screen_info *si = &screen_info; + + return !!screen_info_video_type(si); +} +EXPORT_SYMBOL_GPL(sysfb_handles_screen_info); + #if defined(CONFIG_PCI) -static __init bool sysfb_pci_dev_is_enabled(struct pci_dev *pdev) +static bool sysfb_pci_dev_is_enabled(struct pci_dev *pdev) { /* * TODO: Try to integrate this code into the PCI subsystem @@ -87,13 +115,13 @@ static __init bool sysfb_pci_dev_is_enabled(struct pci_dev *pdev) return true; } #else -static __init bool sysfb_pci_dev_is_enabled(struct pci_dev *pdev) +static bool sysfb_pci_dev_is_enabled(struct pci_dev *pdev) { return false; } #endif -static __init struct device *sysfb_parent_dev(const struct screen_info *si) +static struct device *sysfb_parent_dev(const struct screen_info *si) { struct pci_dev *pdev; @@ -101,8 +129,10 @@ static __init struct device *sysfb_parent_dev(const struct screen_info *si) if (IS_ERR(pdev)) { return ERR_CAST(pdev); } else if (pdev) { - if (!sysfb_pci_dev_is_enabled(pdev)) + if (!sysfb_pci_dev_is_enabled(pdev)) { + pci_dev_put(pdev); return ERR_PTR(-ENODEV); + } return &pdev->dev; } @@ -137,7 +167,7 @@ static __init int sysfb_init(void) if (compatible) { pd = sysfb_create_simplefb(si, &mode, parent); if (!IS_ERR(pd)) - goto unlock_mutex; + goto put_device; } /* if the FB is incompatible, create a legacy framebuffer device */ @@ -155,7 +185,7 @@ static __init int sysfb_init(void) pd = platform_device_alloc(name, 0); if (!pd) { ret = -ENOMEM; - goto unlock_mutex; + goto put_device; } pd->dev.parent = parent; @@ -170,9 +200,11 @@ static __init int sysfb_init(void) if (ret) goto err; - goto unlock_mutex; + goto put_device; err: platform_device_put(pd); +put_device: + put_device(parent); unlock_mutex: mutex_unlock(&disable_lock); return ret; diff --git a/drivers/firmware/sysfb_simplefb.c b/drivers/firmware/sysfb_simplefb.c index 75a186bf8f8e..592d8a644619 100644 --- a/drivers/firmware/sysfb_simplefb.c +++ b/drivers/firmware/sysfb_simplefb.c @@ -35,36 +35,7 @@ __init bool sysfb_parse_mode(const struct screen_info *si, if (type != VIDEO_TYPE_VLFB && type != VIDEO_TYPE_EFI) return false; - /* - * The meaning of depth and bpp for direct-color formats is - * inconsistent: - * - * - DRM format info specifies depth as the number of color - * bits; including alpha, but not including filler bits. - * - Linux' EFI platform code computes lfb_depth from the - * individual color channels, including the reserved bits. - * - VBE 1.1 defines lfb_depth for XRGB1555 as 16, but later - * versions use 15. - * - On the kernel command line, 'bpp' of 32 is usually - * XRGB8888 including the filler bits, but 15 is XRGB1555 - * not including the filler bit. - * - * It's not easily possible to fix this in struct screen_info, - * as this could break UAPI. The best solution is to compute - * bits_per_pixel from the color bits, reserved bits and - * reported lfb_depth, whichever is highest. In the loop below, - * ignore simplefb formats with alpha bits, as EFI and VESA - * don't specify alpha channels. 
- */ - if (si->lfb_depth > 8) { - bits_per_pixel = max(max3(si->red_size + si->red_pos, - si->green_size + si->green_pos, - si->blue_size + si->blue_pos), - si->rsvd_size + si->rsvd_pos); - bits_per_pixel = max_t(u32, bits_per_pixel, si->lfb_depth); - } else { - bits_per_pixel = si->lfb_depth; - } + bits_per_pixel = __screen_info_lfb_bits_per_pixel(si); for (i = 0; i < ARRAY_SIZE(formats); ++i) { const struct simplefb_format *f = &formats[i]; diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c index c1590d3aa9cb..c3a1dc344961 100644 --- a/drivers/firmware/tegra/bpmp.c +++ b/drivers/firmware/tegra/bpmp.c @@ -24,12 +24,6 @@ #define MSG_RING BIT(1) #define TAG_SZ 32 -static inline struct tegra_bpmp * -mbox_client_to_bpmp(struct mbox_client *client) -{ - return container_of(client, struct tegra_bpmp, mbox.client); -} - static inline const struct tegra_bpmp_ops * channel_to_ops(struct tegra_bpmp_channel *channel) { diff --git a/drivers/firmware/thead,th1520-aon.c b/drivers/firmware/thead,th1520-aon.c new file mode 100644 index 000000000000..38f812ac9920 --- /dev/null +++ b/drivers/firmware/thead,th1520-aon.c @@ -0,0 +1,250 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Alibaba Group Holding Limited. + * Copyright (c) 2024 Samsung Electronics Co., Ltd. + * Author: Michal Wilczynski <m.wilczynski@samsung.com> + */ + +#include <linux/device.h> +#include <linux/firmware/thead/thead,th1520-aon.h> +#include <linux/mailbox_client.h> +#include <linux/mailbox_controller.h> +#include <linux/slab.h> + +#define MAX_RX_TIMEOUT (msecs_to_jiffies(3000)) +#define MAX_TX_TIMEOUT 500 + +struct th1520_aon_chan { + struct mbox_chan *ch; + struct th1520_aon_rpc_ack_common ack_msg; + struct mbox_client cl; + struct completion done; + + /* make sure only one RPC is performed at a time */ + struct mutex transaction_lock; +}; + +struct th1520_aon_msg_req_set_resource_power_mode { + struct th1520_aon_rpc_msg_hdr hdr; + u16 resource; + u16 mode; + u16 reserved[10]; +} __packed __aligned(1); + +/* + * This type is used to indicate error response for most functions. 
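+ * The raw codes below are translated to Linux errnos via
+ * th1520_aon_linux_errmap[]; out-of-range values map to -EIO.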
+ */ +enum th1520_aon_error_codes { + LIGHT_AON_ERR_NONE = 0, /* Success */ + LIGHT_AON_ERR_VERSION = 1, /* Incompatible API version */ + LIGHT_AON_ERR_CONFIG = 2, /* Configuration error */ + LIGHT_AON_ERR_PARM = 3, /* Bad parameter */ + LIGHT_AON_ERR_NOACCESS = 4, /* Permission error (no access) */ + LIGHT_AON_ERR_LOCKED = 5, /* Permission error (locked) */ + LIGHT_AON_ERR_UNAVAILABLE = 6, /* Unavailable (out of resources) */ + LIGHT_AON_ERR_NOTFOUND = 7, /* Not found */ + LIGHT_AON_ERR_NOPOWER = 8, /* No power */ + LIGHT_AON_ERR_IPC = 9, /* Generic IPC error */ + LIGHT_AON_ERR_BUSY = 10, /* Resource is currently busy/active */ + LIGHT_AON_ERR_FAIL = 11, /* General I/O failure */ + LIGHT_AON_ERR_LAST +}; + +static int th1520_aon_linux_errmap[LIGHT_AON_ERR_LAST] = { + 0, /* LIGHT_AON_ERR_NONE */ + -EINVAL, /* LIGHT_AON_ERR_VERSION */ + -EINVAL, /* LIGHT_AON_ERR_CONFIG */ + -EINVAL, /* LIGHT_AON_ERR_PARM */ + -EACCES, /* LIGHT_AON_ERR_NOACCESS */ + -EACCES, /* LIGHT_AON_ERR_LOCKED */ + -ERANGE, /* LIGHT_AON_ERR_UNAVAILABLE */ + -EEXIST, /* LIGHT_AON_ERR_NOTFOUND */ + -EPERM, /* LIGHT_AON_ERR_NOPOWER */ + -EPIPE, /* LIGHT_AON_ERR_IPC */ + -EBUSY, /* LIGHT_AON_ERR_BUSY */ + -EIO, /* LIGHT_AON_ERR_FAIL */ +}; + +static inline int th1520_aon_to_linux_errno(int errno) +{ + if (errno >= LIGHT_AON_ERR_NONE && errno < LIGHT_AON_ERR_LAST) + return th1520_aon_linux_errmap[errno]; + + return -EIO; +} + +static void th1520_aon_rx_callback(struct mbox_client *c, void *rx_msg) +{ + struct th1520_aon_chan *aon_chan = + container_of(c, struct th1520_aon_chan, cl); + struct th1520_aon_rpc_msg_hdr *hdr = + (struct th1520_aon_rpc_msg_hdr *)rx_msg; + u8 recv_size = sizeof(struct th1520_aon_rpc_msg_hdr) + hdr->size; + + if (recv_size != sizeof(struct th1520_aon_rpc_ack_common)) { + dev_err(c->dev, "Invalid ack size, not completing\n"); + return; + } + + memcpy(&aon_chan->ack_msg, rx_msg, recv_size); + complete(&aon_chan->done); +} + +/** + * th1520_aon_call_rpc() - Send an RPC request to the TH1520 AON subsystem + * @aon_chan: Pointer to the AON channel structure + * @msg: Pointer to the message (RPC payload) that will be sent + * + * This function sends an RPC message to the TH1520 AON subsystem via mailbox. + * It takes the provided @msg buffer, formats it with version and service flags, + * then blocks until the RPC completes or times out. The completion is signaled + * by the `aon_chan->done` completion, which is waited upon for a duration + * defined by `MAX_RX_TIMEOUT`. + * + * Return: + * * 0 on success + * * -ETIMEDOUT if the RPC call times out + * * A negative error code if the mailbox send fails or if AON responds with + * a non-zero error code (converted via th1520_aon_to_linux_errno()). 
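+ *
+ * See th1520_aon_power_update() below for a complete caller: the request is
+ * built around a struct th1520_aon_rpc_msg_hdr placed first in the message,
+ * and the whole buffer is handed to this function.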
+ */ +int th1520_aon_call_rpc(struct th1520_aon_chan *aon_chan, void *msg) +{ + struct th1520_aon_rpc_msg_hdr *hdr = msg; + int ret; + + mutex_lock(&aon_chan->transaction_lock); + reinit_completion(&aon_chan->done); + + RPC_SET_VER(hdr, TH1520_AON_RPC_VERSION); + RPC_SET_SVC_ID(hdr, hdr->svc); + RPC_SET_SVC_FLAG_MSG_TYPE(hdr, RPC_SVC_MSG_TYPE_DATA); + RPC_SET_SVC_FLAG_ACK_TYPE(hdr, RPC_SVC_MSG_NEED_ACK); + + ret = mbox_send_message(aon_chan->ch, msg); + if (ret < 0) { + dev_err(aon_chan->cl.dev, "RPC send msg failed: %d\n", ret); + goto out; + } + + if (!wait_for_completion_timeout(&aon_chan->done, MAX_RX_TIMEOUT)) { + dev_err(aon_chan->cl.dev, "RPC send msg timeout\n"); + mutex_unlock(&aon_chan->transaction_lock); + return -ETIMEDOUT; + } + + ret = aon_chan->ack_msg.err_code; + +out: + mutex_unlock(&aon_chan->transaction_lock); + + return th1520_aon_to_linux_errno(ret); +} +EXPORT_SYMBOL_GPL(th1520_aon_call_rpc); + +/** + * th1520_aon_power_update() - Change power state of a resource via TH1520 AON + * @aon_chan: Pointer to the AON channel structure + * @rsrc: Resource ID whose power state needs to be updated + * @power_on: Boolean indicating whether the resource should be powered on (true) + * or powered off (false) + * + * This function requests the TH1520 AON subsystem to set the power mode of the + * given resource (@rsrc) to either on or off. It constructs the message in + * `struct th1520_aon_msg_req_set_resource_power_mode` and then invokes + * th1520_aon_call_rpc() to make the request. If the AON call fails, an error + * message is logged along with the specific return code. + * + * Return: + * * 0 on success + * * A negative error code in case of failures (propagated from + * th1520_aon_call_rpc()). + */ +int th1520_aon_power_update(struct th1520_aon_chan *aon_chan, u16 rsrc, + bool power_on) +{ + struct th1520_aon_msg_req_set_resource_power_mode msg = {}; + struct th1520_aon_rpc_msg_hdr *hdr = &msg.hdr; + int ret; + + hdr->svc = TH1520_AON_RPC_SVC_PM; + hdr->func = TH1520_AON_PM_FUNC_SET_RESOURCE_POWER_MODE; + hdr->size = TH1520_AON_RPC_MSG_NUM; + + RPC_SET_BE16(&msg.resource, 0, rsrc); + RPC_SET_BE16(&msg.resource, 2, + (power_on ? TH1520_AON_PM_PW_MODE_ON : + TH1520_AON_PM_PW_MODE_OFF)); + + ret = th1520_aon_call_rpc(aon_chan, &msg); + if (ret) + dev_err(aon_chan->cl.dev, "failed to power %s resource %d ret %d\n", + power_on ? 
"up" : "off", rsrc, ret); + + return ret; +} +EXPORT_SYMBOL_GPL(th1520_aon_power_update); + +/** + * th1520_aon_init() - Initialize TH1520 AON firmware protocol interface + * @dev: Device pointer for the AON subsystem + * + * This function initializes the TH1520 AON firmware protocol interface by: + * - Allocating and initializing the AON channel structure + * - Setting up the mailbox client + * - Requesting the AON mailbox channel + * - Initializing synchronization primitives + * + * Return: + * * Valid pointer to th1520_aon_chan structure on success + * * ERR_PTR(-ENOMEM) if memory allocation fails + * * ERR_PTR() with other negative error codes from mailbox operations + */ +struct th1520_aon_chan *th1520_aon_init(struct device *dev) +{ + struct th1520_aon_chan *aon_chan; + struct mbox_client *cl; + int ret; + + aon_chan = kzalloc(sizeof(*aon_chan), GFP_KERNEL); + if (!aon_chan) + return ERR_PTR(-ENOMEM); + + cl = &aon_chan->cl; + cl->dev = dev; + cl->tx_block = true; + cl->tx_tout = MAX_TX_TIMEOUT; + cl->rx_callback = th1520_aon_rx_callback; + + aon_chan->ch = mbox_request_channel_byname(cl, "aon"); + if (IS_ERR(aon_chan->ch)) { + dev_err(dev, "Failed to request aon mbox chan\n"); + ret = PTR_ERR(aon_chan->ch); + kfree(aon_chan); + return ERR_PTR(ret); + } + + mutex_init(&aon_chan->transaction_lock); + init_completion(&aon_chan->done); + + return aon_chan; +} +EXPORT_SYMBOL_GPL(th1520_aon_init); + +/** + * th1520_aon_deinit() - Clean up TH1520 AON firmware protocol interface + * @aon_chan: Pointer to the AON channel structure to clean up + * + * This function cleans up resources allocated by th1520_aon_init(): + * - Frees the mailbox channel + * - Frees the AON channel + */ +void th1520_aon_deinit(struct th1520_aon_chan *aon_chan) +{ + mbox_free_channel(aon_chan->ch); + kfree(aon_chan); +} +EXPORT_SYMBOL_GPL(th1520_aon_deinit); + +MODULE_AUTHOR("Michal Wilczynski <m.wilczynski@samsung.com>"); +MODULE_DESCRIPTION("T-HEAD TH1520 Always-On firmware protocol library"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c index 8b9a2556de16..ae5fd1936ad3 100644 --- a/drivers/firmware/ti_sci.c +++ b/drivers/firmware/ti_sci.c @@ -2,13 +2,14 @@ /* * Texas Instruments System Control Interface Protocol Driver * - * Copyright (C) 2015-2022 Texas Instruments Incorporated - https://www.ti.com/ + * Copyright (C) 2015-2025 Texas Instruments Incorporated - https://www.ti.com/ * Nishanth Menon */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/bitmap.h> +#include <linux/cpu.h> #include <linux/debugfs.h> #include <linux/export.h> #include <linux/io.h> @@ -19,11 +20,14 @@ #include <linux/of.h> #include <linux/of_platform.h> #include <linux/platform_device.h> +#include <linux/pm_qos.h> #include <linux/property.h> #include <linux/semaphore.h> #include <linux/slab.h> #include <linux/soc/ti/ti-msgmgr.h> #include <linux/soc/ti/ti_sci_protocol.h> +#include <linux/suspend.h> +#include <linux/sys_soc.h> #include <linux/reboot.h> #include "ti_sci.h" @@ -87,7 +91,6 @@ struct ti_sci_desc { * struct ti_sci_info - Structure representing a TI SCI instance * @dev: Device pointer * @desc: SoC description for this instance - * @nb: Reboot Notifier block * @d: Debugfs file entry * @debug_region: Memory region where the debug message are available * @debug_region_size: Debug region size @@ -99,11 +102,11 @@ struct ti_sci_desc { * @minfo: Message info * @node: list head * @host_id: Host ID + * @fw_caps: FW/SoC low power capabilities * @users: Number of users of this instance */ 
struct ti_sci_info { struct device *dev; - struct notifier_block nb; const struct ti_sci_desc *desc; struct dentry *d; void __iomem *debug_region; @@ -116,13 +119,13 @@ struct ti_sci_info { struct ti_sci_xfers_info minfo; struct list_head node; u8 host_id; + u64 fw_caps; /* protected by ti_sci_list_mutex */ int users; }; #define cl_to_ti_sci_info(c) container_of(c, struct ti_sci_info, cl) #define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle) -#define reboot_to_ti_sci_info(n) container_of(n, struct ti_sci_info, nb) #ifdef CONFIG_DEBUG_FS @@ -1654,6 +1657,364 @@ fail: return ret; } +/** + * ti_sci_cmd_prepare_sleep() - Prepare system for system suspend + * @handle: pointer to TI SCI handle + * @mode: Device identifier + * @ctx_lo: Low part of address for context save + * @ctx_hi: High part of address for context save + * @debug_flags: Debug flags to pass to firmware + * + * Return: 0 if all went well, else returns appropriate error value. + */ +static int ti_sci_cmd_prepare_sleep(const struct ti_sci_handle *handle, u8 mode, + u32 ctx_lo, u32 ctx_hi, u32 debug_flags) +{ + struct ti_sci_info *info; + struct ti_sci_msg_req_prepare_sleep *req; + struct ti_sci_msg_hdr *resp; + struct ti_sci_xfer *xfer; + struct device *dev; + int ret = 0; + + if (IS_ERR(handle)) + return PTR_ERR(handle); + if (!handle) + return -EINVAL; + + info = handle_to_ti_sci_info(handle); + dev = info->dev; + + xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PREPARE_SLEEP, + TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, + sizeof(*req), sizeof(*resp)); + if (IS_ERR(xfer)) { + ret = PTR_ERR(xfer); + dev_err(dev, "Message alloc failed(%d)\n", ret); + return ret; + } + + req = (struct ti_sci_msg_req_prepare_sleep *)xfer->xfer_buf; + req->mode = mode; + req->ctx_lo = ctx_lo; + req->ctx_hi = ctx_hi; + req->debug_flags = debug_flags; + + ret = ti_sci_do_xfer(info, xfer); + if (ret) { + dev_err(dev, "Mbox send fail %d\n", ret); + goto fail; + } + + resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; + + if (!ti_sci_is_response_ack(resp)) { + dev_err(dev, "Failed to prepare sleep\n"); + ret = -ENODEV; + } + +fail: + ti_sci_put_one_xfer(&info->minfo, xfer); + + return ret; +} + +/** + * ti_sci_msg_cmd_query_fw_caps() - Get the FW/SoC capabilities + * @handle: Pointer to TI SCI handle + * @fw_caps: Each bit in fw_caps indicating one FW/SOC capability + * + * Check if the firmware supports any optional low power modes. + * Old revisions of TIFS (< 08.04) will NACK the request which results in + * -ENODEV being returned. + * + * Return: 0 if all went well, else returns appropriate error value. 
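+ *
+ * ti_sci_probe() caches the result in info->fw_caps, and ti_sci_setup_ops()
+ * only exposes the LPM pm_ops when MSG_FLAG_CAPS_LPM_DM_MANAGED is set there.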
+ */ +static int ti_sci_msg_cmd_query_fw_caps(const struct ti_sci_handle *handle, + u64 *fw_caps) +{ + struct ti_sci_info *info; + struct ti_sci_xfer *xfer; + struct ti_sci_msg_resp_query_fw_caps *resp; + struct device *dev; + int ret = 0; + + if (IS_ERR(handle)) + return PTR_ERR(handle); + if (!handle) + return -EINVAL; + + info = handle_to_ti_sci_info(handle); + dev = info->dev; + + xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_FW_CAPS, + TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, + sizeof(struct ti_sci_msg_hdr), + sizeof(*resp)); + if (IS_ERR(xfer)) { + ret = PTR_ERR(xfer); + dev_err(dev, "Message alloc failed(%d)\n", ret); + return ret; + } + + ret = ti_sci_do_xfer(info, xfer); + if (ret) { + dev_err(dev, "Mbox send fail %d\n", ret); + goto fail; + } + + resp = (struct ti_sci_msg_resp_query_fw_caps *)xfer->xfer_buf; + + if (!ti_sci_is_response_ack(resp)) { + dev_err(dev, "Failed to get capabilities\n"); + ret = -ENODEV; + goto fail; + } + + if (fw_caps) + *fw_caps = resp->fw_caps; + +fail: + ti_sci_put_one_xfer(&info->minfo, xfer); + + return ret; +} + +/** + * ti_sci_cmd_set_io_isolation() - Enable IO isolation in LPM + * @handle: Pointer to TI SCI handle + * @state: The desired state of the IO isolation + * + * Return: 0 if all went well, else returns appropriate error value. + */ +static int ti_sci_cmd_set_io_isolation(const struct ti_sci_handle *handle, + u8 state) +{ + struct ti_sci_info *info; + struct ti_sci_msg_req_set_io_isolation *req; + struct ti_sci_msg_hdr *resp; + struct ti_sci_xfer *xfer; + struct device *dev; + int ret = 0; + + if (IS_ERR(handle)) + return PTR_ERR(handle); + if (!handle) + return -EINVAL; + + info = handle_to_ti_sci_info(handle); + dev = info->dev; + + xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_IO_ISOLATION, + TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, + sizeof(*req), sizeof(*resp)); + if (IS_ERR(xfer)) { + ret = PTR_ERR(xfer); + dev_err(dev, "Message alloc failed(%d)\n", ret); + return ret; + } + req = (struct ti_sci_msg_req_set_io_isolation *)xfer->xfer_buf; + req->state = state; + + ret = ti_sci_do_xfer(info, xfer); + if (ret) { + dev_err(dev, "Mbox send fail %d\n", ret); + goto fail; + } + + resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; + + if (!ti_sci_is_response_ack(resp)) { + dev_err(dev, "Failed to set IO isolation\n"); + ret = -ENODEV; + } + +fail: + ti_sci_put_one_xfer(&info->minfo, xfer); + + return ret; +} + +/** + * ti_sci_msg_cmd_lpm_wake_reason() - Get the wakeup source from LPM + * @handle: Pointer to TI SCI handle + * @source: The wakeup source that woke the SoC from LPM + * @timestamp: Timestamp of the wakeup event + * @pin: The pin that has triggered wake up + * @mode: The last entered low power mode + * + * Return: 0 if all went well, else returns appropriate error value. 
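+ *
+ * All four output pointers are optional and may be NULL. ti_sci_resume_noirq()
+ * only logs the reported values and does not fail resume on error, since the
+ * wake reason is informational.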
+ */ +static int ti_sci_msg_cmd_lpm_wake_reason(const struct ti_sci_handle *handle, + u32 *source, u64 *timestamp, u8 *pin, u8 *mode) +{ + struct ti_sci_info *info; + struct ti_sci_xfer *xfer; + struct ti_sci_msg_resp_lpm_wake_reason *resp; + struct device *dev; + int ret = 0; + + if (IS_ERR(handle)) + return PTR_ERR(handle); + if (!handle) + return -EINVAL; + + info = handle_to_ti_sci_info(handle); + dev = info->dev; + + xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_LPM_WAKE_REASON, + TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, + sizeof(struct ti_sci_msg_hdr), + sizeof(*resp)); + if (IS_ERR(xfer)) { + ret = PTR_ERR(xfer); + dev_err(dev, "Message alloc failed(%d)\n", ret); + return ret; + } + + ret = ti_sci_do_xfer(info, xfer); + if (ret) { + dev_err(dev, "Mbox send fail %d\n", ret); + goto fail; + } + + resp = (struct ti_sci_msg_resp_lpm_wake_reason *)xfer->xfer_buf; + + if (!ti_sci_is_response_ack(resp)) { + dev_err(dev, "Failed to get wake reason\n"); + ret = -ENODEV; + goto fail; + } + + if (source) + *source = resp->wake_source; + if (timestamp) + *timestamp = resp->wake_timestamp; + if (pin) + *pin = resp->wake_pin; + if (mode) + *mode = resp->mode; + +fail: + ti_sci_put_one_xfer(&info->minfo, xfer); + + return ret; +} + +/** + * ti_sci_cmd_set_device_constraint() - Set LPM constraint on behalf of a device + * @handle: pointer to TI SCI handle + * @id: Device identifier + * @state: The desired state of device constraint: set or clear + * + * Return: 0 if all went well, else returns appropriate error value. + */ +static int ti_sci_cmd_set_device_constraint(const struct ti_sci_handle *handle, + u32 id, u8 state) +{ + struct ti_sci_info *info; + struct ti_sci_msg_req_lpm_set_device_constraint *req; + struct ti_sci_msg_hdr *resp; + struct ti_sci_xfer *xfer; + struct device *dev; + int ret = 0; + + if (IS_ERR(handle)) + return PTR_ERR(handle); + if (!handle) + return -EINVAL; + + info = handle_to_ti_sci_info(handle); + dev = info->dev; + + xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_LPM_SET_DEVICE_CONSTRAINT, + TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, + sizeof(*req), sizeof(*resp)); + if (IS_ERR(xfer)) { + ret = PTR_ERR(xfer); + dev_err(dev, "Message alloc failed(%d)\n", ret); + return ret; + } + req = (struct ti_sci_msg_req_lpm_set_device_constraint *)xfer->xfer_buf; + req->id = id; + req->state = state; + + ret = ti_sci_do_xfer(info, xfer); + if (ret) { + dev_err(dev, "Mbox send fail %d\n", ret); + goto fail; + } + + resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; + + if (!ti_sci_is_response_ack(resp)) { + dev_err(dev, "Failed to set device constraint\n"); + ret = -ENODEV; + } + +fail: + ti_sci_put_one_xfer(&info->minfo, xfer); + + return ret; +} + +/** + * ti_sci_cmd_set_latency_constraint() - Set LPM resume latency constraint + * @handle: pointer to TI SCI handle + * @latency: maximum acceptable latency (in ms) to wake up from LPM + * @state: The desired state of latency constraint: set or clear + * + * Return: 0 if all went well, else returns appropriate error value. 
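+ *
+ * The firmware expects milliseconds, so ti_sci_suspend() converts the
+ * aggregated CPU DEV_PM_QOS_RESUME_LATENCY value from microseconds before
+ * calling in here with TISCI_MSG_CONSTRAINT_SET.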
+ */ +static int ti_sci_cmd_set_latency_constraint(const struct ti_sci_handle *handle, + u16 latency, u8 state) +{ + struct ti_sci_info *info; + struct ti_sci_msg_req_lpm_set_latency_constraint *req; + struct ti_sci_msg_hdr *resp; + struct ti_sci_xfer *xfer; + struct device *dev; + int ret = 0; + + if (IS_ERR(handle)) + return PTR_ERR(handle); + if (!handle) + return -EINVAL; + + info = handle_to_ti_sci_info(handle); + dev = info->dev; + + xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_LPM_SET_LATENCY_CONSTRAINT, + TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, + sizeof(*req), sizeof(*resp)); + if (IS_ERR(xfer)) { + ret = PTR_ERR(xfer); + dev_err(dev, "Message alloc failed(%d)\n", ret); + return ret; + } + req = (struct ti_sci_msg_req_lpm_set_latency_constraint *)xfer->xfer_buf; + req->latency = latency; + req->state = state; + + ret = ti_sci_do_xfer(info, xfer); + if (ret) { + dev_err(dev, "Mbox send fail %d\n", ret); + goto fail; + } + + resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; + + if (!ti_sci_is_response_ack(resp)) { + dev_err(dev, "Failed to set device constraint\n"); + ret = -ENODEV; + } + +fail: + ti_sci_put_one_xfer(&info->minfo, xfer); + + return ret; +} + static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle) { struct ti_sci_info *info; @@ -2796,6 +3157,7 @@ static void ti_sci_setup_ops(struct ti_sci_info *info) struct ti_sci_core_ops *core_ops = &ops->core_ops; struct ti_sci_dev_ops *dops = &ops->dev_ops; struct ti_sci_clk_ops *cops = &ops->clk_ops; + struct ti_sci_pm_ops *pmops = &ops->pm_ops; struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops; struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops; struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops; @@ -2835,6 +3197,13 @@ static void ti_sci_setup_ops(struct ti_sci_info *info) cops->set_freq = ti_sci_cmd_clk_set_freq; cops->get_freq = ti_sci_cmd_clk_get_freq; + if (info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED) { + pr_debug("detected DM managed LPM in fw_caps\n"); + pmops->lpm_wake_reason = ti_sci_msg_cmd_lpm_wake_reason; + pmops->set_device_constraint = ti_sci_cmd_set_device_constraint; + pmops->set_latency_constraint = ti_sci_cmd_set_latency_constraint; + } + rm_core_ops->get_range = ti_sci_cmd_get_resource_range; rm_core_ops->get_range_from_shost = ti_sci_cmd_get_resource_range_from_shost; @@ -3254,10 +3623,9 @@ devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev, } EXPORT_SYMBOL_GPL(devm_ti_sci_get_resource); -static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode, - void *cmd) +static int tisci_reboot_handler(struct sys_off_data *data) { - struct ti_sci_info *info = reboot_to_ti_sci_info(nb); + struct ti_sci_info *info = data->cb_data; const struct ti_sci_handle *handle = &info->handle; ti_sci_cmd_core_reboot(handle); @@ -3266,6 +3634,119 @@ static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode, return NOTIFY_BAD; } +static int ti_sci_prepare_system_suspend(struct ti_sci_info *info) +{ + /* + * Map and validate the target Linux suspend state to TISCI LPM. + * Default is to let Device Manager select the low power mode. + */ + switch (pm_suspend_target_state) { + case PM_SUSPEND_MEM: + if (info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED) { + /* + * For the DM_MANAGED mode the context is reserved for + * internal use and can be 0 + */ + return ti_sci_cmd_prepare_sleep(&info->handle, + TISCI_MSG_VALUE_SLEEP_MODE_DM_MANAGED, + 0, 0, 0); + } else { + /* DM Managed is not supported by the firmware. 
*/ + dev_err(info->dev, "Suspend to memory is not supported by the firmware\n"); + return -EOPNOTSUPP; + } + break; + default: + /* + * Do not fail if we don't have action to take for a + * specific suspend mode. + */ + return 0; + } +} + +static int __maybe_unused ti_sci_suspend(struct device *dev) +{ + struct ti_sci_info *info = dev_get_drvdata(dev); + struct device *cpu_dev, *cpu_dev_max = NULL; + s32 val, cpu_lat = 0; + u16 cpu_lat_ms; + int i, ret; + + if (info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED) { + for_each_possible_cpu(i) { + cpu_dev = get_cpu_device(i); + val = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_RESUME_LATENCY); + if (val != PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) { + cpu_lat = max(cpu_lat, val); + cpu_dev_max = cpu_dev; + } + } + if (cpu_dev_max) { + /* + * PM QoS latency unit is usecs, device manager uses msecs. + * Convert to msecs and round down for device manager. + */ + cpu_lat_ms = cpu_lat / USEC_PER_MSEC; + dev_dbg(cpu_dev_max, "%s: sending max CPU latency=%u ms\n", __func__, + cpu_lat_ms); + ret = ti_sci_cmd_set_latency_constraint(&info->handle, + cpu_lat_ms, + TISCI_MSG_CONSTRAINT_SET); + if (ret) + return ret; + } + } + + ret = ti_sci_prepare_system_suspend(info); + if (ret) + return ret; + + return 0; +} + +static int __maybe_unused ti_sci_suspend_noirq(struct device *dev) +{ + struct ti_sci_info *info = dev_get_drvdata(dev); + int ret = 0; + + ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_ENABLE); + if (ret) + return ret; + + return 0; +} + +static int __maybe_unused ti_sci_resume_noirq(struct device *dev) +{ + struct ti_sci_info *info = dev_get_drvdata(dev); + int ret = 0; + u32 source; + u64 time; + u8 pin; + u8 mode; + + ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_DISABLE); + if (ret) + return ret; + + ret = ti_sci_msg_cmd_lpm_wake_reason(&info->handle, &source, &time, &pin, &mode); + /* Do not fail to resume on error as the wake reason is not critical */ + if (!ret) + dev_info(dev, "ti_sci: wakeup source:0x%x, pin:0x%x, mode:0x%x\n", + source, pin, mode); + + return 0; +} + +static const struct dev_pm_ops ti_sci_pm_ops = { +#ifdef CONFIG_PM_SLEEP + .suspend = ti_sci_suspend, + .suspend_noirq = ti_sci_suspend_noirq, + .resume_noirq = ti_sci_resume_noirq, +#endif +}; + /* Description for K2G */ static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = { .default_host_id = 2, @@ -3303,7 +3784,6 @@ static int ti_sci_probe(struct platform_device *pdev) struct mbox_client *cl; int ret = -EINVAL; int i; - int reboot = 0; u32 h_id; desc = device_get_match_data(dev); @@ -3327,8 +3807,6 @@ static int ti_sci_probe(struct platform_device *pdev) } } - reboot = of_property_read_bool(dev->of_node, - "ti,system-reboot-controller"); INIT_LIST_HEAD(&info->node); minfo = &info->minfo; @@ -3397,17 +3875,19 @@ static int ti_sci_probe(struct platform_device *pdev) goto out; } - ti_sci_setup_ops(info); + ti_sci_msg_cmd_query_fw_caps(&info->handle, &info->fw_caps); + dev_dbg(dev, "Detected firmware capabilities: %s%s%s\n", + info->fw_caps & MSG_FLAG_CAPS_GENERIC ? "Generic" : "", + info->fw_caps & MSG_FLAG_CAPS_LPM_PARTIAL_IO ? " Partial-IO" : "", + info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED ? 
" DM-Managed" : "" + ); - if (reboot) { - info->nb.notifier_call = tisci_reboot_handler; - info->nb.priority = 128; + ti_sci_setup_ops(info); - ret = register_restart_handler(&info->nb); - if (ret) { - dev_err(dev, "reboot registration fail(%d)\n", ret); - goto out; - } + ret = devm_register_restart_handler(dev, tisci_reboot_handler, info); + if (ret) { + dev_err(dev, "reboot registration fail(%d)\n", ret); + goto out; } dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n", @@ -3433,8 +3913,9 @@ static struct platform_driver ti_sci_driver = { .probe = ti_sci_probe, .driver = { .name = "ti-sci", - .of_match_table = of_match_ptr(ti_sci_of_match), + .of_match_table = ti_sci_of_match, .suppress_bind_attrs = true, + .pm = &ti_sci_pm_ops, }, }; module_platform_driver(ti_sci_driver); diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h index ef3a8214d002..053387d7baa0 100644 --- a/drivers/firmware/ti_sci.h +++ b/drivers/firmware/ti_sci.h @@ -4,9 +4,9 @@ * * Communication protocol with TI SCI hardware * The system works in a message response protocol - * See: http://processors.wiki.ti.com/index.php/TISCI for details + * See: https://software-dl.ti.com/tisci/esd/latest/index.html for details * - * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/ + * Copyright (C) 2015-2024 Texas Instruments Incorporated - https://www.ti.com/ */ #ifndef __TI_SCI_H @@ -19,6 +19,7 @@ #define TI_SCI_MSG_WAKE_REASON 0x0003 #define TI_SCI_MSG_GOODBYE 0x0004 #define TI_SCI_MSG_SYS_RESET 0x0005 +#define TI_SCI_MSG_QUERY_FW_CAPS 0x0022 /* Device requests */ #define TI_SCI_MSG_SET_DEVICE_STATE 0x0200 @@ -35,6 +36,13 @@ #define TI_SCI_MSG_QUERY_CLOCK_FREQ 0x010d #define TI_SCI_MSG_GET_CLOCK_FREQ 0x010e +/* Low Power Mode Requests */ +#define TI_SCI_MSG_PREPARE_SLEEP 0x0300 +#define TI_SCI_MSG_LPM_WAKE_REASON 0x0306 +#define TI_SCI_MSG_SET_IO_ISOLATION 0x0307 +#define TI_SCI_MSG_LPM_SET_DEVICE_CONSTRAINT 0x0309 +#define TI_SCI_MSG_LPM_SET_LATENCY_CONSTRAINT 0x030A + /* Resource Management Requests */ #define TI_SCI_MSG_GET_RESOURCE_RANGE 0x1500 @@ -133,6 +141,27 @@ struct ti_sci_msg_req_reboot { } __packed; /** + * struct ti_sci_msg_resp_query_fw_caps - Response for query firmware caps + * @hdr: Generic header + * @fw_caps: Each bit in fw_caps indicating one FW/SOC capability + * MSG_FLAG_CAPS_GENERIC: Generic capability (LPM not supported) + * MSG_FLAG_CAPS_LPM_PARTIAL_IO: Partial IO in LPM + * MSG_FLAG_CAPS_LPM_DM_MANAGED: LPM can be managed by DM + * + * Response to a generic message with message type TI_SCI_MSG_QUERY_FW_CAPS + * providing currently available SOC/firmware capabilities. SoC that don't + * support low power modes return only MSG_FLAG_CAPS_GENERIC capability. + */ +struct ti_sci_msg_resp_query_fw_caps { + struct ti_sci_msg_hdr hdr; +#define MSG_FLAG_CAPS_GENERIC TI_SCI_MSG_FLAG(0) +#define MSG_FLAG_CAPS_LPM_PARTIAL_IO TI_SCI_MSG_FLAG(4) +#define MSG_FLAG_CAPS_LPM_DM_MANAGED TI_SCI_MSG_FLAG(5) +#define MSG_MASK_CAPS_LPM GENMASK_ULL(4, 1) + u64 fw_caps; +} __packed; + +/** * struct ti_sci_msg_req_set_device_state - Set the desired state of the device * @hdr: Generic header * @id: Indicates which device to modify @@ -545,6 +574,118 @@ struct ti_sci_msg_resp_get_clock_freq { u64 freq_hz; } __packed; +/** + * struct tisci_msg_req_prepare_sleep - Request for TISCI_MSG_PREPARE_SLEEP. + * + * @hdr TISCI header to provide ACK/NAK flags to the host. + * @mode Low power mode to enter. + * @ctx_lo Low 32-bits of physical pointer to address to use for context save. 
+ * @ctx_hi High 32-bits of physical pointer to address to use for context save. + * @debug_flags Flags that can be set to halt the sequence during suspend or + * resume to allow JTAG connection and debug. + * + * This message is used as the first step of entering a low power mode. It + * allows configurable information, including which state to enter, to be + * easily shared from the application, as this is a non-secure message and + * therefore can be sent by anyone. + */ +struct ti_sci_msg_req_prepare_sleep { + struct ti_sci_msg_hdr hdr; + +#define TISCI_MSG_VALUE_SLEEP_MODE_DM_MANAGED 0xfd + u8 mode; + u32 ctx_lo; + u32 ctx_hi; + u32 debug_flags; +} __packed; + +/** + * struct ti_sci_msg_req_set_io_isolation - Request for TI_SCI_MSG_SET_IO_ISOLATION. + * + * @hdr: Generic header + * @state: The desired state of the IO isolation. + * + * This message is used to enable/disable IO isolation for low power modes. + * Response is generic ACK / NACK message. + */ +struct ti_sci_msg_req_set_io_isolation { + struct ti_sci_msg_hdr hdr; + u8 state; +} __packed; + +/** + * struct ti_sci_msg_resp_lpm_wake_reason - Response for TI_SCI_MSG_LPM_WAKE_REASON. + * + * @hdr: Generic header. + * @wake_source: The wake up source that woke the SoC from LPM. + * @wake_timestamp: Timestamp at which the SoC woke. + * @wake_pin: The pin that has triggered wake up. + * @mode: The last entered low power mode. + * @rsvd: Reserved for future use. + * + * Response to a generic message with message type TI_SCI_MSG_LPM_WAKE_REASON, + * used to query the wake up source, pin and entered low power mode. + */ +struct ti_sci_msg_resp_lpm_wake_reason { + struct ti_sci_msg_hdr hdr; + u32 wake_source; + u64 wake_timestamp; + u8 wake_pin; + u8 mode; + u32 rsvd[2]; +} __packed; + +/** + * struct ti_sci_msg_req_lpm_set_device_constraint - Request for + * TISCI_MSG_LPM_SET_DEVICE_CONSTRAINT. + * + * @hdr: TISCI header to provide ACK/NAK flags to the host. + * @id: Device ID of device whose constraint has to be modified. + * @state: The desired state of device constraint: set or clear. + * @rsvd: Reserved for future use. + * + * This message is used by the host to set a constraint on the device. This can be + * sent anytime after boot before the prepare sleep message. Any device can set a + * constraint on the low power mode that the SoC can enter. It allows + * configurable information to be easily shared from the application, as this + * is a non-secure message and therefore can be sent by anyone. By setting a + * constraint, the device ensures that it will not be powered off or reset in + * the selected mode. Note: Access Restriction: Exclusivity flag of Device will + * be honored. If some other host already has a constraint on this device ID, + * NACK will be returned. + */ +struct ti_sci_msg_req_lpm_set_device_constraint { + struct ti_sci_msg_hdr hdr; + u32 id; + u8 state; + u32 rsvd[2]; +} __packed; + +/** + * struct ti_sci_msg_req_lpm_set_latency_constraint - Request for + * TISCI_MSG_LPM_SET_LATENCY_CONSTRAINT. + * + * @hdr: TISCI header to provide ACK/NAK flags to the host. + * @latency: The maximum acceptable latency to wake up from low power mode + * in milliseconds. The deeper the state, the higher the latency. + * @state: The desired state of wakeup latency constraint: set or clear. + * @rsvd: Reserved for future use. + * + * This message is used by the host to set the wakeup latency from low power mode. This can + * be sent anytime after boot before the prepare sleep message, and can be sent after + * the current low power mode is exited.
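As a rough usage sketch for the DM-managed LPM support added above (not part of this patch): a TI SCI client that must stay powered across suspend-to-memory would set a device constraint through the new pm_ops once it holds a TI SCI handle. The helper devm_ti_sci_get_by_phandle(), the "ti,sci" phandle name and the TISCI_MSG_CONSTRAINT_SET value come from the existing TI SCI client header and are assumed here; the device ID is a placeholder and error handling is trimmed.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/soc/ti/ti_sci_protocol.h>

static int example_pin_device_across_lpm(struct device *dev, u32 tisci_dev_id)
{
	const struct ti_sci_handle *handle;

	handle = devm_ti_sci_get_by_phandle(dev, "ti,sci");
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* Only populated when the firmware reports MSG_FLAG_CAPS_LPM_DM_MANAGED */
	if (!handle->ops.pm_ops.set_device_constraint)
		return -EOPNOTSUPP;

	return handle->ops.pm_ops.set_device_constraint(handle, tisci_dev_id,
							TISCI_MSG_CONSTRAINT_SET);
}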
Any device can set a constraint on the low power + * mode that the SoC can enter. It allows configurable information to be easily shared + * from the application, as this is a non-secure message and therefore can be sent by + * anyone. By setting a wakeup latency constraint, the host ensures that the resume time + * from selected low power mode will be less than the constraint value. + */ +struct ti_sci_msg_req_lpm_set_latency_constraint { + struct ti_sci_msg_hdr hdr; + u16 latency; + u8 state; + u32 rsvd; +} __packed; + #define TI_SCI_IRQ_SECONDARY_HOST_INVALID 0xff /** diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c index 31d962cdd6eb..1eac9948148f 100644 --- a/drivers/firmware/turris-mox-rwtm.c +++ b/drivers/firmware/turris-mox-rwtm.c @@ -2,29 +2,51 @@ /* * Turris Mox rWTM firmware driver * - * Copyright (C) 2019 Marek Behún <kabel@kernel.org> + * Copyright (C) 2019, 2024, 2025 Marek Behún <kabel@kernel.org> */ +#include <crypto/sha2.h> +#include <linux/align.h> #include <linux/armada-37xx-rwtm-mailbox.h> +#include <linux/cleanup.h> #include <linux/completion.h> -#include <linux/debugfs.h> +#include <linux/container_of.h> +#include <linux/device.h> #include <linux/dma-mapping.h> +#include <linux/err.h> #include <linux/hw_random.h> +#include <linux/if_ether.h> +#include <linux/key.h> +#include <linux/kobject.h> #include <linux/mailbox_client.h> +#include <linux/math.h> +#include <linux/minmax.h> #include <linux/module.h> #include <linux/mutex.h> -#include <linux/of.h> #include <linux/platform_device.h> -#include <linux/slab.h> +#include <linux/sizes.h> +#include <linux/sysfs.h> +#include <linux/turris-signing-key.h> +#include <linux/types.h> #define DRIVER_NAME "turris-mox-rwtm" +#define RWTM_DMA_BUFFER_SIZE SZ_4K + /* * The macros and constants below come from Turris Mox's rWTM firmware code. * This firmware is open source and it's sources can be found at * https://gitlab.labs.nic.cz/turris/mox-boot-builder/tree/master/wtmi. 
*/ +enum { + MOX_ECC_NUM_BITS = 521, + MOX_ECC_NUM_LEN = DIV_ROUND_UP(MOX_ECC_NUM_BITS, 8), + MOX_ECC_NUM_WORDS = DIV_ROUND_UP(MOX_ECC_NUM_BITS, 32), + MOX_ECC_SIG_LEN = 2 * MOX_ECC_NUM_LEN, + MOX_ECC_PUBKEY_LEN = 1 + MOX_ECC_NUM_LEN, +}; + #define MBOX_STS_SUCCESS (0 << 30) #define MBOX_STS_FAIL (1 << 30) #define MBOX_STS_BADCMD (2 << 30) @@ -44,13 +66,27 @@ enum mbox_cmd { MBOX_CMD_OTP_WRITE = 8, }; -struct mox_kobject; - +/** + * struct mox_rwtm - driver private data structure + * @mbox_client: rWTM mailbox client + * @mbox: rWTM mailbox channel + * @hwrng: RNG driver structure + * @reply: last mailbox reply, filled in receive callback + * @buf: DMA buffer + * @buf_phys: physical address of the DMA buffer + * @busy: mutex to protect mailbox command execution + * @cmd_done: command done completion + * @has_board_info: whether board information is present + * @serial_number: serial number of the device + * @board_version: board version / revision of the device + * @ram_size: RAM size of the device + * @mac_address1: first MAC address of the device + * @mac_address2: second MAC address of the device + * @pubkey: board ECDSA public key + */ struct mox_rwtm { - struct device *dev; struct mbox_client mbox_client; struct mbox_chan *mbox; - struct mox_kobject *kobj; struct hwrng hwrng; struct armada_37xx_rwtm_rx_msg reply; @@ -61,89 +97,48 @@ struct mox_rwtm { struct mutex busy; struct completion cmd_done; - /* board information */ - int has_board_info; + bool has_board_info; u64 serial_number; int board_version, ram_size; - u8 mac_address1[6], mac_address2[6]; - - /* public key burned in eFuse */ - int has_pubkey; - u8 pubkey[135]; + u8 mac_address1[ETH_ALEN], mac_address2[ETH_ALEN]; -#ifdef CONFIG_DEBUG_FS - /* - * Signature process. This is currently done via debugfs, because it - * does not conform to the sysfs standard "one file per attribute". - * It should be rewritten via crypto API once akcipher API is available - * from userspace. 
- */ - struct dentry *debugfs_root; - u32 last_sig[34]; - int last_sig_done; +#ifdef CONFIG_TURRIS_MOX_RWTM_KEYCTL + u8 pubkey[MOX_ECC_PUBKEY_LEN]; #endif }; -struct mox_kobject { - struct kobject kobj; - struct mox_rwtm *rwtm; -}; - -static inline struct kobject *rwtm_to_kobj(struct mox_rwtm *rwtm) +static inline struct device *rwtm_dev(struct mox_rwtm *rwtm) { - return &rwtm->kobj->kobj; + return rwtm->mbox_client.dev; } -static inline struct mox_rwtm *to_rwtm(struct kobject *kobj) -{ - return container_of(kobj, struct mox_kobject, kobj)->rwtm; -} - -static void mox_kobj_release(struct kobject *kobj) -{ - kfree(to_rwtm(kobj)->kobj); -} - -static const struct kobj_type mox_kobj_ktype = { - .release = mox_kobj_release, - .sysfs_ops = &kobj_sysfs_ops, -}; - -static int mox_kobj_create(struct mox_rwtm *rwtm) -{ - rwtm->kobj = kzalloc(sizeof(*rwtm->kobj), GFP_KERNEL); - if (!rwtm->kobj) - return -ENOMEM; - - kobject_init(rwtm_to_kobj(rwtm), &mox_kobj_ktype); - if (kobject_add(rwtm_to_kobj(rwtm), firmware_kobj, "turris-mox-rwtm")) { - kobject_put(rwtm_to_kobj(rwtm)); - return -ENXIO; - } - - rwtm->kobj->rwtm = rwtm; - - return 0; -} - -#define MOX_ATTR_RO(name, format, cat) \ +#define MOX_ATTR_RO(name, format) \ static ssize_t \ -name##_show(struct kobject *kobj, struct kobj_attribute *a, \ +name##_show(struct device *dev, struct device_attribute *a, \ char *buf) \ { \ - struct mox_rwtm *rwtm = to_rwtm(kobj); \ - if (!rwtm->has_##cat) \ + struct mox_rwtm *rwtm = dev_get_drvdata(dev); \ + if (!rwtm->has_board_info) \ return -ENODATA; \ - return sprintf(buf, format, rwtm->name); \ + return sysfs_emit(buf, format, rwtm->name); \ } \ -static struct kobj_attribute mox_attr_##name = __ATTR_RO(name) - -MOX_ATTR_RO(serial_number, "%016llX\n", board_info); -MOX_ATTR_RO(board_version, "%i\n", board_info); -MOX_ATTR_RO(ram_size, "%i\n", board_info); -MOX_ATTR_RO(mac_address1, "%pM\n", board_info); -MOX_ATTR_RO(mac_address2, "%pM\n", board_info); -MOX_ATTR_RO(pubkey, "%s\n", pubkey); +static DEVICE_ATTR_RO(name) + +MOX_ATTR_RO(serial_number, "%016llX\n"); +MOX_ATTR_RO(board_version, "%i\n"); +MOX_ATTR_RO(ram_size, "%i\n"); +MOX_ATTR_RO(mac_address1, "%pM\n"); +MOX_ATTR_RO(mac_address2, "%pM\n"); + +static struct attribute *turris_mox_rwtm_attrs[] = { + &dev_attr_serial_number.attr, + &dev_attr_board_version.attr, + &dev_attr_ram_size.attr, + &dev_attr_mac_address1.attr, + &dev_attr_mac_address2.attr, + NULL +}; +ATTRIBUTE_GROUPS(turris_mox_rwtm); static int mox_get_status(enum mbox_cmd cmd, u32 retval) { @@ -152,32 +147,53 @@ static int mox_get_status(enum mbox_cmd cmd, u32 retval) else if (MBOX_STS_ERROR(retval) == MBOX_STS_FAIL) return -(int)MBOX_STS_VALUE(retval); else if (MBOX_STS_ERROR(retval) == MBOX_STS_BADCMD) - return -ENOSYS; + return -EOPNOTSUPP; else if (MBOX_STS_ERROR(retval) != MBOX_STS_SUCCESS) return -EIO; else return MBOX_STS_VALUE(retval); } -static const struct attribute *mox_rwtm_attrs[] = { - &mox_attr_serial_number.attr, - &mox_attr_board_version.attr, - &mox_attr_ram_size.attr, - &mox_attr_mac_address1.attr, - &mox_attr_mac_address2.attr, - &mox_attr_pubkey.attr, - NULL -}; - static void mox_rwtm_rx_callback(struct mbox_client *cl, void *data) { struct mox_rwtm *rwtm = dev_get_drvdata(cl->dev); struct armada_37xx_rwtm_rx_msg *msg = data; + if (completion_done(&rwtm->cmd_done)) + return; + rwtm->reply = *msg; complete(&rwtm->cmd_done); } +static int mox_rwtm_exec(struct mox_rwtm *rwtm, enum mbox_cmd cmd, + struct armada_37xx_rwtm_tx_msg *msg, + bool interruptible) +{ + struct 
armada_37xx_rwtm_tx_msg _msg = {}; + int ret; + + if (!msg) + msg = &_msg; + + msg->command = cmd; + + ret = mbox_send_message(rwtm->mbox, msg); + if (ret < 0) + return ret; + + if (interruptible) { + ret = wait_for_completion_interruptible(&rwtm->cmd_done); + if (ret < 0) + return ret; + } else { + if (!wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2)) + return -ETIMEDOUT; + } + + return mox_get_status(cmd, rwtm->reply.retval); +} + static void reply_to_mac_addr(u8 *mac, u32 t1, u32 t2) { mac[0] = t1 >> 8; @@ -190,25 +206,16 @@ static void reply_to_mac_addr(u8 *mac, u32 t1, u32 t2) static int mox_get_board_info(struct mox_rwtm *rwtm) { - struct armada_37xx_rwtm_tx_msg msg; + struct device *dev = rwtm_dev(rwtm); struct armada_37xx_rwtm_rx_msg *reply = &rwtm->reply; int ret; - msg.command = MBOX_CMD_BOARD_INFO; - ret = mbox_send_message(rwtm->mbox, &msg); - if (ret < 0) - return ret; - - ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2); - if (ret < 0) - return ret; - - ret = mox_get_status(MBOX_CMD_BOARD_INFO, reply->retval); + ret = mox_rwtm_exec(rwtm, MBOX_CMD_BOARD_INFO, NULL, false); if (ret == -ENODATA) { - dev_warn(rwtm->dev, + dev_warn(dev, "Board does not have manufacturing information burned!\n"); - } else if (ret == -ENOSYS) { - dev_notice(rwtm->dev, + } else if (ret == -EOPNOTSUPP) { + dev_notice(dev, "Firmware does not support the BOARD_INFO command\n"); } else if (ret < 0) { return ret; @@ -222,7 +229,7 @@ static int mox_get_board_info(struct mox_rwtm *rwtm) reply->status[5]); reply_to_mac_addr(rwtm->mac_address2, reply->status[6], reply->status[7]); - rwtm->has_board_info = 1; + rwtm->has_board_info = true; pr_info("Turris Mox serial number %016llX\n", rwtm->serial_number); @@ -230,70 +237,27 @@ static int mox_get_board_info(struct mox_rwtm *rwtm) pr_info(" burned RAM size %i MiB\n", rwtm->ram_size); } - msg.command = MBOX_CMD_ECDSA_PUB_KEY; - ret = mbox_send_message(rwtm->mbox, &msg); - if (ret < 0) - return ret; - - ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2); - if (ret < 0) - return ret; - - ret = mox_get_status(MBOX_CMD_ECDSA_PUB_KEY, reply->retval); - if (ret == -ENODATA) { - dev_warn(rwtm->dev, "Board has no public key burned!\n"); - } else if (ret == -ENOSYS) { - dev_notice(rwtm->dev, - "Firmware does not support the ECDSA_PUB_KEY command\n"); - } else if (ret < 0) { - return ret; - } else { - u32 *s = reply->status; - - rwtm->has_pubkey = 1; - sprintf(rwtm->pubkey, - "%06x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x", - ret, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], - s[8], s[9], s[10], s[11], s[12], s[13], s[14], s[15]); - } - return 0; } static int check_get_random_support(struct mox_rwtm *rwtm) { - struct armada_37xx_rwtm_tx_msg msg; - int ret; - - msg.command = MBOX_CMD_GET_RANDOM; - msg.args[0] = 1; - msg.args[1] = rwtm->buf_phys; - msg.args[2] = 4; - - ret = mbox_send_message(rwtm->mbox, &msg); - if (ret < 0) - return ret; - - ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2); - if (ret < 0) - return ret; + struct armada_37xx_rwtm_tx_msg msg = { + .args = { 1, rwtm->buf_phys, 4 }, + }; - return mox_get_status(MBOX_CMD_GET_RANDOM, rwtm->reply.retval); + return mox_rwtm_exec(rwtm, MBOX_CMD_GET_RANDOM, &msg, false); } static int mox_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait) { - struct mox_rwtm *rwtm = (struct mox_rwtm *) rng->priv; - struct armada_37xx_rwtm_tx_msg msg; + struct mox_rwtm *rwtm = container_of(rng, struct mox_rwtm, hwrng); + struct armada_37xx_rwtm_tx_msg msg = { + 
.args = { 1, rwtm->buf_phys, ALIGN(max, 4) }, + }; int ret; - if (max > 4096) - max = 4096; - - msg.command = MBOX_CMD_GET_RANDOM; - msg.args[0] = 1; - msg.args[1] = rwtm->buf_phys; - msg.args[2] = (max + 3) & ~3; + max = min(max, RWTM_DMA_BUFFER_SIZE); if (!wait) { if (!mutex_trylock(&rwtm->busy)) @@ -302,15 +266,7 @@ static int mox_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait) mutex_lock(&rwtm->busy); } - ret = mbox_send_message(rwtm->mbox, &msg); - if (ret < 0) - goto unlock_mutex; - - ret = wait_for_completion_interruptible(&rwtm->cmd_done); - if (ret < 0) - goto unlock_mutex; - - ret = mox_get_status(MBOX_CMD_GET_RANDOM, rwtm->reply.retval); + ret = mox_rwtm_exec(rwtm, MBOX_CMD_GET_RANDOM, &msg, true); if (ret < 0) goto unlock_mutex; @@ -322,151 +278,149 @@ unlock_mutex: return ret; } -#ifdef CONFIG_DEBUG_FS -static int rwtm_debug_open(struct inode *inode, struct file *file) -{ - file->private_data = inode->i_private; - - return nonseekable_open(inode, file); -} +#ifdef CONFIG_TURRIS_MOX_RWTM_KEYCTL -static ssize_t do_sign_read(struct file *file, char __user *buf, size_t len, - loff_t *ppos) +static void mox_ecc_number_to_bin(void *dst, const u32 *src) { - struct mox_rwtm *rwtm = file->private_data; - ssize_t ret; + __be32 tmp[MOX_ECC_NUM_WORDS]; - /* only allow one read, of 136 bytes, from position 0 */ - if (*ppos != 0) - return 0; + cpu_to_be32_array(tmp, src, MOX_ECC_NUM_WORDS); - if (len < 136) - return -EINVAL; + memcpy(dst, (void *)tmp + 2, MOX_ECC_NUM_LEN); +} - if (!rwtm->last_sig_done) - return -ENODATA; +static void mox_ecc_public_key_to_bin(void *dst, u32 src_first, + const u32 *src_rest) +{ + __be32 tmp[MOX_ECC_NUM_WORDS - 1]; + u8 *p = dst; - /* 2 arrays of 17 32-bit words are 136 bytes */ - ret = simple_read_from_buffer(buf, len, ppos, rwtm->last_sig, 136); - rwtm->last_sig_done = 0; + /* take 3 bytes from the first word */ + *p++ = src_first >> 16; + *p++ = src_first >> 8; + *p++ = src_first; - return ret; + /* take the rest of the words */ + cpu_to_be32_array(tmp, src_rest, MOX_ECC_NUM_WORDS - 1); + memcpy(p, tmp, sizeof(tmp)); } -static ssize_t do_sign_write(struct file *file, const char __user *buf, - size_t len, loff_t *ppos) +static int mox_rwtm_sign(const struct key *key, const void *data, void *signature) { - struct mox_rwtm *rwtm = file->private_data; - struct armada_37xx_rwtm_rx_msg *reply = &rwtm->reply; - struct armada_37xx_rwtm_tx_msg msg; - loff_t dummy = 0; - ssize_t ret; - - /* the input is a SHA-512 hash, so exactly 64 bytes have to be read */ - if (len != 64) - return -EINVAL; - - /* if last result is not zero user has not read that information yet */ - if (rwtm->last_sig_done) - return -EBUSY; + struct mox_rwtm *rwtm = dev_get_drvdata(turris_signing_key_get_dev(key)); + struct armada_37xx_rwtm_tx_msg msg = {}; + u32 offset_r, offset_s; + int ret; - if (!mutex_trylock(&rwtm->busy)) - return -EBUSY; + guard(mutex)(&rwtm->busy); /* - * Here we have to send: - * 1. Address of the input to sign. - * The input is an array of 17 32-bit words, the first (most - * significat) is 0, the rest 16 words are copied from the SHA-512 - * hash given by the user and converted from BE to LE. - * 2. Address of the buffer where ECDSA signature value R shall be - * stored by the rWTM firmware. - * 3. Address of the buffer where ECDSA signature value S shall be - * stored by the rWTM firmware. 
+ * For MBOX_CMD_SIGN command: + * args[0] - must be 1 + * args[1] - address of message M to sign; message is a 521-bit number + * args[2] - address where the R part of the signature will be stored + * args[3] - address where the S part of the signature will be stored + * + * M, R and S are 521-bit numbers encoded as seventeen 32-bit words, + * most significant word first. + * Since the message in @data is a sha512 digest, the most significant + * word is always zero. */ - memset(rwtm->buf, 0, 4); - ret = simple_write_to_buffer(rwtm->buf + 4, 64, &dummy, buf, len); - if (ret < 0) - goto unlock_mutex; - be32_to_cpu_array(rwtm->buf, rwtm->buf, 17); - msg.command = MBOX_CMD_SIGN; + offset_r = MOX_ECC_NUM_WORDS * sizeof(u32); + offset_s = 2 * MOX_ECC_NUM_WORDS * sizeof(u32); + + memset(rwtm->buf, 0, sizeof(u32)); + memcpy(rwtm->buf + sizeof(u32), data, SHA512_DIGEST_SIZE); + be32_to_cpu_array(rwtm->buf, rwtm->buf, MOX_ECC_NUM_WORDS); + msg.args[0] = 1; msg.args[1] = rwtm->buf_phys; - msg.args[2] = rwtm->buf_phys + 68; - msg.args[3] = rwtm->buf_phys + 2 * 68; - ret = mbox_send_message(rwtm->mbox, &msg); - if (ret < 0) - goto unlock_mutex; + msg.args[2] = rwtm->buf_phys + offset_r; + msg.args[3] = rwtm->buf_phys + offset_s; - ret = wait_for_completion_interruptible(&rwtm->cmd_done); + ret = mox_rwtm_exec(rwtm, MBOX_CMD_SIGN, &msg, true); if (ret < 0) - goto unlock_mutex; + return ret; - ret = MBOX_STS_VALUE(reply->retval); - if (MBOX_STS_ERROR(reply->retval) != MBOX_STS_SUCCESS) - goto unlock_mutex; + /* convert R and S parts of the signature */ + mox_ecc_number_to_bin(signature, rwtm->buf + offset_r); + mox_ecc_number_to_bin(signature + MOX_ECC_NUM_LEN, rwtm->buf + offset_s); - /* - * Here we read the R and S values of the ECDSA signature - * computed by the rWTM firmware and convert their words from - * LE to BE.
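A quick size check for the conversion helpers used above (an editorial sketch, not part of the patch): the firmware exchanges each 521-bit value as seventeen 32-bit words (68 bytes), while MOX_ECC_NUM_LEN is only DIV_ROUND_UP(521, 8) = 66 bytes, so two leading pad bytes, always zero for a 521-bit number, are dropped when R and S are packed into the signature. That is what the (void *)tmp + 2 offset in mox_ecc_number_to_bin() accounts for; the names below are illustrative, not the driver's.

enum {
	SKETCH_ECC_BITS   = 521,
	SKETCH_NUM_WORDS  = (SKETCH_ECC_BITS + 31) / 32,           /* 17 words, 68 bytes raw */
	SKETCH_NUM_LEN    = (SKETCH_ECC_BITS + 7) / 8,             /* 66 bytes kept          */
	SKETCH_PAD_BYTES  = SKETCH_NUM_WORDS * 4 - SKETCH_NUM_LEN, /* 2 bytes skipped        */
};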
- */ - memcpy(rwtm->last_sig, rwtm->buf + 68, 136); - cpu_to_be32_array(rwtm->last_sig, rwtm->last_sig, 34); - rwtm->last_sig_done = 1; + return 0; +} - mutex_unlock(&rwtm->busy); - return len; -unlock_mutex: - mutex_unlock(&rwtm->busy); - return ret; +static const void *mox_rwtm_get_public_key(const struct key *key) +{ + struct mox_rwtm *rwtm = dev_get_drvdata(turris_signing_key_get_dev(key)); + + return rwtm->pubkey; } -static const struct file_operations do_sign_fops = { - .owner = THIS_MODULE, - .open = rwtm_debug_open, - .read = do_sign_read, - .write = do_sign_write, - .llseek = no_llseek, +static const struct turris_signing_key_subtype mox_signing_key_subtype = { + .key_size = MOX_ECC_NUM_BITS, + .data_size = SHA512_DIGEST_SIZE, + .sig_size = MOX_ECC_SIG_LEN, + .public_key_size = MOX_ECC_PUBKEY_LEN, + .hash_algo = "sha512", + .get_public_key = mox_rwtm_get_public_key, + .sign = mox_rwtm_sign, }; -static int rwtm_register_debugfs(struct mox_rwtm *rwtm) +static int mox_register_signing_key(struct mox_rwtm *rwtm) { - struct dentry *root, *entry; + struct armada_37xx_rwtm_rx_msg *reply = &rwtm->reply; + struct device *dev = rwtm_dev(rwtm); + int ret; + + ret = mox_rwtm_exec(rwtm, MBOX_CMD_ECDSA_PUB_KEY, NULL, false); + if (ret == -ENODATA) { + dev_warn(dev, "Board has no public key burned!\n"); + } else if (ret == -EOPNOTSUPP) { + dev_notice(dev, + "Firmware does not support the ECDSA_PUB_KEY command\n"); + } else if (ret < 0) { + return ret; + } else { + char sn[17] = "unknown"; + char desc[46]; - root = debugfs_create_dir("turris-mox-rwtm", NULL); + if (rwtm->has_board_info) + sprintf(sn, "%016llX", rwtm->serial_number); - if (IS_ERR(root)) - return PTR_ERR(root); + sprintf(desc, "Turris MOX SN %s rWTM ECDSA key", sn); - entry = debugfs_create_file_unsafe("do_sign", 0600, root, rwtm, - &do_sign_fops); - if (IS_ERR(entry)) - goto err_remove; + mox_ecc_public_key_to_bin(rwtm->pubkey, ret, reply->status); - rwtm->debugfs_root = root; + ret = devm_turris_signing_key_create(dev, + &mox_signing_key_subtype, + desc); + if (ret) + return dev_err_probe(dev, ret, + "Cannot create signing key\n"); + } return 0; -err_remove: - debugfs_remove_recursive(root); - return PTR_ERR(entry); } -static void rwtm_unregister_debugfs(struct mox_rwtm *rwtm) +#else /* CONFIG_TURRIS_MOX_RWTM_KEYCTL */ + +static inline int mox_register_signing_key(struct mox_rwtm *rwtm) { - debugfs_remove_recursive(rwtm->debugfs_root); + return 0; } -#else -static inline int rwtm_register_debugfs(struct mox_rwtm *rwtm) + +#endif /* !CONFIG_TURRIS_MOX_RWTM_KEYCTL */ + +static void rwtm_devm_mbox_release(void *mbox) { - return 0; + mbox_free_channel(mbox); } -static inline void rwtm_unregister_debugfs(struct mox_rwtm *rwtm) +static void rwtm_firmware_symlink_drop(void *parent) { + sysfs_remove_link(parent, DRIVER_NAME); } -#endif static int turris_mox_rwtm_probe(struct platform_device *pdev) { @@ -478,90 +432,65 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev) if (!rwtm) return -ENOMEM; - rwtm->dev = dev; - rwtm->buf = dmam_alloc_coherent(dev, PAGE_SIZE, &rwtm->buf_phys, - GFP_KERNEL); + rwtm->buf = dmam_alloc_coherent(dev, RWTM_DMA_BUFFER_SIZE, + &rwtm->buf_phys, GFP_KERNEL); if (!rwtm->buf) return -ENOMEM; - ret = mox_kobj_create(rwtm); - if (ret < 0) { - dev_err(dev, "Cannot create turris-mox-rwtm kobject!\n"); - return ret; - } - - ret = sysfs_create_files(rwtm_to_kobj(rwtm), mox_rwtm_attrs); - if (ret < 0) { - dev_err(dev, "Cannot create sysfs files!\n"); - goto put_kobj; - } - platform_set_drvdata(pdev, 
rwtm); - mutex_init(&rwtm->busy); + ret = devm_mutex_init(dev, &rwtm->busy); + if (ret) + return ret; + + init_completion(&rwtm->cmd_done); rwtm->mbox_client.dev = dev; rwtm->mbox_client.rx_callback = mox_rwtm_rx_callback; rwtm->mbox = mbox_request_channel(&rwtm->mbox_client, 0); - if (IS_ERR(rwtm->mbox)) { - ret = PTR_ERR(rwtm->mbox); - if (ret != -EPROBE_DEFER) - dev_err(dev, "Cannot request mailbox channel: %i\n", - ret); - goto remove_files; - } + if (IS_ERR(rwtm->mbox)) + return dev_err_probe(dev, PTR_ERR(rwtm->mbox), + "Cannot request mailbox channel!\n"); - init_completion(&rwtm->cmd_done); + ret = devm_add_action_or_reset(dev, rwtm_devm_mbox_release, rwtm->mbox); + if (ret) + return ret; ret = mox_get_board_info(rwtm); if (ret < 0) dev_warn(dev, "Cannot read board information: %i\n", ret); + ret = mox_register_signing_key(rwtm); + if (ret < 0) + return ret; + ret = check_get_random_support(rwtm); if (ret < 0) { dev_notice(dev, "Firmware does not support the GET_RANDOM command\n"); - goto free_channel; + return ret; } rwtm->hwrng.name = DRIVER_NAME "_hwrng"; rwtm->hwrng.read = mox_hwrng_read; - rwtm->hwrng.priv = (unsigned long) rwtm; ret = devm_hwrng_register(dev, &rwtm->hwrng); - if (ret < 0) { - dev_err(dev, "Cannot register HWRNG: %i\n", ret); - goto free_channel; - } - - ret = rwtm_register_debugfs(rwtm); - if (ret < 0) { - dev_err(dev, "Failed creating debugfs entries: %i\n", ret); - goto free_channel; - } + if (ret) + return dev_err_probe(dev, ret, "Cannot register HWRNG!\n"); dev_info(dev, "HWRNG successfully registered\n"); - return 0; - -free_channel: - mbox_free_channel(rwtm->mbox); -remove_files: - sysfs_remove_files(rwtm_to_kobj(rwtm), mox_rwtm_attrs); -put_kobj: - kobject_put(rwtm_to_kobj(rwtm)); - return ret; -} - -static void turris_mox_rwtm_remove(struct platform_device *pdev) -{ - struct mox_rwtm *rwtm = platform_get_drvdata(pdev); + /* + * For sysfs ABI compatibility, create symlink + * /sys/firmware/turris-mox-rwtm to this device's sysfs directory. 
+ */ + ret = sysfs_create_link(firmware_kobj, &dev->kobj, DRIVER_NAME); + if (!ret) + devm_add_action_or_reset(dev, rwtm_firmware_symlink_drop, + firmware_kobj); - rwtm_unregister_debugfs(rwtm); - sysfs_remove_files(rwtm_to_kobj(rwtm), mox_rwtm_attrs); - kobject_put(rwtm_to_kobj(rwtm)); - mbox_free_channel(rwtm->mbox); + return 0; } static const struct of_device_id turris_mox_rwtm_match[] = { @@ -574,10 +503,10 @@ MODULE_DEVICE_TABLE(of, turris_mox_rwtm_match); static struct platform_driver turris_mox_rwtm_driver = { .probe = turris_mox_rwtm_probe, - .remove_new = turris_mox_rwtm_remove, .driver = { .name = DRIVER_NAME, .of_match_table = turris_mox_rwtm_match, + .dev_groups = turris_mox_rwtm_groups, }, }; module_platform_driver(turris_mox_rwtm_driver); diff --git a/drivers/firmware/xilinx/zynqmp-debug.c b/drivers/firmware/xilinx/zynqmp-debug.c index 8528850af889..22853ae0efdf 100644 --- a/drivers/firmware/xilinx/zynqmp-debug.c +++ b/drivers/firmware/xilinx/zynqmp-debug.c @@ -31,13 +31,51 @@ static char debugfs_buf[PAGE_SIZE]; #define PM_API(id) {id, #id, strlen(#id)} static struct pm_api_info pm_api_list[] = { + PM_API(PM_FORCE_POWERDOWN), + PM_API(PM_REQUEST_WAKEUP), + PM_API(PM_SYSTEM_SHUTDOWN), + PM_API(PM_REQUEST_NODE), + PM_API(PM_RELEASE_NODE), + PM_API(PM_SET_REQUIREMENT), PM_API(PM_GET_API_VERSION), + PM_API(PM_REGISTER_NOTIFIER), + PM_API(PM_RESET_ASSERT), + PM_API(PM_RESET_GET_STATUS), + PM_API(PM_GET_CHIPID), + PM_API(PM_PINCTRL_SET_FUNCTION), + PM_API(PM_PINCTRL_CONFIG_PARAM_GET), + PM_API(PM_PINCTRL_CONFIG_PARAM_SET), + PM_API(PM_IOCTL), + PM_API(PM_CLOCK_ENABLE), + PM_API(PM_CLOCK_DISABLE), + PM_API(PM_CLOCK_GETSTATE), + PM_API(PM_CLOCK_SETDIVIDER), + PM_API(PM_CLOCK_GETDIVIDER), + PM_API(PM_CLOCK_SETPARENT), + PM_API(PM_CLOCK_GETPARENT), PM_API(PM_QUERY_DATA), }; static struct dentry *firmware_debugfs_root; /** + * zynqmp_pm_ioctl - PM IOCTL for device control and configs + * @node: Node ID of the device + * @ioctl: ID of the requested IOCTL + * @arg1: Argument 1 of requested IOCTL call + * @arg2: Argument 2 of requested IOCTL call + * @arg3: Argument 3 of requested IOCTL call + * @out: Returned output value + * + * Return: Returns status, either success or error+reason + */ +static int zynqmp_pm_ioctl(const u32 node, const u32 ioctl, const u32 arg1, + const u32 arg2, const u32 arg3, u32 *out) +{ + return zynqmp_pm_invoke_fn(PM_IOCTL, out, 5, node, ioctl, arg1, arg2, arg3); +} + +/** * zynqmp_pm_argument_value() - Extract argument value from a PM-API request * @arg: Entered PM-API argument in string format * @@ -95,6 +133,128 @@ static int process_api_request(u32 pm_id, u64 *pm_api_arg, u32 *pm_api_ret) sprintf(debugfs_buf, "PM-API Version = %d.%d\n", pm_api_version >> 16, pm_api_version & 0xffff); break; + case PM_FORCE_POWERDOWN: + ret = zynqmp_pm_force_pwrdwn(pm_api_arg[0], + pm_api_arg[1] ? pm_api_arg[1] : + ZYNQMP_PM_REQUEST_ACK_NO); + break; + case PM_REQUEST_WAKEUP: + ret = zynqmp_pm_request_wake(pm_api_arg[0], + pm_api_arg[1], pm_api_arg[2], + pm_api_arg[3] ? pm_api_arg[3] : + ZYNQMP_PM_REQUEST_ACK_NO); + break; + case PM_SYSTEM_SHUTDOWN: + ret = zynqmp_pm_system_shutdown(pm_api_arg[0], pm_api_arg[1]); + break; + case PM_REQUEST_NODE: + ret = zynqmp_pm_request_node(pm_api_arg[0], + pm_api_arg[1] ? pm_api_arg[1] : + ZYNQMP_PM_CAPABILITY_ACCESS, + pm_api_arg[2] ? pm_api_arg[2] : 0, + pm_api_arg[3] ? 
pm_api_arg[3] : + ZYNQMP_PM_REQUEST_ACK_BLOCKING); + break; + case PM_RELEASE_NODE: + ret = zynqmp_pm_release_node(pm_api_arg[0]); + break; + case PM_SET_REQUIREMENT: + ret = zynqmp_pm_set_requirement(pm_api_arg[0], + pm_api_arg[1] ? pm_api_arg[1] : + ZYNQMP_PM_CAPABILITY_CONTEXT, + pm_api_arg[2] ? + pm_api_arg[2] : 0, + pm_api_arg[3] ? pm_api_arg[3] : + ZYNQMP_PM_REQUEST_ACK_BLOCKING); + break; + case PM_REGISTER_NOTIFIER: + ret = zynqmp_pm_register_notifier(pm_api_arg[0], + pm_api_arg[1] ? + pm_api_arg[1] : 0, + pm_api_arg[2] ? + pm_api_arg[2] : 0, + pm_api_arg[3] ? + pm_api_arg[3] : 0); + break; + case PM_RESET_ASSERT: + ret = zynqmp_pm_reset_assert(pm_api_arg[0], pm_api_arg[1]); + break; + case PM_RESET_GET_STATUS: + ret = zynqmp_pm_reset_get_status(pm_api_arg[0], &pm_api_ret[0]); + if (!ret) + sprintf(debugfs_buf, "Reset status: %u\n", + pm_api_ret[0]); + break; + case PM_GET_CHIPID: + ret = zynqmp_pm_get_chipid(&pm_api_ret[0], &pm_api_ret[1]); + if (!ret) + sprintf(debugfs_buf, "Idcode: %#x, Version:%#x\n", + pm_api_ret[0], pm_api_ret[1]); + break; + case PM_PINCTRL_SET_FUNCTION: + ret = zynqmp_pm_pinctrl_set_function(pm_api_arg[0], + pm_api_arg[1]); + break; + case PM_PINCTRL_CONFIG_PARAM_GET: + ret = zynqmp_pm_pinctrl_get_config(pm_api_arg[0], pm_api_arg[1], + &pm_api_ret[0]); + if (!ret) + sprintf(debugfs_buf, + "Pin: %llu, Param: %llu, Value: %u\n", + pm_api_arg[0], pm_api_arg[1], + pm_api_ret[0]); + break; + case PM_PINCTRL_CONFIG_PARAM_SET: + ret = zynqmp_pm_pinctrl_set_config(pm_api_arg[0], + pm_api_arg[1], + pm_api_arg[2]); + break; + case PM_IOCTL: + ret = zynqmp_pm_ioctl(pm_api_arg[0], pm_api_arg[1], + pm_api_arg[2], pm_api_arg[3], + pm_api_arg[4], &pm_api_ret[0]); + if (!ret && (pm_api_arg[1] == IOCTL_GET_RPU_OPER_MODE || + pm_api_arg[1] == IOCTL_GET_PLL_FRAC_MODE || + pm_api_arg[1] == IOCTL_GET_PLL_FRAC_DATA || + pm_api_arg[1] == IOCTL_READ_GGS || + pm_api_arg[1] == IOCTL_READ_PGGS || + pm_api_arg[1] == IOCTL_READ_REG)) + sprintf(debugfs_buf, "IOCTL return value: %u\n", + pm_api_ret[1]); + if (!ret && pm_api_arg[1] == IOCTL_GET_QOS) + sprintf(debugfs_buf, "Default QoS: %u\nCurrent QoS: %u\n", + pm_api_ret[1], pm_api_ret[2]); + break; + case PM_CLOCK_ENABLE: + ret = zynqmp_pm_clock_enable(pm_api_arg[0]); + break; + case PM_CLOCK_DISABLE: + ret = zynqmp_pm_clock_disable(pm_api_arg[0]); + break; + case PM_CLOCK_GETSTATE: + ret = zynqmp_pm_clock_getstate(pm_api_arg[0], &pm_api_ret[0]); + if (!ret) + sprintf(debugfs_buf, "Clock state: %u\n", + pm_api_ret[0]); + break; + case PM_CLOCK_SETDIVIDER: + ret = zynqmp_pm_clock_setdivider(pm_api_arg[0], pm_api_arg[1]); + break; + case PM_CLOCK_GETDIVIDER: + ret = zynqmp_pm_clock_getdivider(pm_api_arg[0], &pm_api_ret[0]); + if (!ret) + sprintf(debugfs_buf, "Divider Value: %d\n", + pm_api_ret[0]); + break; + case PM_CLOCK_SETPARENT: + ret = zynqmp_pm_clock_setparent(pm_api_arg[0], pm_api_arg[1]); + break; + case PM_CLOCK_GETPARENT: + ret = zynqmp_pm_clock_getparent(pm_api_arg[0], &pm_api_ret[0]); + if (!ret) + sprintf(debugfs_buf, + "Clock parent Index: %u\n", pm_api_ret[0]); + break; case PM_QUERY_DATA: qdata.qid = pm_api_arg[0]; qdata.arg1 = pm_api_arg[1]; @@ -150,7 +310,7 @@ static ssize_t zynqmp_pm_debugfs_api_write(struct file *file, char *kern_buff, *tmp_buff; char *pm_api_req; u32 pm_id = 0; - u64 pm_api_arg[4] = {0, 0, 0, 0}; + u64 pm_api_arg[5] = {0, 0, 0, 0, 0}; /* Return values from PM APIs calls */ u32 pm_api_ret[4] = {0, 0, 0, 0}; diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c index 
9bc45357e1a8..7356e860e65c 100644 --- a/drivers/firmware/xilinx/zynqmp.c +++ b/drivers/firmware/xilinx/zynqmp.c @@ -3,7 +3,7 @@ * Xilinx Zynq MPSoC Firmware layer * * Copyright (C) 2014-2022 Xilinx, Inc. - * Copyright (C) 2022 - 2023, Advanced Micro Devices, Inc. + * Copyright (C) 2022 - 2024, Advanced Micro Devices, Inc. * * Michal Simek <michal.simek@amd.com> * Davorin Mista <davorin.mista@aggios.com> @@ -41,14 +41,12 @@ /* IOCTL/QUERY feature payload size */ #define FEATURE_PAYLOAD_SIZE 2 -/* Firmware feature check version mask */ -#define FIRMWARE_VERSION_MASK GENMASK(15, 0) - static bool feature_check_enabled; static DEFINE_HASHTABLE(pm_api_features_map, PM_API_FEATURE_CHECK_MAX_ORDER); static u32 ioctl_features[FEATURE_PAYLOAD_SIZE]; static u32 query_features[FEATURE_PAYLOAD_SIZE]; +static u32 sip_svc_version; static struct platform_device *em_dev; /** @@ -154,6 +152,9 @@ static noinline int do_fw_call_smc(u32 *ret_payload, u32 num_args, ...) ret_payload[1] = upper_32_bits(res.a0); ret_payload[2] = lower_32_bits(res.a1); ret_payload[3] = upper_32_bits(res.a1); + ret_payload[4] = lower_32_bits(res.a2); + ret_payload[5] = upper_32_bits(res.a2); + ret_payload[6] = lower_32_bits(res.a3); } return zynqmp_pm_ret_code((enum pm_ret_status)res.a0); @@ -194,6 +195,9 @@ static noinline int do_fw_call_hvc(u32 *ret_payload, u32 num_args, ...) ret_payload[1] = upper_32_bits(res.a0); ret_payload[2] = lower_32_bits(res.a1); ret_payload[3] = upper_32_bits(res.a1); + ret_payload[4] = lower_32_bits(res.a2); + ret_payload[5] = upper_32_bits(res.a2); + ret_payload[6] = lower_32_bits(res.a3); } return zynqmp_pm_ret_code((enum pm_ret_status)res.a0); @@ -221,11 +225,14 @@ static int __do_feature_check_call(const u32 api_id, u32 *ret_payload) * Feature check of TF-A APIs is done in the TF-A layer and it expects for * MODULE_ID_MASK bits of SMC's arg[0] to be the same as PM_MODULE_ID. */ - if (module_id == TF_A_MODULE_ID) + if (module_id == TF_A_MODULE_ID) { module_id = PM_MODULE_ID; + smc_arg[1] = api_id; + } else { + smc_arg[1] = (api_id & API_ID_MASK); + } smc_arg[0] = PM_SIP_SVC | FIELD_PREP(MODULE_ID_MASK, module_id) | feature_check_api_id; - smc_arg[1] = (api_id & API_ID_MASK); ret = do_fw_call(ret_payload, 2, smc_arg[0], smc_arg[1]); if (ret) @@ -335,6 +342,70 @@ int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id) EXPORT_SYMBOL_GPL(zynqmp_pm_is_function_supported); /** + * zynqmp_pm_invoke_fw_fn() - Invoke the system-level platform management layer + * caller function depending on the configuration + * @pm_api_id: Requested PM-API call + * @ret_payload: Returned value array + * @num_args: Number of arguments to requested PM-API call + * + * Invoke platform management function for SMC or HVC call, depending on + * configuration. + * Following SMC Calling Convention (SMCCC) for SMC64: + * Pm Function Identifier, + * PM_SIP_SVC + PASS_THROUGH_FW_CMD_ID = + * ((SMC_TYPE_FAST << FUNCID_TYPE_SHIFT) + * ((SMC_64) << FUNCID_CC_SHIFT) + * ((SIP_START) << FUNCID_OEN_SHIFT) + * (PASS_THROUGH_FW_CMD_ID)) + * + * PM_SIP_SVC - Registered ZynqMP SIP Service Call. + * PASS_THROUGH_FW_CMD_ID - Fixed SiP SVC call ID for FW specific calls. + * + * Return: Returns status, either success or error+reason + */ +int zynqmp_pm_invoke_fw_fn(u32 pm_api_id, u32 *ret_payload, u32 num_args, ...) 
+{ + /* + * Added SIP service call Function Identifier + * Make sure to stay in x0 register + */ + u64 smc_arg[SMC_ARG_CNT_64]; + int ret, i; + va_list arg_list; + u32 args[SMC_ARG_CNT_32] = {0}; + u32 module_id; + + if (num_args > SMC_ARG_CNT_32) + return -EINVAL; + + va_start(arg_list, num_args); + + /* Check if feature is supported or not */ + ret = zynqmp_pm_feature(pm_api_id); + if (ret < 0) + return ret; + + for (i = 0; i < num_args; i++) + args[i] = va_arg(arg_list, u32); + + va_end(arg_list); + + module_id = FIELD_GET(PLM_MODULE_ID_MASK, pm_api_id); + + if (module_id == 0) + module_id = XPM_MODULE_ID; + + smc_arg[0] = PM_SIP_SVC | PASS_THROUGH_FW_CMD_ID; + smc_arg[1] = ((u64)args[0] << 32U) | FIELD_PREP(PLM_MODULE_ID_MASK, module_id) | + (pm_api_id & API_ID_MASK); + for (i = 1; i < (SMC_ARG_CNT_64 - 1); i++) + smc_arg[i + 1] = ((u64)args[(i * 2)] << 32U) | args[(i * 2) - 1]; + + return do_fw_call(ret_payload, 8, smc_arg[0], smc_arg[1], smc_arg[2], smc_arg[3], + smc_arg[4], smc_arg[5], smc_arg[6], smc_arg[7]); +} + +/** * zynqmp_pm_invoke_fn() - Invoke the system-level platform management layer * caller function depending on the configuration * @pm_api_id: Requested PM-API call @@ -492,6 +563,35 @@ int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily) EXPORT_SYMBOL_GPL(zynqmp_pm_get_family_info); /** + * zynqmp_pm_get_sip_svc_version() - Get SiP service call version + * @version: Returned version value + * + * Return: Returns status, either success or error+reason + */ +static int zynqmp_pm_get_sip_svc_version(u32 *version) +{ + struct arm_smccc_res res; + u64 args[SMC_ARG_CNT_64] = {0}; + + if (!version) + return -EINVAL; + + /* Check if SiP SVC version already verified */ + if (sip_svc_version > 0) { + *version = sip_svc_version; + return 0; + } + + args[0] = GET_SIP_SVC_VERSION; + + arm_smccc_smc(args[0], args[1], args[2], args[3], args[4], args[5], args[6], args[7], &res); + + *version = ((lower_32_bits(res.a0) << 16U) | lower_32_bits(res.a1)); + + return zynqmp_pm_ret_code(XST_PM_SUCCESS); +} + +/** * zynqmp_pm_get_trustzone_version() - Get secure trustzone firmware version * @version: Returned version value * @@ -555,10 +655,34 @@ static int get_set_conduit_method(struct device_node *np) */ int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out) { - int ret; + int ret, i = 0; + u32 ret_payload[PAYLOAD_ARG_CNT] = {0}; + + if (sip_svc_version >= SIP_SVC_PASSTHROUGH_VERSION) { + ret = zynqmp_pm_invoke_fw_fn(PM_QUERY_DATA, ret_payload, 4, + qdata.qid, qdata.arg1, + qdata.arg2, qdata.arg3); + /* To support backward compatibility */ + if (!ret && !ret_payload[0]) { + /* + * TF-A passes return status on 0th index but + * api to get clock name reads data from 0th + * index so pass data at 0th index instead of + * return status + */ + if (qdata.qid == PM_QID_CLOCK_GET_NAME || + qdata.qid == PM_QID_PINCTRL_GET_FUNCTION_NAME) + i = 1; + + for (; i < PAYLOAD_ARG_CNT; i++, out++) + *out = ret_payload[i]; - ret = zynqmp_pm_invoke_fn(PM_QUERY_DATA, out, 4, qdata.qid, qdata.arg1, qdata.arg2, - qdata.arg3); + return ret; + } + } + + ret = zynqmp_pm_invoke_fn(PM_QUERY_DATA, out, 4, qdata.qid, + qdata.arg1, qdata.arg2, qdata.arg3); /* * For clock name query, all bytes in SMC response are clock name @@ -923,7 +1047,7 @@ int zynqmp_pm_set_boot_health_status(u32 value) * * Return: Returns status, either success or error+reason */ -int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset, +int zynqmp_pm_reset_assert(const u32 reset, const enum zynqmp_pm_reset_action 
assert_flag) { return zynqmp_pm_invoke_fn(PM_RESET_ASSERT, NULL, 2, reset, assert_flag); @@ -937,7 +1061,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_reset_assert); * * Return: Returns status, either success or error+reason */ -int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, u32 *status) +int zynqmp_pm_reset_get_status(const u32 reset, u32 *status) { u32 ret_payload[PAYLOAD_ARG_CNT]; int ret; @@ -1015,17 +1139,13 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_get_status); int zynqmp_pm_fpga_get_config_status(u32 *value) { u32 ret_payload[PAYLOAD_ARG_CNT]; - u32 buf, lower_addr, upper_addr; int ret; if (!value) return -EINVAL; - lower_addr = lower_32_bits((u64)&buf); - upper_addr = upper_32_bits((u64)&buf); - ret = zynqmp_pm_invoke_fn(PM_FPGA_READ, ret_payload, 4, - XILINX_ZYNQMP_PM_FPGA_CONFIG_STAT_OFFSET, lower_addr, upper_addr, + XILINX_ZYNQMP_PM_FPGA_CONFIG_STAT_OFFSET, 0, 0, XILINX_ZYNQMP_PM_FPGA_READ_CONFIG_REG); *value = ret_payload[1]; @@ -1121,8 +1241,11 @@ int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param, if (pm_family_code == ZYNQMP_FAMILY_CODE && param == PM_PINCTRL_CONFIG_TRI_STATE) { ret = zynqmp_pm_feature(PM_PINCTRL_CONFIG_PARAM_SET); - if (ret < PM_PINCTRL_PARAM_SET_VERSION) + if (ret < PM_PINCTRL_PARAM_SET_VERSION) { + pr_warn("The requested pinctrl feature is not supported in the current firmware.\n" + "Expected firmware version is 2023.1 and above for this feature to work.\r\n"); return -EOPNOTSUPP; + } } return zynqmp_pm_invoke_fn(PM_PINCTRL_CONFIG_PARAM_SET, NULL, 3, pin, param, value); @@ -1890,6 +2013,11 @@ static int zynqmp_firmware_probe(struct platform_device *pdev) if (ret) return ret; + /* Get SiP SVC version number */ + ret = zynqmp_pm_get_sip_svc_version(&sip_svc_version); + if (ret) + return ret; + ret = do_feature_check_call(PM_FEATURE_CHECK); if (ret >= 0 && ((ret & FIRMWARE_VERSION_MASK) >= PM_API_VERSION_1)) feature_check_enabled = true; @@ -1986,6 +2114,6 @@ static struct platform_driver zynqmp_firmware_driver = { .dev_groups = zynqmp_firmware_groups, }, .probe = zynqmp_firmware_probe, - .remove_new = zynqmp_firmware_remove, + .remove = zynqmp_firmware_remove, }; module_platform_driver(zynqmp_firmware_driver); |
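To make the new pass-through packing in zynqmp_pm_invoke_fw_fn() easier to follow, here is a standalone sketch of how up to thirteen 32-bit PM arguments are folded into the eight 64-bit SMC registers: x1 carries args[0] in its upper half above the module/API id word, and each further register packs the next two arguments. This is an illustration with plain stdint types, assuming SMC_ARG_CNT_32 == 13 and SMC_ARG_CNT_64 == 8 as implied by the bounds in that hunk; assembling the id word from PLM_MODULE_ID_MASK/API_ID_MASK and the fixed PM_SIP_SVC | PASS_THROUGH_FW_CMD_ID function id is left out.

#include <stdint.h>

/* Pack a pass-through request: fid is the SiP service function id, id_word is
 * the already assembled module-id/api-id value, args[] holds up to thirteen
 * 32-bit PM arguments (unused entries zeroed by the caller). */
static void sketch_pack_passthrough(uint64_t smc_arg[8], uint64_t fid,
				    uint32_t id_word, const uint32_t args[13])
{
	int i;

	smc_arg[0] = fid;					/* x0: SiP function id   */
	smc_arg[1] = ((uint64_t)args[0] << 32) | id_word;	/* x1: args[0] : id_word */

	/* x2 = args[2]:args[1], x3 = args[4]:args[3], ..., x7 = args[12]:args[11] */
	for (i = 1; i < 7; i++)
		smc_arg[i + 1] = ((uint64_t)args[i * 2] << 32) | args[i * 2 - 1];
}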