Diffstat (limited to 'drivers/platform/x86/amd')
32 files changed, 9701 insertions, 0 deletions
diff --git a/drivers/platform/x86/amd/Kconfig b/drivers/platform/x86/amd/Kconfig
new file mode 100644
index 000000000000..b813f9265368
--- /dev/null
+++ b/drivers/platform/x86/amd/Kconfig
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# AMD x86 Platform Specific Drivers
+#
+
+source "drivers/platform/x86/amd/hsmp/Kconfig"
+source "drivers/platform/x86/amd/pmf/Kconfig"
+source "drivers/platform/x86/amd/pmc/Kconfig"
+source "drivers/platform/x86/amd/hfi/Kconfig"
+
+config AMD_3D_VCACHE
+	tristate "AMD 3D V-Cache Performance Optimizer Driver"
+	depends on X86_64 && ACPI
+	help
+	  The driver provides a sysfs interface, enabling the setting of a bias
+	  that alters CPU core reordering. This bias prefers cores with higher
+	  frequencies or larger L3 caches on processors supporting AMD 3D V-Cache
+	  technology.
+
+	  If you choose to compile this driver as a module, the module will be
+	  called amd_3d_vcache.
+
+config AMD_WBRF
+	bool "AMD Wifi RF Band mitigations (WBRF)"
+	depends on ACPI
+	help
+	  The WBRF (Wifi Band RFI mitigation) mechanism allows Wifi drivers
+	  to notify the frequencies they are using so that other hardware
+	  can be reconfigured to avoid harmonic conflicts.
+
+	  AMD provides an ACPI based mechanism to support WBRF on platforms with
+	  appropriate underlying support.
+
+	  This mechanism will only be activated on platforms that advertise a
+	  need for it.
+
+config AMD_ISP_PLATFORM
+	tristate "AMD ISP4 platform driver"
+	depends on I2C && X86_64 && ACPI
+	help
+	  Platform driver for AMD platforms containing image signal processor
+	  gen 4. Provides camera sensor module board information to allow
+	  sensor and V4L drivers to work properly.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called amd_isp4.
diff --git a/drivers/platform/x86/amd/Makefile b/drivers/platform/x86/amd/Makefile
new file mode 100644
index 000000000000..f6ff0c837f34
--- /dev/null
+++ b/drivers/platform/x86/amd/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for drivers/platform/x86/amd
+# AMD x86 Platform-Specific Drivers
+#
+
+obj-$(CONFIG_AMD_3D_VCACHE) += amd_3d_vcache.o
+amd_3d_vcache-y := x3d_vcache.o
+obj-$(CONFIG_AMD_PMC) += pmc/
+obj-$(CONFIG_AMD_HSMP) += hsmp/
+obj-$(CONFIG_AMD_PMF) += pmf/
+obj-$(CONFIG_AMD_WBRF) += wbrf.o
+obj-$(CONFIG_AMD_ISP_PLATFORM) += amd_isp4.o
+obj-$(CONFIG_AMD_HFI) += hfi/
diff --git a/drivers/platform/x86/amd/amd_isp4.c b/drivers/platform/x86/amd/amd_isp4.c
new file mode 100644
index 000000000000..0d494899502c
--- /dev/null
+++ b/drivers/platform/x86/amd/amd_isp4.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * AMD ISP platform driver for sensor i2c-client instantiation
+ *
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/soc/amd/isp4_misc.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#define AMDISP_OV05C10_I2C_ADDR 0x10
+#define AMDISP_OV05C10_HID "OMNI5C10"
+#define AMDISP_OV05C10_REMOTE_EP_NAME "ov05c10_isp_4_1_1"
+#define AMD_ISP_PLAT_DRV_NAME "amd-isp4"
+
+static const struct software_node isp4_mipi1_endpoint_node;
+static const struct software_node ov05c10_endpoint_node;
+
+/*
+ * AMD ISP platform info definition to initialize sensor
+ * specific platform configuration to prepare the amdisp
+ * platform.
+ */ +struct amdisp_platform_info { + struct i2c_board_info board_info; + const struct software_node **swnodes; +}; + +/* + * AMD ISP platform definition to configure the device properties + * missing in the ACPI table. + */ +struct amdisp_platform { + const struct amdisp_platform_info *pinfo; + struct i2c_board_info board_info; + struct notifier_block i2c_nb; + struct i2c_client *i2c_dev; + struct mutex lock; /* protects i2c client creation */ +}; + +/* Root AMD CAMERA SWNODE */ + +/* Root amd camera node definition */ +static const struct software_node amd_camera_node = { + .name = "amd_camera", +}; + +/* ISP4 SWNODE */ + +/* ISP4 OV05C10 camera node definition */ +static const struct software_node isp4_node = { + .name = "isp4", + .parent = &amd_camera_node, +}; + +/* + * ISP4 Ports node definition. No properties defined for + * ports node. + */ +static const struct software_node isp4_ports = { + .name = "ports", + .parent = &isp4_node, +}; + +/* + * ISP4 Port node definition. No properties defined for + * port node. + */ +static const struct software_node isp4_port_node = { + .name = "port@0", + .parent = &isp4_ports, +}; + +/* + * ISP4 MIPI1 remote endpoint points to OV05C10 endpoint + * node. + */ +static const struct software_node_ref_args isp4_refs[] = { + SOFTWARE_NODE_REFERENCE(&ov05c10_endpoint_node), +}; + +/* ISP4 MIPI1 endpoint node properties table */ +static const struct property_entry isp4_mipi1_endpoint_props[] = { + PROPERTY_ENTRY_REF_ARRAY("remote-endpoint", isp4_refs), + { } +}; + +/* ISP4 MIPI1 endpoint node definition */ +static const struct software_node isp4_mipi1_endpoint_node = { + .name = "endpoint", + .parent = &isp4_port_node, + .properties = isp4_mipi1_endpoint_props, +}; + +/* I2C1 SWNODE */ + +/* I2C1 camera node property table */ +static const struct property_entry i2c1_camera_props[] = { + PROPERTY_ENTRY_U32("clock-frequency", 1 * HZ_PER_MHZ), + { } +}; + +/* I2C1 camera node definition */ +static const struct software_node i2c1_node = { + .name = "i2c1", + .parent = &amd_camera_node, + .properties = i2c1_camera_props, +}; + +/* I2C1 camera node property table */ +static const struct property_entry ov05c10_camera_props[] = { + PROPERTY_ENTRY_U32("clock-frequency", 24 * HZ_PER_MHZ), + { } +}; + +/* OV05C10 camera node definition */ +static const struct software_node ov05c10_camera_node = { + .name = AMDISP_OV05C10_HID, + .parent = &i2c1_node, + .properties = ov05c10_camera_props, +}; + +/* + * OV05C10 Ports node definition. No properties defined for + * ports node for OV05C10. + */ +static const struct software_node ov05c10_ports = { + .name = "ports", + .parent = &ov05c10_camera_node, +}; + +/* + * OV05C10 Port node definition. + */ +static const struct software_node ov05c10_port_node = { + .name = "port@0", + .parent = &ov05c10_ports, +}; + +/* + * OV05C10 remote endpoint points to ISP4 MIPI1 endpoint + * node. 
+ */ +static const struct software_node_ref_args ov05c10_refs[] = { + SOFTWARE_NODE_REFERENCE(&isp4_mipi1_endpoint_node), +}; + +/* OV05C10 supports one single link frequency */ +static const u64 ov05c10_link_freqs[] = { + 900 * HZ_PER_MHZ, +}; + +/* OV05C10 supports only 2-lane configuration */ +static const u32 ov05c10_data_lanes[] = { + 1, + 2, +}; + +/* OV05C10 endpoint node properties table */ +static const struct property_entry ov05c10_endpoint_props[] = { + PROPERTY_ENTRY_U32("bus-type", 4), + PROPERTY_ENTRY_U32_ARRAY_LEN("data-lanes", ov05c10_data_lanes, + ARRAY_SIZE(ov05c10_data_lanes)), + PROPERTY_ENTRY_U64_ARRAY_LEN("link-frequencies", ov05c10_link_freqs, + ARRAY_SIZE(ov05c10_link_freqs)), + PROPERTY_ENTRY_REF_ARRAY("remote-endpoint", ov05c10_refs), + { } +}; + +/* OV05C10 endpoint node definition */ +static const struct software_node ov05c10_endpoint_node = { + .name = "endpoint", + .parent = &ov05c10_port_node, + .properties = ov05c10_endpoint_props, +}; + +/* + * AMD Camera swnode graph uses 10 nodes and also its relationship is + * fixed to align with the structure that v4l2 and i2c frameworks expects + * for successful parsing of fwnodes and its properties with standard names. + * + * It is only the node property_entries that will vary for each platform + * supporting different sensor modules. + * + * AMD ISP4 SWNODE GRAPH Structure + * + * amd_camera { + * isp4 { + * ports { + * port@0 { + * isp4_mipi1_ep: endpoint { + * remote-endpoint = &OMNI5C10_ep; + * }; + * }; + * }; + * }; + * + * i2c1 { + * clock-frequency = 1 MHz; + * OMNI5C10 { + * clock-frequency = 24MHz; + * ports { + * port@0 { + * OMNI5C10_ep: endpoint { + * bus-type = 4; + * data-lanes = <1 2>; + * link-frequencies = 900MHz; + * remote-endpoint = &isp4_mipi1; + * }; + * }; + * }; + * }; + * }; + * }; + * + */ +static const struct software_node *amd_isp4_nodes[] = { + &amd_camera_node, + &isp4_node, + &isp4_ports, + &isp4_port_node, + &isp4_mipi1_endpoint_node, + &i2c1_node, + &ov05c10_camera_node, + &ov05c10_ports, + &ov05c10_port_node, + &ov05c10_endpoint_node, + NULL +}; + +/* OV05C10 specific AMD ISP platform configuration */ +static const struct amdisp_platform_info ov05c10_platform_config = { + .board_info = { + .dev_name = "ov05c10", + I2C_BOARD_INFO("ov05c10", AMDISP_OV05C10_I2C_ADDR), + }, + .swnodes = amd_isp4_nodes, +}; + +static const struct acpi_device_id amdisp_sensor_ids[] = { + { AMDISP_OV05C10_HID, (kernel_ulong_t)&ov05c10_platform_config }, + { } +}; +MODULE_DEVICE_TABLE(acpi, amdisp_sensor_ids); + +static inline bool is_isp_i2c_adapter(struct i2c_adapter *adap) +{ + return !strcmp(adap->name, AMDISP_I2C_ADAP_NAME); +} + +static void instantiate_isp_i2c_client(struct amdisp_platform *isp4_platform, + struct i2c_adapter *adap) +{ + struct i2c_board_info *info = &isp4_platform->board_info; + struct i2c_client *i2c_dev; + + guard(mutex)(&isp4_platform->lock); + + if (isp4_platform->i2c_dev) + return; + + i2c_dev = i2c_new_client_device(adap, info); + if (IS_ERR(i2c_dev)) { + dev_err(&adap->dev, "error %pe registering isp i2c_client\n", i2c_dev); + return; + } + isp4_platform->i2c_dev = i2c_dev; +} + +static int isp_i2c_bus_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct amdisp_platform *isp4_platform = + container_of(nb, struct amdisp_platform, i2c_nb); + struct device *dev = data; + struct i2c_client *client; + struct i2c_adapter *adap; + + switch (action) { + case BUS_NOTIFY_ADD_DEVICE: + adap = i2c_verify_adapter(dev); + if (!adap) + break; + if 
(is_isp_i2c_adapter(adap)) + instantiate_isp_i2c_client(isp4_platform, adap); + break; + case BUS_NOTIFY_REMOVED_DEVICE: + client = i2c_verify_client(dev); + if (!client) + break; + + scoped_guard(mutex, &isp4_platform->lock) { + if (isp4_platform->i2c_dev == client) { + dev_dbg(&client->adapter->dev, "amdisp i2c_client removed\n"); + isp4_platform->i2c_dev = NULL; + } + } + break; + default: + break; + } + + return NOTIFY_DONE; +} + +static struct amdisp_platform *prepare_amdisp_platform(struct device *dev, + const struct amdisp_platform_info *src) +{ + struct amdisp_platform *isp4_platform; + int ret; + + isp4_platform = devm_kzalloc(dev, sizeof(*isp4_platform), GFP_KERNEL); + if (!isp4_platform) + return ERR_PTR(-ENOMEM); + + ret = devm_mutex_init(dev, &isp4_platform->lock); + if (ret) + return ERR_PTR(ret); + + isp4_platform->board_info.dev_name = src->board_info.dev_name; + strscpy(isp4_platform->board_info.type, src->board_info.type); + isp4_platform->board_info.addr = src->board_info.addr; + isp4_platform->pinfo = src; + + ret = software_node_register_node_group(src->swnodes); + if (ret) + return ERR_PTR(ret); + + /* initialize ov05c10_camera_node */ + isp4_platform->board_info.swnode = src->swnodes[6]; + + return isp4_platform; +} + +static int try_to_instantiate_i2c_client(struct device *dev, void *data) +{ + struct i2c_adapter *adap = i2c_verify_adapter(dev); + struct amdisp_platform *isp4_platform = data; + + if (!isp4_platform || !adap) + return 0; + if (!adap->owner) + return 0; + + if (is_isp_i2c_adapter(adap)) + instantiate_isp_i2c_client(isp4_platform, adap); + + return 0; +} + +static int amd_isp_probe(struct platform_device *pdev) +{ + const struct amdisp_platform_info *pinfo; + struct amdisp_platform *isp4_platform; + struct acpi_device *adev; + int ret; + + pinfo = device_get_match_data(&pdev->dev); + if (!pinfo) + return dev_err_probe(&pdev->dev, -EINVAL, + "failed to get valid ACPI data\n"); + + isp4_platform = prepare_amdisp_platform(&pdev->dev, pinfo); + if (IS_ERR(isp4_platform)) + return dev_err_probe(&pdev->dev, PTR_ERR(isp4_platform), + "failed to prepare AMD ISP platform fwnode\n"); + + isp4_platform->i2c_nb.notifier_call = isp_i2c_bus_notify; + ret = bus_register_notifier(&i2c_bus_type, &isp4_platform->i2c_nb); + if (ret) + goto error_unregister_sw_node; + + adev = ACPI_COMPANION(&pdev->dev); + /* initialize root amd_camera_node */ + adev->driver_data = (void *)pinfo->swnodes[0]; + + /* check if adapter is already registered and create i2c client instance */ + i2c_for_each_dev(isp4_platform, try_to_instantiate_i2c_client); + + platform_set_drvdata(pdev, isp4_platform); + return 0; + +error_unregister_sw_node: + software_node_unregister_node_group(isp4_platform->pinfo->swnodes); + return ret; +} + +static void amd_isp_remove(struct platform_device *pdev) +{ + struct amdisp_platform *isp4_platform = platform_get_drvdata(pdev); + + bus_unregister_notifier(&i2c_bus_type, &isp4_platform->i2c_nb); + i2c_unregister_device(isp4_platform->i2c_dev); + software_node_unregister_node_group(isp4_platform->pinfo->swnodes); +} + +static struct platform_driver amd_isp_platform_driver = { + .driver = { + .name = AMD_ISP_PLAT_DRV_NAME, + .acpi_match_table = amdisp_sensor_ids, + }, + .probe = amd_isp_probe, + .remove = amd_isp_remove, +}; + +module_platform_driver(amd_isp_platform_driver); + +MODULE_AUTHOR("Benjamin Chan <benjamin.chan@amd.com>"); +MODULE_AUTHOR("Pratap Nirujogi <pratap.nirujogi@amd.com>"); +MODULE_DESCRIPTION("AMD ISP4 Platform Driver"); +MODULE_LICENSE("GPL"); 
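The swnode graph registered above is meant to be consumed through the standard fwnode graph API. As a minimal sketch (not part of this patch), a sensor driver whose dev_fwnode() resolves to the OMNI5C10 swnode (attached to the i2c client via board_info.swnode) might parse the endpoint as follows; the function name and logging are illustrative assumptions:

#include <linux/device.h>
#include <linux/property.h>
#include <media/v4l2-fwnode.h>

/*
 * Illustrative only: a consumer of the endpoint properties that
 * amd_isp4.c registers (bus-type, data-lanes, link-frequencies).
 */
static int ov05c10_parse_endpoint(struct device *dev)
{
	struct v4l2_fwnode_endpoint bus_cfg = {
		/* bus-type 4 in the swnode graph means CSI-2 D-PHY */
		.bus_type = V4L2_MBUS_CSI2_DPHY,
	};
	struct fwnode_handle *ep;
	int ret;

	/* Finds ports/port@0/endpoint under the OMNI5C10 swnode */
	ep = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
	if (!ep)
		return -ENODEV;

	/* alloc_parse also pulls in the link-frequencies array */
	ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
	fwnode_handle_put(ep);
	if (ret)
		return ret;

	dev_dbg(dev, "%u data lanes, %u link frequencies\n",
		bus_cfg.bus.mipi_csi2.num_data_lanes,
		bus_cfg.nr_of_link_frequencies);

	v4l2_fwnode_endpoint_free(&bus_cfg);
	return 0;
}

Bus type 4 in ov05c10_endpoint_props corresponds to V4L2_FWNODE_BUS_TYPE_CSI2_DPHY, which is why the sketch pins the parse to V4L2_MBUS_CSI2_DPHY.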
diff --git a/drivers/platform/x86/amd/hfi/Kconfig b/drivers/platform/x86/amd/hfi/Kconfig new file mode 100644 index 000000000000..fecef6848023 --- /dev/null +++ b/drivers/platform/x86/amd/hfi/Kconfig @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# AMD Hardware Feedback Interface Driver +# + +config AMD_HFI + bool "AMD Hetero Core Hardware Feedback Driver" + depends on ACPI + depends on CPU_SUP_AMD + depends on SCHED_MC_PRIO + help + Select this option to enable the AMD Heterogeneous Core Hardware + Feedback Interface. If selected, hardware provides runtime thread + classification guidance to the operating system on the performance and + energy efficiency capabilities of each heterogeneous CPU core. These + capabilities may vary due to the inherent differences in the core types + and can also change as a result of variations in the operating + conditions of the system such as power and thermal limits. diff --git a/drivers/platform/x86/amd/hfi/Makefile b/drivers/platform/x86/amd/hfi/Makefile new file mode 100644 index 000000000000..672c6ac106e9 --- /dev/null +++ b/drivers/platform/x86/amd/hfi/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# AMD Hardware Feedback Interface Driver +# + +obj-$(CONFIG_AMD_HFI) += amd_hfi.o +amd_hfi-objs := hfi.o diff --git a/drivers/platform/x86/amd/hfi/hfi.c b/drivers/platform/x86/amd/hfi/hfi.c new file mode 100644 index 000000000000..83863a5e0fbc --- /dev/null +++ b/drivers/platform/x86/amd/hfi/hfi.c @@ -0,0 +1,546 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * AMD Hardware Feedback Interface Driver + * + * Copyright (C) 2025 Advanced Micro Devices, Inc. All Rights Reserved. + * + * Authors: Perry Yuan <Perry.Yuan@amd.com> + * Mario Limonciello <mario.limonciello@amd.com> + */ + +#define pr_fmt(fmt) "amd-hfi: " fmt + +#include <linux/acpi.h> +#include <linux/cpu.h> +#include <linux/debugfs.h> +#include <linux/gfp.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mailbox_client.h> +#include <linux/mutex.h> +#include <linux/percpu-defs.h> +#include <linux/platform_device.h> +#include <linux/smp.h> +#include <linux/topology.h> +#include <linux/workqueue.h> + +#include <asm/cpu_device_id.h> + +#include <acpi/pcc.h> +#include <acpi/cppc_acpi.h> + +#define AMD_HFI_DRIVER "amd_hfi" +#define AMD_HFI_MAILBOX_COUNT 1 +#define AMD_HETERO_RANKING_TABLE_VER 2 + +#define AMD_HETERO_CPUID_27 0x80000027 + +static struct platform_device *device; + +/** + * struct amd_shmem_info - Shared memory table for AMD HFI + * + * @header: The PCCT table header including signature, length flags and command. 
+ * @version_number: Version number of the table + * @n_logical_processors: Number of logical processors + * @n_capabilities: Number of ranking dimensions (performance, efficiency, etc) + * @table_update_context: Command being sent over the subspace + * @n_bitmaps: Number of 32-bit bitmaps to enumerate all the APIC IDs + * This is based on the maximum APIC ID enumerated in the system + * @reserved: 24 bit spare + * @table_data: Bit Map(s) of enabled logical processors + * Followed by the ranking data for each logical processor + */ +struct amd_shmem_info { + struct acpi_pcct_ext_pcc_shared_memory header; + u32 version_number :8, + n_logical_processors :8, + n_capabilities :8, + table_update_context :8; + u32 n_bitmaps :8, + reserved :24; + u32 table_data[]; +}; + +struct amd_hfi_data { + const char *name; + struct device *dev; + + /* PCCT table related */ + struct pcc_mbox_chan *pcc_chan; + void __iomem *pcc_comm_addr; + struct acpi_subtable_header *pcct_entry; + struct amd_shmem_info *shmem; + + struct dentry *dbgfs_dir; +}; + +/** + * struct amd_hfi_classes - HFI class capabilities per CPU + * @perf: Performance capability + * @eff: Power efficiency capability + * + * Capabilities of a logical processor in the ranking table. These capabilities + * are unitless and specific to each HFI class. + */ +struct amd_hfi_classes { + u32 perf; + u32 eff; +}; + +/** + * struct amd_hfi_cpuinfo - HFI workload class info per CPU + * @cpu: CPU index + * @apic_id: APIC id of the current CPU + * @class_index: workload class ID index + * @nr_class: max number of workload class supported + * @ipcc_scores: ipcc scores for each class + * @amd_hfi_classes: current CPU workload class ranking data + * + * Parameters of a logical processor linked with hardware feedback class. 
+ */ +struct amd_hfi_cpuinfo { + int cpu; + u32 apic_id; + s16 class_index; + u8 nr_class; + int *ipcc_scores; + struct amd_hfi_classes *amd_hfi_classes; +}; + +static DEFINE_PER_CPU(struct amd_hfi_cpuinfo, amd_hfi_cpuinfo) = {.class_index = -1}; + +static DEFINE_MUTEX(hfi_cpuinfo_lock); + +static void amd_hfi_sched_itmt_work(struct work_struct *work) +{ + sched_set_itmt_support(); +} +static DECLARE_WORK(sched_amd_hfi_itmt_work, amd_hfi_sched_itmt_work); + +static int find_cpu_index_by_apicid(unsigned int target_apicid) +{ + int cpu_index; + + for_each_possible_cpu(cpu_index) { + struct cpuinfo_x86 *info = &cpu_data(cpu_index); + + if (info->topo.apicid == target_apicid) { + pr_debug("match APIC id %u for CPU index: %d\n", + info->topo.apicid, cpu_index); + return cpu_index; + } + } + + return -ENODEV; +} + +static int amd_hfi_fill_metadata(struct amd_hfi_data *amd_hfi_data) +{ + struct acpi_pcct_ext_pcc_slave *pcct_ext = + (struct acpi_pcct_ext_pcc_slave *)amd_hfi_data->pcct_entry; + void __iomem *pcc_comm_addr; + u32 apic_start = 0; + + pcc_comm_addr = acpi_os_ioremap(amd_hfi_data->pcc_chan->shmem_base_addr, + amd_hfi_data->pcc_chan->shmem_size); + if (!pcc_comm_addr) { + dev_err(amd_hfi_data->dev, "failed to ioremap PCC common region mem\n"); + return -ENOMEM; + } + + memcpy_fromio(amd_hfi_data->shmem, pcc_comm_addr, pcct_ext->length); + iounmap(pcc_comm_addr); + + if (amd_hfi_data->shmem->header.signature != PCC_SIGNATURE) { + dev_err(amd_hfi_data->dev, "invalid signature in shared memory\n"); + return -EINVAL; + } + if (amd_hfi_data->shmem->version_number != AMD_HETERO_RANKING_TABLE_VER) { + dev_err(amd_hfi_data->dev, "invalid version %d\n", + amd_hfi_data->shmem->version_number); + return -EINVAL; + } + + for (unsigned int i = 0; i < amd_hfi_data->shmem->n_bitmaps; i++) { + u32 bitmap = amd_hfi_data->shmem->table_data[i]; + + for (unsigned int j = 0; j < BITS_PER_TYPE(u32); j++) { + u32 apic_id = i * BITS_PER_TYPE(u32) + j; + struct amd_hfi_cpuinfo *info; + int cpu_index, apic_index; + + if (!(bitmap & BIT(j))) + continue; + + cpu_index = find_cpu_index_by_apicid(apic_id); + if (cpu_index < 0) { + dev_warn(amd_hfi_data->dev, "APIC ID %u not found\n", apic_id); + continue; + } + + info = per_cpu_ptr(&amd_hfi_cpuinfo, cpu_index); + info->apic_id = apic_id; + + /* Fill the ranking data for each logical processor */ + info = per_cpu_ptr(&amd_hfi_cpuinfo, cpu_index); + apic_index = apic_start * info->nr_class * 2; + for (unsigned int k = 0; k < info->nr_class; k++) { + u32 *table = amd_hfi_data->shmem->table_data + + amd_hfi_data->shmem->n_bitmaps + + i * info->nr_class; + + info->amd_hfi_classes[k].eff = table[apic_index + 2 * k]; + info->amd_hfi_classes[k].perf = table[apic_index + 2 * k + 1]; + } + apic_start++; + } + } + + return 0; +} + +static int amd_hfi_alloc_class_data(struct platform_device *pdev) +{ + struct amd_hfi_cpuinfo *hfi_cpuinfo; + struct device *dev = &pdev->dev; + u32 nr_class_id; + int idx; + + nr_class_id = cpuid_eax(AMD_HETERO_CPUID_27); + if (nr_class_id > 255) { + dev_err(dev, "number of supported classes too large: %d\n", + nr_class_id); + return -EINVAL; + } + + for_each_possible_cpu(idx) { + struct amd_hfi_classes *classes; + int *ipcc_scores; + + classes = devm_kcalloc(dev, + nr_class_id, + sizeof(struct amd_hfi_classes), + GFP_KERNEL); + if (!classes) + return -ENOMEM; + ipcc_scores = devm_kcalloc(dev, nr_class_id, sizeof(int), GFP_KERNEL); + if (!ipcc_scores) + return -ENOMEM; + hfi_cpuinfo = per_cpu_ptr(&amd_hfi_cpuinfo, idx); + 
hfi_cpuinfo->amd_hfi_classes = classes;
+		hfi_cpuinfo->ipcc_scores = ipcc_scores;
+		hfi_cpuinfo->nr_class = nr_class_id;
+	}
+
+	return 0;
+}
+
+static void amd_hfi_remove(struct platform_device *pdev)
+{
+	struct amd_hfi_data *dev = platform_get_drvdata(pdev);
+
+	debugfs_remove_recursive(dev->dbgfs_dir);
+}
+
+static int amd_set_hfi_ipcc_score(struct amd_hfi_cpuinfo *hfi_cpuinfo, int cpu)
+{
+	for (int i = 0; i < hfi_cpuinfo->nr_class; i++)
+		WRITE_ONCE(hfi_cpuinfo->ipcc_scores[i],
+			   hfi_cpuinfo->amd_hfi_classes[i].perf);
+
+	sched_set_itmt_core_prio(hfi_cpuinfo->ipcc_scores[0], cpu);
+
+	return 0;
+}
+
+static int amd_hfi_set_state(unsigned int cpu, bool state)
+{
+	int ret;
+
+	ret = wrmsrq_on_cpu(cpu, MSR_AMD_WORKLOAD_CLASS_CONFIG, state ? 1 : 0);
+	if (ret)
+		return ret;
+
+	return wrmsrq_on_cpu(cpu, MSR_AMD_WORKLOAD_HRST, 0x1);
+}
+
+/**
+ * amd_hfi_online() - Enable workload classification on @cpu
+ * @cpu: CPU on which the workload classification will be enabled
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int amd_hfi_online(unsigned int cpu)
+{
+	struct amd_hfi_cpuinfo *hfi_info = per_cpu_ptr(&amd_hfi_cpuinfo, cpu);
+	struct amd_hfi_classes *hfi_classes;
+	int ret;
+
+	if (WARN_ON_ONCE(!hfi_info))
+		return -EINVAL;
+
+	/*
+	 * Check that @cpu has an associated, initialized set of ranking
+	 * data before enabling workload classification on it.
+	 */
+	hfi_classes = hfi_info->amd_hfi_classes;
+	if (!hfi_classes)
+		return -EINVAL;
+
+	guard(mutex)(&hfi_cpuinfo_lock);
+
+	ret = amd_hfi_set_state(cpu, true);
+	if (ret)
+		pr_err("WCT enable failed for CPU %u\n", cpu);
+
+	return ret;
+}
+
+/**
+ * amd_hfi_offline() - Disable workload classification on @cpu
+ * @cpu: CPU on which the workload classification will be disabled
+ *
+ * Remove @cpu from those covered by its HFI instance.
+ * + * Return: 0 on success, negative error code on failure + */ +static int amd_hfi_offline(unsigned int cpu) +{ + struct amd_hfi_cpuinfo *hfi_info = &per_cpu(amd_hfi_cpuinfo, cpu); + int ret; + + if (WARN_ON_ONCE(!hfi_info)) + return -EINVAL; + + guard(mutex)(&hfi_cpuinfo_lock); + + ret = amd_hfi_set_state(cpu, false); + if (ret) + pr_err("WCT disable failed for CPU %u\n", cpu); + + return ret; +} + +static int update_hfi_ipcc_scores(void) +{ + int cpu; + int ret; + + for_each_possible_cpu(cpu) { + struct amd_hfi_cpuinfo *hfi_cpuinfo = per_cpu_ptr(&amd_hfi_cpuinfo, cpu); + + ret = amd_set_hfi_ipcc_score(hfi_cpuinfo, cpu); + if (ret) + return ret; + } + + return 0; +} + +static int amd_hfi_metadata_parser(struct platform_device *pdev, + struct amd_hfi_data *amd_hfi_data) +{ + struct acpi_pcct_ext_pcc_slave *pcct_ext; + struct acpi_subtable_header *pcct_entry; + struct mbox_chan *pcc_mbox_channels; + struct acpi_table_header *pcct_tbl; + struct pcc_mbox_chan *pcc_chan; + acpi_status status; + int ret; + + pcc_mbox_channels = devm_kcalloc(&pdev->dev, AMD_HFI_MAILBOX_COUNT, + sizeof(*pcc_mbox_channels), GFP_KERNEL); + if (!pcc_mbox_channels) + return -ENOMEM; + + pcc_chan = devm_kcalloc(&pdev->dev, AMD_HFI_MAILBOX_COUNT, + sizeof(*pcc_chan), GFP_KERNEL); + if (!pcc_chan) + return -ENOMEM; + + status = acpi_get_table(ACPI_SIG_PCCT, 0, &pcct_tbl); + if (ACPI_FAILURE(status) || !pcct_tbl) + return -ENODEV; + + /* get pointer to the first PCC subspace entry */ + pcct_entry = (struct acpi_subtable_header *) ( + (unsigned long)pcct_tbl + sizeof(struct acpi_table_pcct)); + + pcc_chan->mchan = &pcc_mbox_channels[0]; + + amd_hfi_data->pcc_chan = pcc_chan; + amd_hfi_data->pcct_entry = pcct_entry; + pcct_ext = (struct acpi_pcct_ext_pcc_slave *)pcct_entry; + + if (pcct_ext->length <= 0) { + ret = -EINVAL; + goto out; + } + + amd_hfi_data->shmem = devm_kzalloc(amd_hfi_data->dev, pcct_ext->length, GFP_KERNEL); + if (!amd_hfi_data->shmem) { + ret = -ENOMEM; + goto out; + } + + pcc_chan->shmem_base_addr = pcct_ext->base_address; + pcc_chan->shmem_size = pcct_ext->length; + + /* parse the shared memory info from the PCCT table */ + ret = amd_hfi_fill_metadata(amd_hfi_data); + +out: + /* Don't leak any ACPI memory */ + acpi_put_table(pcct_tbl); + + return ret; +} + +static int class_capabilities_show(struct seq_file *s, void *unused) +{ + u32 cpu, idx; + + seq_puts(s, "CPU #\tWLC\tPerf\tEff\n"); + for_each_possible_cpu(cpu) { + struct amd_hfi_cpuinfo *hfi_cpuinfo = per_cpu_ptr(&amd_hfi_cpuinfo, cpu); + + seq_printf(s, "%d", cpu); + for (idx = 0; idx < hfi_cpuinfo->nr_class; idx++) { + seq_printf(s, "\t%u\t%u\t%u\n", idx, + hfi_cpuinfo->amd_hfi_classes[idx].perf, + hfi_cpuinfo->amd_hfi_classes[idx].eff); + } + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(class_capabilities); + +static int amd_hfi_pm_resume(struct device *dev) +{ + int ret, cpu; + + for_each_online_cpu(cpu) { + ret = amd_hfi_set_state(cpu, true); + if (ret < 0) { + dev_err(dev, "failed to enable workload class config: %d\n", ret); + return ret; + } + } + + return 0; +} + +static int amd_hfi_pm_suspend(struct device *dev) +{ + int ret, cpu; + + for_each_online_cpu(cpu) { + ret = amd_hfi_set_state(cpu, false); + if (ret < 0) { + dev_err(dev, "failed to disable workload class config: %d\n", ret); + return ret; + } + } + + return 0; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(amd_hfi_pm_ops, amd_hfi_pm_suspend, amd_hfi_pm_resume); + +static const struct acpi_device_id amd_hfi_platform_match[] = { + {"AMDI0104", 0}, + { } +}; +MODULE_DEVICE_TABLE(acpi, 
amd_hfi_platform_match); + +static int amd_hfi_probe(struct platform_device *pdev) +{ + struct amd_hfi_data *amd_hfi_data; + int ret; + + if (!acpi_match_device(amd_hfi_platform_match, &pdev->dev)) + return -ENODEV; + + amd_hfi_data = devm_kzalloc(&pdev->dev, sizeof(*amd_hfi_data), GFP_KERNEL); + if (!amd_hfi_data) + return -ENOMEM; + + amd_hfi_data->dev = &pdev->dev; + platform_set_drvdata(pdev, amd_hfi_data); + + ret = amd_hfi_alloc_class_data(pdev); + if (ret) + return ret; + + ret = amd_hfi_metadata_parser(pdev, amd_hfi_data); + if (ret) + return ret; + + ret = update_hfi_ipcc_scores(); + if (ret) + return ret; + + /* + * Tasks will already be running at the time this happens. This is + * OK because rankings will be adjusted by the callbacks. + */ + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/amd_hfi:online", + amd_hfi_online, amd_hfi_offline); + if (ret < 0) + return ret; + + schedule_work(&sched_amd_hfi_itmt_work); + + amd_hfi_data->dbgfs_dir = debugfs_create_dir("amd_hfi", arch_debugfs_dir); + debugfs_create_file("class_capabilities", 0644, amd_hfi_data->dbgfs_dir, pdev, + &class_capabilities_fops); + + return 0; +} + +static struct platform_driver amd_hfi_driver = { + .driver = { + .name = AMD_HFI_DRIVER, + .pm = &amd_hfi_pm_ops, + .acpi_match_table = ACPI_PTR(amd_hfi_platform_match), + }, + .probe = amd_hfi_probe, + .remove = amd_hfi_remove, +}; + +static int __init amd_hfi_init(void) +{ + int ret; + + if (acpi_disabled || + !cpu_feature_enabled(X86_FEATURE_AMD_HTR_CORES) || + !cpu_feature_enabled(X86_FEATURE_AMD_WORKLOAD_CLASS)) + return -ENODEV; + + device = platform_device_register_simple(AMD_HFI_DRIVER, -1, NULL, 0); + if (IS_ERR(device)) { + pr_err("unable to register HFI platform device\n"); + return PTR_ERR(device); + } + + ret = platform_driver_register(&amd_hfi_driver); + if (ret) + pr_err("failed to register HFI driver\n"); + + return ret; +} + +static __exit void amd_hfi_exit(void) +{ + platform_driver_unregister(&amd_hfi_driver); + platform_device_unregister(device); +} +module_init(amd_hfi_init); +module_exit(amd_hfi_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("AMD Hardware Feedback Interface Driver"); diff --git a/drivers/platform/x86/amd/hsmp/Kconfig b/drivers/platform/x86/amd/hsmp/Kconfig new file mode 100644 index 000000000000..2911120792e8 --- /dev/null +++ b/drivers/platform/x86/amd/hsmp/Kconfig @@ -0,0 +1,49 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# AMD HSMP Driver +# + +config AMD_HSMP + tristate + +menu "AMD HSMP Driver" + depends on AMD_NODE || COMPILE_TEST + +config AMD_HSMP_ACPI + tristate "AMD HSMP ACPI device driver" + depends on ACPI + depends on HWMON || !HWMON + select AMD_HSMP + help + Host System Management Port (HSMP) interface is a mailbox interface + between the x86 core and the System Management Unit (SMU) firmware. + The driver provides a way for user space tools to monitor and manage + system management functionality on EPYC and MI300A server CPUs + from AMD. + + This option supports ACPI based probing. + You may enable this, if your platform BIOS provides an ACPI object + as described in amd_hsmp.rst document. + + If you choose to compile this driver as a module the module will be + called hsmp_acpi. + +config AMD_HSMP_PLAT + tristate "AMD HSMP platform device driver" + depends on HWMON || !HWMON + select AMD_HSMP + help + Host System Management Port (HSMP) interface is a mailbox interface + between the x86 core and the System Management Unit (SMU) firmware. 
+ The driver provides a way for user space tools to monitor and manage + system management functionality on EPYC and MI300A server CPUs + from AMD. + + This option supports platform device based probing. + You may enable this, if your platform BIOS does not provide + HSMP ACPI object. + + If you choose to compile this driver as a module the module will be + called amd_hsmp. + +endmenu diff --git a/drivers/platform/x86/amd/hsmp/Makefile b/drivers/platform/x86/amd/hsmp/Makefile new file mode 100644 index 000000000000..ce8342e71f50 --- /dev/null +++ b/drivers/platform/x86/amd/hsmp/Makefile @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for drivers/platform/x86/amd/hsmp +# AMD HSMP Driver +# + +obj-$(CONFIG_AMD_HSMP) += hsmp_common.o +hsmp_common-y := hsmp.o +hsmp_common-$(CONFIG_HWMON) += hwmon.o +obj-$(CONFIG_AMD_HSMP_PLAT) += amd_hsmp.o +amd_hsmp-y := plat.o +obj-$(CONFIG_AMD_HSMP_ACPI) += hsmp_acpi.o +hsmp_acpi-y := acpi.o diff --git a/drivers/platform/x86/amd/hsmp/acpi.c b/drivers/platform/x86/amd/hsmp/acpi.c new file mode 100644 index 000000000000..97ed71593bdf --- /dev/null +++ b/drivers/platform/x86/amd/hsmp/acpi.c @@ -0,0 +1,647 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * AMD HSMP Platform Driver + * Copyright (c) 2024, AMD. + * All Rights Reserved. + * + * This file provides an ACPI based driver implementation for HSMP interface. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <asm/amd/hsmp.h> + +#include <linux/acpi.h> +#include <linux/array_size.h> +#include <linux/bits.h> +#include <linux/bitfield.h> +#include <linux/device.h> +#include <linux/dev_printk.h> +#include <linux/ioport.h> +#include <linux/kstrtox.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/sysfs.h> +#include <linux/topology.h> +#include <linux/uuid.h> + +#include <uapi/asm-generic/errno-base.h> + +#include "hsmp.h" + +#define DRIVER_NAME "hsmp_acpi" + +/* These are the strings specified in ACPI table */ +#define MSG_IDOFF_STR "MsgIdOffset" +#define MSG_ARGOFF_STR "MsgArgOffset" +#define MSG_RESPOFF_STR "MsgRspOffset" + +static struct hsmp_plat_device *hsmp_pdev; + +struct hsmp_sys_attr { + struct device_attribute dattr; + u32 msg_id; +}; + +static int amd_hsmp_acpi_rdwr(struct hsmp_socket *sock, u32 offset, + u32 *value, bool write) +{ + if (write) + iowrite32(*value, sock->virt_base_addr + offset); + else + *value = ioread32(sock->virt_base_addr + offset); + + return 0; +} + +/* This is the UUID used for HSMP */ +static const guid_t acpi_hsmp_uuid = GUID_INIT(0xb74d619d, 0x5707, 0x48bd, + 0xa6, 0x9f, 0x4e, 0xa2, + 0x87, 0x1f, 0xc2, 0xf6); + +static inline bool is_acpi_hsmp_uuid(union acpi_object *obj) +{ + if (obj->type == ACPI_TYPE_BUFFER && obj->buffer.length == UUID_SIZE) + return guid_equal((guid_t *)obj->buffer.pointer, &acpi_hsmp_uuid); + + return false; +} + +static inline int hsmp_get_uid(struct device *dev, u16 *sock_ind) +{ + char *uid; + + /* + * UID (ID00, ID01..IDXX) is used for differentiating sockets, + * read it and strip the "ID" part of it and convert the remaining + * bytes to integer. 
+ */ + uid = acpi_device_uid(ACPI_COMPANION(dev)); + + return kstrtou16(uid + 2, 10, sock_ind); +} + +static acpi_status hsmp_resource(struct acpi_resource *res, void *data) +{ + struct hsmp_socket *sock = data; + struct resource r; + + switch (res->type) { + case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: + if (!acpi_dev_resource_memory(res, &r)) + return AE_ERROR; + if (!r.start || r.end < r.start || !(r.flags & IORESOURCE_MEM_WRITEABLE)) + return AE_ERROR; + sock->mbinfo.base_addr = r.start; + sock->mbinfo.size = resource_size(&r); + break; + case ACPI_RESOURCE_TYPE_END_TAG: + break; + default: + return AE_ERROR; + } + + return AE_OK; +} + +static int hsmp_read_acpi_dsd(struct hsmp_socket *sock) +{ + struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *guid, *mailbox_package; + union acpi_object *dsd; + acpi_status status; + int ret = 0; + int j; + + status = acpi_evaluate_object_typed(ACPI_HANDLE(sock->dev), "_DSD", NULL, + &buf, ACPI_TYPE_PACKAGE); + if (ACPI_FAILURE(status)) { + dev_err(sock->dev, "Failed to read mailbox reg offsets from DSD table, err: %s\n", + acpi_format_exception(status)); + return -ENODEV; + } + + dsd = buf.pointer; + + /* HSMP _DSD property should contain 2 objects. + * 1. guid which is an acpi object of type ACPI_TYPE_BUFFER + * 2. mailbox which is an acpi object of type ACPI_TYPE_PACKAGE + * This mailbox object contains 3 more acpi objects of type + * ACPI_TYPE_PACKAGE for holding msgid, msgresp, msgarg offsets + * these packages inturn contain 2 acpi objects of type + * ACPI_TYPE_STRING and ACPI_TYPE_INTEGER + */ + if (!dsd || dsd->type != ACPI_TYPE_PACKAGE || dsd->package.count != 2) { + ret = -EINVAL; + goto free_buf; + } + + guid = &dsd->package.elements[0]; + mailbox_package = &dsd->package.elements[1]; + if (!is_acpi_hsmp_uuid(guid) || mailbox_package->type != ACPI_TYPE_PACKAGE) { + dev_err(sock->dev, "Invalid hsmp _DSD table data\n"); + ret = -EINVAL; + goto free_buf; + } + + for (j = 0; j < mailbox_package->package.count; j++) { + union acpi_object *msgobj, *msgstr, *msgint; + + msgobj = &mailbox_package->package.elements[j]; + msgstr = &msgobj->package.elements[0]; + msgint = &msgobj->package.elements[1]; + + /* package should have 1 string and 1 integer object */ + if (msgobj->type != ACPI_TYPE_PACKAGE || + msgstr->type != ACPI_TYPE_STRING || + msgint->type != ACPI_TYPE_INTEGER) { + ret = -EINVAL; + goto free_buf; + } + + if (!strncmp(msgstr->string.pointer, MSG_IDOFF_STR, + msgstr->string.length)) { + sock->mbinfo.msg_id_off = msgint->integer.value; + } else if (!strncmp(msgstr->string.pointer, MSG_RESPOFF_STR, + msgstr->string.length)) { + sock->mbinfo.msg_resp_off = msgint->integer.value; + } else if (!strncmp(msgstr->string.pointer, MSG_ARGOFF_STR, + msgstr->string.length)) { + sock->mbinfo.msg_arg_off = msgint->integer.value; + } else { + ret = -ENOENT; + goto free_buf; + } + } + + if (!sock->mbinfo.msg_id_off || !sock->mbinfo.msg_resp_off || + !sock->mbinfo.msg_arg_off) + ret = -EINVAL; + +free_buf: + ACPI_FREE(buf.pointer); + return ret; +} + +static int hsmp_read_acpi_crs(struct hsmp_socket *sock) +{ + acpi_status status; + + status = acpi_walk_resources(ACPI_HANDLE(sock->dev), METHOD_NAME__CRS, + hsmp_resource, sock); + if (ACPI_FAILURE(status)) { + dev_err(sock->dev, "Failed to look up MP1 base address from CRS method, err: %s\n", + acpi_format_exception(status)); + return -EINVAL; + } + if (!sock->mbinfo.base_addr || !sock->mbinfo.size) + return -EINVAL; + + /* The mapped region should be un-cached */ + 
sock->virt_base_addr = devm_ioremap_uc(sock->dev, sock->mbinfo.base_addr, + sock->mbinfo.size); + if (!sock->virt_base_addr) { + dev_err(sock->dev, "Failed to ioremap MP1 base address\n"); + return -ENOMEM; + } + + return 0; +} + +/* Parse the ACPI table to read the data */ +static int hsmp_parse_acpi_table(struct device *dev, u16 sock_ind) +{ + struct hsmp_socket *sock = &hsmp_pdev->sock[sock_ind]; + int ret; + + sock->sock_ind = sock_ind; + sock->dev = dev; + sock->amd_hsmp_rdwr = amd_hsmp_acpi_rdwr; + + sema_init(&sock->hsmp_sem, 1); + + dev_set_drvdata(dev, sock); + + /* Read MP1 base address from CRS method */ + ret = hsmp_read_acpi_crs(sock); + if (ret) + return ret; + + /* Read mailbox offsets from DSD table */ + return hsmp_read_acpi_dsd(sock); +} + +static ssize_t hsmp_metric_tbl_acpi_read(struct file *filp, struct kobject *kobj, + const struct bin_attribute *bin_attr, char *buf, + loff_t off, size_t count) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct hsmp_socket *sock = dev_get_drvdata(dev); + + return hsmp_metric_tbl_read(sock, buf, count); +} + +static umode_t hsmp_is_sock_attr_visible(struct kobject *kobj, + const struct bin_attribute *battr, int id) +{ + if (hsmp_pdev->proto_ver == HSMP_PROTO_VER6) + return battr->attr.mode; + + return 0; +} + +static umode_t hsmp_is_sock_dev_attr_visible(struct kobject *kobj, + struct attribute *attr, int id) +{ + return attr->mode; +} + +#define to_hsmp_sys_attr(_attr) container_of(_attr, struct hsmp_sys_attr, dattr) + +static ssize_t hsmp_msg_resp32_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hsmp_sys_attr *hattr = to_hsmp_sys_attr(attr); + struct hsmp_socket *sock = dev_get_drvdata(dev); + u32 data; + int ret; + + ret = hsmp_msg_get_nargs(sock->sock_ind, hattr->msg_id, &data, 1); + if (ret) + return ret; + + return sysfs_emit(buf, "%u\n", data); +} + +#define DDR_MAX_BW_MASK GENMASK(31, 20) +#define DDR_UTIL_BW_MASK GENMASK(19, 8) +#define DDR_UTIL_BW_PERC_MASK GENMASK(7, 0) +#define FW_VER_MAJOR_MASK GENMASK(23, 16) +#define FW_VER_MINOR_MASK GENMASK(15, 8) +#define FW_VER_DEBUG_MASK GENMASK(7, 0) +#define FMAX_MASK GENMASK(31, 16) +#define FMIN_MASK GENMASK(15, 0) +#define FREQ_LIMIT_MASK GENMASK(31, 16) +#define FREQ_SRC_IND_MASK GENMASK(15, 0) + +static ssize_t hsmp_ddr_max_bw_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hsmp_sys_attr *hattr = to_hsmp_sys_attr(attr); + struct hsmp_socket *sock = dev_get_drvdata(dev); + u32 data; + int ret; + + ret = hsmp_msg_get_nargs(sock->sock_ind, hattr->msg_id, &data, 1); + if (ret) + return ret; + + return sysfs_emit(buf, "%lu\n", FIELD_GET(DDR_MAX_BW_MASK, data)); +} + +static ssize_t hsmp_ddr_util_bw_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hsmp_sys_attr *hattr = to_hsmp_sys_attr(attr); + struct hsmp_socket *sock = dev_get_drvdata(dev); + u32 data; + int ret; + + ret = hsmp_msg_get_nargs(sock->sock_ind, hattr->msg_id, &data, 1); + if (ret) + return ret; + + return sysfs_emit(buf, "%lu\n", FIELD_GET(DDR_UTIL_BW_MASK, data)); +} + +static ssize_t hsmp_ddr_util_bw_perc_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hsmp_sys_attr *hattr = to_hsmp_sys_attr(attr); + struct hsmp_socket *sock = dev_get_drvdata(dev); + u32 data; + int ret; + + ret = hsmp_msg_get_nargs(sock->sock_ind, hattr->msg_id, &data, 1); + if (ret) + return ret; + + return sysfs_emit(buf, "%lu\n", FIELD_GET(DDR_UTIL_BW_PERC_MASK, data)); +} + +static 
ssize_t hsmp_msg_fw_ver_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hsmp_sys_attr *hattr = to_hsmp_sys_attr(attr); + struct hsmp_socket *sock = dev_get_drvdata(dev); + u32 data; + int ret; + + ret = hsmp_msg_get_nargs(sock->sock_ind, hattr->msg_id, &data, 1); + if (ret) + return ret; + + return sysfs_emit(buf, "%lu.%lu.%lu\n", + FIELD_GET(FW_VER_MAJOR_MASK, data), + FIELD_GET(FW_VER_MINOR_MASK, data), + FIELD_GET(FW_VER_DEBUG_MASK, data)); +} + +static ssize_t hsmp_fclk_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hsmp_sys_attr *hattr = to_hsmp_sys_attr(attr); + struct hsmp_socket *sock = dev_get_drvdata(dev); + u32 data[2]; + int ret; + + ret = hsmp_msg_get_nargs(sock->sock_ind, hattr->msg_id, data, 2); + if (ret) + return ret; + + return sysfs_emit(buf, "%u\n", data[0]); +} + +static ssize_t hsmp_mclk_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hsmp_sys_attr *hattr = to_hsmp_sys_attr(attr); + struct hsmp_socket *sock = dev_get_drvdata(dev); + u32 data[2]; + int ret; + + ret = hsmp_msg_get_nargs(sock->sock_ind, hattr->msg_id, data, 2); + if (ret) + return ret; + + return sysfs_emit(buf, "%u\n", data[1]); +} + +static ssize_t hsmp_clk_fmax_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hsmp_sys_attr *hattr = to_hsmp_sys_attr(attr); + struct hsmp_socket *sock = dev_get_drvdata(dev); + u32 data; + int ret; + + ret = hsmp_msg_get_nargs(sock->sock_ind, hattr->msg_id, &data, 1); + if (ret) + return ret; + + return sysfs_emit(buf, "%lu\n", FIELD_GET(FMAX_MASK, data)); +} + +static ssize_t hsmp_clk_fmin_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hsmp_sys_attr *hattr = to_hsmp_sys_attr(attr); + struct hsmp_socket *sock = dev_get_drvdata(dev); + u32 data; + int ret; + + ret = hsmp_msg_get_nargs(sock->sock_ind, hattr->msg_id, &data, 1); + if (ret) + return ret; + + return sysfs_emit(buf, "%lu\n", FIELD_GET(FMIN_MASK, data)); +} + +static ssize_t hsmp_freq_limit_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hsmp_sys_attr *hattr = to_hsmp_sys_attr(attr); + struct hsmp_socket *sock = dev_get_drvdata(dev); + u32 data; + int ret; + + ret = hsmp_msg_get_nargs(sock->sock_ind, hattr->msg_id, &data, 1); + if (ret) + return ret; + + return sysfs_emit(buf, "%lu\n", FIELD_GET(FREQ_LIMIT_MASK, data)); +} + +static const char * const freqlimit_srcnames[] = { + "cHTC-Active", + "PROCHOT", + "TDC limit", + "PPT Limit", + "OPN Max", + "Reliability Limit", + "APML Agent", + "HSMP Agent", +}; + +static ssize_t hsmp_freq_limit_source_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hsmp_sys_attr *hattr = to_hsmp_sys_attr(attr); + struct hsmp_socket *sock = dev_get_drvdata(dev); + unsigned int index; + int len = 0; + u16 src_ind; + u32 data; + int ret; + + ret = hsmp_msg_get_nargs(sock->sock_ind, hattr->msg_id, &data, 1); + if (ret) + return ret; + + src_ind = FIELD_GET(FREQ_SRC_IND_MASK, data); + for (index = 0; index < ARRAY_SIZE(freqlimit_srcnames); index++) { + if (!src_ind) + break; + if (src_ind & 1) + len += sysfs_emit_at(buf, len, "%s\n", freqlimit_srcnames[index]); + src_ind >>= 1; + } + return len; +} + +static int init_acpi(struct device *dev) +{ + u16 sock_ind; + int ret; + + ret = hsmp_get_uid(dev, &sock_ind); + if (ret) + return ret; + if (sock_ind >= hsmp_pdev->num_sockets) + return -EINVAL; + + ret = hsmp_parse_acpi_table(dev, sock_ind); + if (ret) { + 
dev_err(dev, "Failed to parse ACPI table\n"); + return ret; + } + + /* Test the hsmp interface */ + ret = hsmp_test(sock_ind, 0xDEADBEEF); + if (ret) { + dev_err(dev, "HSMP test message failed on Fam:%x model:%x\n", + boot_cpu_data.x86, boot_cpu_data.x86_model); + dev_err(dev, "Is HSMP disabled in BIOS ?\n"); + return ret; + } + + ret = hsmp_cache_proto_ver(sock_ind); + if (ret) { + dev_err(dev, "Failed to read HSMP protocol version\n"); + return ret; + } + + if (hsmp_pdev->proto_ver == HSMP_PROTO_VER6) { + ret = hsmp_get_tbl_dram_base(sock_ind); + if (ret) + dev_info(dev, "Failed to init metric table\n"); + } + + ret = hsmp_create_sensor(dev, sock_ind); + if (ret) + dev_info(dev, "Failed to register HSMP sensors with hwmon\n"); + + dev_set_drvdata(dev, &hsmp_pdev->sock[sock_ind]); + + return 0; +} + +static const struct bin_attribute hsmp_metric_tbl_attr = { + .attr = { .name = HSMP_METRICS_TABLE_NAME, .mode = 0444}, + .read = hsmp_metric_tbl_acpi_read, + .size = sizeof(struct hsmp_metric_table), +}; + +static const struct bin_attribute *hsmp_attr_list[] = { + &hsmp_metric_tbl_attr, + NULL +}; + +#define HSMP_DEV_ATTR(_name, _msg_id, _show, _mode) \ +static struct hsmp_sys_attr hattr_##_name = { \ + .dattr = __ATTR(_name, _mode, _show, NULL), \ + .msg_id = _msg_id, \ +} + +HSMP_DEV_ATTR(c0_residency_input, HSMP_GET_C0_PERCENT, hsmp_msg_resp32_show, 0444); +HSMP_DEV_ATTR(prochot_status, HSMP_GET_PROC_HOT, hsmp_msg_resp32_show, 0444); +HSMP_DEV_ATTR(smu_fw_version, HSMP_GET_SMU_VER, hsmp_msg_fw_ver_show, 0444); +HSMP_DEV_ATTR(protocol_version, HSMP_GET_PROTO_VER, hsmp_msg_resp32_show, 0444); +HSMP_DEV_ATTR(cclk_freq_limit_input, HSMP_GET_CCLK_THROTTLE_LIMIT, hsmp_msg_resp32_show, 0444); +HSMP_DEV_ATTR(ddr_max_bw, HSMP_GET_DDR_BANDWIDTH, hsmp_ddr_max_bw_show, 0444); +HSMP_DEV_ATTR(ddr_utilised_bw_input, HSMP_GET_DDR_BANDWIDTH, hsmp_ddr_util_bw_show, 0444); +HSMP_DEV_ATTR(ddr_utilised_bw_perc_input, HSMP_GET_DDR_BANDWIDTH, hsmp_ddr_util_bw_perc_show, 0444); +HSMP_DEV_ATTR(fclk_input, HSMP_GET_FCLK_MCLK, hsmp_fclk_show, 0444); +HSMP_DEV_ATTR(mclk_input, HSMP_GET_FCLK_MCLK, hsmp_mclk_show, 0444); +HSMP_DEV_ATTR(clk_fmax, HSMP_GET_SOCKET_FMAX_FMIN, hsmp_clk_fmax_show, 0444); +HSMP_DEV_ATTR(clk_fmin, HSMP_GET_SOCKET_FMAX_FMIN, hsmp_clk_fmin_show, 0444); +HSMP_DEV_ATTR(pwr_current_active_freq_limit, HSMP_GET_SOCKET_FREQ_LIMIT, + hsmp_freq_limit_show, 0444); +HSMP_DEV_ATTR(pwr_current_active_freq_limit_source, HSMP_GET_SOCKET_FREQ_LIMIT, + hsmp_freq_limit_source_show, 0444); + +static struct attribute *hsmp_dev_attr_list[] = { + &hattr_c0_residency_input.dattr.attr, + &hattr_prochot_status.dattr.attr, + &hattr_smu_fw_version.dattr.attr, + &hattr_protocol_version.dattr.attr, + &hattr_cclk_freq_limit_input.dattr.attr, + &hattr_ddr_max_bw.dattr.attr, + &hattr_ddr_utilised_bw_input.dattr.attr, + &hattr_ddr_utilised_bw_perc_input.dattr.attr, + &hattr_fclk_input.dattr.attr, + &hattr_mclk_input.dattr.attr, + &hattr_clk_fmax.dattr.attr, + &hattr_clk_fmin.dattr.attr, + &hattr_pwr_current_active_freq_limit.dattr.attr, + &hattr_pwr_current_active_freq_limit_source.dattr.attr, + NULL +}; + +static const struct attribute_group hsmp_attr_grp = { + .bin_attrs = hsmp_attr_list, + .attrs = hsmp_dev_attr_list, + .is_bin_visible = hsmp_is_sock_attr_visible, + .is_visible = hsmp_is_sock_dev_attr_visible, +}; + +static const struct attribute_group *hsmp_groups[] = { + &hsmp_attr_grp, + NULL +}; + +static const struct acpi_device_id amd_hsmp_acpi_ids[] = { + {ACPI_HSMP_DEVICE_HID, 0}, + {} +}; +MODULE_DEVICE_TABLE(acpi, 
amd_hsmp_acpi_ids);
+
+static int hsmp_acpi_probe(struct platform_device *pdev)
+{
+	int ret;
+
+	hsmp_pdev = get_hsmp_pdev();
+	if (!hsmp_pdev)
+		return -ENOMEM;
+
+	if (!hsmp_pdev->is_probed) {
+		hsmp_pdev->num_sockets = topology_max_packages();
+		if (!hsmp_pdev->num_sockets) {
+			dev_err(&pdev->dev, "No CPU sockets detected\n");
+			return -ENODEV;
+		}
+
+		hsmp_pdev->sock = devm_kcalloc(&pdev->dev, hsmp_pdev->num_sockets,
+					       sizeof(*hsmp_pdev->sock),
+					       GFP_KERNEL);
+		if (!hsmp_pdev->sock)
+			return -ENOMEM;
+	}
+
+	ret = init_acpi(&pdev->dev);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to initialize HSMP interface\n");
+		return ret;
+	}
+
+	if (!hsmp_pdev->is_probed) {
+		ret = hsmp_misc_register(&pdev->dev);
+		if (ret) {
+			dev_err(&pdev->dev, "Failed to register misc device\n");
+			return ret;
+		}
+		hsmp_pdev->is_probed = true;
+		dev_dbg(&pdev->dev, "AMD HSMP ACPI is probed successfully\n");
+	}
+
+	return 0;
+}
+
+static void hsmp_acpi_remove(struct platform_device *pdev)
+{
+	/*
+	 * We register only one misc_device even on multi-socket system.
+	 * So, deregister should happen only once.
+	 */
+	if (hsmp_pdev->is_probed) {
+		hsmp_misc_deregister();
+		hsmp_pdev->is_probed = false;
+	}
+}
+
+static struct platform_driver amd_hsmp_driver = {
+	.probe = hsmp_acpi_probe,
+	.remove = hsmp_acpi_remove,
+	.driver = {
+		.name = DRIVER_NAME,
+		.acpi_match_table = amd_hsmp_acpi_ids,
+		.dev_groups = hsmp_groups,
+	},
+};
+
+module_platform_driver(amd_hsmp_driver);
+
+MODULE_IMPORT_NS("AMD_HSMP");
+MODULE_DESCRIPTION("AMD HSMP Platform Interface Driver");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/amd/hsmp/hsmp.c b/drivers/platform/x86/amd/hsmp/hsmp.c
new file mode 100644
index 000000000000..19f82c1d3090
--- /dev/null
+++ b/drivers/platform/x86/amd/hsmp/hsmp.c
@@ -0,0 +1,466 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD HSMP Platform Driver
+ * Copyright (c) 2022, AMD.
+ * All Rights Reserved.
+ *
+ * This file provides a device implementation for the HSMP interface.
+ */
+
+#include <asm/amd/hsmp.h>
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/semaphore.h>
+#include <linux/sysfs.h>
+
+#include "hsmp.h"
+
+/* HSMP Status / Error codes */
+#define HSMP_STATUS_NOT_READY 0x00
+#define HSMP_STATUS_OK 0x01
+#define HSMP_ERR_INVALID_MSG 0xFE
+#define HSMP_ERR_INVALID_INPUT 0xFF
+#define HSMP_ERR_PREREQ_NOT_SATISFIED 0xFD
+#define HSMP_ERR_SMU_BUSY 0xFC
+
+/* Timeout in milliseconds */
+#define HSMP_MSG_TIMEOUT 100
+#define HSMP_SHORT_SLEEP 1
+
+#define HSMP_WR true
+#define HSMP_RD false
+
+/*
+ * When the same message number is used for both GET and SET operations,
+ * bit 31 indicates whether it is a SET or a GET operation.
+ */
+#define CHECK_GET_BIT BIT(31)
+
+static struct hsmp_plat_device hsmp_pdev;
+
+/*
+ * Send a message to the HSMP port via PCI-e config space registers
+ * or by writing to MMIO space.
+ *
+ * The caller is expected to zero out any unused arguments.
+ * If a response is expected, the number of response words should be greater than 0.
+ *
+ * Returns 0 for success and populates the requested number of arguments.
+ * Returns a negative error code for failure.
+ */
+static int __hsmp_send_message(struct hsmp_socket *sock, struct hsmp_message *msg)
+{
+	struct hsmp_mbaddr_info *mbinfo;
+	unsigned long timeout, short_sleep;
+	u32 mbox_status;
+	u32 index;
+	int ret;
+
+	mbinfo = &sock->mbinfo;
+
+	/* Clear the status register */
+	mbox_status = HSMP_STATUS_NOT_READY;
+	ret = sock->amd_hsmp_rdwr(sock, mbinfo->msg_resp_off, &mbox_status, HSMP_WR);
+	if (ret) {
+		dev_err(sock->dev, "Error %d clearing mailbox status register\n", ret);
+		return ret;
+	}
+
+	index = 0;
+	/* Write any message arguments */
+	while (index < msg->num_args) {
+		ret = sock->amd_hsmp_rdwr(sock, mbinfo->msg_arg_off + (index << 2),
+					  &msg->args[index], HSMP_WR);
+		if (ret) {
+			dev_err(sock->dev, "Error %d writing message argument %d\n", ret, index);
+			return ret;
+		}
+		index++;
+	}
+
+	/* Write the message ID which starts the operation */
+	ret = sock->amd_hsmp_rdwr(sock, mbinfo->msg_id_off, &msg->msg_id, HSMP_WR);
+	if (ret) {
+		dev_err(sock->dev, "Error %d writing message ID %u\n", ret, msg->msg_id);
+		return ret;
+	}
+
+	/*
+	 * Depending on when the trigger write completes relative to the SMU
+	 * firmware 1 ms cycle, the operation may take from tens of us to 1 ms
+	 * to complete. Some operations may take more. Therefore we will try
+	 * a few short duration sleeps and switch to long sleeps if we don't
+	 * succeed quickly.
+	 */
+	short_sleep = jiffies + msecs_to_jiffies(HSMP_SHORT_SLEEP);
+	timeout = jiffies + msecs_to_jiffies(HSMP_MSG_TIMEOUT);
+
+	while (true) {
+		ret = sock->amd_hsmp_rdwr(sock, mbinfo->msg_resp_off, &mbox_status, HSMP_RD);
+		if (ret) {
+			dev_err(sock->dev, "Error %d reading mailbox status\n", ret);
+			return ret;
+		}
+
+		if (mbox_status != HSMP_STATUS_NOT_READY)
+			break;
+
+		if (!time_before(jiffies, timeout))
+			break;
+
+		if (time_before(jiffies, short_sleep))
+			usleep_range(50, 100);
+		else
+			usleep_range(1000, 2000);
+	}
+
+	if (unlikely(mbox_status == HSMP_STATUS_NOT_READY)) {
+		dev_err(sock->dev, "Message ID 0x%X failure : SMU timeout (status = 0x%X)\n",
+			msg->msg_id, mbox_status);
+		return -ETIMEDOUT;
+	} else if (unlikely(mbox_status == HSMP_ERR_INVALID_MSG)) {
+		dev_err(sock->dev, "Message ID 0x%X failure : Invalid message (status = 0x%X)\n",
+			msg->msg_id, mbox_status);
+		return -ENOMSG;
+	} else if (unlikely(mbox_status == HSMP_ERR_INVALID_INPUT)) {
+		dev_err(sock->dev, "Message ID 0x%X failure : Invalid arguments (status = 0x%X)\n",
+			msg->msg_id, mbox_status);
+		return -EINVAL;
+	} else if (unlikely(mbox_status == HSMP_ERR_PREREQ_NOT_SATISFIED)) {
+		dev_err(sock->dev, "Message ID 0x%X failure : Prerequisite not satisfied (status = 0x%X)\n",
+			msg->msg_id, mbox_status);
+		return -EREMOTEIO;
+	} else if (unlikely(mbox_status == HSMP_ERR_SMU_BUSY)) {
+		dev_err(sock->dev, "Message ID 0x%X failure : SMU BUSY (status = 0x%X)\n",
+			msg->msg_id, mbox_status);
+		return -EBUSY;
+	} else if (unlikely(mbox_status != HSMP_STATUS_OK)) {
+		dev_err(sock->dev, "Message ID 0x%X unknown failure (status = 0x%X)\n",
+			msg->msg_id, mbox_status);
+		return -EIO;
+	}
+
+	/*
+	 * SMU has responded OK. Read response data.
+	 * SMU reads the input arguments from eight 32 bit registers starting
+	 * from SMN_HSMP_MSG_DATA and writes the response data to the same
+	 * SMN_HSMP_MSG_DATA address.
+	 * We copy the response data, if any, back to the args[].
+ */ + index = 0; + while (index < msg->response_sz) { + ret = sock->amd_hsmp_rdwr(sock, mbinfo->msg_arg_off + (index << 2), + &msg->args[index], HSMP_RD); + if (ret) { + dev_err(sock->dev, "Error %d reading response %u for message ID:%u\n", + ret, index, msg->msg_id); + break; + } + index++; + } + + return ret; +} + +static int validate_message(struct hsmp_message *msg) +{ + /* msg_id against valid range of message IDs */ + if (msg->msg_id < HSMP_TEST || msg->msg_id >= HSMP_MSG_ID_MAX) + return -ENOMSG; + + /* msg_id is a reserved message ID */ + if (hsmp_msg_desc_table[msg->msg_id].type == HSMP_RSVD) + return -ENOMSG; + + /* + * num_args passed by user should match the num_args specified in + * message description table. + */ + if (msg->num_args != hsmp_msg_desc_table[msg->msg_id].num_args) + return -EINVAL; + + /* + * Some older HSMP SET messages are updated to add GET in the same message. + * In these messages, GET returns the current value and SET also returns + * the successfully set value. To support this GET and SET in same message + * while maintaining backward compatibility for the HSMP users, + * hsmp_msg_desc_table[] indicates only maximum allowed response_sz. + */ + if (hsmp_msg_desc_table[msg->msg_id].type == HSMP_SET_GET) { + if (msg->response_sz > hsmp_msg_desc_table[msg->msg_id].response_sz) + return -EINVAL; + } else { + /* only HSMP_SET or HSMP_GET messages go through this strict check */ + if (msg->response_sz != hsmp_msg_desc_table[msg->msg_id].response_sz) + return -EINVAL; + } + return 0; +} + +int hsmp_send_message(struct hsmp_message *msg) +{ + struct hsmp_socket *sock; + int ret; + + if (!msg) + return -EINVAL; + ret = validate_message(msg); + if (ret) + return ret; + + if (!hsmp_pdev.sock || msg->sock_ind >= hsmp_pdev.num_sockets) + return -ENODEV; + sock = &hsmp_pdev.sock[msg->sock_ind]; + + ret = down_interruptible(&sock->hsmp_sem); + if (ret < 0) + return ret; + + ret = __hsmp_send_message(sock, msg); + + up(&sock->hsmp_sem); + + return ret; +} +EXPORT_SYMBOL_NS_GPL(hsmp_send_message, "AMD_HSMP"); + +int hsmp_msg_get_nargs(u16 sock_ind, u32 msg_id, u32 *data, u8 num_args) +{ + struct hsmp_message msg = {}; + unsigned int i; + int ret; + + if (!data) + return -EINVAL; + msg.msg_id = msg_id; + msg.sock_ind = sock_ind; + msg.response_sz = num_args; + + ret = hsmp_send_message(&msg); + if (ret) + return ret; + + for (i = 0; i < num_args; i++) + data[i] = msg.args[i]; + + return 0; +} +EXPORT_SYMBOL_NS_GPL(hsmp_msg_get_nargs, "AMD_HSMP"); + +int hsmp_test(u16 sock_ind, u32 value) +{ + struct hsmp_message msg = { 0 }; + int ret; + + /* + * Test the hsmp port by performing TEST command. The test message + * takes one argument and returns the value of that argument + 1. 
+ */ + msg.msg_id = HSMP_TEST; + msg.num_args = 1; + msg.response_sz = 1; + msg.args[0] = value; + msg.sock_ind = sock_ind; + + ret = hsmp_send_message(&msg); + if (ret) + return ret; + + /* Check the response value */ + if (msg.args[0] != (value + 1)) { + dev_err(hsmp_pdev.sock[sock_ind].dev, + "Socket %d test message failed, Expected 0x%08X, received 0x%08X\n", + sock_ind, (value + 1), msg.args[0]); + return -EBADE; + } + + return ret; +} +EXPORT_SYMBOL_NS_GPL(hsmp_test, "AMD_HSMP"); + +static bool is_get_msg(struct hsmp_message *msg) +{ + if (hsmp_msg_desc_table[msg->msg_id].type == HSMP_GET) + return true; + + if (hsmp_msg_desc_table[msg->msg_id].type == HSMP_SET_GET && + (msg->args[0] & CHECK_GET_BIT)) + return true; + + return false; +} + +long hsmp_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) +{ + int __user *arguser = (int __user *)arg; + struct hsmp_message msg = { 0 }; + int ret; + + if (copy_struct_from_user(&msg, sizeof(msg), arguser, sizeof(struct hsmp_message))) + return -EFAULT; + + /* + * Check msg_id is within the range of supported msg ids + * i.e within the array bounds of hsmp_msg_desc_table + */ + if (msg.msg_id < HSMP_TEST || msg.msg_id >= HSMP_MSG_ID_MAX) + return -ENOMSG; + + switch (fp->f_mode & (FMODE_WRITE | FMODE_READ)) { + case FMODE_WRITE: + /* + * Device is opened in O_WRONLY mode + * Execute only set/configure commands + */ + if (is_get_msg(&msg)) + return -EPERM; + break; + case FMODE_READ: + /* + * Device is opened in O_RDONLY mode + * Execute only get/monitor commands + */ + if (!is_get_msg(&msg)) + return -EPERM; + break; + case FMODE_READ | FMODE_WRITE: + /* + * Device is opened in O_RDWR mode + * Execute both get/monitor and set/configure commands + */ + break; + default: + return -EPERM; + } + + ret = hsmp_send_message(&msg); + if (ret) + return ret; + + if (hsmp_msg_desc_table[msg.msg_id].response_sz > 0) { + /* Copy results back to user for get/monitor commands */ + if (copy_to_user(arguser, &msg, sizeof(struct hsmp_message))) + return -EFAULT; + } + + return 0; +} + +ssize_t hsmp_metric_tbl_read(struct hsmp_socket *sock, char *buf, size_t size) +{ + struct hsmp_message msg = { 0 }; + int ret; + + if (!sock || !buf) + return -EINVAL; + + if (!sock->metric_tbl_addr) { + dev_err(sock->dev, "Metrics table address not available\n"); + return -ENOMEM; + } + + /* Do not support lseek(), also don't allow more than the size of metric table */ + if (size != sizeof(struct hsmp_metric_table)) { + dev_err(sock->dev, "Wrong buffer size\n"); + return -EINVAL; + } + + msg.msg_id = HSMP_GET_METRIC_TABLE; + msg.sock_ind = sock->sock_ind; + + ret = hsmp_send_message(&msg); + if (ret) + return ret; + memcpy_fromio(buf, sock->metric_tbl_addr, size); + + return size; +} +EXPORT_SYMBOL_NS_GPL(hsmp_metric_tbl_read, "AMD_HSMP"); + +int hsmp_get_tbl_dram_base(u16 sock_ind) +{ + struct hsmp_socket *sock = &hsmp_pdev.sock[sock_ind]; + struct hsmp_message msg = { 0 }; + phys_addr_t dram_addr; + int ret; + + msg.sock_ind = sock_ind; + msg.response_sz = hsmp_msg_desc_table[HSMP_GET_METRIC_TABLE_DRAM_ADDR].response_sz; + msg.msg_id = HSMP_GET_METRIC_TABLE_DRAM_ADDR; + + ret = hsmp_send_message(&msg); + if (ret) + return ret; + + /* + * calculate the metric table DRAM address from lower and upper 32 bits + * sent from SMU and ioremap it to virtual address. 
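+	 * As an illustration (values hypothetical): args[0] = 0x89AB0000
+	 * and args[1] = 0x1 combine into the 64-bit address 0x189AB0000.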
+ */ + dram_addr = msg.args[0] | ((u64)(msg.args[1]) << 32); + if (!dram_addr) { + dev_err(sock->dev, "Invalid DRAM address for metric table\n"); + return -ENOMEM; + } + sock->metric_tbl_addr = devm_ioremap(sock->dev, dram_addr, + sizeof(struct hsmp_metric_table)); + if (!sock->metric_tbl_addr) { + dev_err(sock->dev, "Failed to ioremap metric table addr\n"); + return -ENOMEM; + } + return 0; +} +EXPORT_SYMBOL_NS_GPL(hsmp_get_tbl_dram_base, "AMD_HSMP"); + +int hsmp_cache_proto_ver(u16 sock_ind) +{ + struct hsmp_message msg = { 0 }; + int ret; + + msg.msg_id = HSMP_GET_PROTO_VER; + msg.sock_ind = sock_ind; + msg.response_sz = hsmp_msg_desc_table[HSMP_GET_PROTO_VER].response_sz; + + ret = hsmp_send_message(&msg); + if (!ret) + hsmp_pdev.proto_ver = msg.args[0]; + + return ret; +} +EXPORT_SYMBOL_NS_GPL(hsmp_cache_proto_ver, "AMD_HSMP"); + +static const struct file_operations hsmp_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = hsmp_ioctl, + .compat_ioctl = hsmp_ioctl, +}; + +int hsmp_misc_register(struct device *dev) +{ + hsmp_pdev.mdev.name = HSMP_CDEV_NAME; + hsmp_pdev.mdev.minor = MISC_DYNAMIC_MINOR; + hsmp_pdev.mdev.fops = &hsmp_fops; + hsmp_pdev.mdev.parent = dev; + hsmp_pdev.mdev.nodename = HSMP_DEVNODE_NAME; + hsmp_pdev.mdev.mode = 0644; + + return misc_register(&hsmp_pdev.mdev); +} +EXPORT_SYMBOL_NS_GPL(hsmp_misc_register, "AMD_HSMP"); + +void hsmp_misc_deregister(void) +{ + misc_deregister(&hsmp_pdev.mdev); +} +EXPORT_SYMBOL_NS_GPL(hsmp_misc_deregister, "AMD_HSMP"); + +struct hsmp_plat_device *get_hsmp_pdev(void) +{ + return &hsmp_pdev; +} +EXPORT_SYMBOL_NS_GPL(get_hsmp_pdev, "AMD_HSMP"); + +MODULE_DESCRIPTION("AMD HSMP Common driver"); +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/amd/hsmp/hsmp.h b/drivers/platform/x86/amd/hsmp/hsmp.h new file mode 100644 index 000000000000..0509a442eaae --- /dev/null +++ b/drivers/platform/x86/amd/hsmp/hsmp.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * AMD HSMP Platform Driver + * Copyright (c) 2024, AMD. + * All Rights Reserved. 
+ * + * Header file for HSMP driver + */ + +#ifndef HSMP_H +#define HSMP_H + +#include <linux/compiler_types.h> +#include <linux/device.h> +#include <linux/hwmon.h> +#include <linux/kconfig.h> +#include <linux/miscdevice.h> +#include <linux/pci.h> +#include <linux/semaphore.h> +#include <linux/sysfs.h> + +#define HSMP_METRICS_TABLE_NAME "metrics_bin" + +#define HSMP_ATTR_GRP_NAME_SIZE 10 + +#define HSMP_CDEV_NAME "hsmp_cdev" +#define HSMP_DEVNODE_NAME "hsmp" +#define ACPI_HSMP_DEVICE_HID "AMDI0097" + +#define DRIVER_VERSION "2.5" + +struct hsmp_mbaddr_info { + u32 base_addr; + u32 msg_id_off; + u32 msg_resp_off; + u32 msg_arg_off; + u32 size; +}; + +struct hsmp_socket { + struct bin_attribute hsmp_attr; + struct hsmp_mbaddr_info mbinfo; + void __iomem *metric_tbl_addr; + void __iomem *virt_base_addr; + struct semaphore hsmp_sem; + char name[HSMP_ATTR_GRP_NAME_SIZE]; + struct device *dev; + u16 sock_ind; + int (*amd_hsmp_rdwr)(struct hsmp_socket *sock, u32 off, u32 *val, bool rw); +}; + +struct hsmp_plat_device { + struct miscdevice mdev; + struct hsmp_socket *sock; + u32 proto_ver; + u16 num_sockets; + bool is_probed; +}; + +int hsmp_cache_proto_ver(u16 sock_ind); +int hsmp_test(u16 sock_ind, u32 value); +long hsmp_ioctl(struct file *fp, unsigned int cmd, unsigned long arg); +void hsmp_misc_deregister(void); +int hsmp_misc_register(struct device *dev); +int hsmp_get_tbl_dram_base(u16 sock_ind); +ssize_t hsmp_metric_tbl_read(struct hsmp_socket *sock, char *buf, size_t size); +struct hsmp_plat_device *get_hsmp_pdev(void); +#if IS_ENABLED(CONFIG_HWMON) +int hsmp_create_sensor(struct device *dev, u16 sock_ind); +#else +static inline int hsmp_create_sensor(struct device *dev, u16 sock_ind) { return 0; } +#endif +int hsmp_msg_get_nargs(u16 sock_ind, u32 msg_id, u32 *data, u8 num_args); +#endif /* HSMP_H */ diff --git a/drivers/platform/x86/amd/hsmp/hwmon.c b/drivers/platform/x86/amd/hsmp/hwmon.c new file mode 100644 index 000000000000..0cc9a742497f --- /dev/null +++ b/drivers/platform/x86/amd/hsmp/hwmon.c @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * AMD HSMP hwmon support + * Copyright (c) 2025, AMD. + * All Rights Reserved. + * + * This file provides hwmon implementation for HSMP interface. 
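+ *
+ * The registered chip typically appears as power1_input (RO), power1_cap
+ * (RW) and power1_cap_max (RO) under /sys/class/hwmon/hwmonN/, with values
+ * in microwatts per the hwmon sysfs ABI; the SMU works in milliwatts,
+ * hence the MICROWATT_PER_MILLIWATT scaling below.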
+ */
+
+#include <asm/amd/hsmp.h>
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#include "hsmp.h"
+
+#define HSMP_HWMON_NAME		"amd_hsmp_hwmon"
+
+static int hsmp_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
+			    u32 attr, int channel, long val)
+{
+	u16 sock_ind = (uintptr_t)dev_get_drvdata(dev);
+	struct hsmp_message msg = {};
+
+	if (type != hwmon_power)
+		return -EOPNOTSUPP;
+
+	if (attr != hwmon_power_cap)
+		return -EOPNOTSUPP;
+
+	msg.num_args = 1;
+	msg.args[0] = val / MICROWATT_PER_MILLIWATT;
+	msg.msg_id = HSMP_SET_SOCKET_POWER_LIMIT;
+	msg.sock_ind = sock_ind;
+	return hsmp_send_message(&msg);
+}
+
+static int hsmp_hwmon_read(struct device *dev,
+			   enum hwmon_sensor_types type,
+			   u32 attr, int channel, long *val)
+{
+	u16 sock_ind = (uintptr_t)dev_get_drvdata(dev);
+	struct hsmp_message msg = {};
+	int ret;
+
+	if (type != hwmon_power)
+		return -EOPNOTSUPP;
+
+	msg.sock_ind = sock_ind;
+	msg.response_sz = 1;
+
+	switch (attr) {
+	case hwmon_power_input:
+		msg.msg_id = HSMP_GET_SOCKET_POWER;
+		break;
+	case hwmon_power_cap:
+		msg.msg_id = HSMP_GET_SOCKET_POWER_LIMIT;
+		break;
+	case hwmon_power_cap_max:
+		msg.msg_id = HSMP_GET_SOCKET_POWER_LIMIT_MAX;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	ret = hsmp_send_message(&msg);
+	if (!ret)
+		*val = msg.args[0] * MICROWATT_PER_MILLIWATT;
+
+	return ret;
+}
+
+static umode_t hsmp_hwmon_is_visible(const void *data,
+				     enum hwmon_sensor_types type,
+				     u32 attr, int channel)
+{
+	if (type != hwmon_power)
+		return 0;
+
+	switch (attr) {
+	case hwmon_power_input:
+		return 0444;
+	case hwmon_power_cap:
+		return 0644;
+	case hwmon_power_cap_max:
+		return 0444;
+	default:
+		return 0;
+	}
+}
+
+static const struct hwmon_ops hsmp_hwmon_ops = {
+	.read = hsmp_hwmon_read,
+	.is_visible = hsmp_hwmon_is_visible,
+	.write = hsmp_hwmon_write,
+};
+
+static const struct hwmon_channel_info * const hsmp_info[] = {
+	HWMON_CHANNEL_INFO(power, HWMON_P_INPUT | HWMON_P_CAP | HWMON_P_CAP_MAX),
+	NULL
+};
+
+static const struct hwmon_chip_info hsmp_chip_info = {
+	.ops = &hsmp_hwmon_ops,
+	.info = hsmp_info,
+};
+
+int hsmp_create_sensor(struct device *dev, u16 sock_ind)
+{
+	struct device *hwmon_dev;
+
+	hwmon_dev = devm_hwmon_device_register_with_info(dev, HSMP_HWMON_NAME,
+							 (void *)(uintptr_t)sock_ind,
+							 &hsmp_chip_info,
+							 NULL);
+	return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+EXPORT_SYMBOL_NS(hsmp_create_sensor, "AMD_HSMP");
diff --git a/drivers/platform/x86/amd/hsmp/plat.c b/drivers/platform/x86/amd/hsmp/plat.c
new file mode 100644
index 000000000000..e07f68575055
--- /dev/null
+++ b/drivers/platform/x86/amd/hsmp/plat.c
@@ -0,0 +1,350 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD HSMP Platform Driver
+ * Copyright (c) 2024, AMD.
+ * All Rights Reserved.
+ *
+ * This file provides platform device implementations.
+ */
+
+#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
+
+#include <asm/amd/hsmp.h>
+
+#include <linux/acpi.h>
+#include <linux/build_bug.h>
+#include <linux/device.h>
+#include <linux/dev_printk.h>
+#include <linux/kconfig.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/sysfs.h>
+
+#include <asm/amd/node.h>
+
+#include "hsmp.h"
+
+#define DRIVER_NAME		"amd_hsmp"
+
+/*
+ * To access a specific HSMP mailbox register, software writes the SMN address
+ * of the HSMP mailbox register into the SMN_INDEX register, and reads/writes
+ * the SMN_DATA reg.
+ * Below are required SMN address for HSMP Mailbox register offsets in SMU address space + */ +#define SMN_HSMP_BASE 0x3B00000 +#define SMN_HSMP_MSG_ID 0x0010534 +#define SMN_HSMP_MSG_ID_F1A_M0H 0x0010934 +#define SMN_HSMP_MSG_RESP 0x0010980 +#define SMN_HSMP_MSG_DATA 0x00109E0 + +static struct hsmp_plat_device *hsmp_pdev; + +static int amd_hsmp_pci_rdwr(struct hsmp_socket *sock, u32 offset, + u32 *value, bool write) +{ + return amd_smn_hsmp_rdwr(sock->sock_ind, sock->mbinfo.base_addr + offset, value, write); +} + +static ssize_t hsmp_metric_tbl_plat_read(struct file *filp, struct kobject *kobj, + const struct bin_attribute *bin_attr, char *buf, + loff_t off, size_t count) +{ + struct hsmp_socket *sock; + u16 sock_ind; + + sock_ind = (uintptr_t)bin_attr->private; + if (sock_ind >= hsmp_pdev->num_sockets) + return -EINVAL; + + sock = &hsmp_pdev->sock[sock_ind]; + + return hsmp_metric_tbl_read(sock, buf, count); +} + +static umode_t hsmp_is_sock_attr_visible(struct kobject *kobj, + const struct bin_attribute *battr, int id) +{ + u16 sock_ind; + + sock_ind = (uintptr_t)battr->private; + + if (id == 0 && sock_ind >= hsmp_pdev->num_sockets) + return SYSFS_GROUP_INVISIBLE; + + if (hsmp_pdev->proto_ver == HSMP_PROTO_VER6) + return battr->attr.mode; + + return 0; +} + +/* + * AMD supports maximum of 8 sockets in a system. + * Static array of 8 + 1(for NULL) elements is created below + * to create sysfs groups for sockets. + * is_bin_visible function is used to show / hide the necessary groups. + * + * Validate the maximum number against MAX_AMD_NUM_NODES. If this changes, + * then the attributes and groups below must be adjusted. + */ +static_assert(MAX_AMD_NUM_NODES == 8); + +#define HSMP_BIN_ATTR(index, _list) \ +static const struct bin_attribute attr##index = { \ + .attr = { .name = HSMP_METRICS_TABLE_NAME, .mode = 0444}, \ + .private = (void *)index, \ + .read = hsmp_metric_tbl_plat_read, \ + .size = sizeof(struct hsmp_metric_table), \ +}; \ +static const struct bin_attribute _list[] = { \ + &attr##index, \ + NULL \ +} + +HSMP_BIN_ATTR(0, *sock0_attr_list); +HSMP_BIN_ATTR(1, *sock1_attr_list); +HSMP_BIN_ATTR(2, *sock2_attr_list); +HSMP_BIN_ATTR(3, *sock3_attr_list); +HSMP_BIN_ATTR(4, *sock4_attr_list); +HSMP_BIN_ATTR(5, *sock5_attr_list); +HSMP_BIN_ATTR(6, *sock6_attr_list); +HSMP_BIN_ATTR(7, *sock7_attr_list); + +#define HSMP_BIN_ATTR_GRP(index, _list, _name) \ +static const struct attribute_group sock##index##_attr_grp = { \ + .bin_attrs = _list, \ + .is_bin_visible = hsmp_is_sock_attr_visible, \ + .name = #_name, \ +} + +HSMP_BIN_ATTR_GRP(0, sock0_attr_list, socket0); +HSMP_BIN_ATTR_GRP(1, sock1_attr_list, socket1); +HSMP_BIN_ATTR_GRP(2, sock2_attr_list, socket2); +HSMP_BIN_ATTR_GRP(3, sock3_attr_list, socket3); +HSMP_BIN_ATTR_GRP(4, sock4_attr_list, socket4); +HSMP_BIN_ATTR_GRP(5, sock5_attr_list, socket5); +HSMP_BIN_ATTR_GRP(6, sock6_attr_list, socket6); +HSMP_BIN_ATTR_GRP(7, sock7_attr_list, socket7); + +static const struct attribute_group *hsmp_groups[] = { + &sock0_attr_grp, + &sock1_attr_grp, + &sock2_attr_grp, + &sock3_attr_grp, + &sock4_attr_grp, + &sock5_attr_grp, + &sock6_attr_grp, + &sock7_attr_grp, + NULL +}; + +static inline bool is_f1a_m0h(void) +{ + if (boot_cpu_data.x86 == 0x1A && boot_cpu_data.x86_model <= 0x0F) + return true; + + return false; +} + +static int init_platform_device(struct device *dev) +{ + struct hsmp_socket *sock; + int ret, i; + + for (i = 0; i < hsmp_pdev->num_sockets; i++) { + sock = &hsmp_pdev->sock[i]; + sock->sock_ind = i; + sock->dev = dev; + 
sock->mbinfo.base_addr = SMN_HSMP_BASE; + sock->amd_hsmp_rdwr = amd_hsmp_pci_rdwr; + + /* + * This is a transitional change from non-ACPI to ACPI, only + * family 0x1A, model 0x00 platform is supported for both ACPI and non-ACPI. + */ + if (is_f1a_m0h()) + sock->mbinfo.msg_id_off = SMN_HSMP_MSG_ID_F1A_M0H; + else + sock->mbinfo.msg_id_off = SMN_HSMP_MSG_ID; + + sock->mbinfo.msg_resp_off = SMN_HSMP_MSG_RESP; + sock->mbinfo.msg_arg_off = SMN_HSMP_MSG_DATA; + sema_init(&sock->hsmp_sem, 1); + + /* Test the hsmp interface on each socket */ + ret = hsmp_test(i, 0xDEADBEEF); + if (ret) { + dev_err(dev, "HSMP test message failed on Fam:%x model:%x\n", + boot_cpu_data.x86, boot_cpu_data.x86_model); + dev_err(dev, "Is HSMP disabled in BIOS ?\n"); + return ret; + } + + ret = hsmp_cache_proto_ver(i); + if (ret) { + dev_err(dev, "Failed to read HSMP protocol version\n"); + return ret; + } + + if (hsmp_pdev->proto_ver == HSMP_PROTO_VER6) { + ret = hsmp_get_tbl_dram_base(i); + if (ret) + dev_info(dev, "Failed to init metric table\n"); + } + + /* Register with hwmon interface for reporting power */ + ret = hsmp_create_sensor(dev, i); + if (ret) + dev_info(dev, "Failed to register HSMP sensors with hwmon\n"); + } + + return 0; +} + +static int hsmp_pltdrv_probe(struct platform_device *pdev) +{ + int ret; + + hsmp_pdev->sock = devm_kcalloc(&pdev->dev, hsmp_pdev->num_sockets, + sizeof(*hsmp_pdev->sock), + GFP_KERNEL); + if (!hsmp_pdev->sock) + return -ENOMEM; + + ret = init_platform_device(&pdev->dev); + if (ret) { + dev_err(&pdev->dev, "Failed to init HSMP mailbox\n"); + return ret; + } + + ret = hsmp_misc_register(&pdev->dev); + if (ret) { + dev_err(&pdev->dev, "Failed to register misc device\n"); + return ret; + } + + dev_dbg(&pdev->dev, "AMD HSMP is probed successfully\n"); + return 0; +} + +static void hsmp_pltdrv_remove(struct platform_device *pdev) +{ + hsmp_misc_deregister(); +} + +static struct platform_driver amd_hsmp_driver = { + .probe = hsmp_pltdrv_probe, + .remove = hsmp_pltdrv_remove, + .driver = { + .name = DRIVER_NAME, + .dev_groups = hsmp_groups, + }, +}; + +static struct platform_device *amd_hsmp_platdev; + +static int hsmp_plat_dev_register(void) +{ + int ret; + + amd_hsmp_platdev = platform_device_alloc(DRIVER_NAME, PLATFORM_DEVID_NONE); + if (!amd_hsmp_platdev) + return -ENOMEM; + + ret = platform_device_add(amd_hsmp_platdev); + if (ret) + platform_device_put(amd_hsmp_platdev); + + return ret; +} + +/* + * This check is only needed for backward compatibility of previous platforms. + * All new platforms are expected to support ACPI based probing. + */ +static bool legacy_hsmp_support(void) +{ + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) + return false; + + switch (boot_cpu_data.x86) { + case 0x19: + switch (boot_cpu_data.x86_model) { + case 0x00 ... 0x1F: + case 0x30 ... 0x3F: + case 0x90 ... 0x9F: + case 0xA0 ... 0xAF: + return true; + default: + return false; + } + case 0x1A: + switch (boot_cpu_data.x86_model) { + case 0x00 ... 
0x0F:
+			return true;
+		default:
+			return false;
+		}
+	default:
+		return false;
+	}
+
+	return false;
+}
+
+static int __init hsmp_plt_init(void)
+{
+	int ret = -ENODEV;
+
+	if (acpi_dev_present(ACPI_HSMP_DEVICE_HID, NULL, -1)) {
+		if (IS_ENABLED(CONFIG_AMD_HSMP_ACPI))
+			pr_debug("HSMP is supported through ACPI on this platform, please use hsmp_acpi.ko\n");
+		else
+			pr_info("HSMP is supported through ACPI on this platform, please enable AMD_HSMP_ACPI config\n");
+		return -ENODEV;
+	}
+
+	if (!legacy_hsmp_support()) {
+		pr_info("HSMP interface is either disabled or not supported on family:%x model:%x\n",
+			boot_cpu_data.x86, boot_cpu_data.x86_model);
+		return ret;
+	}
+
+	hsmp_pdev = get_hsmp_pdev();
+	if (!hsmp_pdev)
+		return -ENOMEM;
+
+	/*
+	 * amd_num_nodes() returns the number of SMN/DF interfaces present in
+	 * the system; if we have N SMN/DF interfaces, that ideally means N
+	 * sockets.
+	 */
+	hsmp_pdev->num_sockets = amd_num_nodes();
+	if (hsmp_pdev->num_sockets == 0 || hsmp_pdev->num_sockets > MAX_AMD_NUM_NODES) {
+		pr_err("Wrong number of sockets\n");
+		return ret;
+	}
+
+	ret = platform_driver_register(&amd_hsmp_driver);
+	if (ret)
+		return ret;
+
+	ret = hsmp_plat_dev_register();
+	if (ret)
+		platform_driver_unregister(&amd_hsmp_driver);
+
+	return ret;
+}
+
+static void __exit hsmp_plt_exit(void)
+{
+	platform_device_unregister(amd_hsmp_platdev);
+	platform_driver_unregister(&amd_hsmp_driver);
+}
+
+device_initcall(hsmp_plt_init);
+module_exit(hsmp_plt_exit);
+
+MODULE_IMPORT_NS("AMD_HSMP");
+MODULE_DESCRIPTION("AMD HSMP Platform Interface Driver");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/amd/pmc/Kconfig b/drivers/platform/x86/amd/pmc/Kconfig
new file mode 100644
index 000000000000..eeffdafd686e
--- /dev/null
+++ b/drivers/platform/x86/amd/pmc/Kconfig
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# AMD PMC Driver
+#
+
+config AMD_PMC
+	tristate "AMD SoC PMC driver"
+	depends on ACPI && PCI && RTC_CLASS && AMD_NODE
+	depends on SUSPEND
+	select SERIO
+	help
+	  The driver provides support for the AMD Power Management Controller,
+	  which is primarily responsible for the S2Idle transactions driven by
+	  platform firmware running on the SMU. The driver also provides a
+	  debug mechanism to investigate S2Idle transactions and failures.
+
+	  Say Y or M here if you have a notebook powered by an AMD Ryzen CPU/APU.
+
+	  If you choose to compile this driver as a module the module will be
+	  called amd-pmc.
+
+config AMD_MP2_STB
+	bool "AMD SoC MP2 STB function"
+	depends on AMD_PMC
+	default AMD_PMC
+	help
+	  The AMD MP2 STB function provides a data buffer used to log debug
+	  information about system execution during S2Idle suspend/resume.
+	  The STB (Smart Trace Buffer) is a circular buffer that serves as a
+	  low-level log for the SoC, used to debug hangs/stalls during S2Idle
+	  suspend/resume.
+
+	  A debugfs file is created so that a userspace daemon can access the
+	  STB log of the last S2Idle suspend/resume, which can help to debug
+	  hangs/stalls.
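The STB described above is consumed from userspace through debugfs. Below is a
minimal reader sketch (not part of this series); it assumes debugfs is mounted
at /sys/kernel/debug, that the driver was loaded with the enable_stb=1 module
parameter, and that the node is the amd_pmc/stb_read file created by the pmc
code later in this listing. The 16 KiB chunk size is arbitrary; each open() of
the node snapshots the current STB contents, so the read loop simply drains
that snapshot.

/* Hypothetical userspace consumer of the STB debugfs node (illustration only). */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	static const char path[] = "/sys/kernel/debug/amd_pmc/stb_read";
	unsigned char buf[16384];
	ssize_t n;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror(path);	/* typically needs root and enable_stb=1 */
		return EXIT_FAILURE;
	}

	/* Drain the snapshot taken at open() and dump it to stdout. */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);

	close(fd);
	return 0;
}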
diff --git a/drivers/platform/x86/amd/pmc/Makefile b/drivers/platform/x86/amd/pmc/Makefile new file mode 100644 index 000000000000..bb6905c4cae9 --- /dev/null +++ b/drivers/platform/x86/amd/pmc/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for linux/drivers/platform/x86/amd/pmc +# AMD Power Management Controller Driver +# + +obj-$(CONFIG_AMD_PMC) += amd-pmc.o +amd-pmc-y := pmc.o pmc-quirks.o mp1_stb.o +amd-pmc-$(CONFIG_AMD_MP2_STB) += mp2_stb.o diff --git a/drivers/platform/x86/amd/pmc/mp1_stb.c b/drivers/platform/x86/amd/pmc/mp1_stb.c new file mode 100644 index 000000000000..3b9b9f30faa3 --- /dev/null +++ b/drivers/platform/x86/amd/pmc/mp1_stb.c @@ -0,0 +1,332 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * AMD MP1 Smart Trace Buffer (STB) Layer + * + * Copyright (c) 2024, Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Authors: Shyam Sundar S K <Shyam-sundar.S-k@amd.com> + * Sanket Goswami <Sanket.Goswami@amd.com> + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <asm/amd/nb.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include <linux/uaccess.h> + +#include "pmc.h" + +/* STB Spill to DRAM Parameters */ +#define S2D_TELEMETRY_DRAMBYTES_MAX 0x1000000 +#define S2D_TELEMETRY_BYTES_MAX 0x100000U +#define S2D_RSVD_RAM_SPACE 0x100000 + +/* STB Registers */ +#define AMD_STB_PMI_0 0x03E30600 +#define AMD_PMC_STB_DUMMY_PC 0xC6000007 + +/* STB Spill to DRAM Message Definition */ +#define STB_FORCE_FLUSH_DATA 0xCF +#define FIFO_SIZE 4096 + +/* STB S2D(Spill to DRAM) has different message port offset */ +#define AMD_S2D_REGISTER_MESSAGE 0xA20 +#define AMD_S2D_REGISTER_RESPONSE 0xA80 +#define AMD_S2D_REGISTER_ARGUMENT 0xA88 + +/* STB S2D (Spill to DRAM) message port offset for 44h model */ +#define AMD_GNR_REGISTER_MESSAGE 0x524 +#define AMD_GNR_REGISTER_RESPONSE 0x570 +#define AMD_GNR_REGISTER_ARGUMENT 0xA40 + +static bool enable_stb; +module_param(enable_stb, bool, 0644); +MODULE_PARM_DESC(enable_stb, "Enable the STB debug mechanism"); + +static bool dump_custom_stb; +module_param(dump_custom_stb, bool, 0644); +MODULE_PARM_DESC(dump_custom_stb, "Enable to dump full STB buffer"); + +enum s2d_arg { + S2D_TELEMETRY_SIZE = 0x01, + S2D_PHYS_ADDR_LOW, + S2D_PHYS_ADDR_HIGH, + S2D_NUM_SAMPLES, + S2D_DRAM_SIZE, +}; + +struct amd_stb_v2_data { + size_t size; + u8 data[] __counted_by(size); +}; + +int amd_stb_write(struct amd_pmc_dev *dev, u32 data) +{ + int err; + + err = amd_smn_write(0, AMD_STB_PMI_0, data); + if (err) { + dev_err(dev->dev, "failed to write data in stb: 0x%X\n", AMD_STB_PMI_0); + return pcibios_err_to_errno(err); + } + + return 0; +} + +int amd_stb_read(struct amd_pmc_dev *dev, u32 *buf) +{ + int i, err; + + for (i = 0; i < FIFO_SIZE; i++) { + err = amd_smn_read(0, AMD_STB_PMI_0, buf++); + if (err) { + dev_err(dev->dev, "error reading data from stb: 0x%X\n", AMD_STB_PMI_0); + return pcibios_err_to_errno(err); + } + } + + return 0; +} + +static int amd_stb_debugfs_open(struct inode *inode, struct file *filp) +{ + struct amd_pmc_dev *dev = filp->f_inode->i_private; + u32 size = FIFO_SIZE * sizeof(u32); + u32 *buf; + int rc; + + buf = kzalloc(size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + rc = amd_stb_read(dev, buf); + if (rc) { + kfree(buf); + return rc; + } + + filp->private_data = buf; + return rc; +} + +static ssize_t amd_stb_debugfs_read(struct file *filp, char __user *buf, size_t size, loff_t *pos) +{ + if (!filp->private_data) + return -EINVAL; + + return simple_read_from_buffer(buf, size, pos, 
filp->private_data, + FIFO_SIZE * sizeof(u32)); +} + +static int amd_stb_debugfs_release(struct inode *inode, struct file *filp) +{ + kfree(filp->private_data); + return 0; +} + +static const struct file_operations amd_stb_debugfs_fops = { + .owner = THIS_MODULE, + .open = amd_stb_debugfs_open, + .read = amd_stb_debugfs_read, + .release = amd_stb_debugfs_release, +}; + +/* Enhanced STB Firmware Reporting Mechanism */ +static int amd_stb_handle_efr(struct file *filp) +{ + struct amd_pmc_dev *dev = filp->f_inode->i_private; + struct amd_stb_v2_data *stb_data_arr; + u32 fsize; + + fsize = dev->dram_size - S2D_RSVD_RAM_SPACE; + stb_data_arr = kmalloc(struct_size(stb_data_arr, data, fsize), GFP_KERNEL); + if (!stb_data_arr) + return -ENOMEM; + + stb_data_arr->size = fsize; + memcpy_fromio(stb_data_arr->data, dev->stb_virt_addr, fsize); + filp->private_data = stb_data_arr; + + return 0; +} + +static int amd_stb_debugfs_open_v2(struct inode *inode, struct file *filp) +{ + struct amd_pmc_dev *dev = filp->f_inode->i_private; + u32 fsize, num_samples, val, stb_rdptr_offset = 0; + struct amd_stb_v2_data *stb_data_arr; + int ret; + + /* Write dummy postcode while reading the STB buffer */ + ret = amd_stb_write(dev, AMD_PMC_STB_DUMMY_PC); + if (ret) + dev_err(dev->dev, "error writing to STB: %d\n", ret); + + /* Spill to DRAM num_samples uses separate SMU message port */ + dev->msg_port = MSG_PORT_S2D; + + ret = amd_pmc_send_cmd(dev, 0, &val, STB_FORCE_FLUSH_DATA, 1); + if (ret) + dev_dbg_once(dev->dev, "S2D force flush not supported: %d\n", ret); + + /* + * We have a custom stb size and the PMFW is supposed to give + * the enhanced dram size. Note that we land here only for the + * platforms that support enhanced dram size reporting. + */ + if (dump_custom_stb) + return amd_stb_handle_efr(filp); + + /* Get the num_samples to calculate the last push location */ + ret = amd_pmc_send_cmd(dev, S2D_NUM_SAMPLES, &num_samples, dev->stb_arg.s2d_msg_id, true); + /* Clear msg_port for other SMU operation */ + dev->msg_port = MSG_PORT_PMC; + if (ret) { + dev_err(dev->dev, "error: S2D_NUM_SAMPLES not supported : %d\n", ret); + return ret; + } + + fsize = min(num_samples, S2D_TELEMETRY_BYTES_MAX); + stb_data_arr = kmalloc(struct_size(stb_data_arr, data, fsize), GFP_KERNEL); + if (!stb_data_arr) + return -ENOMEM; + + stb_data_arr->size = fsize; + + /* + * Start capturing data from the last push location. + * This is for general cases, where the stb limits + * are meant for standard usage. 
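+	 * As a worked example (hypothetical counts): with num_samples =
+	 * 0x120000 and a window of S2D_TELEMETRY_BYTES_MAX (0x100000) bytes,
+	 * stb_rdptr_offset becomes 0x20000, so bytes [0x20000, 0x100000) of
+	 * the DRAM buffer (the oldest samples) are copied first, followed by
+	 * bytes [0, 0x20000) (the newest).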
+ */ + if (num_samples > S2D_TELEMETRY_BYTES_MAX) { + /* First read oldest data starting 1 behind last write till end of ringbuffer */ + stb_rdptr_offset = num_samples % S2D_TELEMETRY_BYTES_MAX; + fsize = S2D_TELEMETRY_BYTES_MAX - stb_rdptr_offset; + + memcpy_fromio(stb_data_arr->data, dev->stb_virt_addr + stb_rdptr_offset, fsize); + /* Second copy the newer samples from offset 0 - last write */ + memcpy_fromio(stb_data_arr->data + fsize, dev->stb_virt_addr, stb_rdptr_offset); + } else { + memcpy_fromio(stb_data_arr->data, dev->stb_virt_addr, fsize); + } + + filp->private_data = stb_data_arr; + + return 0; +} + +static ssize_t amd_stb_debugfs_read_v2(struct file *filp, char __user *buf, size_t size, + loff_t *pos) +{ + struct amd_stb_v2_data *data = filp->private_data; + + return simple_read_from_buffer(buf, size, pos, data->data, data->size); +} + +static int amd_stb_debugfs_release_v2(struct inode *inode, struct file *filp) +{ + kfree(filp->private_data); + return 0; +} + +static const struct file_operations amd_stb_debugfs_fops_v2 = { + .owner = THIS_MODULE, + .open = amd_stb_debugfs_open_v2, + .read = amd_stb_debugfs_read_v2, + .release = amd_stb_debugfs_release_v2, +}; + +static void amd_stb_update_args(struct amd_pmc_dev *dev) +{ + if (cpu_feature_enabled(X86_FEATURE_ZEN5)) + switch (boot_cpu_data.x86_model) { + case 0x44: + dev->stb_arg.msg = AMD_GNR_REGISTER_MESSAGE; + dev->stb_arg.arg = AMD_GNR_REGISTER_ARGUMENT; + dev->stb_arg.resp = AMD_GNR_REGISTER_RESPONSE; + return; + default: + break; + } + + dev->stb_arg.msg = AMD_S2D_REGISTER_MESSAGE; + dev->stb_arg.arg = AMD_S2D_REGISTER_ARGUMENT; + dev->stb_arg.resp = AMD_S2D_REGISTER_RESPONSE; +} + +static bool amd_is_stb_supported(struct amd_pmc_dev *dev) +{ + switch (dev->cpu_id) { + case AMD_CPU_ID_YC: + case AMD_CPU_ID_CB: + if (boot_cpu_data.x86_model == 0x44) + dev->stb_arg.s2d_msg_id = 0x9B; + else + dev->stb_arg.s2d_msg_id = 0xBE; + break; + case AMD_CPU_ID_PS: + dev->stb_arg.s2d_msg_id = 0x85; + break; + case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT: + case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT: + if (boot_cpu_data.x86_model == 0x70) + dev->stb_arg.s2d_msg_id = 0xF1; + else + dev->stb_arg.s2d_msg_id = 0xDE; + break; + default: + return false; + } + + amd_stb_update_args(dev); + return true; +} + +int amd_stb_s2d_init(struct amd_pmc_dev *dev) +{ + u32 phys_addr_low, phys_addr_hi; + u64 stb_phys_addr; + u32 size = 0; + int ret; + + if (!enable_stb) + return 0; + + if (amd_is_stb_supported(dev)) { + debugfs_create_file("stb_read", 0644, dev->dbgfs_dir, dev, + &amd_stb_debugfs_fops_v2); + } else { + debugfs_create_file("stb_read", 0644, dev->dbgfs_dir, dev, + &amd_stb_debugfs_fops); + return 0; + } + + /* Spill to DRAM feature uses separate SMU message port */ + dev->msg_port = MSG_PORT_S2D; + + amd_pmc_send_cmd(dev, S2D_TELEMETRY_SIZE, &size, dev->stb_arg.s2d_msg_id, true); + if (size != S2D_TELEMETRY_BYTES_MAX) + return -EIO; + + /* Get DRAM size */ + ret = amd_pmc_send_cmd(dev, S2D_DRAM_SIZE, &dev->dram_size, dev->stb_arg.s2d_msg_id, true); + if (ret || !dev->dram_size) + dev->dram_size = S2D_TELEMETRY_DRAMBYTES_MAX; + + /* Get STB DRAM address */ + amd_pmc_send_cmd(dev, S2D_PHYS_ADDR_LOW, &phys_addr_low, dev->stb_arg.s2d_msg_id, true); + amd_pmc_send_cmd(dev, S2D_PHYS_ADDR_HIGH, &phys_addr_hi, dev->stb_arg.s2d_msg_id, true); + + stb_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low); + + /* Clear msg_port for other SMU operation */ + dev->msg_port = MSG_PORT_PMC; + + dev->stb_virt_addr = devm_ioremap(dev->dev, stb_phys_addr, 
dev->dram_size); + if (!dev->stb_virt_addr) + return -ENOMEM; + + return 0; +} diff --git a/drivers/platform/x86/amd/pmc/mp2_stb.c b/drivers/platform/x86/amd/pmc/mp2_stb.c new file mode 100644 index 000000000000..9775ddc1b27a --- /dev/null +++ b/drivers/platform/x86/amd/pmc/mp2_stb.c @@ -0,0 +1,280 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * AMD MP2 STB layer + * + * Copyright (c) 2024, Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Author: Basavaraj Natikar <Basavaraj.Natikar@amd.com> + */ + +#include <linux/debugfs.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/iopoll.h> +#include <linux/pci.h> +#include <linux/sizes.h> +#include <linux/time.h> + +#include "pmc.h" + +#define VALID_MSG 0xA +#define VALID_RESPONSE 2 + +#define AMD_C2P_MSG0 0x10500 +#define AMD_C2P_MSG1 0x10504 +#define AMD_P2C_MSG0 0x10680 +#define AMD_P2C_MSG1 0x10684 + +#define MP2_RESP_SLEEP_US 500 +#define MP2_RESP_TIMEOUT_US (1600 * USEC_PER_MSEC) + +#define MP2_STB_DATA_LEN_2KB 1 +#define MP2_STB_DATA_LEN_16KB 4 + +#define MP2_MMIO_BAR 2 + +struct mp2_cmd_base { + union { + u32 ul; + struct { + u32 cmd_id : 4; + u32 intr_disable : 1; + u32 is_dma_used : 1; + u32 rsvd : 26; + } field; + }; +}; + +struct mp2_cmd_response { + union { + u32 resp; + struct { + u32 cmd_id : 4; + u32 status : 4; + u32 response : 4; + u32 rsvd2 : 20; + } field; + }; +}; + +struct mp2_stb_data_valid { + union { + u32 data_valid; + struct { + u32 valid : 16; + u32 length : 16; + } val; + }; +}; + +static int amd_mp2_wait_response(struct amd_mp2_dev *mp2, u8 cmd_id, u32 command_sts) +{ + struct mp2_cmd_response cmd_resp; + + if (!readl_poll_timeout(mp2->mmio + AMD_P2C_MSG0, cmd_resp.resp, + (cmd_resp.field.response == 0x0 && + cmd_resp.field.status == command_sts && + cmd_resp.field.cmd_id == cmd_id), MP2_RESP_SLEEP_US, + MP2_RESP_TIMEOUT_US)) + return cmd_resp.field.status; + + return -ETIMEDOUT; +} + +static void amd_mp2_stb_send_cmd(struct amd_mp2_dev *mp2, u8 cmd_id, bool is_dma_used) +{ + struct mp2_cmd_base cmd_base; + + cmd_base.ul = 0; + cmd_base.field.cmd_id = cmd_id; + cmd_base.field.intr_disable = 1; + cmd_base.field.is_dma_used = is_dma_used; + + writeq(mp2->dma_addr, mp2->mmio + AMD_C2P_MSG1); + writel(cmd_base.ul, mp2->mmio + AMD_C2P_MSG0); +} + +static int amd_mp2_stb_region(struct amd_mp2_dev *mp2) +{ + struct device *dev = &mp2->pdev->dev; + unsigned int len = mp2->stb_len; + + if (!mp2->stbdata) { + mp2->vslbase = dmam_alloc_coherent(dev, len, &mp2->dma_addr, GFP_KERNEL); + if (!mp2->vslbase) + return -ENOMEM; + + mp2->stbdata = devm_kzalloc(dev, len, GFP_KERNEL); + if (!mp2->stbdata) + return -ENOMEM; + } + + return 0; +} + +static int amd_mp2_process_cmd(struct amd_mp2_dev *mp2, struct file *filp) +{ + struct device *dev = &mp2->pdev->dev; + struct mp2_stb_data_valid stb_dv; + int status; + + stb_dv.data_valid = readl(mp2->mmio + AMD_P2C_MSG1); + + if (stb_dv.val.valid != VALID_MSG) { + dev_dbg(dev, "Invalid STB data\n"); + return -EBADMSG; + } + + if (stb_dv.val.length != MP2_STB_DATA_LEN_2KB && + stb_dv.val.length != MP2_STB_DATA_LEN_16KB) { + dev_dbg(dev, "Unsupported length\n"); + return -EMSGSIZE; + } + + mp2->stb_len = BIT(stb_dv.val.length) * SZ_1K; + + status = amd_mp2_stb_region(mp2); + if (status) { + dev_err(dev, "Failed to init STB region, status %d\n", status); + return status; + } + + amd_mp2_stb_send_cmd(mp2, VALID_MSG, true); + status = amd_mp2_wait_response(mp2, VALID_MSG, VALID_RESPONSE); + if (status == VALID_RESPONSE) { + 
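		/* MP2 firmware has DMA'd the trace into the coherent buffer; snapshot it for readers */
+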
memcpy_fromio(mp2->stbdata, mp2->vslbase, mp2->stb_len); + filp->private_data = mp2->stbdata; + mp2->is_stb_data = true; + } else { + dev_err(dev, "Failed to start STB dump, status %d\n", status); + return -EOPNOTSUPP; + } + + return 0; +} + +static int amd_mp2_stb_debugfs_open(struct inode *inode, struct file *filp) +{ + struct amd_pmc_dev *dev = filp->f_inode->i_private; + struct amd_mp2_dev *mp2 = dev->mp2; + + if (mp2) { + if (!mp2->is_stb_data) + return amd_mp2_process_cmd(mp2, filp); + + filp->private_data = mp2->stbdata; + + return 0; + } + + return -ENODEV; +} + +static ssize_t amd_mp2_stb_debugfs_read(struct file *filp, char __user *buf, size_t size, + loff_t *pos) +{ + struct amd_pmc_dev *dev = filp->f_inode->i_private; + struct amd_mp2_dev *mp2 = dev->mp2; + + if (!mp2) + return -ENODEV; + + if (!filp->private_data) + return -EINVAL; + + return simple_read_from_buffer(buf, size, pos, filp->private_data, mp2->stb_len); +} + +static const struct file_operations amd_mp2_stb_debugfs_fops = { + .owner = THIS_MODULE, + .open = amd_mp2_stb_debugfs_open, + .read = amd_mp2_stb_debugfs_read, +}; + +static void amd_mp2_dbgfs_register(struct amd_pmc_dev *dev) +{ + if (!dev->dbgfs_dir) + return; + + debugfs_create_file("stb_read_previous_boot", 0644, dev->dbgfs_dir, dev, + &amd_mp2_stb_debugfs_fops); +} + +void amd_mp2_stb_deinit(struct amd_pmc_dev *dev) +{ + struct amd_mp2_dev *mp2 = dev->mp2; + struct pci_dev *pdev; + + if (mp2 && mp2->pdev) { + pdev = mp2->pdev; + + if (mp2->mmio) + pci_clear_master(pdev); + + pci_dev_put(pdev); + + if (mp2->devres_gid) + devres_release_group(&pdev->dev, mp2->devres_gid); + + dev->mp2 = NULL; + } +} + +void amd_mp2_stb_init(struct amd_pmc_dev *dev) +{ + struct amd_mp2_dev *mp2 = NULL; + struct pci_dev *pdev; + int rc; + + mp2 = devm_kzalloc(dev->dev, sizeof(*mp2), GFP_KERNEL); + if (!mp2) + return; + + pdev = pci_get_device(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MP2_STB, NULL); + if (!pdev) + return; + + dev->mp2 = mp2; + mp2->pdev = pdev; + + mp2->devres_gid = devres_open_group(&pdev->dev, NULL, GFP_KERNEL); + if (!mp2->devres_gid) { + dev_err(&pdev->dev, "devres_open_group failed\n"); + goto mp2_error; + } + + rc = pcim_enable_device(pdev); + if (rc) { + dev_err(&pdev->dev, "pcim_enable_device failed\n"); + goto mp2_error; + } + + rc = pcim_iomap_regions(pdev, BIT(MP2_MMIO_BAR), "mp2 stb"); + if (rc) { + dev_err(&pdev->dev, "pcim_iomap_regions failed\n"); + goto mp2_error; + } + + mp2->mmio = pcim_iomap_table(pdev)[MP2_MMIO_BAR]; + if (!mp2->mmio) { + dev_err(&pdev->dev, "pcim_iomap_table failed\n"); + goto mp2_error; + } + + pci_set_master(pdev); + + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (rc) { + dev_err(&pdev->dev, "failed to set DMA mask\n"); + goto mp2_error; + } + + amd_mp2_dbgfs_register(dev); + + return; + +mp2_error: + amd_mp2_stb_deinit(dev); +} diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c new file mode 100644 index 000000000000..404e62ad293a --- /dev/null +++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c @@ -0,0 +1,368 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * AMD SoC Power Management Controller Driver Quirks + * + * Copyright (c) 2023, Advanced Micro Devices, Inc. + * All Rights Reserved. 
+ * + * Author: Mario Limonciello <mario.limonciello@amd.com> + */ + +#include <linux/dmi.h> +#include <linux/io.h> +#include <linux/ioport.h> +#include <linux/platform_data/x86/amd-fch.h> + +#include "pmc.h" + +struct quirk_entry { + u32 s2idle_bug_mmio; + bool spurious_8042; +}; + +static struct quirk_entry quirk_s2idle_bug = { + .s2idle_bug_mmio = FCH_PM_BASE + FCH_PM_SCRATCH, +}; + +static struct quirk_entry quirk_spurious_8042 = { + .spurious_8042 = true, +}; + +static struct quirk_entry quirk_s2idle_spurious_8042 = { + .s2idle_bug_mmio = FCH_PM_BASE + FCH_PM_SCRATCH, + .spurious_8042 = true, +}; + +static const struct dmi_system_id fwbug_list[] = { + { + .ident = "L14 Gen2 AMD", + .driver_data = &quirk_s2idle_spurious_8042, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "20X5"), + } + }, + { + .ident = "T14s Gen2 AMD", + .driver_data = &quirk_s2idle_spurious_8042, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "20XF"), + } + }, + { + .ident = "X13 Gen2 AMD", + .driver_data = &quirk_s2idle_spurious_8042, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "20XH"), + } + }, + { + .ident = "T14 Gen2 AMD", + .driver_data = &quirk_s2idle_spurious_8042, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "20XK"), + } + }, + { + .ident = "T14 Gen1 AMD", + .driver_data = &quirk_s2idle_spurious_8042, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "20UD"), + } + }, + { + .ident = "T14 Gen1 AMD", + .driver_data = &quirk_s2idle_spurious_8042, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "20UE"), + } + }, + { + .ident = "T14s Gen1 AMD", + .driver_data = &quirk_s2idle_spurious_8042, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "20UH"), + } + }, + { + .ident = "T14s Gen1 AMD", + .driver_data = &quirk_s2idle_spurious_8042, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "20UJ"), + } + }, + { + .ident = "P14s Gen1 AMD", + .driver_data = &quirk_s2idle_spurious_8042, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "20Y1"), + } + }, + { + .ident = "P14s Gen2 AMD", + .driver_data = &quirk_s2idle_spurious_8042, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21A0"), + } + }, + { + .ident = "P14s Gen2 AMD", + .driver_data = &quirk_s2idle_spurious_8042, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21A1"), + } + }, + { + .ident = "ROG Xbox Ally RC73YA", + .driver_data = &quirk_spurious_8042, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "RC73YA"), + } + }, + /* https://bugzilla.kernel.org/show_bug.cgi?id=218024 */ + { + .ident = "V14 G4 AMN", + .driver_data = &quirk_s2idle_bug, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "82YT"), + } + }, + { + .ident = "V14 G4 AMN", + .driver_data = &quirk_s2idle_bug, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "83GE"), + } + }, + { + .ident = "V15 G4 AMN", + .driver_data = &quirk_s2idle_bug, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "82YU"), + } + }, + { + .ident = "V15 G4 AMN", + .driver_data = &quirk_s2idle_bug, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + 
DMI_MATCH(DMI_PRODUCT_NAME, "83CQ"), + } + }, + { + .ident = "IdeaPad 1 14AMN7", + .driver_data = &quirk_s2idle_spurious_8042, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "82VF"), + } + }, + { + .ident = "IdeaPad 1 15AMN7", + .driver_data = &quirk_s2idle_spurious_8042, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "82VG"), + } + }, + { + .ident = "IdeaPad 1 15AMN7", + .driver_data = &quirk_s2idle_spurious_8042, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "82X5"), + } + }, + { + .ident = "IdeaPad Slim 3 14AMN8", + .driver_data = &quirk_s2idle_spurious_8042, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "82XN"), + } + }, + { + .ident = "IdeaPad Slim 3 15AMN8", + .driver_data = &quirk_s2idle_spurious_8042, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "82XQ"), + } + }, + /* https://gitlab.freedesktop.org/drm/amd/-/issues/4434 */ + { + .ident = "Lenovo Yoga 6 13ALC6", + .driver_data = &quirk_s2idle_spurious_8042, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "82ND"), + } + }, + /* https://gitlab.freedesktop.org/drm/amd/-/issues/4618 */ + { + .ident = "Lenovo Legion Go 2", + .driver_data = &quirk_s2idle_bug, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "83N0"), + } + }, + { + .ident = "Lenovo Legion Go 2", + .driver_data = &quirk_s2idle_bug, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "83N1"), + } + }, + /* https://gitlab.freedesktop.org/drm/amd/-/issues/2684 */ + { + .ident = "HP Laptop 15s-eq2xxx", + .driver_data = &quirk_s2idle_spurious_8042, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Laptop 15s-eq2xxx"), + } + }, + /* https://community.frame.work/t/tracking-framework-amd-ryzen-7040-series-lid-wakeup-behavior-feedback/39128 */ + { + .ident = "Framework Laptop 13 (Phoenix)", + .driver_data = &quirk_spurious_8042, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Framework"), + DMI_MATCH(DMI_PRODUCT_NAME, "Laptop 13 (AMD Ryzen 7040Series)"), + DMI_MATCH(DMI_BIOS_VERSION, "03.03"), + } + }, + { + .ident = "Framework Laptop 13 (Phoenix)", + .driver_data = &quirk_spurious_8042, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Framework"), + DMI_MATCH(DMI_PRODUCT_NAME, "Laptop 13 (AMD Ryzen 7040Series)"), + DMI_MATCH(DMI_BIOS_VERSION, "03.05"), + } + }, + { + .ident = "MECHREVO Wujie 14X (GX4HRXL)", + .driver_data = &quirk_spurious_8042, + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "WUJIE14-GX4HRXL"), + } + }, + { + .ident = "MECHREVO Yilong15Pro Series GM5HG7A", + .driver_data = &quirk_spurious_8042, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MECHREVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "Yilong15Pro Series GM5HG7A"), + } + }, + /* https://bugzilla.kernel.org/show_bug.cgi?id=220116 */ + { + .ident = "PCSpecialist Lafite Pro V 14M", + .driver_data = &quirk_spurious_8042, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "PCSpecialist"), + DMI_MATCH(DMI_PRODUCT_NAME, "Lafite Pro V 14M"), + } + }, + { + .ident = "TUXEDO Stellaris Slim 15 AMD Gen6", + .driver_data = &quirk_spurious_8042, + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GMxHGxx"), + } + }, + { + .ident = "TUXEDO InfinityBook Pro 14/15 AMD Gen10", + .driver_data = &quirk_spurious_8042, + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "XxHP4NAx"), + } + }, + { + .ident = "TUXEDO InfinityBook Pro 14/15 AMD 
Gen10", + .driver_data = &quirk_spurious_8042, + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "XxKK4NAx_XxSP4NAx"), + } + }, + {} +}; + +/* + * Laptops that run a SMI handler during the D3->D0 transition that occurs + * specifically when exiting suspend to idle which can cause + * large delays during resume when the IOMMU translation layer is enabled (the default + * behavior) for NVME devices: + * + * To avoid this firmware problem, skip the SMI handler on these machines before the + * D0 transition occurs. + */ +static void amd_pmc_skip_nvme_smi_handler(u32 s2idle_bug_mmio) +{ + void __iomem *addr; + u8 val; + + if (!request_mem_region_muxed(s2idle_bug_mmio, 1, "amd_pmc_pm80")) + return; + + addr = ioremap(s2idle_bug_mmio, 1); + if (!addr) + goto cleanup_resource; + + val = ioread8(addr); + iowrite8(val & ~BIT(0), addr); + + iounmap(addr); +cleanup_resource: + release_mem_region(s2idle_bug_mmio, 1); +} + +void amd_pmc_process_restore_quirks(struct amd_pmc_dev *dev) +{ + if (dev->quirks && dev->quirks->s2idle_bug_mmio) + amd_pmc_skip_nvme_smi_handler(dev->quirks->s2idle_bug_mmio); +} + +void amd_pmc_quirks_init(struct amd_pmc_dev *dev) +{ + const struct dmi_system_id *dmi_id; + + /* + * IRQ1 may cause an interrupt during resume even without a keyboard + * press. + * + * Affects Renoir, Cezanne and Barcelo SoCs + * + * A solution is available in PMFW 64.66.0, but it must be activated by + * SBIOS. If SBIOS is known to have the fix a quirk can be added for + * a given system to avoid workaround. + */ + if (dev->cpu_id == AMD_CPU_ID_CZN) + dev->disable_8042_wakeup = true; + + dmi_id = dmi_first_match(fwbug_list); + if (!dmi_id) + return; + dev->quirks = dmi_id->driver_data; + if (dev->quirks->s2idle_bug_mmio) + pr_info("Using s2idle quirk to avoid %s platform firmware bug\n", + dmi_id->ident); + dev->disable_8042_wakeup = dev->quirks->spurious_8042; +} diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c new file mode 100644 index 000000000000..cae3fcafd4d7 --- /dev/null +++ b/drivers/platform/x86/amd/pmc/pmc.c @@ -0,0 +1,847 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * AMD SoC Power Management Controller Driver + * + * Copyright (c) 2020, Advanced Micro Devices, Inc. + * All Rights Reserved. 
+ * + * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com> + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/acpi.h> +#include <linux/array_size.h> +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/debugfs.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/limits.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/rtc.h> +#include <linux/serio.h> +#include <linux/suspend.h> +#include <linux/seq_file.h> +#include <linux/uaccess.h> + +#include <asm/amd/node.h> + +#include "pmc.h" + +static const struct amd_pmc_bit_map soc15_ip_blk_v2[] = { + {"DISPLAY", BIT(0)}, + {"CPU", BIT(1)}, + {"GFX", BIT(2)}, + {"VDD", BIT(3)}, + {"VDD_CCX", BIT(4)}, + {"ACP", BIT(5)}, + {"VCN_0", BIT(6)}, + {"VCN_1", BIT(7)}, + {"ISP", BIT(8)}, + {"NBIO", BIT(9)}, + {"DF", BIT(10)}, + {"USB3_0", BIT(11)}, + {"USB3_1", BIT(12)}, + {"LAPIC", BIT(13)}, + {"USB3_2", BIT(14)}, + {"USB4_RT0", BIT(15)}, + {"USB4_RT1", BIT(16)}, + {"USB4_0", BIT(17)}, + {"USB4_1", BIT(18)}, + {"MPM", BIT(19)}, + {"JPEG_0", BIT(20)}, + {"JPEG_1", BIT(21)}, + {"IPU", BIT(22)}, + {"UMSCH", BIT(23)}, + {"VPE", BIT(24)}, +}; + +static const struct amd_pmc_bit_map soc15_ip_blk[] = { + {"DISPLAY", BIT(0)}, + {"CPU", BIT(1)}, + {"GFX", BIT(2)}, + {"VDD", BIT(3)}, + {"ACP", BIT(4)}, + {"VCN", BIT(5)}, + {"ISP", BIT(6)}, + {"NBIO", BIT(7)}, + {"DF", BIT(8)}, + {"USB3_0", BIT(9)}, + {"USB3_1", BIT(10)}, + {"LAPIC", BIT(11)}, + {"USB3_2", BIT(12)}, + {"USB3_3", BIT(13)}, + {"USB3_4", BIT(14)}, + {"USB4_0", BIT(15)}, + {"USB4_1", BIT(16)}, + {"MPM", BIT(17)}, + {"JPEG", BIT(18)}, + {"IPU", BIT(19)}, + {"UMSCH", BIT(20)}, + {"VPE", BIT(21)}, +}; + +static bool disable_workarounds; +module_param(disable_workarounds, bool, 0644); +MODULE_PARM_DESC(disable_workarounds, "Disable workarounds for platform bugs"); + +static struct amd_pmc_dev pmc; + +static inline u32 amd_pmc_reg_read(struct amd_pmc_dev *dev, int reg_offset) +{ + return ioread32(dev->regbase + reg_offset); +} + +static inline void amd_pmc_reg_write(struct amd_pmc_dev *dev, int reg_offset, u32 val) +{ + iowrite32(val, dev->regbase + reg_offset); +} + +static void amd_pmc_get_ip_info(struct amd_pmc_dev *dev) +{ + switch (dev->cpu_id) { + case AMD_CPU_ID_PCO: + case AMD_CPU_ID_RN: + case AMD_CPU_ID_VG: + case AMD_CPU_ID_YC: + case AMD_CPU_ID_CB: + dev->num_ips = 12; + dev->ips_ptr = soc15_ip_blk; + dev->smu_msg = 0x538; + break; + case AMD_CPU_ID_PS: + dev->num_ips = 21; + dev->ips_ptr = soc15_ip_blk; + dev->smu_msg = 0x538; + break; + case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT: + case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT: + if (boot_cpu_data.x86_model == 0x70) { + dev->num_ips = ARRAY_SIZE(soc15_ip_blk_v2); + dev->ips_ptr = soc15_ip_blk_v2; + } else { + dev->num_ips = ARRAY_SIZE(soc15_ip_blk); + dev->ips_ptr = soc15_ip_blk; + } + dev->smu_msg = 0x938; + break; + } +} + +static int amd_pmc_setup_smu_logging(struct amd_pmc_dev *dev) +{ + if (dev->cpu_id == AMD_CPU_ID_PCO) { + dev_warn_once(dev->dev, "SMU debugging info not supported on this platform\n"); + return -EINVAL; + } + + /* Get Active devices list from SMU */ + if (!dev->active_ips) + amd_pmc_send_cmd(dev, 0, &dev->active_ips, SMU_MSG_GET_SUP_CONSTRAINTS, true); + + /* Get dram address */ + if (!dev->smu_virt_addr) { + u32 phys_addr_low, phys_addr_hi; + u64 smu_phys_addr; + + amd_pmc_send_cmd(dev, 0, &phys_addr_low, SMU_MSG_LOG_GETDRAM_ADDR_LO, true); + amd_pmc_send_cmd(dev, 0, &phys_addr_hi, 
SMU_MSG_LOG_GETDRAM_ADDR_HI, true); + smu_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low); + + dev->smu_virt_addr = devm_ioremap(dev->dev, smu_phys_addr, + sizeof(struct smu_metrics)); + if (!dev->smu_virt_addr) + return -ENOMEM; + } + + memset_io(dev->smu_virt_addr, 0, sizeof(struct smu_metrics)); + + /* Start the logging */ + amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_RESET, false); + amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_START, false); + + return 0; +} + +static int get_metrics_table(struct amd_pmc_dev *pdev, struct smu_metrics *table) +{ + int rc; + + if (!pdev->smu_virt_addr) { + rc = amd_pmc_setup_smu_logging(pdev); + if (rc) + return rc; + } + + if (pdev->cpu_id == AMD_CPU_ID_PCO) + return -ENODEV; + memcpy_fromio(table, pdev->smu_virt_addr, sizeof(struct smu_metrics)); + return 0; +} + +static void amd_pmc_validate_deepest(struct amd_pmc_dev *pdev) +{ + struct smu_metrics table; + + if (get_metrics_table(pdev, &table)) + return; + + if (!table.s0i3_last_entry_status) + dev_warn(pdev->dev, "Last suspend didn't reach deepest state\n"); + pm_report_hw_sleep_time(table.s0i3_last_entry_status ? + table.timein_s0i3_lastcapture : 0); +} + +static int amd_pmc_get_smu_version(struct amd_pmc_dev *dev) +{ + int rc; + u32 val; + + if (dev->cpu_id == AMD_CPU_ID_PCO) + return -ENODEV; + + rc = amd_pmc_send_cmd(dev, 0, &val, SMU_MSG_GETSMUVERSION, true); + if (rc) + return rc; + + dev->smu_program = (val >> 24) & GENMASK(7, 0); + dev->major = (val >> 16) & GENMASK(7, 0); + dev->minor = (val >> 8) & GENMASK(7, 0); + dev->rev = (val >> 0) & GENMASK(7, 0); + + dev_dbg(dev->dev, "SMU program %u version is %u.%u.%u\n", + dev->smu_program, dev->major, dev->minor, dev->rev); + + return 0; +} + +static ssize_t smu_fw_version_show(struct device *d, struct device_attribute *attr, + char *buf) +{ + struct amd_pmc_dev *dev = dev_get_drvdata(d); + int rc; + + if (!dev->major) { + rc = amd_pmc_get_smu_version(dev); + if (rc) + return rc; + } + return sysfs_emit(buf, "%u.%u.%u\n", dev->major, dev->minor, dev->rev); +} + +static ssize_t smu_program_show(struct device *d, struct device_attribute *attr, + char *buf) +{ + struct amd_pmc_dev *dev = dev_get_drvdata(d); + int rc; + + if (!dev->major) { + rc = amd_pmc_get_smu_version(dev); + if (rc) + return rc; + } + return sysfs_emit(buf, "%u\n", dev->smu_program); +} + +static DEVICE_ATTR_RO(smu_fw_version); +static DEVICE_ATTR_RO(smu_program); + +static umode_t pmc_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx) +{ + struct device *dev = kobj_to_dev(kobj); + struct amd_pmc_dev *pdev = dev_get_drvdata(dev); + + if (pdev->cpu_id == AMD_CPU_ID_PCO) + return 0; + return 0444; +} + +static struct attribute *pmc_attrs[] = { + &dev_attr_smu_fw_version.attr, + &dev_attr_smu_program.attr, + NULL, +}; + +static struct attribute_group pmc_attr_group = { + .attrs = pmc_attrs, + .is_visible = pmc_attr_is_visible, +}; + +static const struct attribute_group *pmc_groups[] = { + &pmc_attr_group, + NULL, +}; + +static int smu_fw_info_show(struct seq_file *s, void *unused) +{ + struct amd_pmc_dev *dev = s->private; + struct smu_metrics table; + int idx; + + if (get_metrics_table(dev, &table)) + return -EINVAL; + + seq_puts(s, "\n=== SMU Statistics ===\n"); + seq_printf(s, "Table Version: %d\n", table.table_version); + seq_printf(s, "Hint Count: %d\n", table.hint_count); + seq_printf(s, "Last S0i3 Status: %s\n", table.s0i3_last_entry_status ? 
"Success" : + "Unknown/Fail"); + seq_printf(s, "Time (in us) to S0i3: %lld\n", table.timeentering_s0i3_lastcapture); + seq_printf(s, "Time (in us) in S0i3: %lld\n", table.timein_s0i3_lastcapture); + seq_printf(s, "Time (in us) to resume from S0i3: %lld\n", + table.timeto_resume_to_os_lastcapture); + + seq_puts(s, "\n=== Active time (in us) ===\n"); + for (idx = 0 ; idx < dev->num_ips ; idx++) { + if (dev->ips_ptr[idx].bit_mask & dev->active_ips) + seq_printf(s, "%-8s : %lld\n", dev->ips_ptr[idx].name, + table.timecondition_notmet_lastcapture[idx]); + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(smu_fw_info); + +static int s0ix_stats_show(struct seq_file *s, void *unused) +{ + struct amd_pmc_dev *dev = s->private; + u64 entry_time, exit_time, residency; + + /* Use FCH registers to get the S0ix stats */ + if (!dev->fch_virt_addr) { + u32 base_addr_lo = FCH_BASE_PHY_ADDR_LOW; + u32 base_addr_hi = FCH_BASE_PHY_ADDR_HIGH; + u64 fch_phys_addr = ((u64)base_addr_hi << 32 | base_addr_lo); + + dev->fch_virt_addr = devm_ioremap(dev->dev, fch_phys_addr, FCH_SSC_MAPPING_SIZE); + if (!dev->fch_virt_addr) + return -ENOMEM; + } + + entry_time = ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_H_OFFSET); + entry_time = entry_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_L_OFFSET); + + exit_time = ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_H_OFFSET); + exit_time = exit_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_L_OFFSET); + + /* It's in 48MHz. We need to convert it */ + residency = exit_time - entry_time; + do_div(residency, 48); + + seq_puts(s, "=== S0ix statistics ===\n"); + seq_printf(s, "S0ix Entry Time: %lld\n", entry_time); + seq_printf(s, "S0ix Exit Time: %lld\n", exit_time); + seq_printf(s, "Residency Time: %lld\n", residency); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(s0ix_stats); + +static int amd_pmc_idlemask_read(struct amd_pmc_dev *pdev, struct device *dev, + struct seq_file *s) +{ + u32 val; + int rc; + + switch (pdev->cpu_id) { + case AMD_CPU_ID_CZN: + /* we haven't yet read SMU version */ + if (!pdev->major) { + rc = amd_pmc_get_smu_version(pdev); + if (rc) + return rc; + } + if (pdev->major > 56 || (pdev->major >= 55 && pdev->minor >= 37)) + val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_CZN); + else + return -EINVAL; + break; + case AMD_CPU_ID_YC: + case AMD_CPU_ID_CB: + case AMD_CPU_ID_PS: + val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_YC); + break; + case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT: + case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT: + val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_1AH); + break; + default: + return -EINVAL; + } + + if (dev) + pm_pr_dbg("SMU idlemask s0i3: 0x%x\n", val); + + if (s) + seq_printf(s, "SMU idlemask : 0x%x\n", val); + + return 0; +} + +static int amd_pmc_idlemask_show(struct seq_file *s, void *unused) +{ + return amd_pmc_idlemask_read(s->private, NULL, s); +} +DEFINE_SHOW_ATTRIBUTE(amd_pmc_idlemask); + +static void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev) +{ + debugfs_remove_recursive(dev->dbgfs_dir); +} + +static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev) +{ + dev->dbgfs_dir = debugfs_create_dir("amd_pmc", NULL); + debugfs_create_file("smu_fw_info", 0644, dev->dbgfs_dir, dev, + &smu_fw_info_fops); + debugfs_create_file("s0ix_stats", 0644, dev->dbgfs_dir, dev, + &s0ix_stats_fops); + debugfs_create_file("amd_pmc_idlemask", 0644, dev->dbgfs_dir, dev, + &amd_pmc_idlemask_fops); +} + +static char *amd_pmc_get_msg_port(struct amd_pmc_dev *dev) +{ + switch (dev->msg_port) { + case MSG_PORT_PMC: + return 
"PMC"; + case MSG_PORT_S2D: + return "S2D"; + default: + return "Invalid message port"; + } +} + +static void amd_pmc_dump_registers(struct amd_pmc_dev *dev) +{ + u32 value, message, argument, response; + + if (dev->msg_port == MSG_PORT_S2D) { + message = dev->stb_arg.msg; + argument = dev->stb_arg.arg; + response = dev->stb_arg.resp; + } else { + message = dev->smu_msg; + argument = AMD_PMC_REGISTER_ARGUMENT; + response = AMD_PMC_REGISTER_RESPONSE; + } + + value = amd_pmc_reg_read(dev, response); + dev_dbg(dev->dev, "AMD_%s_REGISTER_RESPONSE:%x\n", amd_pmc_get_msg_port(dev), value); + + value = amd_pmc_reg_read(dev, argument); + dev_dbg(dev->dev, "AMD_%s_REGISTER_ARGUMENT:%x\n", amd_pmc_get_msg_port(dev), value); + + value = amd_pmc_reg_read(dev, message); + dev_dbg(dev->dev, "AMD_%s_REGISTER_MESSAGE:%x\n", amd_pmc_get_msg_port(dev), value); +} + +int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret) +{ + int rc; + u32 val, message, argument, response; + + guard(mutex)(&dev->lock); + + if (dev->msg_port == MSG_PORT_S2D) { + message = dev->stb_arg.msg; + argument = dev->stb_arg.arg; + response = dev->stb_arg.resp; + } else { + message = dev->smu_msg; + argument = AMD_PMC_REGISTER_ARGUMENT; + response = AMD_PMC_REGISTER_RESPONSE; + } + + /* Wait until we get a valid response */ + rc = readx_poll_timeout(ioread32, dev->regbase + response, + val, val != 0, PMC_MSG_DELAY_MIN_US, + PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX); + if (rc) { + dev_err(dev->dev, "failed to talk to SMU\n"); + return rc; + } + + /* Write zero to response register */ + amd_pmc_reg_write(dev, response, 0); + + /* Write argument into response register */ + amd_pmc_reg_write(dev, argument, arg); + + /* Write message ID to message ID register */ + amd_pmc_reg_write(dev, message, msg); + + /* Wait until we get a valid response */ + rc = readx_poll_timeout(ioread32, dev->regbase + response, + val, val != 0, PMC_MSG_DELAY_MIN_US, + PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX); + if (rc) { + dev_err(dev->dev, "SMU response timed out\n"); + return rc; + } + + switch (val) { + case AMD_PMC_RESULT_OK: + if (ret) { + /* PMFW may take longer time to return back the data */ + usleep_range(DELAY_MIN_US, 10 * DELAY_MAX_US); + *data = amd_pmc_reg_read(dev, argument); + } + break; + case AMD_PMC_RESULT_CMD_REJECT_BUSY: + dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val); + rc = -EBUSY; + break; + case AMD_PMC_RESULT_CMD_UNKNOWN: + dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val); + rc = -EINVAL; + break; + case AMD_PMC_RESULT_CMD_REJECT_PREREQ: + case AMD_PMC_RESULT_FAILED: + default: + dev_err(dev->dev, "SMU cmd failed. 
err: 0x%x\n", val); + rc = -EIO; + break; + } + + amd_pmc_dump_registers(dev); + return rc; +} + +static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev) +{ + switch (dev->cpu_id) { + case AMD_CPU_ID_PCO: + return MSG_OS_HINT_PCO; + case AMD_CPU_ID_RN: + case AMD_CPU_ID_VG: + case AMD_CPU_ID_YC: + case AMD_CPU_ID_CB: + case AMD_CPU_ID_PS: + case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT: + case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT: + return MSG_OS_HINT_RN; + } + return -EINVAL; +} + +static int amd_pmc_wa_irq1(struct amd_pmc_dev *pdev) +{ + struct device *d; + + d = bus_find_device_by_name(&serio_bus, NULL, "serio0"); + if (!d) + return 0; + if (device_may_wakeup(d)) { + dev_info_once(d, "Disabling IRQ1 wakeup source to avoid platform firmware bug\n"); + disable_irq_wake(1); + device_set_wakeup_enable(d, false); + } + put_device(d); + + return 0; +} + +static int amd_pmc_verify_czn_rtc(struct amd_pmc_dev *pdev, u32 *arg) +{ + struct rtc_device *rtc_device; + time64_t then, now, duration; + struct rtc_wkalrm alarm; + struct rtc_time tm; + int rc; + + /* we haven't yet read SMU version */ + if (!pdev->major) { + rc = amd_pmc_get_smu_version(pdev); + if (rc) + return rc; + } + + if (pdev->major < 64 || (pdev->major == 64 && pdev->minor < 53)) + return 0; + + rtc_device = rtc_class_open("rtc0"); + if (!rtc_device) + return 0; + rc = rtc_read_alarm(rtc_device, &alarm); + if (rc) + return rc; + if (!alarm.enabled) { + dev_dbg(pdev->dev, "alarm not enabled\n"); + return 0; + } + rc = rtc_read_time(rtc_device, &tm); + if (rc) + return rc; + then = rtc_tm_to_time64(&alarm.time); + now = rtc_tm_to_time64(&tm); + duration = then-now; + + /* in the past */ + if (then < now) + return 0; + + /* will be stored in upper 16 bits of s0i3 hint argument, + * so timer wakeup from s0i3 is limited to ~18 hours or less + */ + if (duration <= 4 || duration > U16_MAX) + return -EINVAL; + + *arg |= (duration << 16); + rc = rtc_alarm_irq_enable(rtc_device, 0); + pm_pr_dbg("wakeup timer programmed for %lld seconds\n", duration); + + return rc; +} + +static void amd_pmc_s2idle_prepare(void) +{ + struct amd_pmc_dev *pdev = &pmc; + int rc; + u8 msg; + u32 arg = 1; + + /* Reset and Start SMU logging - to monitor the s0i3 stats */ + amd_pmc_setup_smu_logging(pdev); + + /* Activate CZN specific platform bug workarounds */ + if (pdev->cpu_id == AMD_CPU_ID_CZN && !disable_workarounds) { + rc = amd_pmc_verify_czn_rtc(pdev, &arg); + if (rc) { + dev_err(pdev->dev, "failed to set RTC: %d\n", rc); + return; + } + } + + msg = amd_pmc_get_os_hint(pdev); + rc = amd_pmc_send_cmd(pdev, arg, NULL, msg, false); + if (rc) { + dev_err(pdev->dev, "suspend failed: %d\n", rc); + return; + } + + rc = amd_stb_write(pdev, AMD_PMC_STB_S2IDLE_PREPARE); + if (rc) + dev_err(pdev->dev, "error writing to STB: %d\n", rc); +} + +static void amd_pmc_s2idle_check(void) +{ + struct amd_pmc_dev *pdev = &pmc; + struct smu_metrics table; + int rc; + + /* Avoid triggering OVP */ + if (!get_metrics_table(pdev, &table) && table.s0i3_last_entry_status) + msleep(2500); + + /* Dump the IdleMask before we add to the STB */ + amd_pmc_idlemask_read(pdev, pdev->dev, NULL); + + rc = amd_stb_write(pdev, AMD_PMC_STB_S2IDLE_CHECK); + if (rc) + dev_err(pdev->dev, "error writing to STB: %d\n", rc); +} + +static int amd_pmc_dump_data(struct amd_pmc_dev *pdev) +{ + if (pdev->cpu_id == AMD_CPU_ID_PCO) + return -ENODEV; + + return amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, false); +} + +static void amd_pmc_s2idle_restore(void) +{ + struct amd_pmc_dev *pdev = &pmc; + int rc; + u8 
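
/*
 * Sketch, not driver code: how amd_pmc_verify_czn_rtc() above folds the
 * distance to the programmed RTC alarm into the SMU hint argument. Only
 * the upper 16 bits carry the duration, which is why timer wakeups from
 * s0i3 are limited to U16_MAX seconds (roughly 18 hours).
 */
static int pack_rtc_duration(u32 *arg, u64 duration_secs)
{
	/* Mirrors the driver's bounds check: must fit the 16-bit field */
	if (duration_secs <= 4 || duration_secs > 0xFFFF)
		return -EINVAL;

	*arg |= (u32)duration_secs << 16;
	return 0;
}
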
msg; + + msg = amd_pmc_get_os_hint(pdev); + rc = amd_pmc_send_cmd(pdev, 0, NULL, msg, false); + if (rc) + dev_err(pdev->dev, "resume failed: %d\n", rc); + + /* Let SMU know that we are looking for stats */ + amd_pmc_dump_data(pdev); + + rc = amd_stb_write(pdev, AMD_PMC_STB_S2IDLE_RESTORE); + if (rc) + dev_err(pdev->dev, "error writing to STB: %d\n", rc); + + /* Notify on failed entry */ + amd_pmc_validate_deepest(pdev); + + amd_pmc_process_restore_quirks(pdev); +} + +static struct acpi_s2idle_dev_ops amd_pmc_s2idle_dev_ops = { + .prepare = amd_pmc_s2idle_prepare, + .check = amd_pmc_s2idle_check, + .restore = amd_pmc_s2idle_restore, +}; + +static int amd_pmc_suspend_handler(struct device *dev) +{ + struct amd_pmc_dev *pdev = dev_get_drvdata(dev); + int rc; + + /* + * Must be called only from the same set of dev_pm_ops handlers + * as i8042_pm_suspend() is called: currently just from .suspend. + */ + if (pdev->disable_8042_wakeup && !disable_workarounds) { + rc = amd_pmc_wa_irq1(pdev); + if (rc) { + dev_err(pdev->dev, "failed to adjust keyboard wakeup: %d\n", rc); + return rc; + } + } + + return 0; +} + +static const struct dev_pm_ops amd_pmc_pm = { + .suspend = amd_pmc_suspend_handler, +}; + +static const struct pci_device_id pmc_pci_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CB) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CZN) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RN) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PCO) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RV) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_SP) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_SHP) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_VG) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M60H_ROOT) }, + { } +}; + +static int amd_pmc_probe(struct platform_device *pdev) +{ + struct amd_pmc_dev *dev = &pmc; + struct pci_dev *rdev; + u32 base_addr_lo, base_addr_hi; + u64 base_addr; + int err; + u32 val; + + dev->dev = &pdev->dev; + rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0)); + if (!rdev || !pci_match_id(pmc_pci_ids, rdev)) { + err = -ENODEV; + goto err_pci_dev_put; + } + + dev->cpu_id = rdev->device; + if (dev->cpu_id == AMD_CPU_ID_SP || dev->cpu_id == AMD_CPU_ID_SHP) { + dev_warn_once(dev->dev, "S0i3 is not supported on this hardware\n"); + err = -ENODEV; + goto err_pci_dev_put; + } + + dev->rdev = rdev; + err = amd_smn_read(0, AMD_PMC_BASE_ADDR_LO, &val); + if (err) { + dev_err(dev->dev, "error reading 0x%x\n", AMD_PMC_BASE_ADDR_LO); + err = pcibios_err_to_errno(err); + goto err_pci_dev_put; + } + + base_addr_lo = val & AMD_PMC_BASE_ADDR_HI_MASK; + err = amd_smn_read(0, AMD_PMC_BASE_ADDR_HI, &val); + if (err) { + dev_err(dev->dev, "error reading 0x%x\n", AMD_PMC_BASE_ADDR_HI); + err = pcibios_err_to_errno(err); + goto err_pci_dev_put; + } + + base_addr_hi = val & AMD_PMC_BASE_ADDR_LO_MASK; + base_addr = ((u64)base_addr_hi << 32 | base_addr_lo); + + dev->regbase = devm_ioremap(dev->dev, base_addr + AMD_PMC_BASE_ADDR_OFFSET, + AMD_PMC_MAPPING_SIZE); + if (!dev->regbase) { + err = -ENOMEM; + goto err_pci_dev_put; + } + + err = devm_mutex_init(dev->dev, &dev->lock); + if (err) + goto err_pci_dev_put; + + /* Get num of IP blocks within the SoC */ + amd_pmc_get_ip_info(dev); + + platform_set_drvdata(pdev, dev); + if (IS_ENABLED(CONFIG_SUSPEND)) { + err = 
acpi_register_lps0_dev(&amd_pmc_s2idle_dev_ops); + if (err) + dev_warn(dev->dev, "failed to register LPS0 sleep handler, expect increased power consumption\n"); + if (!disable_workarounds) + amd_pmc_quirks_init(dev); + } + + amd_pmc_dbgfs_register(dev); + err = amd_stb_s2d_init(dev); + if (err) + goto err_pci_dev_put; + + if (IS_ENABLED(CONFIG_AMD_MP2_STB)) + amd_mp2_stb_init(dev); + pm_report_max_hw_sleep(U64_MAX); + return 0; + +err_pci_dev_put: + pci_dev_put(rdev); + return err; +} + +static void amd_pmc_remove(struct platform_device *pdev) +{ + struct amd_pmc_dev *dev = platform_get_drvdata(pdev); + + if (IS_ENABLED(CONFIG_SUSPEND)) + acpi_unregister_lps0_dev(&amd_pmc_s2idle_dev_ops); + amd_pmc_dbgfs_unregister(dev); + pci_dev_put(dev->rdev); + if (IS_ENABLED(CONFIG_AMD_MP2_STB)) + amd_mp2_stb_deinit(dev); +} + +static const struct acpi_device_id amd_pmc_acpi_ids[] = { + {"AMDI0005", 0}, + {"AMDI0006", 0}, + {"AMDI0007", 0}, + {"AMDI0008", 0}, + {"AMDI0009", 0}, + {"AMDI000A", 0}, + {"AMDI000B", 0}, + {"AMD0004", 0}, + {"AMD0005", 0}, + { } +}; +MODULE_DEVICE_TABLE(acpi, amd_pmc_acpi_ids); + +static struct platform_driver amd_pmc_driver = { + .driver = { + .name = "amd_pmc", + .acpi_match_table = amd_pmc_acpi_ids, + .dev_groups = pmc_groups, + .pm = pm_sleep_ptr(&amd_pmc_pm), + }, + .probe = amd_pmc_probe, + .remove = amd_pmc_remove, +}; +module_platform_driver(amd_pmc_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("AMD PMC Driver"); diff --git a/drivers/platform/x86/amd/pmc/pmc.h b/drivers/platform/x86/amd/pmc/pmc.h new file mode 100644 index 000000000000..fe3f53eb5955 --- /dev/null +++ b/drivers/platform/x86/amd/pmc/pmc.h @@ -0,0 +1,174 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * AMD SoC Power Management Controller Driver + * + * Copyright (c) 2023, Advanced Micro Devices, Inc. + * All Rights Reserved. 
+ *
+ * Author: Mario Limonciello <mario.limonciello@amd.com>
+ */
+
+#ifndef PMC_H
+#define PMC_H
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+
+/* SMU communication registers */
+#define AMD_PMC_REGISTER_RESPONSE	0x980
+#define AMD_PMC_REGISTER_ARGUMENT	0x9BC
+
+/* PMC Scratch Registers */
+#define AMD_PMC_SCRATCH_REG_CZN	0x94
+#define AMD_PMC_SCRATCH_REG_YC		0xD14
+#define AMD_PMC_SCRATCH_REG_1AH	0xF14
+
+/* STB Registers */
+#define AMD_PMC_STB_S2IDLE_PREPARE	0xC6000001
+#define AMD_PMC_STB_S2IDLE_RESTORE	0xC6000002
+#define AMD_PMC_STB_S2IDLE_CHECK	0xC6000003
+
+/* Base address of SMU for mapping physical address to virtual address */
+#define AMD_PMC_MAPPING_SIZE		0x01000
+#define AMD_PMC_BASE_ADDR_OFFSET	0x10000
+#define AMD_PMC_BASE_ADDR_LO		0x13B102E8
+#define AMD_PMC_BASE_ADDR_HI		0x13B102EC
+#define AMD_PMC_BASE_ADDR_LO_MASK	GENMASK(15, 0)
+#define AMD_PMC_BASE_ADDR_HI_MASK	GENMASK(31, 20)
+
+/* SMU Response Codes */
+#define AMD_PMC_RESULT_OK		0x01
+#define AMD_PMC_RESULT_CMD_REJECT_BUSY	0xFC
+#define AMD_PMC_RESULT_CMD_REJECT_PREREQ	0xFD
+#define AMD_PMC_RESULT_CMD_UNKNOWN	0xFE
+#define AMD_PMC_RESULT_FAILED		0xFF
+
+/* FCH SSC Registers */
+#define FCH_S0I3_ENTRY_TIME_L_OFFSET	0x30
+#define FCH_S0I3_ENTRY_TIME_H_OFFSET	0x34
+#define FCH_S0I3_EXIT_TIME_L_OFFSET	0x38
+#define FCH_S0I3_EXIT_TIME_H_OFFSET	0x3C
+#define FCH_SSC_MAPPING_SIZE		0x800
+#define FCH_BASE_PHY_ADDR_LOW		0xFED81100
+#define FCH_BASE_PHY_ADDR_HIGH		0x00000000
+
+/* SMU Message Definitions */
+#define SMU_MSG_GETSMUVERSION		0x02
+#define SMU_MSG_LOG_GETDRAM_ADDR_HI	0x04
+#define SMU_MSG_LOG_GETDRAM_ADDR_LO	0x05
+#define SMU_MSG_LOG_START		0x06
+#define SMU_MSG_LOG_RESET		0x07
+#define SMU_MSG_LOG_DUMP_DATA		0x08
+#define SMU_MSG_GET_SUP_CONSTRAINTS	0x09
+
+#define PMC_MSG_DELAY_MIN_US		50
+#define RESPONSE_REGISTER_LOOP_MAX	20000
+
+#define DELAY_MIN_US		2000
+#define DELAY_MAX_US		3000
+
+enum s2d_msg_port {
+	MSG_PORT_PMC,
+	MSG_PORT_S2D,
+};
+
+struct amd_mp2_dev {
+	void __iomem *mmio;
+	void __iomem *vslbase;
+	void *stbdata;
+	void *devres_gid;
+	struct pci_dev *pdev;
+	dma_addr_t dma_addr;
+	int stb_len;
+	bool is_stb_data;
+};
+
+struct stb_arg {
+	u32 s2d_msg_id;
+	u32 msg;
+	u32 arg;
+	u32 resp;
+};
+
+struct amd_pmc_dev {
+	void __iomem *regbase;
+	void __iomem *smu_virt_addr;
+	void __iomem *stb_virt_addr;
+	void __iomem *fch_virt_addr;
+	u32 base_addr;
+	u32 cpu_id;
+	u32 dram_size;
+	u32 active_ips;
+	const struct amd_pmc_bit_map *ips_ptr;
+	u32 num_ips;
+	u32 smu_msg;
+/* SMU version information */
+	u8 smu_program;
+	u8 major;
+	u8 minor;
+	u8 rev;
+	u8 msg_port;
+	struct device *dev;
+	struct pci_dev *rdev;
+	struct mutex lock; /* generic mutex lock */
+	struct dentry *dbgfs_dir;
+	struct quirk_entry *quirks;
+	bool disable_8042_wakeup;
+	struct amd_mp2_dev *mp2;
+	struct stb_arg stb_arg;
+};
+
+struct amd_pmc_bit_map {
+	const char *name;
+	u32 bit_mask;
+};
+
+struct smu_metrics {
+	u32 table_version;
+	u32 hint_count;
+	u32 s0i3_last_entry_status;
+	u32 timein_s0i2;
+	u64 timeentering_s0i3_lastcapture;
+	u64 timeentering_s0i3_totaltime;
+	u64 timeto_resume_to_os_lastcapture;
+	u64 timeto_resume_to_os_totaltime;
+	u64 timein_s0i3_lastcapture;
+	u64 timein_s0i3_totaltime;
+	u64 timein_swdrips_lastcapture;
+	u64 timein_swdrips_totaltime;
+	u64 timecondition_notmet_lastcapture[32];
+	u64 timecondition_notmet_totaltime[32];
+} __packed;
+
+enum amd_pmc_def {
+	MSG_TEST = 0x01,
+	MSG_OS_HINT_PCO,
+	MSG_OS_HINT_RN,
+};
+
+void amd_pmc_process_restore_quirks(struct amd_pmc_dev *dev);
+void amd_pmc_quirks_init(struct amd_pmc_dev *dev);
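
/*
 * Sketch, not driver code: the FCH S0i3 timestamps defined above are
 * 64-bit tick counters split across L/H register pairs and clocked at
 * 48 MHz, so the residency conversion used by s0ix_stats_show() is a
 * divide by 48 to reach microseconds.
 */
static u64 fch_residency_us(u32 entry_lo, u32 entry_hi, u32 exit_lo, u32 exit_hi)
{
	u64 entry = ((u64)entry_hi << 32) | entry_lo;
	u64 exit = ((u64)exit_hi << 32) | exit_lo;

	return (exit - entry) / 48;	/* 48 ticks per microsecond */
}
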
+void amd_mp2_stb_init(struct amd_pmc_dev *dev);
+void amd_mp2_stb_deinit(struct amd_pmc_dev *dev);
+
+/* List of supported CPU ids */
+#define AMD_CPU_ID_RV			0x15D0
+#define AMD_CPU_ID_RN			0x1630
+#define AMD_CPU_ID_PCO			AMD_CPU_ID_RV
+#define AMD_CPU_ID_CZN			AMD_CPU_ID_RN
+#define AMD_CPU_ID_VG			0x1645
+#define AMD_CPU_ID_YC			0x14B5
+#define AMD_CPU_ID_CB			0x14D8
+#define AMD_CPU_ID_PS			0x14E8
+#define AMD_CPU_ID_SP			0x14A4
+#define AMD_CPU_ID_SHP			0x153A
+#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT	0x1507
+#define PCI_DEVICE_ID_AMD_1AH_M60H_ROOT	0x1122
+#define PCI_DEVICE_ID_AMD_MP2_STB	0x172c
+
+int amd_stb_s2d_init(struct amd_pmc_dev *dev);
+int amd_stb_read(struct amd_pmc_dev *dev, u32 *buf);
+int amd_stb_write(struct amd_pmc_dev *dev, u32 data);
+int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret);
+
+#endif /* PMC_H */
diff --git a/drivers/platform/x86/amd/pmf/Kconfig b/drivers/platform/x86/amd/pmf/Kconfig
new file mode 100644
index 000000000000..25b8f7ae3abd
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/Kconfig
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# AMD PMF Driver
+#
+
+config AMD_PMF
+	tristate "AMD Platform Management Framework"
+	depends on ACPI && PCI
+	depends on POWER_SUPPLY
+	depends on AMD_NODE
+	select ACPI_PLATFORM_PROFILE
+	depends on TEE && AMDTEE
+	depends on AMD_SFH_HID
+	depends on HAS_IOMEM
+	help
+	  This driver provides support for the AMD Platform Management Framework.
+	  The goal is to enhance the end user experience by making AMD PCs
+	  smarter, quieter, and more power efficient by adapting to user
+	  behavior and the environment.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called amd_pmf.
+
+config AMD_PMF_DEBUG
+	bool "PMF debug information"
+	depends on AMD_PMF
+	help
+	  Enabling this option gives more debug information on the OEM-fed
+	  power setting values for each PMF feature. The PMF driver gets this
+	  information by evaluating an ACPI method and stores it in the PMF
+	  config store.
+
+	  Say Y here to enable more debug logs, and say N if you are not sure.
diff --git a/drivers/platform/x86/amd/pmf/Makefile b/drivers/platform/x86/amd/pmf/Makefile
new file mode 100644
index 000000000000..5978464e0eb7
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for linux/drivers/platform/x86/amd/pmf
+# AMD Platform Management Framework
+#
+
+obj-$(CONFIG_AMD_PMF) += amd-pmf.o
+amd-pmf-y := core.o acpi.o sps.o \
+	     auto-mode.o cnqf.o \
+	     tee-if.o spc.o
diff --git a/drivers/platform/x86/amd/pmf/acpi.c b/drivers/platform/x86/amd/pmf/acpi.c
new file mode 100644
index 000000000000..13c4fec2c7ef
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/acpi.c
@@ -0,0 +1,606 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Platform Management Framework Driver
+ *
+ * Copyright (c) 2022, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ */
+
+#include <linux/acpi.h>
+#include "pmf.h"
+
+#define APMF_CQL_NOTIFICATION	2
+#define APMF_AMT_NOTIFICATION	3
+
+static union acpi_object *apmf_if_call(struct amd_pmf_dev *pdev, int fn, struct acpi_buffer *param)
+{
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+	acpi_handle ahandle = ACPI_HANDLE(pdev->dev);
+	struct acpi_object_list apmf_if_arg_list;
+	union acpi_object apmf_if_args[2];
+	acpi_status status;
+
+	apmf_if_arg_list.count = 2;
+	apmf_if_arg_list.pointer = &apmf_if_args[0];
+
+	apmf_if_args[0].type = ACPI_TYPE_INTEGER;
+	apmf_if_args[0].integer.value = fn;
+
+	if (param) {
+		apmf_if_args[1].type = ACPI_TYPE_BUFFER;
+		apmf_if_args[1].buffer.length = param->length;
+		apmf_if_args[1].buffer.pointer = param->pointer;
+	} else {
+		apmf_if_args[1].type = ACPI_TYPE_INTEGER;
+		apmf_if_args[1].integer.value = 0;
+	}
+
+	status = acpi_evaluate_object(ahandle, "APMF", &apmf_if_arg_list, &buffer);
+	if (ACPI_FAILURE(status)) {
+		dev_err(pdev->dev, "APMF method:%d call failed\n", fn);
+		kfree(buffer.pointer);
+		return NULL;
+	}
+
+	return buffer.pointer;
+}
+
+static int apmf_if_call_store_buffer(struct amd_pmf_dev *pdev, int fn, void *dest, size_t out_sz)
+{
+	union acpi_object *info;
+	size_t size;
+	int err = 0;
+
+	info = apmf_if_call(pdev, fn, NULL);
+	if (!info)
+		return -EIO;
+
+	if (info->type != ACPI_TYPE_BUFFER) {
+		dev_err(pdev->dev, "object is not a buffer\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (info->buffer.length < 2) {
+		dev_err(pdev->dev, "buffer too small\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	size = *(u16 *)info->buffer.pointer;
+	if (info->buffer.length < size) {
+		dev_err(pdev->dev, "buffer smaller than header size %u < %zu\n",
+			info->buffer.length, size);
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (size < out_sz) {
+		dev_err(pdev->dev, "buffer too small %zu\n", size);
+		err = -EINVAL;
+		goto out;
+	}
+
+	memcpy(dest, info->buffer.pointer, out_sz);
+
+out:
+	kfree(info);
+	return err;
+}
+
+static union acpi_object *apts_if_call(struct amd_pmf_dev *pdev, u32 state_index)
+{
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+	acpi_handle ahandle = ACPI_HANDLE(pdev->dev);
+	struct acpi_object_list apts_if_arg_list;
+	union acpi_object apts_if_args[3];
+	acpi_status status;
+
+	apts_if_arg_list.count = 3;
+	apts_if_arg_list.pointer = &apts_if_args[0];
+
+	apts_if_args[0].type = ACPI_TYPE_INTEGER;
+	apts_if_args[0].integer.value = 1;
+	apts_if_args[1].type = ACPI_TYPE_INTEGER;
+	apts_if_args[1].integer.value = state_index;
+	apts_if_args[2].type = ACPI_TYPE_INTEGER;
+	apts_if_args[2].integer.value = 0;
+
+	status = acpi_evaluate_object(ahandle, "APTS", &apts_if_arg_list, &buffer);
+	if (ACPI_FAILURE(status)) {
+		dev_err(pdev->dev, "APTS state_idx:%u call failed\n", state_index);
+		kfree(buffer.pointer);
+		return NULL;
+	}
+
+	return buffer.pointer;
+}
+
+static int apts_if_call_store_buffer(struct amd_pmf_dev *pdev,
+				     u32 index, void *data, size_t out_sz)
+{
+	union acpi_object *info;
+	size_t size;
+	int err = 0;
+
+	info = apts_if_call(pdev, index);
+	if (!info)
+		return -EIO;
+
+	if (info->type != ACPI_TYPE_BUFFER) {
+		dev_err(pdev->dev, "object is not a buffer\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	size = *(u16 *)info->buffer.pointer;
+	if (info->buffer.length < size) {
+		dev_err(pdev->dev, "buffer smaller than header size %u < %zu\n",
+			info->buffer.length, size);
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (size < out_sz) {
+		dev_err(pdev->dev, "buffer too small 
%zu\n", size); + err = -EINVAL; + goto out; + } + + memcpy(data, info->buffer.pointer, out_sz); +out: + kfree(info); + return err; +} + +int is_apmf_func_supported(struct amd_pmf_dev *pdev, unsigned long index) +{ + /* If bit-n is set, that indicates function n+1 is supported */ + return !!(pdev->supported_func & BIT(index - 1)); +} + +int is_apmf_bios_input_notifications_supported(struct amd_pmf_dev *pdev) +{ + return !!(pdev->notifications & CUSTOM_BIOS_INPUT_BITS); +} + +int apts_get_static_slider_granular_v2(struct amd_pmf_dev *pdev, + struct amd_pmf_apts_granular_output *data, u32 apts_idx) +{ + if (!is_apmf_func_supported(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) + return -EINVAL; + + return apts_if_call_store_buffer(pdev, apts_idx, data, sizeof(*data)); +} + +int apmf_get_static_slider_granular_v2(struct amd_pmf_dev *pdev, + struct apmf_static_slider_granular_output_v2 *data) +{ + if (!is_apmf_func_supported(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) + return -EINVAL; + + return apmf_if_call_store_buffer(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR, + data, sizeof(*data)); +} + +int apmf_get_static_slider_granular(struct amd_pmf_dev *pdev, + struct apmf_static_slider_granular_output *data) +{ + if (!is_apmf_func_supported(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) + return -EINVAL; + + return apmf_if_call_store_buffer(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR, + data, sizeof(*data)); +} + +int apmf_os_power_slider_update(struct amd_pmf_dev *pdev, u8 event) +{ + struct os_power_slider args; + struct acpi_buffer params; + union acpi_object *info; + + args.size = sizeof(args); + args.slider_event = event; + + params.length = sizeof(args); + params.pointer = (void *)&args; + + info = apmf_if_call(pdev, APMF_FUNC_OS_POWER_SLIDER_UPDATE, ¶ms); + if (!info) + return -EIO; + + kfree(info); + return 0; +} + +static void apmf_sbios_heartbeat_notify(struct work_struct *work) +{ + struct amd_pmf_dev *dev = container_of(work, struct amd_pmf_dev, heart_beat.work); + union acpi_object *info; + + dev_dbg(dev->dev, "Sending heartbeat to SBIOS\n"); + info = apmf_if_call(dev, APMF_FUNC_SBIOS_HEARTBEAT, NULL); + if (!info) + return; + + schedule_delayed_work(&dev->heart_beat, secs_to_jiffies(dev->hb_interval)); + kfree(info); +} + +int amd_pmf_notify_sbios_heartbeat_event_v2(struct amd_pmf_dev *dev, u8 flag) +{ + struct sbios_hb_event_v2 args = { }; + struct acpi_buffer params; + union acpi_object *info; + + args.size = sizeof(args); + + switch (flag) { + case ON_LOAD: + args.load = 1; + break; + case ON_UNLOAD: + args.unload = 1; + break; + case ON_SUSPEND: + args.suspend = 1; + break; + case ON_RESUME: + args.resume = 1; + break; + default: + dev_dbg(dev->dev, "Failed to send v2 heartbeat event, flag:0x%x\n", flag); + return -EINVAL; + } + + params.length = sizeof(args); + params.pointer = &args; + + info = apmf_if_call(dev, APMF_FUNC_SBIOS_HEARTBEAT_V2, ¶ms); + if (!info) + return -EIO; + + kfree(info); + return 0; +} + +int apmf_update_fan_idx(struct amd_pmf_dev *pdev, bool manual, u32 idx) +{ + union acpi_object *info; + struct apmf_fan_idx args; + struct acpi_buffer params; + + args.size = sizeof(args); + args.fan_ctl_mode = manual; + args.fan_ctl_idx = idx; + + params.length = sizeof(args); + params.pointer = (void *)&args; + + info = apmf_if_call(pdev, APMF_FUNC_SET_FAN_IDX, ¶ms); + if (!info) + return -EIO; + + kfree(info); + return 0; +} + +static int apmf_notify_smart_pc_update(struct amd_pmf_dev *pdev, u32 val, u32 preq, u32 index) +{ + struct amd_pmf_notify_smart_pc_update args; + struct acpi_buffer 
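
/*
 * Sketch, not driver code: the APMF/APTS mailbox convention relied on by
 * the helpers above. Requests lead with a self-describing size member,
 * and response buffers lead with a u16 payload length that must cover
 * the caller's output structure; the helper name here is illustrative.
 */
static bool apmf_response_fits(const void *buf, u32 acpi_len, size_t out_sz)
{
	u16 size;

	if (acpi_len < sizeof(size))
		return false;

	size = *(const u16 *)buf;	/* payload declares its own length */
	return acpi_len >= size && size >= out_sz;
}
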
params; + union acpi_object *info; + + args.size = sizeof(args); + args.pending_req = preq; + args.custom_bios[index] = val; + + params.length = sizeof(args); + params.pointer = &args; + + info = apmf_if_call(pdev, APMF_FUNC_NOTIFY_SMART_PC_UPDATES, ¶ms); + if (!info) + return -EIO; + + kfree(info); + dev_dbg(pdev->dev, "Notify smart pc update, val: %u\n", val); + + return 0; +} + +int apmf_get_auto_mode_def(struct amd_pmf_dev *pdev, struct apmf_auto_mode *data) +{ + return apmf_if_call_store_buffer(pdev, APMF_FUNC_AUTO_MODE, data, sizeof(*data)); +} + +int apmf_get_sbios_requests_v2(struct amd_pmf_dev *pdev, struct apmf_sbios_req_v2 *req) +{ + return apmf_if_call_store_buffer(pdev, APMF_FUNC_SBIOS_REQUESTS, req, sizeof(*req)); +} + +int apmf_get_sbios_requests_v1(struct amd_pmf_dev *pdev, struct apmf_sbios_req_v1 *req) +{ + return apmf_if_call_store_buffer(pdev, APMF_FUNC_SBIOS_REQUESTS, req, sizeof(*req)); +} + +int apmf_get_sbios_requests(struct amd_pmf_dev *pdev, struct apmf_sbios_req *req) +{ + return apmf_if_call_store_buffer(pdev, APMF_FUNC_SBIOS_REQUESTS, + req, sizeof(*req)); +} + +static void amd_pmf_handle_early_preq(struct amd_pmf_dev *pdev) +{ + if (!pdev->cb_flag) + return; + + amd_pmf_invoke_cmd_enact(pdev); + pdev->cb_flag = false; +} + +static void apmf_event_handler_v2(acpi_handle handle, u32 event, void *data) +{ + struct amd_pmf_dev *pmf_dev = data; + int ret; + + guard(mutex)(&pmf_dev->cb_mutex); + + ret = apmf_get_sbios_requests_v2(pmf_dev, &pmf_dev->req); + if (ret) { + dev_err(pmf_dev->dev, "Failed to get v2 SBIOS requests: %d\n", ret); + return; + } + + dev_dbg(pmf_dev->dev, "Pending request (preq): 0x%x\n", pmf_dev->req.pending_req); + + amd_pmf_handle_early_preq(pmf_dev); +} + +static void apmf_event_handler_v1(acpi_handle handle, u32 event, void *data) +{ + struct amd_pmf_dev *pmf_dev = data; + int ret; + + guard(mutex)(&pmf_dev->cb_mutex); + + ret = apmf_get_sbios_requests_v1(pmf_dev, &pmf_dev->req1); + if (ret) { + dev_err(pmf_dev->dev, "Failed to get v1 SBIOS requests: %d\n", ret); + return; + } + + dev_dbg(pmf_dev->dev, "Pending request (preq1): 0x%x\n", pmf_dev->req1.pending_req); + + amd_pmf_handle_early_preq(pmf_dev); +} + +static void apmf_event_handler(acpi_handle handle, u32 event, void *data) +{ + struct amd_pmf_dev *pmf_dev = data; + struct apmf_sbios_req req; + int ret; + + guard(mutex)(&pmf_dev->update_mutex); + ret = apmf_get_sbios_requests(pmf_dev, &req); + if (ret) { + dev_err(pmf_dev->dev, "Failed to get SBIOS requests:%d\n", ret); + return; + } + + if (req.pending_req & BIT(APMF_AMT_NOTIFICATION)) { + dev_dbg(pmf_dev->dev, "AMT is supported and notifications %s\n", + req.amt_event ? "Enabled" : "Disabled"); + pmf_dev->amt_enabled = !!req.amt_event; + + if (pmf_dev->amt_enabled) + amd_pmf_handle_amt(pmf_dev); + else + amd_pmf_reset_amt(pmf_dev); + } + + if (req.pending_req & BIT(APMF_CQL_NOTIFICATION)) { + dev_dbg(pmf_dev->dev, "CQL is supported and notifications %s\n", + req.cql_event ? 
"Enabled" : "Disabled"); + + /* update the target mode information */ + if (pmf_dev->amt_enabled) + amd_pmf_update_2_cql(pmf_dev, req.cql_event); + } +} + +static int apmf_if_verify_interface(struct amd_pmf_dev *pdev) +{ + struct apmf_verify_interface output; + int err; + + err = apmf_if_call_store_buffer(pdev, APMF_FUNC_VERIFY_INTERFACE, &output, sizeof(output)); + if (err) + return err; + + /* only set if not already set by a quirk */ + if (!pdev->supported_func) + pdev->supported_func = output.supported_functions; + + dev_dbg(pdev->dev, "supported functions:0x%x notifications:0x%x version:%u\n", + output.supported_functions, output.notification_mask, output.version); + + pdev->pmf_if_version = output.version; + + pdev->notifications = output.notification_mask; + return 0; +} + +static int apmf_get_system_params(struct amd_pmf_dev *dev) +{ + struct apmf_system_params params; + int err; + + if (!is_apmf_func_supported(dev, APMF_FUNC_GET_SYS_PARAMS)) + return -EINVAL; + + err = apmf_if_call_store_buffer(dev, APMF_FUNC_GET_SYS_PARAMS, ¶ms, sizeof(params)); + if (err) + return err; + + dev_dbg(dev->dev, "system params mask:0x%x flags:0x%x cmd_code:0x%x heartbeat:%d\n", + params.valid_mask, + params.flags, + params.command_code, + params.heartbeat_int); + params.flags = params.flags & params.valid_mask; + dev->hb_interval = params.heartbeat_int; + + return 0; +} + +int apmf_get_dyn_slider_def_ac(struct amd_pmf_dev *pdev, struct apmf_dyn_slider_output *data) +{ + return apmf_if_call_store_buffer(pdev, APMF_FUNC_DYN_SLIDER_AC, data, sizeof(*data)); +} + +int apmf_get_dyn_slider_def_dc(struct amd_pmf_dev *pdev, struct apmf_dyn_slider_output *data) +{ + return apmf_if_call_store_buffer(pdev, APMF_FUNC_DYN_SLIDER_DC, data, sizeof(*data)); +} + +static apmf_event_handler_t apmf_event_handlers[] = { + [PMF_IF_V1] = apmf_event_handler_v1, + [PMF_IF_V2] = apmf_event_handler_v2, +}; + +int apmf_install_handler(struct amd_pmf_dev *pmf_dev) +{ + acpi_handle ahandle = ACPI_HANDLE(pmf_dev->dev); + acpi_status status; + + /* Install the APMF Notify handler */ + if (is_apmf_func_supported(pmf_dev, APMF_FUNC_AUTO_MODE) && + is_apmf_func_supported(pmf_dev, APMF_FUNC_SBIOS_REQUESTS)) { + status = acpi_install_notify_handler(ahandle, ACPI_ALL_NOTIFY, + apmf_event_handler, pmf_dev); + if (ACPI_FAILURE(status)) { + dev_err(pmf_dev->dev, "failed to install notify handler\n"); + return -ENODEV; + } + + /* Call the handler once manually to catch up with possibly missed notifies. */ + apmf_event_handler(ahandle, 0, pmf_dev); + } + + if (!pmf_dev->smart_pc_enabled) + return -EINVAL; + + switch (pmf_dev->pmf_if_version) { + case PMF_IF_V1: + if (!is_apmf_bios_input_notifications_supported(pmf_dev)) + break; + fallthrough; + case PMF_IF_V2: + status = acpi_install_notify_handler(ahandle, ACPI_ALL_NOTIFY, + apmf_event_handlers[pmf_dev->pmf_if_version], pmf_dev); + if (ACPI_FAILURE(status)) { + dev_err(pmf_dev->dev, + "failed to install notify handler v%d for custom BIOS inputs\n", + pmf_dev->pmf_if_version); + return -ENODEV; + } + break; + default: + break; + } + + return 0; +} + +int apmf_check_smart_pc(struct amd_pmf_dev *pmf_dev) +{ + struct platform_device *pdev = to_platform_device(pmf_dev->dev); + + pmf_dev->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!pmf_dev->res) { + dev_dbg(pmf_dev->dev, "Failed to get I/O memory resource\n"); + return -EINVAL; + } + + pmf_dev->policy_addr = pmf_dev->res->start; + /* + * We cannot use resource_size() here because it adds an extra byte to round off the size. 
+ * In the case of PMF ResourceTemplate(), this rounding is already handled within the _CRS. + * Using resource_size() would increase the resource size by 1, causing a mismatch with the + * length field and leading to issues. Therefore, simply use end-start of the ACPI resource + * to obtain the actual length. + */ + pmf_dev->policy_sz = pmf_dev->res->end - pmf_dev->res->start; + + if (!pmf_dev->policy_addr || pmf_dev->policy_sz > POLICY_BUF_MAX_SZ || + pmf_dev->policy_sz == 0) { + dev_err(pmf_dev->dev, "Incorrect policy params, possibly a SBIOS bug\n"); + return -EINVAL; + } + + return 0; +} + +int amd_pmf_smartpc_apply_bios_output(struct amd_pmf_dev *dev, u32 val, u32 preq, u32 idx) +{ + if (!is_apmf_func_supported(dev, APMF_FUNC_NOTIFY_SMART_PC_UPDATES)) + return -EINVAL; + + return apmf_notify_smart_pc_update(dev, val, preq, idx); +} + +void apmf_acpi_deinit(struct amd_pmf_dev *pmf_dev) +{ + acpi_handle ahandle = ACPI_HANDLE(pmf_dev->dev); + + if (pmf_dev->hb_interval && pmf_dev->pmf_if_version == PMF_IF_V1) + cancel_delayed_work_sync(&pmf_dev->heart_beat); + + if (is_apmf_func_supported(pmf_dev, APMF_FUNC_AUTO_MODE) && + is_apmf_func_supported(pmf_dev, APMF_FUNC_SBIOS_REQUESTS)) + acpi_remove_notify_handler(ahandle, ACPI_ALL_NOTIFY, apmf_event_handler); + + if (!pmf_dev->smart_pc_enabled) + return; + + switch (pmf_dev->pmf_if_version) { + case PMF_IF_V1: + if (!is_apmf_bios_input_notifications_supported(pmf_dev)) + break; + fallthrough; + case PMF_IF_V2: + acpi_remove_notify_handler(ahandle, ACPI_ALL_NOTIFY, + apmf_event_handlers[pmf_dev->pmf_if_version]); + break; + default: + break; + } +} + +int apmf_acpi_init(struct amd_pmf_dev *pmf_dev) +{ + int ret; + + ret = apmf_if_verify_interface(pmf_dev); + if (ret) { + dev_err(pmf_dev->dev, "APMF verify interface failed :%d\n", ret); + goto out; + } + + ret = apmf_get_system_params(pmf_dev); + if (ret) { + dev_dbg(pmf_dev->dev, "APMF apmf_get_system_params failed :%d\n", ret); + goto out; + } + + if (pmf_dev->hb_interval && pmf_dev->pmf_if_version == PMF_IF_V1) { + /* send heartbeats only if the interval is not zero */ + INIT_DELAYED_WORK(&pmf_dev->heart_beat, apmf_sbios_heartbeat_notify); + schedule_delayed_work(&pmf_dev->heart_beat, 0); + } + +out: + return ret; +} diff --git a/drivers/platform/x86/amd/pmf/auto-mode.c b/drivers/platform/x86/amd/pmf/auto-mode.c new file mode 100644 index 000000000000..faf15a8f74bb --- /dev/null +++ b/drivers/platform/x86/amd/pmf/auto-mode.c @@ -0,0 +1,441 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * AMD Platform Management Framework Driver + * + * Copyright (c) 2022, Advanced Micro Devices, Inc. + * All Rights Reserved. 
+ * + * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com> + */ + +#include <linux/acpi.h> +#include <linux/workqueue.h> +#include "pmf.h" + +static struct auto_mode_mode_config config_store; +static const char *state_as_str(unsigned int state); + +#ifdef CONFIG_AMD_PMF_DEBUG +static void amd_pmf_dump_auto_mode_defaults(struct auto_mode_mode_config *data) +{ + struct auto_mode_mode_settings *its_mode; + + pr_debug("Auto Mode Data - BEGIN\n"); + + /* time constant */ + pr_debug("balanced_to_perf: %u ms\n", + data->transition[AUTO_TRANSITION_TO_PERFORMANCE].time_constant); + pr_debug("perf_to_balanced: %u ms\n", + data->transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].time_constant); + pr_debug("quiet_to_balanced: %u ms\n", + data->transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].time_constant); + pr_debug("balanced_to_quiet: %u ms\n", + data->transition[AUTO_TRANSITION_TO_QUIET].time_constant); + + /* power floor */ + pr_debug("pfloor_perf: %u mW\n", data->mode_set[AUTO_PERFORMANCE].power_floor); + pr_debug("pfloor_balanced: %u mW\n", data->mode_set[AUTO_BALANCE].power_floor); + pr_debug("pfloor_quiet: %u mW\n", data->mode_set[AUTO_QUIET].power_floor); + + /* Power delta for mode change */ + pr_debug("pd_balanced_to_perf: %u mW\n", + data->transition[AUTO_TRANSITION_TO_PERFORMANCE].power_delta); + pr_debug("pd_perf_to_balanced: %u mW\n", + data->transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].power_delta); + pr_debug("pd_quiet_to_balanced: %u mW\n", + data->transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].power_delta); + pr_debug("pd_balanced_to_quiet: %u mW\n", + data->transition[AUTO_TRANSITION_TO_QUIET].power_delta); + + /* skin temperature limits */ + its_mode = &data->mode_set[AUTO_PERFORMANCE_ON_LAP]; + pr_debug("stt_apu_perf_on_lap: %u C\n", + its_mode->power_control.stt_skin_temp[STT_TEMP_APU]); + pr_debug("stt_hs2_perf_on_lap: %u C\n", + its_mode->power_control.stt_skin_temp[STT_TEMP_HS2]); + pr_debug("stt_min_limit_perf_on_lap: %u mW\n", its_mode->power_control.stt_min); + + its_mode = &data->mode_set[AUTO_PERFORMANCE]; + pr_debug("stt_apu_perf: %u C\n", its_mode->power_control.stt_skin_temp[STT_TEMP_APU]); + pr_debug("stt_hs2_perf: %u C\n", its_mode->power_control.stt_skin_temp[STT_TEMP_HS2]); + pr_debug("stt_min_limit_perf: %u mW\n", its_mode->power_control.stt_min); + + its_mode = &data->mode_set[AUTO_BALANCE]; + pr_debug("stt_apu_balanced: %u C\n", its_mode->power_control.stt_skin_temp[STT_TEMP_APU]); + pr_debug("stt_hs2_balanced: %u C\n", its_mode->power_control.stt_skin_temp[STT_TEMP_HS2]); + pr_debug("stt_min_limit_balanced: %u mW\n", its_mode->power_control.stt_min); + + its_mode = &data->mode_set[AUTO_QUIET]; + pr_debug("stt_apu_quiet: %u C\n", its_mode->power_control.stt_skin_temp[STT_TEMP_APU]); + pr_debug("stt_hs2_quiet: %u C\n", its_mode->power_control.stt_skin_temp[STT_TEMP_HS2]); + pr_debug("stt_min_limit_quiet: %u mW\n", its_mode->power_control.stt_min); + + /* SPL based power limits */ + its_mode = &data->mode_set[AUTO_PERFORMANCE_ON_LAP]; + pr_debug("fppt_perf_on_lap: %u mW\n", its_mode->power_control.fppt); + pr_debug("sppt_perf_on_lap: %u mW\n", its_mode->power_control.sppt); + pr_debug("spl_perf_on_lap: %u mW\n", its_mode->power_control.spl); + pr_debug("sppt_apu_only_perf_on_lap: %u mW\n", its_mode->power_control.sppt_apu_only); + + its_mode = &data->mode_set[AUTO_PERFORMANCE]; + pr_debug("fppt_perf: %u mW\n", its_mode->power_control.fppt); + pr_debug("sppt_perf: %u mW\n", its_mode->power_control.sppt); + pr_debug("spl_perf: %u mW\n", 
its_mode->power_control.spl); + pr_debug("sppt_apu_only_perf: %u mW\n", its_mode->power_control.sppt_apu_only); + + its_mode = &data->mode_set[AUTO_BALANCE]; + pr_debug("fppt_balanced: %u mW\n", its_mode->power_control.fppt); + pr_debug("sppt_balanced: %u mW\n", its_mode->power_control.sppt); + pr_debug("spl_balanced: %u mW\n", its_mode->power_control.spl); + pr_debug("sppt_apu_only_balanced: %u mW\n", its_mode->power_control.sppt_apu_only); + + its_mode = &data->mode_set[AUTO_QUIET]; + pr_debug("fppt_quiet: %u mW\n", its_mode->power_control.fppt); + pr_debug("sppt_quiet: %u mW\n", its_mode->power_control.sppt); + pr_debug("spl_quiet: %u mW\n", its_mode->power_control.spl); + pr_debug("sppt_apu_only_quiet: %u mW\n", its_mode->power_control.sppt_apu_only); + + /* Fan ID */ + pr_debug("fan_id_perf: %lu\n", + data->mode_set[AUTO_PERFORMANCE].fan_control.fan_id); + pr_debug("fan_id_balanced: %lu\n", + data->mode_set[AUTO_BALANCE].fan_control.fan_id); + pr_debug("fan_id_quiet: %lu\n", + data->mode_set[AUTO_QUIET].fan_control.fan_id); + + pr_debug("Auto Mode Data - END\n"); +} +#else +static void amd_pmf_dump_auto_mode_defaults(struct auto_mode_mode_config *data) {} +#endif + +static void amd_pmf_set_automode(struct amd_pmf_dev *dev, int idx, + struct auto_mode_mode_config *table) +{ + struct power_table_control *pwr_ctrl = &config_store.mode_set[idx].power_control; + + amd_pmf_send_cmd(dev, SET_SPL, SET_CMD, pwr_ctrl->spl, NULL); + amd_pmf_send_cmd(dev, SET_FPPT, SET_CMD, pwr_ctrl->fppt, NULL); + amd_pmf_send_cmd(dev, SET_SPPT, SET_CMD, pwr_ctrl->sppt, NULL); + amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, SET_CMD, pwr_ctrl->sppt_apu_only, NULL); + amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, SET_CMD, pwr_ctrl->stt_min, NULL); + amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, SET_CMD, + fixp_q88_fromint(pwr_ctrl->stt_skin_temp[STT_TEMP_APU]), NULL); + amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, SET_CMD, + fixp_q88_fromint(pwr_ctrl->stt_skin_temp[STT_TEMP_HS2]), NULL); + + if (is_apmf_func_supported(dev, APMF_FUNC_SET_FAN_IDX)) + apmf_update_fan_idx(dev, config_store.mode_set[idx].fan_control.manual, + config_store.mode_set[idx].fan_control.fan_id); +} + +static int amd_pmf_get_moving_avg(struct amd_pmf_dev *pdev, int socket_power) +{ + int i, total = 0; + + if (pdev->socket_power_history_idx == -1) { + for (i = 0; i < AVG_SAMPLE_SIZE; i++) + pdev->socket_power_history[i] = socket_power; + } + + pdev->socket_power_history_idx = (pdev->socket_power_history_idx + 1) % AVG_SAMPLE_SIZE; + pdev->socket_power_history[pdev->socket_power_history_idx] = socket_power; + + for (i = 0; i < AVG_SAMPLE_SIZE; i++) + total += pdev->socket_power_history[i]; + + return total / AVG_SAMPLE_SIZE; +} + +void amd_pmf_trans_automode(struct amd_pmf_dev *dev, int socket_power, ktime_t time_elapsed_ms) +{ + int avg_power = 0; + bool update = false; + int i, j; + + /* Get the average moving average computed by auto mode algorithm */ + avg_power = amd_pmf_get_moving_avg(dev, socket_power); + + for (i = 0; i < AUTO_TRANSITION_MAX; i++) { + if ((config_store.transition[i].shifting_up && avg_power >= + config_store.transition[i].power_threshold) || + (!config_store.transition[i].shifting_up && avg_power <= + config_store.transition[i].power_threshold)) { + if (config_store.transition[i].timer < + config_store.transition[i].time_constant) + config_store.transition[i].timer += time_elapsed_ms; + } else { + config_store.transition[i].timer = 0; + } + + if (config_store.transition[i].timer >= + config_store.transition[i].time_constant && + 
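
/*
 * Sketch, not driver code: the ring-buffer moving average kept by
 * amd_pmf_get_moving_avg() above. The first sample seeds the whole
 * window so early averages are not dragged toward zero; the window size
 * here is illustrative, the driver defines its own.
 */
#define SAMPLE_WINDOW	8

static int moving_avg(int *history, int *idx, int sample)
{
	int i, total = 0;

	if (*idx == -1)			/* first sample seeds the window */
		for (i = 0; i < SAMPLE_WINDOW; i++)
			history[i] = sample;

	*idx = (*idx + 1) % SAMPLE_WINDOW;
	history[*idx] = sample;

	for (i = 0; i < SAMPLE_WINDOW; i++)
		total += history[i];

	return total / SAMPLE_WINDOW;
}
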
		    !config_store.transition[i].applied) {
			config_store.transition[i].applied = true;
			update = true;
		} else if (config_store.transition[i].timer <=
			   config_store.transition[i].time_constant &&
			   config_store.transition[i].applied) {
			config_store.transition[i].applied = false;
			update = true;
		}

#ifdef CONFIG_AMD_PMF_DEBUG
		dev_dbg(dev->dev, "[AUTO MODE] average_power : %d mW mode: %s\n", avg_power,
			state_as_str(config_store.current_mode));

		dev_dbg(dev->dev, "[AUTO MODE] time: %lld ms timer: %u ms tc: %u ms\n",
			time_elapsed_ms, config_store.transition[i].timer,
			config_store.transition[i].time_constant);

		dev_dbg(dev->dev, "[AUTO MODE] shiftup: %u pt: %u mW pf: %u mW pd: %u mW\n",
			config_store.transition[i].shifting_up,
			config_store.transition[i].power_threshold,
			config_store.mode_set[i].power_floor,
			config_store.transition[i].power_delta);
#endif
	}

	dev_dbg(dev->dev, "[AUTO_MODE] avg power: %u mW mode: %s\n", avg_power,
		state_as_str(config_store.current_mode));

#ifdef CONFIG_AMD_PMF_DEBUG
	dev_dbg(dev->dev, "[AUTO MODE] priority1: %u priority2: %u priority3: %u priority4: %u\n",
		config_store.transition[0].applied,
		config_store.transition[1].applied,
		config_store.transition[2].applied,
		config_store.transition[3].applied);
#endif

	if (update) {
		for (j = 0; j < AUTO_TRANSITION_MAX; j++) {
			/* Apply the mode with the highest priority identified */
			if (config_store.transition[j].applied) {
				if (config_store.current_mode !=
				    config_store.transition[j].target_mode) {
					config_store.current_mode =
						config_store.transition[j].target_mode;
					dev_dbg(dev->dev, "[AUTO_MODE] moving to mode:%s\n",
						state_as_str(config_store.current_mode));
					amd_pmf_set_automode(dev, config_store.current_mode, NULL);
				}
				break;
			}
		}
	}
}

void amd_pmf_update_2_cql(struct amd_pmf_dev *dev, bool is_cql_event)
{
	int mode = config_store.current_mode;

	config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].target_mode =
		is_cql_event ?
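
/*
 * Sketch, not driver code: the SET_STT_LIMIT_APU/HS2 values sent by
 * amd_pmf_set_automode() above pass through fixp_q88_fromint(). Assuming
 * the usual Q8.8 fixed-point layout (8 integer bits, 8 fractional bits),
 * converting a whole-degree value is a left shift by 8.
 */
static inline u32 q88_from_int(u32 celsius)
{
	return celsius << 8;	/* e.g. 45 C becomes 0x2D00 */
}
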
AUTO_PERFORMANCE_ON_LAP : AUTO_PERFORMANCE; + + if ((mode == AUTO_PERFORMANCE || mode == AUTO_PERFORMANCE_ON_LAP) && + mode != config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].target_mode) { + mode = config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].target_mode; + amd_pmf_set_automode(dev, mode, NULL); + } + dev_dbg(dev->dev, "updated CQL thermals\n"); +} + +static void amd_pmf_get_power_threshold(void) +{ + config_store.transition[AUTO_TRANSITION_TO_QUIET].power_threshold = + config_store.mode_set[AUTO_BALANCE].power_floor - + config_store.transition[AUTO_TRANSITION_TO_QUIET].power_delta; + + config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].power_threshold = + config_store.mode_set[AUTO_BALANCE].power_floor - + config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].power_delta; + + config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].power_threshold = + config_store.mode_set[AUTO_QUIET].power_floor - + config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].power_delta; + + config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].power_threshold = + config_store.mode_set[AUTO_PERFORMANCE].power_floor - + config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].power_delta; + +#ifdef CONFIG_AMD_PMF_DEBUG + pr_debug("[AUTO MODE TO_QUIET] pt: %u mW pf: %u mW pd: %u mW\n", + config_store.transition[AUTO_TRANSITION_TO_QUIET].power_threshold, + config_store.mode_set[AUTO_BALANCE].power_floor, + config_store.transition[AUTO_TRANSITION_TO_QUIET].power_delta); + + pr_debug("[AUTO MODE TO_PERFORMANCE] pt: %u mW pf: %u mW pd: %u mW\n", + config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].power_threshold, + config_store.mode_set[AUTO_BALANCE].power_floor, + config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].power_delta); + + pr_debug("[AUTO MODE QUIET_TO_BALANCE] pt: %u mW pf: %u mW pd: %u mW\n", + config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE] + .power_threshold, + config_store.mode_set[AUTO_QUIET].power_floor, + config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].power_delta); + + pr_debug("[AUTO MODE PERFORMANCE_TO_BALANCE] pt: %u mW pf: %u mW pd: %u mW\n", + config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE] + .power_threshold, + config_store.mode_set[AUTO_PERFORMANCE].power_floor, + config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].power_delta); +#endif +} + +static const char *state_as_str(unsigned int state) +{ + switch (state) { + case AUTO_QUIET: + return "QUIET"; + case AUTO_BALANCE: + return "BALANCED"; + case AUTO_PERFORMANCE_ON_LAP: + return "ON_LAP"; + case AUTO_PERFORMANCE: + return "PERFORMANCE"; + default: + return "Unknown Auto Mode State"; + } +} + +static void amd_pmf_load_defaults_auto_mode(struct amd_pmf_dev *dev) +{ + struct apmf_auto_mode output; + struct power_table_control *pwr_ctrl; + int i; + + apmf_get_auto_mode_def(dev, &output); + /* time constant */ + config_store.transition[AUTO_TRANSITION_TO_QUIET].time_constant = + output.balanced_to_quiet; + config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].time_constant = + output.balanced_to_perf; + config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].time_constant = + output.quiet_to_balanced; + config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].time_constant = + output.perf_to_balanced; + + /* power floor */ + config_store.mode_set[AUTO_QUIET].power_floor = output.pfloor_quiet; + config_store.mode_set[AUTO_BALANCE].power_floor = output.pfloor_balanced; + 
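
/*
 * Worked example with illustrative numbers (the real floors and deltas
 * come from the APMF defaults loaded here): each threshold computed by
 * amd_pmf_get_power_threshold() is power_floor minus power_delta. If the
 * balanced-mode power_floor is 12000 mW and the TO_QUIET power_delta is
 * 2000 mW, then
 *
 *	power_threshold = 12000 - 2000 = 10000 mW
 *
 * and, since TO_QUIET has shifting_up == false, its transition timer
 * only accumulates while the moving average stays at or below 10000 mW.
 */
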
config_store.mode_set[AUTO_PERFORMANCE].power_floor = output.pfloor_perf; + config_store.mode_set[AUTO_PERFORMANCE_ON_LAP].power_floor = output.pfloor_perf; + + /* Power delta for mode change */ + config_store.transition[AUTO_TRANSITION_TO_QUIET].power_delta = + output.pd_balanced_to_quiet; + config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].power_delta = + output.pd_balanced_to_perf; + config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].power_delta = + output.pd_quiet_to_balanced; + config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].power_delta = + output.pd_perf_to_balanced; + + /* Power threshold */ + amd_pmf_get_power_threshold(); + + /* skin temperature limits */ + pwr_ctrl = &config_store.mode_set[AUTO_QUIET].power_control; + pwr_ctrl->spl = output.spl_quiet; + pwr_ctrl->sppt = output.sppt_quiet; + pwr_ctrl->fppt = output.fppt_quiet; + pwr_ctrl->sppt_apu_only = output.sppt_apu_only_quiet; + pwr_ctrl->stt_min = output.stt_min_limit_quiet; + pwr_ctrl->stt_skin_temp[STT_TEMP_APU] = output.stt_apu_quiet; + pwr_ctrl->stt_skin_temp[STT_TEMP_HS2] = output.stt_hs2_quiet; + + pwr_ctrl = &config_store.mode_set[AUTO_BALANCE].power_control; + pwr_ctrl->spl = output.spl_balanced; + pwr_ctrl->sppt = output.sppt_balanced; + pwr_ctrl->fppt = output.fppt_balanced; + pwr_ctrl->sppt_apu_only = output.sppt_apu_only_balanced; + pwr_ctrl->stt_min = output.stt_min_limit_balanced; + pwr_ctrl->stt_skin_temp[STT_TEMP_APU] = output.stt_apu_balanced; + pwr_ctrl->stt_skin_temp[STT_TEMP_HS2] = output.stt_hs2_balanced; + + pwr_ctrl = &config_store.mode_set[AUTO_PERFORMANCE].power_control; + pwr_ctrl->spl = output.spl_perf; + pwr_ctrl->sppt = output.sppt_perf; + pwr_ctrl->fppt = output.fppt_perf; + pwr_ctrl->sppt_apu_only = output.sppt_apu_only_perf; + pwr_ctrl->stt_min = output.stt_min_limit_perf; + pwr_ctrl->stt_skin_temp[STT_TEMP_APU] = output.stt_apu_perf; + pwr_ctrl->stt_skin_temp[STT_TEMP_HS2] = output.stt_hs2_perf; + + pwr_ctrl = &config_store.mode_set[AUTO_PERFORMANCE_ON_LAP].power_control; + pwr_ctrl->spl = output.spl_perf_on_lap; + pwr_ctrl->sppt = output.sppt_perf_on_lap; + pwr_ctrl->fppt = output.fppt_perf_on_lap; + pwr_ctrl->sppt_apu_only = output.sppt_apu_only_perf_on_lap; + pwr_ctrl->stt_min = output.stt_min_limit_perf_on_lap; + pwr_ctrl->stt_skin_temp[STT_TEMP_APU] = output.stt_apu_perf_on_lap; + pwr_ctrl->stt_skin_temp[STT_TEMP_HS2] = output.stt_hs2_perf_on_lap; + + /* Fan ID */ + config_store.mode_set[AUTO_QUIET].fan_control.fan_id = output.fan_id_quiet; + config_store.mode_set[AUTO_BALANCE].fan_control.fan_id = output.fan_id_balanced; + config_store.mode_set[AUTO_PERFORMANCE].fan_control.fan_id = output.fan_id_perf; + config_store.mode_set[AUTO_PERFORMANCE_ON_LAP].fan_control.fan_id = + output.fan_id_perf; + + config_store.transition[AUTO_TRANSITION_TO_QUIET].target_mode = AUTO_QUIET; + config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].target_mode = + AUTO_PERFORMANCE; + config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].target_mode = + AUTO_BALANCE; + config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].target_mode = + AUTO_BALANCE; + + config_store.transition[AUTO_TRANSITION_TO_QUIET].shifting_up = false; + config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].shifting_up = true; + config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].shifting_up = true; + config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].shifting_up = + false; + + for (i = 0 ; i < AUTO_MODE_MAX ; i++) { + if 
(config_store.mode_set[i].fan_control.fan_id == FAN_INDEX_AUTO) + config_store.mode_set[i].fan_control.manual = false; + else + config_store.mode_set[i].fan_control.manual = true; + } + + /* set to initial default values */ + config_store.current_mode = AUTO_BALANCE; + dev->socket_power_history_idx = -1; + + amd_pmf_dump_auto_mode_defaults(&config_store); +} + +int amd_pmf_reset_amt(struct amd_pmf_dev *dev) +{ + /* + * OEM BIOS implementation guide says that if the auto mode is enabled + * the platform_profile registration shall be done by the OEM driver. + * There could be cases where both static slider and auto mode BIOS + * functions are enabled, in that case enable static slider updates + * only if it advertised as supported. + */ + + if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) { + dev_dbg(dev->dev, "resetting AMT thermals\n"); + amd_pmf_set_sps_power_limits(dev); + } + return 0; +} + +void amd_pmf_handle_amt(struct amd_pmf_dev *dev) +{ + amd_pmf_set_automode(dev, config_store.current_mode, NULL); +} + +void amd_pmf_deinit_auto_mode(struct amd_pmf_dev *dev) +{ + cancel_delayed_work_sync(&dev->work_buffer); +} + +void amd_pmf_init_auto_mode(struct amd_pmf_dev *dev) +{ + amd_pmf_load_defaults_auto_mode(dev); + amd_pmf_init_metrics_table(dev); +} diff --git a/drivers/platform/x86/amd/pmf/cnqf.c b/drivers/platform/x86/amd/pmf/cnqf.c new file mode 100644 index 000000000000..5469fefb6001 --- /dev/null +++ b/drivers/platform/x86/amd/pmf/cnqf.c @@ -0,0 +1,467 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * AMD Platform Management Framework Driver + * + * Copyright (c) 2022, Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com> + */ + +#include <linux/string_choices.h> +#include <linux/workqueue.h> +#include "pmf.h" + +static struct cnqf_config config_store; + +#ifdef CONFIG_AMD_PMF_DEBUG +static const char *state_as_str_cnqf(unsigned int state) +{ + switch (state) { + case APMF_CNQF_TURBO: + return "turbo"; + case APMF_CNQF_PERFORMANCE: + return "performance"; + case APMF_CNQF_BALANCE: + return "balance"; + case APMF_CNQF_QUIET: + return "quiet"; + default: + return "Unknown CnQF State"; + } +} + +static void amd_pmf_cnqf_dump_defaults(struct apmf_dyn_slider_output *data, int idx) +{ + int i; + + pr_debug("Dynamic Slider %s Defaults - BEGIN\n", idx ? 
"DC" : "AC"); + pr_debug("size: %u\n", data->size); + pr_debug("flags: 0x%x\n", data->flags); + + /* Time constants */ + pr_debug("t_perf_to_turbo: %u ms\n", data->t_perf_to_turbo); + pr_debug("t_balanced_to_perf: %u ms\n", data->t_balanced_to_perf); + pr_debug("t_quiet_to_balanced: %u ms\n", data->t_quiet_to_balanced); + pr_debug("t_balanced_to_quiet: %u ms\n", data->t_balanced_to_quiet); + pr_debug("t_perf_to_balanced: %u ms\n", data->t_perf_to_balanced); + pr_debug("t_turbo_to_perf: %u ms\n", data->t_turbo_to_perf); + + for (i = 0 ; i < CNQF_MODE_MAX ; i++) { + pr_debug("pfloor_%s: %u mW\n", state_as_str_cnqf(i), data->ps[i].pfloor); + pr_debug("fppt_%s: %u mW\n", state_as_str_cnqf(i), data->ps[i].fppt); + pr_debug("sppt_%s: %u mW\n", state_as_str_cnqf(i), data->ps[i].sppt); + pr_debug("sppt_apuonly_%s: %u mW\n", + state_as_str_cnqf(i), data->ps[i].sppt_apu_only); + pr_debug("spl_%s: %u mW\n", state_as_str_cnqf(i), data->ps[i].spl); + pr_debug("stt_minlimit_%s: %u mW\n", + state_as_str_cnqf(i), data->ps[i].stt_min_limit); + pr_debug("stt_skintemp_apu_%s: %u C\n", state_as_str_cnqf(i), + data->ps[i].stt_skintemp[STT_TEMP_APU]); + pr_debug("stt_skintemp_hs2_%s: %u C\n", state_as_str_cnqf(i), + data->ps[i].stt_skintemp[STT_TEMP_HS2]); + pr_debug("fan_id_%s: %u\n", state_as_str_cnqf(i), data->ps[i].fan_id); + } + + pr_debug("Dynamic Slider %s Defaults - END\n", idx ? "DC" : "AC"); +} +#else +static void amd_pmf_cnqf_dump_defaults(struct apmf_dyn_slider_output *data, int idx) {} +#endif + +static int amd_pmf_set_cnqf(struct amd_pmf_dev *dev, int src, int idx, + struct cnqf_config *table) +{ + struct power_table_control *pc; + + pc = &config_store.mode_set[src][idx].power_control; + + amd_pmf_send_cmd(dev, SET_SPL, SET_CMD, pc->spl, NULL); + amd_pmf_send_cmd(dev, SET_FPPT, SET_CMD, pc->fppt, NULL); + amd_pmf_send_cmd(dev, SET_SPPT, SET_CMD, pc->sppt, NULL); + amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, SET_CMD, pc->sppt_apu_only, NULL); + amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, SET_CMD, pc->stt_min, NULL); + amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, SET_CMD, + fixp_q88_fromint(pc->stt_skin_temp[STT_TEMP_APU]), NULL); + amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, SET_CMD, + fixp_q88_fromint(pc->stt_skin_temp[STT_TEMP_HS2]), NULL); + + if (is_apmf_func_supported(dev, APMF_FUNC_SET_FAN_IDX)) + apmf_update_fan_idx(dev, + config_store.mode_set[src][idx].fan_control.manual, + config_store.mode_set[src][idx].fan_control.fan_id); + + return 0; +} + +static void amd_pmf_update_power_threshold(int src) +{ + struct cnqf_mode_settings *ts; + struct cnqf_tran_params *tp; + + tp = &config_store.trans_param[src][CNQF_TRANSITION_TO_QUIET]; + ts = &config_store.mode_set[src][CNQF_MODE_BALANCE]; + tp->power_threshold = ts->power_floor; + + tp = &config_store.trans_param[src][CNQF_TRANSITION_TO_TURBO]; + ts = &config_store.mode_set[src][CNQF_MODE_PERFORMANCE]; + tp->power_threshold = ts->power_floor; + + tp = &config_store.trans_param[src][CNQF_TRANSITION_FROM_BALANCE_TO_PERFORMANCE]; + ts = &config_store.mode_set[src][CNQF_MODE_BALANCE]; + tp->power_threshold = ts->power_floor; + + tp = &config_store.trans_param[src][CNQF_TRANSITION_FROM_PERFORMANCE_TO_BALANCE]; + ts = &config_store.mode_set[src][CNQF_MODE_PERFORMANCE]; + tp->power_threshold = ts->power_floor; + + tp = &config_store.trans_param[src][CNQF_TRANSITION_FROM_QUIET_TO_BALANCE]; + ts = &config_store.mode_set[src][CNQF_MODE_QUIET]; + tp->power_threshold = ts->power_floor; + + tp = &config_store.trans_param[src][CNQF_TRANSITION_FROM_TURBO_TO_PERFORMANCE]; + 
ts = &config_store.mode_set[src][CNQF_MODE_TURBO]; + tp->power_threshold = ts->power_floor; +} + +static const char *state_as_str(unsigned int state) +{ + switch (state) { + case CNQF_MODE_QUIET: + return "QUIET"; + case CNQF_MODE_BALANCE: + return "BALANCED"; + case CNQF_MODE_TURBO: + return "TURBO"; + case CNQF_MODE_PERFORMANCE: + return "PERFORMANCE"; + default: + return "Unknown CnQF mode"; + } +} + +static int amd_pmf_cnqf_get_power_source(struct amd_pmf_dev *dev) +{ + if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) && + is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) + return amd_pmf_get_power_source(); + else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) + return POWER_SOURCE_DC; + else + return POWER_SOURCE_AC; +} + +int amd_pmf_trans_cnqf(struct amd_pmf_dev *dev, int socket_power, ktime_t time_lapsed_ms) +{ + struct cnqf_tran_params *tp; + int src, i, j; + u32 avg_power = 0; + + src = amd_pmf_cnqf_get_power_source(dev); + + if (is_pprof_balanced(dev)) { + amd_pmf_set_cnqf(dev, src, config_store.current_mode, NULL); + } else { + /* + * Return from here if the platform_profile is not balanced + * so that preference is given to user mode selection, rather + * than enforcing CnQF to run all the time (if enabled) + */ + return -EINVAL; + } + + for (i = 0; i < CNQF_TRANSITION_MAX; i++) { + config_store.trans_param[src][i].timer += time_lapsed_ms; + config_store.trans_param[src][i].total_power += socket_power; + config_store.trans_param[src][i].count++; + + tp = &config_store.trans_param[src][i]; + +#ifdef CONFIG_AMD_PMF_DEBUG + dev_dbg(dev->dev, "avg_power: %u mW total_power: %u mW count: %u timer: %u ms\n", + avg_power, config_store.trans_param[src][i].total_power, + config_store.trans_param[src][i].count, + config_store.trans_param[src][i].timer); +#endif + if (tp->timer >= tp->time_constant && tp->count) { + avg_power = tp->total_power / tp->count; + + /* Reset the indices */ + tp->timer = 0; + tp->total_power = 0; + tp->count = 0; + + if ((tp->shifting_up && avg_power >= tp->power_threshold) || + (!tp->shifting_up && avg_power <= tp->power_threshold)) { + tp->priority = true; + } else { + tp->priority = false; + } + } + } + + dev_dbg(dev->dev, "[CNQF] Avg power: %u mW socket power: %u mW mode:%s\n", + avg_power, socket_power, state_as_str(config_store.current_mode)); + +#ifdef CONFIG_AMD_PMF_DEBUG + dev_dbg(dev->dev, "[CNQF] priority1: %u priority2: %u priority3: %u\n", + config_store.trans_param[src][0].priority, + config_store.trans_param[src][1].priority, + config_store.trans_param[src][2].priority); + + dev_dbg(dev->dev, "[CNQF] priority4: %u priority5: %u priority6: %u\n", + config_store.trans_param[src][3].priority, + config_store.trans_param[src][4].priority, + config_store.trans_param[src][5].priority); +#endif + + for (j = 0; j < CNQF_TRANSITION_MAX; j++) { + /* apply the highest priority */ + if (config_store.trans_param[src][j].priority) { + if (config_store.current_mode != + config_store.trans_param[src][j].target_mode) { + config_store.current_mode = + config_store.trans_param[src][j].target_mode; + dev_dbg(dev->dev, "Moving to Mode :%s\n", + state_as_str(config_store.current_mode)); + amd_pmf_set_cnqf(dev, src, + config_store.current_mode, NULL); + } + break; + } + } + return 0; +} + +static void amd_pmf_update_trans_data(int idx, struct apmf_dyn_slider_output *out) +{ + struct cnqf_tran_params *tp; + + tp = &config_store.trans_param[idx][CNQF_TRANSITION_TO_QUIET]; + tp->time_constant = out->t_balanced_to_quiet; + tp->target_mode = 
CNQF_MODE_QUIET; + tp->shifting_up = false; + + tp = &config_store.trans_param[idx][CNQF_TRANSITION_FROM_BALANCE_TO_PERFORMANCE]; + tp->time_constant = out->t_balanced_to_perf; + tp->target_mode = CNQF_MODE_PERFORMANCE; + tp->shifting_up = true; + + tp = &config_store.trans_param[idx][CNQF_TRANSITION_FROM_QUIET_TO_BALANCE]; + tp->time_constant = out->t_quiet_to_balanced; + tp->target_mode = CNQF_MODE_BALANCE; + tp->shifting_up = true; + + tp = &config_store.trans_param[idx][CNQF_TRANSITION_FROM_PERFORMANCE_TO_BALANCE]; + tp->time_constant = out->t_perf_to_balanced; + tp->target_mode = CNQF_MODE_BALANCE; + tp->shifting_up = false; + + tp = &config_store.trans_param[idx][CNQF_TRANSITION_FROM_TURBO_TO_PERFORMANCE]; + tp->time_constant = out->t_turbo_to_perf; + tp->target_mode = CNQF_MODE_PERFORMANCE; + tp->shifting_up = false; + + tp = &config_store.trans_param[idx][CNQF_TRANSITION_TO_TURBO]; + tp->time_constant = out->t_perf_to_turbo; + tp->target_mode = CNQF_MODE_TURBO; + tp->shifting_up = true; +} + +static void amd_pmf_update_mode_set(int idx, struct apmf_dyn_slider_output *out) +{ + struct cnqf_mode_settings *ms; + + /* Quiet Mode */ + ms = &config_store.mode_set[idx][CNQF_MODE_QUIET]; + ms->power_floor = out->ps[APMF_CNQF_QUIET].pfloor; + ms->power_control.fppt = out->ps[APMF_CNQF_QUIET].fppt; + ms->power_control.sppt = out->ps[APMF_CNQF_QUIET].sppt; + ms->power_control.sppt_apu_only = out->ps[APMF_CNQF_QUIET].sppt_apu_only; + ms->power_control.spl = out->ps[APMF_CNQF_QUIET].spl; + ms->power_control.stt_min = out->ps[APMF_CNQF_QUIET].stt_min_limit; + ms->power_control.stt_skin_temp[STT_TEMP_APU] = + out->ps[APMF_CNQF_QUIET].stt_skintemp[STT_TEMP_APU]; + ms->power_control.stt_skin_temp[STT_TEMP_HS2] = + out->ps[APMF_CNQF_QUIET].stt_skintemp[STT_TEMP_HS2]; + ms->fan_control.fan_id = out->ps[APMF_CNQF_QUIET].fan_id; + + /* Balance Mode */ + ms = &config_store.mode_set[idx][CNQF_MODE_BALANCE]; + ms->power_floor = out->ps[APMF_CNQF_BALANCE].pfloor; + ms->power_control.fppt = out->ps[APMF_CNQF_BALANCE].fppt; + ms->power_control.sppt = out->ps[APMF_CNQF_BALANCE].sppt; + ms->power_control.sppt_apu_only = out->ps[APMF_CNQF_BALANCE].sppt_apu_only; + ms->power_control.spl = out->ps[APMF_CNQF_BALANCE].spl; + ms->power_control.stt_min = out->ps[APMF_CNQF_BALANCE].stt_min_limit; + ms->power_control.stt_skin_temp[STT_TEMP_APU] = + out->ps[APMF_CNQF_BALANCE].stt_skintemp[STT_TEMP_APU]; + ms->power_control.stt_skin_temp[STT_TEMP_HS2] = + out->ps[APMF_CNQF_BALANCE].stt_skintemp[STT_TEMP_HS2]; + ms->fan_control.fan_id = out->ps[APMF_CNQF_BALANCE].fan_id; + + /* Performance Mode */ + ms = &config_store.mode_set[idx][CNQF_MODE_PERFORMANCE]; + ms->power_floor = out->ps[APMF_CNQF_PERFORMANCE].pfloor; + ms->power_control.fppt = out->ps[APMF_CNQF_PERFORMANCE].fppt; + ms->power_control.sppt = out->ps[APMF_CNQF_PERFORMANCE].sppt; + ms->power_control.sppt_apu_only = out->ps[APMF_CNQF_PERFORMANCE].sppt_apu_only; + ms->power_control.spl = out->ps[APMF_CNQF_PERFORMANCE].spl; + ms->power_control.stt_min = out->ps[APMF_CNQF_PERFORMANCE].stt_min_limit; + ms->power_control.stt_skin_temp[STT_TEMP_APU] = + out->ps[APMF_CNQF_PERFORMANCE].stt_skintemp[STT_TEMP_APU]; + ms->power_control.stt_skin_temp[STT_TEMP_HS2] = + out->ps[APMF_CNQF_PERFORMANCE].stt_skintemp[STT_TEMP_HS2]; + ms->fan_control.fan_id = out->ps[APMF_CNQF_PERFORMANCE].fan_id; + + /* Turbo Mode */ + ms = &config_store.mode_set[idx][CNQF_MODE_TURBO]; + ms->power_floor = out->ps[APMF_CNQF_TURBO].pfloor; + ms->power_control.fppt = out->ps[APMF_CNQF_TURBO].fppt; + 
ms->power_control.sppt = out->ps[APMF_CNQF_TURBO].sppt;
+	ms->power_control.sppt_apu_only = out->ps[APMF_CNQF_TURBO].sppt_apu_only;
+	ms->power_control.spl = out->ps[APMF_CNQF_TURBO].spl;
+	ms->power_control.stt_min = out->ps[APMF_CNQF_TURBO].stt_min_limit;
+	ms->power_control.stt_skin_temp[STT_TEMP_APU] =
+		out->ps[APMF_CNQF_TURBO].stt_skintemp[STT_TEMP_APU];
+	ms->power_control.stt_skin_temp[STT_TEMP_HS2] =
+		out->ps[APMF_CNQF_TURBO].stt_skintemp[STT_TEMP_HS2];
+	ms->fan_control.fan_id = out->ps[APMF_CNQF_TURBO].fan_id;
+}
+
+static int amd_pmf_check_flags(struct amd_pmf_dev *dev)
+{
+	struct apmf_dyn_slider_output out = {};
+
+	if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC))
+		apmf_get_dyn_slider_def_ac(dev, &out);
+	else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC))
+		apmf_get_dyn_slider_def_dc(dev, &out);
+
+	return out.flags;
+}
+
+static int amd_pmf_load_defaults_cnqf(struct amd_pmf_dev *dev)
+{
+	struct apmf_dyn_slider_output out;
+	int i, j, ret;
+
+	for (i = 0; i < POWER_SOURCE_MAX; i++) {
+		if (!is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC + i))
+			continue;
+
+		if (i == POWER_SOURCE_AC)
+			ret = apmf_get_dyn_slider_def_ac(dev, &out);
+		else
+			ret = apmf_get_dyn_slider_def_dc(dev, &out);
+		if (ret) {
+			dev_err(dev->dev, "APMF apmf_get_dyn_slider_def_%s failed :%d\n",
+				i == POWER_SOURCE_AC ? "ac" : "dc", ret);
+			return ret;
+		}
+
+		amd_pmf_cnqf_dump_defaults(&out, i);
+		amd_pmf_update_mode_set(i, &out);
+		amd_pmf_update_trans_data(i, &out);
+		amd_pmf_update_power_threshold(i);
+
+		for (j = 0; j < CNQF_MODE_MAX; j++) {
+			if (config_store.mode_set[i][j].fan_control.fan_id == FAN_INDEX_AUTO)
+				config_store.mode_set[i][j].fan_control.manual = false;
+			else
+				config_store.mode_set[i][j].fan_control.manual = true;
+		}
+	}
+
+	/* set to initial default values */
+	config_store.current_mode = CNQF_MODE_BALANCE;
+
+	return 0;
+}
+
+static ssize_t cnqf_enable_store(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t count)
+{
+	struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
+	int result, src;
+	bool input;
+
+	result = kstrtobool(buf, &input);
+	if (result)
+		return result;
+
+	src = amd_pmf_cnqf_get_power_source(pdev);
+	pdev->cnqf_enabled = input;
+
+	if (pdev->cnqf_enabled && is_pprof_balanced(pdev)) {
+		amd_pmf_set_cnqf(pdev, src, config_store.current_mode, NULL);
+	} else {
+		if (is_apmf_func_supported(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR))
+			amd_pmf_set_sps_power_limits(pdev);
+	}
+
+	dev_dbg(pdev->dev, "Received CnQF %s\n", str_on_off(input));
+	return count;
+}
+
+static ssize_t cnqf_enable_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
+
+	return sysfs_emit(buf, "%s\n", str_on_off(pdev->cnqf_enabled));
+}
+
+static DEVICE_ATTR_RW(cnqf_enable);
+
+static umode_t cnqf_feature_is_visible(struct kobject *kobj,
+				       struct attribute *attr, int n)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
+
+	return pdev->cnqf_supported ? attr->mode : 0;
+}
+
+static struct attribute *cnqf_feature_attrs[] = {
+	&dev_attr_cnqf_enable.attr,
+	NULL
+};
+
+const struct attribute_group cnqf_feature_attribute_group = {
+	.is_visible = cnqf_feature_is_visible,
+	.attrs = cnqf_feature_attrs,
+};
+
+void amd_pmf_deinit_cnqf(struct amd_pmf_dev *dev)
+{
+	cancel_delayed_work_sync(&dev->work_buffer);
+}
+
+int amd_pmf_init_cnqf(struct amd_pmf_dev *dev)
+{
+	int ret, src;
+
+	/*
+	 * Note the caller of this function has already checked that at least
+	 * one of APMF_FUNC_DYN_SLIDER_AC and APMF_FUNC_DYN_SLIDER_DC is
+	 * supported.
+	 */
+
+	ret = amd_pmf_load_defaults_cnqf(dev);
+	if (ret < 0)
+		return ret;
+
+	amd_pmf_init_metrics_table(dev);
+
+	dev->cnqf_supported = true;
+	dev->cnqf_enabled = amd_pmf_check_flags(dev);
+
+	/* update the thermal for CnQF */
+	if (dev->cnqf_enabled && is_pprof_balanced(dev)) {
+		src = amd_pmf_cnqf_get_power_source(dev);
+		amd_pmf_set_cnqf(dev, src, config_store.current_mode, NULL);
+	}
+
+	return 0;
+}
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
new file mode 100644
index 000000000000..8fc293c9c538
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -0,0 +1,523 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD Platform Management Framework Driver
+ *
+ * Copyright (c) 2022, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <asm/amd/node.h>
+#include "pmf.h"
+
+/* PMF-SMU communication registers */
+#define AMD_PMF_REGISTER_MESSAGE	0xA18
+#define AMD_PMF_REGISTER_RESPONSE	0xA78
+#define AMD_PMF_REGISTER_ARGUMENT	0xA58
+
+/* Base address of SMU for mapping physical address to virtual address */
+#define AMD_PMF_MAPPING_SIZE		0x01000
+#define AMD_PMF_BASE_ADDR_OFFSET	0x10000
+#define AMD_PMF_BASE_ADDR_LO		0x13B102E8
+#define AMD_PMF_BASE_ADDR_HI		0x13B102EC
+#define AMD_PMF_BASE_ADDR_LO_MASK	GENMASK(15, 0)
+#define AMD_PMF_BASE_ADDR_HI_MASK	GENMASK(31, 20)
+
+/* SMU Response Codes */
+#define AMD_PMF_RESULT_OK			0x01
+#define AMD_PMF_RESULT_CMD_REJECT_BUSY		0xFC
+#define AMD_PMF_RESULT_CMD_REJECT_PREREQ	0xFD
+#define AMD_PMF_RESULT_CMD_UNKNOWN		0xFE
+#define AMD_PMF_RESULT_FAILED			0xFF
+
+#define PMF_MSG_DELAY_MIN_US		50
+#define RESPONSE_REGISTER_LOOP_MAX	20000
+
+#define DELAY_MIN_US	2000
+#define DELAY_MAX_US	3000
+
+/* override the Metrics Table sampling interval (in ms) */
+static int metrics_table_loop_ms = 1000;
+module_param(metrics_table_loop_ms, int, 0644);
+MODULE_PARM_DESC(metrics_table_loop_ms, "Metrics Table sampling interval (default = 1000ms)");
+
+/* Force load on supported older platforms */
+static bool force_load;
+module_param(force_load, bool, 0444);
+MODULE_PARM_DESC(force_load, "Force load this driver on supported older platforms (experimental)");
+
+static int amd_pmf_pwr_src_notify_call(struct notifier_block *nb, unsigned long event, void *data)
+{
+	struct amd_pmf_dev *pmf = container_of(nb, struct amd_pmf_dev, pwr_src_notifier);
+
+	if (event != PSY_EVENT_PROP_CHANGED)
+		return NOTIFY_OK;
+
+	if (is_apmf_func_supported(pmf, APMF_FUNC_AUTO_MODE) ||
+	    is_apmf_func_supported(pmf, APMF_FUNC_DYN_SLIDER_DC) ||
+	    is_apmf_func_supported(pmf, APMF_FUNC_DYN_SLIDER_AC)) {
+		if ((pmf->amt_enabled || pmf->cnqf_enabled) && is_pprof_balanced(pmf))
+			return NOTIFY_DONE;
+	}
+
+	if (is_apmf_func_supported(pmf, APMF_FUNC_STATIC_SLIDER_GRANULAR))
+		amd_pmf_set_sps_power_limits(pmf);
+
+	if (is_apmf_func_supported(pmf, APMF_FUNC_OS_POWER_SLIDER_UPDATE))
+		amd_pmf_power_slider_update_event(pmf);
+
+	return NOTIFY_OK;
+}
+
+static int current_power_limits_show(struct seq_file *seq, void *unused)
+{
+	struct amd_pmf_dev *dev = seq->private;
+	struct amd_pmf_static_slider_granular table;
+	int mode, src = 0;
+
+	mode = amd_pmf_get_pprof_modes(dev);
+	if (mode < 0)
+		return mode;
+
+	src = amd_pmf_get_power_source();
+	amd_pmf_update_slider(dev, SLIDER_OP_GET, mode, &table);
+	seq_printf(seq, "spl:%u fppt:%u sppt:%u sppt_apu_only:%u stt_min:%u stt[APU]:%u stt[HS2]: %u\n",
+		   table.prop[src][mode].spl,
+		   table.prop[src][mode].fppt,
+		   table.prop[src][mode].sppt,
+		   table.prop[src][mode].sppt_apu_only,
+		   table.prop[src][mode].stt_min,
+		   table.prop[src][mode].stt_skin_temp[STT_TEMP_APU],
+		   table.prop[src][mode].stt_skin_temp[STT_TEMP_HS2]);
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(current_power_limits);
+
+static void amd_pmf_dbgfs_unregister(struct amd_pmf_dev *dev)
+{
+	debugfs_remove_recursive(dev->dbgfs_dir);
+}
+
+static void amd_pmf_dbgfs_register(struct amd_pmf_dev *dev)
+{
+	dev->dbgfs_dir = debugfs_create_dir("amd_pmf", NULL);
+	if (dev->pmf_if_version == PMF_IF_V1)
+		debugfs_create_file("current_power_limits", 0644, dev->dbgfs_dir, dev,
+				    &current_power_limits_fops);
+}
+
+int amd_pmf_get_power_source(void)
+{
+	if (power_supply_is_system_supplied() > 0)
+		return POWER_SOURCE_AC;
+	else
+		return POWER_SOURCE_DC;
+}
+
+static void amd_pmf_get_metrics(struct work_struct *work)
+{
+	struct amd_pmf_dev *dev = container_of(work, struct amd_pmf_dev, work_buffer.work);
+	ktime_t time_elapsed_ms;
+	int socket_power;
+
+	guard(mutex)(&dev->update_mutex);
+
+	/* Transfer table contents */
+	memset(dev->buf, 0, sizeof(dev->m_table));
+	amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, SET_CMD, METRICS_TABLE_ID, NULL);
+	memcpy(&dev->m_table, dev->buf, sizeof(dev->m_table));
+
+	time_elapsed_ms = ktime_to_ms(ktime_get()) - dev->start_time;
+	/* Calculate the avg SoC power consumption */
+	socket_power = dev->m_table.apu_power + dev->m_table.dgpu_power;
+
+	if (dev->amt_enabled) {
+		/* Apply the Auto Mode transition */
+		amd_pmf_trans_automode(dev, socket_power, time_elapsed_ms);
+	}
+
+	if (dev->cnqf_enabled) {
+		/* Apply the CnQF transition */
+		amd_pmf_trans_cnqf(dev, socket_power, time_elapsed_ms);
+	}
+
+	dev->start_time = ktime_to_ms(ktime_get());
+	schedule_delayed_work(&dev->work_buffer, msecs_to_jiffies(metrics_table_loop_ms));
+}
+
+static inline u32 amd_pmf_reg_read(struct amd_pmf_dev *dev, int reg_offset)
+{
+	return ioread32(dev->regbase + reg_offset);
+}
+
+static inline void amd_pmf_reg_write(struct amd_pmf_dev *dev, int reg_offset, u32 val)
+{
+	iowrite32(val, dev->regbase + reg_offset);
+}
+
+static void __maybe_unused amd_pmf_dump_registers(struct amd_pmf_dev *dev)
+{
+	u32 value;
+
+	value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_RESPONSE);
+	dev_dbg(dev->dev, "AMD_PMF_REGISTER_RESPONSE:%x\n", value);
+
+	value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_ARGUMENT);
+	dev_dbg(dev->dev, "AMD_PMF_REGISTER_ARGUMENT:%d\n", value);
+
+	value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_MESSAGE);
+	dev_dbg(dev->dev, "AMD_PMF_REGISTER_MESSAGE:%x\n", value);
+}
+
+/**
+ * fixp_q88_fromint() - Convert an integer to Q8.8 fixed point
+ * @val: input value
+ *
+ * Converts an integer into binary fixed point format where 8 bits
+ * are used for the integer part and 8 bits for the fractional part.
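+ *
+ * For example, fixp_q88_fromint(45) returns 45 << 8 = 0x2D00, i.e. 45.0
+ * with a zero fractional byte; the skin temperature limits sent via
+ * SET_STT_LIMIT_APU/HS2 are encoded in this format.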
+ *
+ * Return: unsigned integer converted to Q8.8 format
+ */
+u32 fixp_q88_fromint(u32 val)
+{
+	return val << 8;
+}
+
+int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32 *data)
+{
+	int rc;
+	u32 val;
+
+	guard(mutex)(&dev->lock);
+
+	/* Ensure the SMU has posted a response to any previous command */
+	rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMF_REGISTER_RESPONSE,
+				val, val != 0, PMF_MSG_DELAY_MIN_US,
+				PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
+	if (rc) {
+		dev_err(dev->dev, "failed to talk to SMU\n");
+		return rc;
+	}
+
+	/* Write zero to response register */
+	amd_pmf_reg_write(dev, AMD_PMF_REGISTER_RESPONSE, 0);
+
+	/* Write argument into argument register */
+	amd_pmf_reg_write(dev, AMD_PMF_REGISTER_ARGUMENT, arg);
+
+	/* Write message ID to message ID register */
+	amd_pmf_reg_write(dev, AMD_PMF_REGISTER_MESSAGE, message);
+
+	/* Wait until we get a valid response */
+	rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMF_REGISTER_RESPONSE,
+				val, val != 0, PMF_MSG_DELAY_MIN_US,
+				PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
+	if (rc) {
+		dev_err(dev->dev, "SMU response timed out\n");
+		return rc;
+	}
+
+	switch (val) {
+	case AMD_PMF_RESULT_OK:
+		if (get) {
+			/* PMFW may take a longer time to return the data */
+			usleep_range(DELAY_MIN_US, 10 * DELAY_MAX_US);
+			*data = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_ARGUMENT);
+		}
+		break;
+	case AMD_PMF_RESULT_CMD_REJECT_BUSY:
+		dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
+		rc = -EBUSY;
+		break;
+	case AMD_PMF_RESULT_CMD_UNKNOWN:
+		dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
+		rc = -EINVAL;
+		break;
+	case AMD_PMF_RESULT_CMD_REJECT_PREREQ:
+	case AMD_PMF_RESULT_FAILED:
+	default:
+		dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
+		rc = -EIO;
+		break;
+	}
+
+	amd_pmf_dump_registers(dev);
+	return rc;
+}
+
+static const struct pci_device_id pmf_pci_ids[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RMB) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M60H_ROOT) },
+	{ }
+};
+
+int amd_pmf_set_dram_addr(struct amd_pmf_dev *dev, bool alloc_buffer)
+{
+	u64 phys_addr;
+	u32 hi, low;
+
+	/* Get Metrics Table Address */
+	if (alloc_buffer) {
+		switch (dev->cpu_id) {
+		case AMD_CPU_ID_PS:
+		case AMD_CPU_ID_RMB:
+			dev->mtable_size = sizeof(dev->m_table);
+			break;
+		case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT:
+		case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT:
+			dev->mtable_size = sizeof(dev->m_table_v2);
+			break;
+		default:
+			dev_err(dev->dev, "Invalid CPU id: 0x%x\n", dev->cpu_id);
+		}
+
+		dev->buf = devm_kzalloc(dev->dev, dev->mtable_size, GFP_KERNEL);
+		if (!dev->buf)
+			return -ENOMEM;
+	}
+
+	phys_addr = virt_to_phys(dev->buf);
+	hi = phys_addr >> 32;
+	low = phys_addr & GENMASK(31, 0);
+
+	amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, SET_CMD, hi, NULL);
+	amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, SET_CMD, low, NULL);
+
+	return 0;
+}
+
+int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev)
+{
+	int ret;
+
+	INIT_DELAYED_WORK(&dev->work_buffer, amd_pmf_get_metrics);
+
+	ret = amd_pmf_set_dram_addr(dev, true);
+	if (ret)
+		return ret;
+
+	/*
+	 * Start collecting the metrics data after a small delay,
+	 * or else we might end up reading stale values from PMFW.
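+	 * The 3x multiplier below gives PMFW three sampling periods
+	 * (3 s with the default 1000 ms loop) to populate the table first.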
+ */ + schedule_delayed_work(&dev->work_buffer, msecs_to_jiffies(metrics_table_loop_ms * 3)); + + return 0; +} + +static int amd_pmf_suspend_handler(struct device *dev) +{ + struct amd_pmf_dev *pdev = dev_get_drvdata(dev); + + if (pdev->smart_pc_enabled) + cancel_delayed_work_sync(&pdev->pb_work); + + if (is_apmf_func_supported(pdev, APMF_FUNC_SBIOS_HEARTBEAT_V2)) + amd_pmf_notify_sbios_heartbeat_event_v2(pdev, ON_SUSPEND); + + return 0; +} + +static int amd_pmf_resume_handler(struct device *dev) +{ + struct amd_pmf_dev *pdev = dev_get_drvdata(dev); + int ret; + + if (pdev->buf) { + ret = amd_pmf_set_dram_addr(pdev, false); + if (ret) + return ret; + } + + if (is_apmf_func_supported(pdev, APMF_FUNC_SBIOS_HEARTBEAT_V2)) + amd_pmf_notify_sbios_heartbeat_event_v2(pdev, ON_RESUME); + + if (pdev->smart_pc_enabled) + schedule_delayed_work(&pdev->pb_work, msecs_to_jiffies(2000)); + + return 0; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(amd_pmf_pm, amd_pmf_suspend_handler, amd_pmf_resume_handler); + +static void amd_pmf_init_features(struct amd_pmf_dev *dev) +{ + int ret; + + /* Enable Static Slider */ + if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR) || + is_apmf_func_supported(dev, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) { + amd_pmf_init_sps(dev); + dev->pwr_src_notifier.notifier_call = amd_pmf_pwr_src_notify_call; + power_supply_reg_notifier(&dev->pwr_src_notifier); + dev_dbg(dev->dev, "SPS enabled and Platform Profiles registered\n"); + } + + amd_pmf_init_smart_pc(dev); + if (dev->smart_pc_enabled) { + dev_dbg(dev->dev, "Smart PC Solution Enabled\n"); + /* If Smart PC is enabled, no need to check for other features */ + return; + } + + if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) { + amd_pmf_init_auto_mode(dev); + dev_dbg(dev->dev, "Auto Mode Init done\n"); + } else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) || + is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) { + ret = amd_pmf_init_cnqf(dev); + if (ret) + dev_warn(dev->dev, "CnQF Init failed\n"); + } +} + +static void amd_pmf_deinit_features(struct amd_pmf_dev *dev) +{ + if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR) || + is_apmf_func_supported(dev, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) { + power_supply_unreg_notifier(&dev->pwr_src_notifier); + } + + if (dev->smart_pc_enabled) { + amd_pmf_deinit_smart_pc(dev); + } else if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) { + amd_pmf_deinit_auto_mode(dev); + } else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) || + is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) { + amd_pmf_deinit_cnqf(dev); + } +} + +static const struct acpi_device_id amd_pmf_acpi_ids[] = { + {"AMDI0100", 0x100}, + {"AMDI0102", 0}, + {"AMDI0103", 0}, + {"AMDI0105", 0}, + {"AMDI0107", 0}, + {"AMDI0108", 0}, + { } +}; +MODULE_DEVICE_TABLE(acpi, amd_pmf_acpi_ids); + +static int amd_pmf_probe(struct platform_device *pdev) +{ + const struct acpi_device_id *id; + struct amd_pmf_dev *dev; + struct pci_dev *rdev; + u32 base_addr_lo; + u32 base_addr_hi; + u64 base_addr; + u32 val; + int err; + + id = acpi_match_device(amd_pmf_acpi_ids, &pdev->dev); + if (!id) + return -ENODEV; + + if (id->driver_data == 0x100 && !force_load) + return -ENODEV; + + dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + dev->dev = &pdev->dev; + + rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0)); + if (!rdev || !pci_match_id(pmf_pci_ids, rdev)) { + pci_dev_put(rdev); + return -ENODEV; + } + + dev->cpu_id = rdev->device; + + err = 
amd_smn_read(0, AMD_PMF_BASE_ADDR_LO, &val); + if (err) { + pci_dev_put(rdev); + return dev_err_probe(dev->dev, pcibios_err_to_errno(err), + "error in reading from 0x%x\n", AMD_PMF_BASE_ADDR_LO); + } + + base_addr_lo = val & AMD_PMF_BASE_ADDR_HI_MASK; + + err = amd_smn_read(0, AMD_PMF_BASE_ADDR_HI, &val); + if (err) { + pci_dev_put(rdev); + return dev_err_probe(dev->dev, pcibios_err_to_errno(err), + "error in reading from 0x%x\n", AMD_PMF_BASE_ADDR_HI); + } + + base_addr_hi = val & AMD_PMF_BASE_ADDR_LO_MASK; + pci_dev_put(rdev); + base_addr = ((u64)base_addr_hi << 32 | base_addr_lo); + + dev->regbase = devm_ioremap(dev->dev, base_addr + AMD_PMF_BASE_ADDR_OFFSET, + AMD_PMF_MAPPING_SIZE); + if (!dev->regbase) + return -ENOMEM; + + err = devm_mutex_init(dev->dev, &dev->lock); + if (err) + return err; + + err = devm_mutex_init(dev->dev, &dev->update_mutex); + if (err) + return err; + + err = devm_mutex_init(dev->dev, &dev->cb_mutex); + if (err) + return err; + + apmf_acpi_init(dev); + platform_set_drvdata(pdev, dev); + amd_pmf_dbgfs_register(dev); + amd_pmf_init_features(dev); + apmf_install_handler(dev); + if (is_apmf_func_supported(dev, APMF_FUNC_SBIOS_HEARTBEAT_V2)) + amd_pmf_notify_sbios_heartbeat_event_v2(dev, ON_LOAD); + + dev_info(dev->dev, "registered PMF device successfully\n"); + + return 0; +} + +static void amd_pmf_remove(struct platform_device *pdev) +{ + struct amd_pmf_dev *dev = platform_get_drvdata(pdev); + + amd_pmf_deinit_features(dev); + if (is_apmf_func_supported(dev, APMF_FUNC_SBIOS_HEARTBEAT_V2)) + amd_pmf_notify_sbios_heartbeat_event_v2(dev, ON_UNLOAD); + apmf_acpi_deinit(dev); + amd_pmf_dbgfs_unregister(dev); +} + +static const struct attribute_group *amd_pmf_driver_groups[] = { + &cnqf_feature_attribute_group, + NULL, +}; + +static struct platform_driver amd_pmf_driver = { + .driver = { + .name = "amd-pmf", + .acpi_match_table = amd_pmf_acpi_ids, + .dev_groups = amd_pmf_driver_groups, + .pm = pm_sleep_ptr(&amd_pmf_pm), + }, + .probe = amd_pmf_probe, + .remove = amd_pmf_remove, +}; +module_platform_driver(amd_pmf_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("AMD Platform Management Framework Driver"); +MODULE_SOFTDEP("pre: amdtee"); diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h new file mode 100644 index 000000000000..9144c8c3bbaf --- /dev/null +++ b/drivers/platform/x86/amd/pmf/pmf.h @@ -0,0 +1,898 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * AMD Platform Management Framework Driver + * + * Copyright (c) 2022, Advanced Micro Devices, Inc. + * All Rights Reserved. 
+ * + * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com> + */ + +#ifndef PMF_H +#define PMF_H + +#include <linux/acpi.h> +#include <linux/input.h> +#include <linux/platform_device.h> +#include <linux/platform_profile.h> + +#define POLICY_BUF_MAX_SZ 0x4b000 +#define POLICY_SIGN_COOKIE 0x31535024 +#define POLICY_COOKIE_OFFSET 0x10 + +/* List of supported CPU ids */ +#define AMD_CPU_ID_RMB 0x14b5 +#define AMD_CPU_ID_PS 0x14e8 +#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT 0x1507 +#define PCI_DEVICE_ID_AMD_1AH_M60H_ROOT 0x1122 + +struct cookie_header { + u32 sign; + u32 length; +} __packed; + +/* APMF Functions */ +#define APMF_FUNC_VERIFY_INTERFACE 0 +#define APMF_FUNC_GET_SYS_PARAMS 1 +#define APMF_FUNC_SBIOS_REQUESTS 2 +#define APMF_FUNC_SBIOS_HEARTBEAT 4 +#define APMF_FUNC_AUTO_MODE 5 +#define APMF_FUNC_SET_FAN_IDX 7 +#define APMF_FUNC_OS_POWER_SLIDER_UPDATE 8 +#define APMF_FUNC_STATIC_SLIDER_GRANULAR 9 +#define APMF_FUNC_DYN_SLIDER_AC 11 +#define APMF_FUNC_DYN_SLIDER_DC 12 +#define APMF_FUNC_NOTIFY_SMART_PC_UPDATES 14 +#define APMF_FUNC_SBIOS_HEARTBEAT_V2 16 + +/* Message Definitions */ +#define SET_SPL 0x03 /* SPL: Sustained Power Limit */ +#define SET_SPPT 0x05 /* SPPT: Slow Package Power Tracking */ +#define SET_FPPT 0x07 /* FPPT: Fast Package Power Tracking */ +#define GET_SPL 0x0B +#define GET_SPPT 0x0D +#define GET_FPPT 0x0F +#define SET_DRAM_ADDR_HIGH 0x14 +#define SET_DRAM_ADDR_LOW 0x15 +#define SET_TRANSFER_TABLE 0x16 +#define SET_STT_MIN_LIMIT 0x18 /* STT: Skin Temperature Tracking */ +#define SET_STT_LIMIT_APU 0x19 +#define SET_STT_LIMIT_HS2 0x1A +#define SET_SPPT_APU_ONLY 0x1D +#define GET_SPPT_APU_ONLY 0x1E +#define GET_STT_MIN_LIMIT 0x1F +#define GET_STT_LIMIT_APU 0x20 +#define GET_STT_LIMIT_HS2 0x21 +#define SET_P3T 0x23 /* P3T: Peak Package Power Limit */ +#define SET_PMF_PPT 0x25 +#define SET_PMF_PPT_APU_ONLY 0x26 + +/* OS slider update notification */ +#define DC_BEST_PERF 0 +#define DC_BETTER_PERF 1 +#define DC_BATTERY_SAVER 3 +#define AC_BEST_PERF 4 +#define AC_BETTER_PERF 5 +#define AC_BETTER_BATTERY 6 + +/* Fan Index for Auto Mode */ +#define FAN_INDEX_AUTO 0xFFFFFFFF + +#define ARG_NONE 0 +#define AVG_SAMPLE_SIZE 3 + +/* Policy Actions */ +#define PMF_POLICY_SPL 2 +#define PMF_POLICY_SPPT 3 +#define PMF_POLICY_FPPT 4 +#define PMF_POLICY_SPPT_APU_ONLY 5 +#define PMF_POLICY_STT_MIN 6 +#define PMF_POLICY_STT_SKINTEMP_APU 7 +#define PMF_POLICY_STT_SKINTEMP_HS2 8 +#define PMF_POLICY_SYSTEM_STATE 9 +#define PMF_POLICY_BIOS_OUTPUT_1 10 +#define PMF_POLICY_BIOS_OUTPUT_2 11 +#define PMF_POLICY_P3T 38 +#define PMF_POLICY_PMF_PPT 54 +#define PMF_POLICY_PMF_PPT_APU_ONLY 55 +#define PMF_POLICY_BIOS_OUTPUT_3 57 +#define PMF_POLICY_BIOS_OUTPUT_4 58 +#define PMF_POLICY_BIOS_OUTPUT_5 59 +#define PMF_POLICY_BIOS_OUTPUT_6 60 +#define PMF_POLICY_BIOS_OUTPUT_7 61 +#define PMF_POLICY_BIOS_OUTPUT_8 62 +#define PMF_POLICY_BIOS_OUTPUT_9 63 +#define PMF_POLICY_BIOS_OUTPUT_10 64 + +/* TA macros */ +#define PMF_TA_IF_VERSION_MAJOR 1 +#define TA_PMF_ACTION_MAX 32 +#define TA_PMF_UNDO_MAX 8 +#define TA_OUTPUT_RESERVED_MEM 922 +#define MAX_OPERATION_PARAMS 4 + +#define TA_ERROR_CRYPTO_INVALID_PARAM 0x20002 +#define TA_ERROR_CRYPTO_BIN_TOO_LARGE 0x2000d + +#define PMF_IF_V1 1 +#define PMF_IF_V2 2 + +#define APTS_MAX_STATES 16 +#define CUSTOM_BIOS_INPUT_BITS GENMASK(16, 7) +#define BIOS_INPUTS_MAX 10 + +/* amd_pmf_send_cmd() set/get */ +#define SET_CMD false +#define GET_CMD true + +#define METRICS_TABLE_ID 7 + +typedef void (*apmf_event_handler_t)(acpi_handle handle, u32 event, void *data); + +/* 
APTS PMF BIOS Interface */ +struct amd_pmf_apts_output { + u16 table_version; + u32 fan_table_idx; + u32 pmf_ppt; + u32 ppt_pmf_apu_only; + u32 stt_min_limit; + u8 stt_skin_temp_limit_apu; + u8 stt_skin_temp_limit_hs2; +} __packed; + +struct amd_pmf_apts_granular_output { + u16 size; + struct amd_pmf_apts_output val; +} __packed; + +struct amd_pmf_apts_granular { + u16 size; + struct amd_pmf_apts_output val[APTS_MAX_STATES]; +}; + +struct sbios_hb_event_v2 { + u16 size; + u8 load; + u8 unload; + u8 suspend; + u8 resume; +} __packed; + +enum sbios_hb_v2 { + ON_LOAD, + ON_UNLOAD, + ON_SUSPEND, + ON_RESUME, +}; + +/* AMD PMF BIOS interfaces */ +struct apmf_verify_interface { + u16 size; + u16 version; + u32 notification_mask; + u32 supported_functions; +} __packed; + +struct apmf_system_params { + u16 size; + u32 valid_mask; + u32 flags; + u8 command_code; + u32 heartbeat_int; +} __packed; + +struct apmf_sbios_req { + u16 size; + u32 pending_req; + u8 rsd; + u8 cql_event; + u8 amt_event; + u32 fppt; + u32 sppt; + u32 fppt_apu_only; + u32 spl; + u32 stt_min_limit; + u8 skin_temp_apu; + u8 skin_temp_hs2; +} __packed; + +/* As per APMF spec 1.3 */ +struct apmf_sbios_req_v1 { + u16 size; + u32 pending_req; + u8 rsvd; + u8 cql_event; + u8 amt_event; + u32 fppt; + u32 sppt; + u32 sppt_apu_only; + u32 spl; + u32 stt_min_limit; + u8 skin_temp_apu; + u8 skin_temp_hs2; + u8 enable_cnqf; + u32 custom_policy[BIOS_INPUTS_MAX]; +} __packed; + +struct apmf_sbios_req_v2 { + u16 size; + u32 pending_req; + u8 rsd; + u32 ppt_pmf; + u32 ppt_pmf_apu_only; + u32 stt_min_limit; + u8 skin_temp_apu; + u8 skin_temp_hs2; + u32 custom_policy[BIOS_INPUTS_MAX]; +} __packed; + +struct apmf_fan_idx { + u16 size; + u8 fan_ctl_mode; + u32 fan_ctl_idx; +} __packed; + +struct smu_pmf_metrics_v2 { + u16 core_frequency[16]; /* MHz */ + u16 core_power[16]; /* mW */ + u16 core_temp[16]; /* centi-C */ + u16 gfx_temp; /* centi-C */ + u16 soc_temp; /* centi-C */ + u16 stapm_opn_limit; /* mW */ + u16 stapm_cur_limit; /* mW */ + u16 infra_cpu_maxfreq; /* MHz */ + u16 infra_gfx_maxfreq; /* MHz */ + u16 skin_temp; /* centi-C */ + u16 gfxclk_freq; /* MHz */ + u16 fclk_freq; /* MHz */ + u16 gfx_activity; /* GFX busy % [0-100] */ + u16 socclk_freq; /* MHz */ + u16 vclk_freq; /* MHz */ + u16 vcn_activity; /* VCN busy % [0-100] */ + u16 vpeclk_freq; /* MHz */ + u16 npuclk_freq; /* MHz */ + u16 npu_busy[8]; /* NPU busy % [0-100] */ + u16 dram_reads; /* MB/sec */ + u16 dram_writes; /* MB/sec */ + u16 core_c0residency[16]; /* C0 residency % [0-100] */ + u16 npu_power; /* mW */ + u32 apu_power; /* mW */ + u32 gfx_power; /* mW */ + u32 dgpu_power; /* mW */ + u32 socket_power; /* mW */ + u32 all_core_power; /* mW */ + u32 filter_alpha_value; /* time constant [us] */ + u32 metrics_counter; + u16 memclk_freq; /* MHz */ + u16 mpnpuclk_freq; /* MHz */ + u16 npu_reads; /* MB/sec */ + u16 npu_writes; /* MB/sec */ + u32 throttle_residency_prochot; + u32 throttle_residency_spl; + u32 throttle_residency_fppt; + u32 throttle_residency_sppt; + u32 throttle_residency_thm_core; + u32 throttle_residency_thm_gfx; + u32 throttle_residency_thm_soc; + u16 psys; + u16 spare1; + u32 spare[6]; +} __packed; + +struct smu_pmf_metrics { + u16 gfxclk_freq; /* in MHz */ + u16 socclk_freq; /* in MHz */ + u16 vclk_freq; /* in MHz */ + u16 dclk_freq; /* in MHz */ + u16 memclk_freq; /* in MHz */ + u16 spare; + u16 gfx_activity; /* in Centi */ + u16 uvd_activity; /* in Centi */ + u16 voltage[2]; /* in mV */ + u16 currents[2]; /* in mA */ + u16 power[2];/* in mW */ + u16 
core_freq[8]; /* in MHz */ + u16 core_power[8]; /* in mW */ + u16 core_temp[8]; /* in centi-Celsius */ + u16 l3_freq; /* in MHz */ + u16 l3_temp; /* in centi-Celsius */ + u16 gfx_temp; /* in centi-Celsius */ + u16 soc_temp; /* in centi-Celsius */ + u16 throttler_status; + u16 current_socketpower; /* in mW */ + u16 stapm_orig_limit; /* in W */ + u16 stapm_cur_limit; /* in W */ + u32 apu_power; /* in mW */ + u32 dgpu_power; /* in mW */ + u16 vdd_tdc_val; /* in mA */ + u16 soc_tdc_val; /* in mA */ + u16 vdd_edc_val; /* in mA */ + u16 soc_edcv_al; /* in mA */ + u16 infra_cpu_maxfreq; /* in MHz */ + u16 infra_gfx_maxfreq; /* in MHz */ + u16 skin_temp; /* in centi-Celsius */ + u16 device_state; + u16 curtemp; /* in centi-Celsius */ + u16 filter_alpha_value; + u16 avg_gfx_clkfrequency; + u16 avg_fclk_frequency; + u16 avg_gfx_activity; + u16 avg_socclk_frequency; + u16 avg_vclk_frequency; + u16 avg_vcn_activity; + u16 avg_dram_reads; + u16 avg_dram_writes; + u16 avg_socket_power; + u16 avg_core_power[2]; + u16 avg_core_c0residency[16]; + u16 spare1; + u32 metrics_counter; +} __packed; + +enum amd_stt_skin_temp { + STT_TEMP_APU, + STT_TEMP_HS2, + STT_TEMP_COUNT, +}; + +enum amd_slider_op { + SLIDER_OP_GET, + SLIDER_OP_SET, +}; + +enum power_source { + POWER_SOURCE_AC, + POWER_SOURCE_DC, + POWER_SOURCE_MAX, +}; + +enum power_modes { + POWER_MODE_PERFORMANCE, + POWER_MODE_BALANCED_POWER, + POWER_MODE_POWER_SAVER, + POWER_MODE_MAX, +}; + +enum power_modes_v2 { + POWER_MODE_BEST_PERFORMANCE, + POWER_MODE_BALANCED, + POWER_MODE_BEST_POWER_EFFICIENCY, + POWER_MODE_ENERGY_SAVE, + POWER_MODE_V2_MAX, +}; + +struct pmf_bios_inputs_prev { + u32 custom_bios_inputs[BIOS_INPUTS_MAX]; +}; + +struct amd_pmf_dev { + void __iomem *regbase; + void __iomem *smu_virt_addr; + void *buf; + u32 base_addr; + u32 cpu_id; + struct device *dev; + struct mutex lock; /* protects the PMF interface */ + u32 supported_func; + enum platform_profile_option current_profile; + struct device *ppdev; /* platform profile class device */ + struct dentry *dbgfs_dir; + int hb_interval; /* SBIOS heartbeat interval */ + struct delayed_work heart_beat; + struct smu_pmf_metrics m_table; + struct smu_pmf_metrics_v2 m_table_v2; + struct delayed_work work_buffer; + ktime_t start_time; + int socket_power_history[AVG_SAMPLE_SIZE]; + int socket_power_history_idx; + bool amt_enabled; + struct mutex update_mutex; /* protects race between ACPI handler and metrics thread */ + bool cnqf_enabled; + bool cnqf_supported; + struct notifier_block pwr_src_notifier; + /* Smart PC solution builder */ + struct dentry *esbin; + unsigned char *policy_buf; + resource_size_t policy_sz; + struct tee_context *tee_ctx; + struct tee_shm *fw_shm_pool; + u32 session_id; + void *shbuf; + struct delayed_work pb_work; + struct pmf_action_table *prev_data; + resource_size_t policy_addr; + void __iomem *policy_base; + bool smart_pc_enabled; + u16 pmf_if_version; + struct input_dev *pmf_idev; + size_t mtable_size; + struct resource *res; + struct apmf_sbios_req_v2 req; /* To get custom bios pending request */ + struct mutex cb_mutex; + u32 notifications; + struct apmf_sbios_req_v1 req1; + struct pmf_bios_inputs_prev cb_prev; /* To preserve custom BIOS inputs */ + bool cb_flag; /* To handle first custom BIOS input */ +}; + +struct apmf_sps_prop_granular_v2 { + u8 power_states[POWER_SOURCE_MAX][POWER_MODE_V2_MAX]; +} __packed; + +struct apmf_sps_prop_granular { + u32 fppt; + u32 sppt; + u32 sppt_apu_only; + u32 spl; + u32 stt_min; + u8 stt_skin_temp[STT_TEMP_COUNT]; + u32 fan_id; 
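+	/* a fan_id of FAN_INDEX_AUTO selects automatic fan control; see amd_pmf_load_defaults_cnqf() */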
+} __packed; + +/* Static Slider */ +struct apmf_static_slider_granular_output { + u16 size; + struct apmf_sps_prop_granular prop[POWER_SOURCE_MAX * POWER_MODE_MAX]; +} __packed; + +struct amd_pmf_static_slider_granular { + u16 size; + struct apmf_sps_prop_granular prop[POWER_SOURCE_MAX][POWER_MODE_MAX]; +}; + +struct apmf_static_slider_granular_output_v2 { + u16 size; + struct apmf_sps_prop_granular_v2 sps_idx; +} __packed; + +struct amd_pmf_static_slider_granular_v2 { + u16 size; + struct apmf_sps_prop_granular_v2 sps_idx; +}; + +struct os_power_slider { + u16 size; + u8 slider_event; +} __packed; + +struct amd_pmf_notify_smart_pc_update { + u16 size; + u32 pending_req; + u32 custom_bios[BIOS_INPUTS_MAX]; +} __packed; + +struct fan_table_control { + bool manual; + unsigned long fan_id; +}; + +struct power_table_control { + u32 spl; + u32 sppt; + u32 fppt; + u32 sppt_apu_only; + u32 stt_min; + u32 stt_skin_temp[STT_TEMP_COUNT]; + u32 reserved[16]; +}; + +/* Auto Mode Layer */ +enum auto_mode_transition_priority { + AUTO_TRANSITION_TO_PERFORMANCE, /* Any other mode to Performance Mode */ + AUTO_TRANSITION_FROM_QUIET_TO_BALANCE, /* Quiet Mode to Balance Mode */ + AUTO_TRANSITION_TO_QUIET, /* Any other mode to Quiet Mode */ + AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE, /* Performance Mode to Balance Mode */ + AUTO_TRANSITION_MAX, +}; + +enum auto_mode_mode { + AUTO_QUIET, + AUTO_BALANCE, + AUTO_PERFORMANCE_ON_LAP, + AUTO_PERFORMANCE, + AUTO_MODE_MAX, +}; + +struct auto_mode_trans_params { + u32 time_constant; /* minimum time required to switch to next mode */ + u32 power_delta; /* delta power to shift mode */ + u32 power_threshold; + u32 timer; /* elapsed time. if timer > TimeThreshold, it will move to next mode */ + u32 applied; + enum auto_mode_mode target_mode; + u32 shifting_up; +}; + +struct auto_mode_mode_settings { + struct power_table_control power_control; + struct fan_table_control fan_control; + u32 power_floor; +}; + +struct auto_mode_mode_config { + struct auto_mode_trans_params transition[AUTO_TRANSITION_MAX]; + struct auto_mode_mode_settings mode_set[AUTO_MODE_MAX]; + enum auto_mode_mode current_mode; +}; + +struct apmf_auto_mode { + u16 size; + /* time constant */ + u32 balanced_to_perf; + u32 perf_to_balanced; + u32 quiet_to_balanced; + u32 balanced_to_quiet; + /* power floor */ + u32 pfloor_perf; + u32 pfloor_balanced; + u32 pfloor_quiet; + /* Power delta for mode change */ + u32 pd_balanced_to_perf; + u32 pd_perf_to_balanced; + u32 pd_quiet_to_balanced; + u32 pd_balanced_to_quiet; + /* skin temperature limits */ + u8 stt_apu_perf_on_lap; /* CQL ON */ + u8 stt_hs2_perf_on_lap; /* CQL ON */ + u8 stt_apu_perf; + u8 stt_hs2_perf; + u8 stt_apu_balanced; + u8 stt_hs2_balanced; + u8 stt_apu_quiet; + u8 stt_hs2_quiet; + u32 stt_min_limit_perf_on_lap; /* CQL ON */ + u32 stt_min_limit_perf; + u32 stt_min_limit_balanced; + u32 stt_min_limit_quiet; + /* SPL based */ + u32 fppt_perf_on_lap; /* CQL ON */ + u32 sppt_perf_on_lap; /* CQL ON */ + u32 spl_perf_on_lap; /* CQL ON */ + u32 sppt_apu_only_perf_on_lap; /* CQL ON */ + u32 fppt_perf; + u32 sppt_perf; + u32 spl_perf; + u32 sppt_apu_only_perf; + u32 fppt_balanced; + u32 sppt_balanced; + u32 spl_balanced; + u32 sppt_apu_only_balanced; + u32 fppt_quiet; + u32 sppt_quiet; + u32 spl_quiet; + u32 sppt_apu_only_quiet; + /* Fan ID */ + u32 fan_id_perf; + u32 fan_id_balanced; + u32 fan_id_quiet; +} __packed; + +/* CnQF Layer */ +enum cnqf_trans_priority { + CNQF_TRANSITION_TO_TURBO, /* Any other mode to Turbo Mode */ + 
CNQF_TRANSITION_FROM_BALANCE_TO_PERFORMANCE, /* quiet/balance to Performance Mode */ + CNQF_TRANSITION_FROM_QUIET_TO_BALANCE, /* Quiet Mode to Balance Mode */ + CNQF_TRANSITION_TO_QUIET, /* Any other mode to Quiet Mode */ + CNQF_TRANSITION_FROM_PERFORMANCE_TO_BALANCE, /* Performance/Turbo to Balance Mode */ + CNQF_TRANSITION_FROM_TURBO_TO_PERFORMANCE, /* Turbo mode to Performance Mode */ + CNQF_TRANSITION_MAX, +}; + +enum cnqf_mode { + CNQF_MODE_QUIET, + CNQF_MODE_BALANCE, + CNQF_MODE_PERFORMANCE, + CNQF_MODE_TURBO, + CNQF_MODE_MAX, +}; + +enum apmf_cnqf_pos { + APMF_CNQF_TURBO, + APMF_CNQF_PERFORMANCE, + APMF_CNQF_BALANCE, + APMF_CNQF_QUIET, + APMF_CNQF_MAX, +}; + +struct cnqf_mode_settings { + struct power_table_control power_control; + struct fan_table_control fan_control; + u32 power_floor; +}; + +struct cnqf_tran_params { + u32 time_constant; /* minimum time required to switch to next mode */ + u32 power_threshold; + u32 timer; /* elapsed time. if timer > timethreshold, it will move to next mode */ + u32 total_power; + u32 count; + bool priority; + bool shifting_up; + enum cnqf_mode target_mode; +}; + +struct cnqf_config { + struct cnqf_tran_params trans_param[POWER_SOURCE_MAX][CNQF_TRANSITION_MAX]; + struct cnqf_mode_settings mode_set[POWER_SOURCE_MAX][CNQF_MODE_MAX]; + struct power_table_control defaults; + enum cnqf_mode current_mode; + u32 power_src; + u32 avg_power; +}; + +struct apmf_cnqf_power_set { + u32 pfloor; + u32 fppt; + u32 sppt; + u32 sppt_apu_only; + u32 spl; + u32 stt_min_limit; + u8 stt_skintemp[STT_TEMP_COUNT]; + u32 fan_id; +} __packed; + +struct apmf_dyn_slider_output { + u16 size; + u16 flags; + u32 t_perf_to_turbo; + u32 t_balanced_to_perf; + u32 t_quiet_to_balanced; + u32 t_balanced_to_quiet; + u32 t_perf_to_balanced; + u32 t_turbo_to_perf; + struct apmf_cnqf_power_set ps[APMF_CNQF_MAX]; +} __packed; + +/* Smart PC - TA internals */ +enum system_state { + SYSTEM_STATE_S0i3, + SYSTEM_STATE_S4, + SYSTEM_STATE_SCREEN_LOCK, + SYSTEM_STATE_MAX, +}; + +enum ta_slider { + TA_BEST_BATTERY, + TA_BETTER_BATTERY, + TA_BETTER_PERFORMANCE, + TA_BEST_PERFORMANCE, + TA_MAX, +}; + +struct amd_pmf_pb_bitmap { + const char *name; + u32 bit_mask; +}; + +static const struct amd_pmf_pb_bitmap custom_bios_inputs[] __used = { + {"NOTIFY_CUSTOM_BIOS_INPUT1", BIT(5)}, + {"NOTIFY_CUSTOM_BIOS_INPUT2", BIT(6)}, + {"NOTIFY_CUSTOM_BIOS_INPUT3", BIT(7)}, + {"NOTIFY_CUSTOM_BIOS_INPUT4", BIT(8)}, + {"NOTIFY_CUSTOM_BIOS_INPUT5", BIT(9)}, + {"NOTIFY_CUSTOM_BIOS_INPUT6", BIT(10)}, + {"NOTIFY_CUSTOM_BIOS_INPUT7", BIT(11)}, + {"NOTIFY_CUSTOM_BIOS_INPUT8", BIT(12)}, + {"NOTIFY_CUSTOM_BIOS_INPUT9", BIT(13)}, + {"NOTIFY_CUSTOM_BIOS_INPUT10", BIT(14)}, +}; + +static const struct amd_pmf_pb_bitmap custom_bios_inputs_v1[] __used = { + {"NOTIFY_CUSTOM_BIOS_INPUT1", BIT(7)}, + {"NOTIFY_CUSTOM_BIOS_INPUT2", BIT(8)}, + {"NOTIFY_CUSTOM_BIOS_INPUT3", BIT(9)}, + {"NOTIFY_CUSTOM_BIOS_INPUT4", BIT(10)}, + {"NOTIFY_CUSTOM_BIOS_INPUT5", BIT(11)}, + {"NOTIFY_CUSTOM_BIOS_INPUT6", BIT(12)}, + {"NOTIFY_CUSTOM_BIOS_INPUT7", BIT(13)}, + {"NOTIFY_CUSTOM_BIOS_INPUT8", BIT(14)}, + {"NOTIFY_CUSTOM_BIOS_INPUT9", BIT(15)}, + {"NOTIFY_CUSTOM_BIOS_INPUT10", BIT(16)}, +}; + +enum platform_type { + PTYPE_UNKNOWN = 0, + LID_CLOSE, + CLAMSHELL, + FLAT, + TENT, + STAND, + TABLET, + BOOK, + PRESENTATION, + PULL_FWD, + PTYPE_INVALID = 0xf, +}; + +/* Command ids for TA communication */ +enum ta_pmf_command { + TA_PMF_COMMAND_POLICY_BUILDER_INITIALIZE, + TA_PMF_COMMAND_POLICY_BUILDER_ENACT_POLICIES, +}; + +enum ta_pmf_error_type { + 
TA_PMF_TYPE_SUCCESS, + TA_PMF_ERROR_TYPE_GENERIC, + TA_PMF_ERROR_TYPE_CRYPTO, + TA_PMF_ERROR_TYPE_CRYPTO_VALIDATE, + TA_PMF_ERROR_TYPE_CRYPTO_VERIFY_OEM, + TA_PMF_ERROR_TYPE_POLICY_BUILDER, + TA_PMF_ERROR_TYPE_PB_CONVERT, + TA_PMF_ERROR_TYPE_PB_SETUP, + TA_PMF_ERROR_TYPE_PB_ENACT, + TA_PMF_ERROR_TYPE_ASD_GET_DEVICE_INFO, + TA_PMF_ERROR_TYPE_ASD_GET_DEVICE_PCIE_INFO, + TA_PMF_ERROR_TYPE_SYS_DRV_FW_VALIDATION, + TA_PMF_ERROR_TYPE_MAX, +}; + +struct pmf_action_table { + enum system_state system_state; + u32 spl; /* in mW */ + u32 sppt; /* in mW */ + u32 sppt_apuonly; /* in mW */ + u32 fppt; /* in mW */ + u32 stt_minlimit; /* in mW */ + u32 stt_skintemp_apu; /* in C */ + u32 stt_skintemp_hs2; /* in C */ + u32 p3t_limit; /* in mW */ + u32 pmf_ppt; /* in mW */ + u32 pmf_ppt_apu_only; /* in mW */ +}; + +/* Input conditions */ +struct ta_pmf_condition_info { + u32 power_source; + u32 bat_percentage; + u32 power_slider; + u32 lid_state; + bool user_present; + u32 bios_input_1[2]; + u32 monitor_count; + u32 rsvd2[2]; + u32 bat_design; + u32 full_charge_capacity; + int drain_rate; + bool user_engaged; + u32 device_state; + u32 socket_power; + u32 skin_temperature; + u32 rsvd3[2]; + u32 platform_type; + u32 rsvd3_1[2]; + u32 ambient_light; + u32 length; + u32 avg_c0residency; + u32 max_c0residency; + u32 s0i3_entry; + u32 gfx_busy; + u32 rsvd4[7]; + bool camera_state; + u32 workload_type; + u32 display_type; + u32 display_state; + u32 rsvd5_1[17]; + u32 bios_input_2[8]; + u32 rsvd5[125]; +}; + +struct ta_pmf_load_policy_table { + u32 table_size; + u8 table[POLICY_BUF_MAX_SZ]; +}; + +/* TA initialization params */ +struct ta_pmf_init_table { + u32 frequency; /* SMU sampling frequency */ + bool validate; + bool sku_check; + bool metadata_macrocheck; + struct ta_pmf_load_policy_table policies_table; +}; + +/* Everything the TA needs to Enact Policies */ +struct ta_pmf_enact_table { + struct ta_pmf_condition_info ev_info; + u32 name; +}; + +struct ta_pmf_action { + u32 action_index; + u32 value; + u32 spl_arg; +}; + +/* Output actions from TA */ +struct ta_pmf_enact_result { + u32 actions_count; + struct ta_pmf_action actions_list[TA_PMF_ACTION_MAX]; + u32 undo_count; + struct ta_pmf_action undo_list[TA_PMF_UNDO_MAX]; +}; + +union ta_pmf_input { + struct ta_pmf_enact_table enact_table; + struct ta_pmf_init_table init_table; +}; + +union ta_pmf_output { + struct ta_pmf_enact_result policy_apply_table; + u32 rsvd[TA_OUTPUT_RESERVED_MEM]; +}; + +struct ta_pmf_shared_memory { + int command_id; + int resp_id; + u32 pmf_result; + u32 if_version; + union ta_pmf_output pmf_output; + union ta_pmf_input pmf_input; +}; + +/* Core Layer */ +int apmf_acpi_init(struct amd_pmf_dev *pmf_dev); +void apmf_acpi_deinit(struct amd_pmf_dev *pmf_dev); +int is_apmf_func_supported(struct amd_pmf_dev *pdev, unsigned long index); +int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32 *data); +int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev); +int amd_pmf_get_power_source(void); +int apmf_install_handler(struct amd_pmf_dev *pmf_dev); +int apmf_os_power_slider_update(struct amd_pmf_dev *dev, u8 flag); +int amd_pmf_set_dram_addr(struct amd_pmf_dev *dev, bool alloc_buffer); +int amd_pmf_notify_sbios_heartbeat_event_v2(struct amd_pmf_dev *dev, u8 flag); +u32 fixp_q88_fromint(u32 val); +int is_apmf_bios_input_notifications_supported(struct amd_pmf_dev *pdev); + +/* SPS Layer */ +int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf); +void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx, + 
			   struct amd_pmf_static_slider_granular *table);
+int amd_pmf_init_sps(struct amd_pmf_dev *dev);
+int apmf_get_static_slider_granular(struct amd_pmf_dev *pdev,
+				    struct apmf_static_slider_granular_output *output);
+bool is_pprof_balanced(struct amd_pmf_dev *pmf);
+int amd_pmf_power_slider_update_event(struct amd_pmf_dev *dev);
+const char *amd_pmf_source_as_str(unsigned int state);
+
+int apmf_update_fan_idx(struct amd_pmf_dev *pdev, bool manual, u32 idx);
+int amd_pmf_set_sps_power_limits(struct amd_pmf_dev *pmf);
+int apmf_get_static_slider_granular_v2(struct amd_pmf_dev *dev,
+				       struct apmf_static_slider_granular_output_v2 *data);
+int apts_get_static_slider_granular_v2(struct amd_pmf_dev *pdev,
+				       struct amd_pmf_apts_granular_output *data, u32 apts_idx);
+
+/* Auto Mode Layer */
+int apmf_get_auto_mode_def(struct amd_pmf_dev *pdev, struct apmf_auto_mode *data);
+void amd_pmf_init_auto_mode(struct amd_pmf_dev *dev);
+void amd_pmf_deinit_auto_mode(struct amd_pmf_dev *dev);
+void amd_pmf_trans_automode(struct amd_pmf_dev *dev, int socket_power, ktime_t time_elapsed_ms);
+int apmf_get_sbios_requests(struct amd_pmf_dev *pdev, struct apmf_sbios_req *req);
+int apmf_get_sbios_requests_v1(struct amd_pmf_dev *pdev, struct apmf_sbios_req_v1 *req);
+int apmf_get_sbios_requests_v2(struct amd_pmf_dev *pdev, struct apmf_sbios_req_v2 *req);
+
+void amd_pmf_update_2_cql(struct amd_pmf_dev *dev, bool is_cql_event);
+int amd_pmf_reset_amt(struct amd_pmf_dev *dev);
+void amd_pmf_handle_amt(struct amd_pmf_dev *dev);
+
+/* CnQF Layer */
+int apmf_get_dyn_slider_def_ac(struct amd_pmf_dev *pdev, struct apmf_dyn_slider_output *data);
+int apmf_get_dyn_slider_def_dc(struct amd_pmf_dev *pdev, struct apmf_dyn_slider_output *data);
+int amd_pmf_init_cnqf(struct amd_pmf_dev *dev);
+void amd_pmf_deinit_cnqf(struct amd_pmf_dev *dev);
+int amd_pmf_trans_cnqf(struct amd_pmf_dev *dev, int socket_power, ktime_t time_lapsed_ms);
+extern const struct attribute_group cnqf_feature_attribute_group;
+
+/* Smart PC builder Layer */
+int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev);
+void amd_pmf_deinit_smart_pc(struct amd_pmf_dev *dev);
+int apmf_check_smart_pc(struct amd_pmf_dev *pmf_dev);
+int amd_pmf_smartpc_apply_bios_output(struct amd_pmf_dev *dev, u32 val, u32 preq, u32 idx);
+
+/* Smart PC - TA interfaces */
+void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in);
+void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in);
+int amd_pmf_invoke_cmd_enact(struct amd_pmf_dev *dev);
+
+#endif /* PMF_H */
diff --git a/drivers/platform/x86/amd/pmf/spc.c b/drivers/platform/x86/amd/pmf/spc.c
new file mode 100644
index 000000000000..0a37dc6a7950
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/spc.c
@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Platform Management Framework Driver - Smart PC Capabilities
+ *
+ * Copyright (c) 2023, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ * + * Authors: Shyam Sundar S K <Shyam-sundar.S-k@amd.com> + * Patil Rajesh Reddy <Patil.Reddy@amd.com> + */ + +#include <acpi/button.h> +#include <linux/amd-pmf-io.h> +#include <linux/power_supply.h> +#include <linux/units.h> +#include "pmf.h" + +#ifdef CONFIG_AMD_PMF_DEBUG +static const char *platform_type_as_str(u16 platform_type) +{ + switch (platform_type) { + case CLAMSHELL: + return "CLAMSHELL"; + case FLAT: + return "FLAT"; + case TENT: + return "TENT"; + case STAND: + return "STAND"; + case TABLET: + return "TABLET"; + case BOOK: + return "BOOK"; + case PRESENTATION: + return "PRESENTATION"; + case PULL_FWD: + return "PULL_FWD"; + default: + return "UNKNOWN"; + } +} + +static const char *laptop_placement_as_str(u16 device_state) +{ + switch (device_state) { + case ON_TABLE: + return "ON_TABLE"; + case ON_LAP_MOTION: + return "ON_LAP_MOTION"; + case IN_BAG: + return "IN_BAG"; + case OUT_OF_BAG: + return "OUT_OF_BAG"; + default: + return "UNKNOWN"; + } +} + +static const char *ta_slider_as_str(unsigned int state) +{ + switch (state) { + case TA_BEST_PERFORMANCE: + return "PERFORMANCE"; + case TA_BETTER_PERFORMANCE: + return "BALANCED"; + case TA_BEST_BATTERY: + return "POWER_SAVER"; + default: + return "Unknown TA Slider State"; + } +} + +static u32 amd_pmf_get_ta_custom_bios_inputs(struct ta_pmf_enact_table *in, int index) +{ + switch (index) { + case 0 ... 1: + return in->ev_info.bios_input_1[index]; + case 2 ... 9: + return in->ev_info.bios_input_2[index - 2]; + default: + return 0; + } +} + +void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in) +{ + int i; + + dev_dbg(dev->dev, "==== TA inputs START ====\n"); + dev_dbg(dev->dev, "Slider State: %s\n", ta_slider_as_str(in->ev_info.power_slider)); + dev_dbg(dev->dev, "Power Source: %s\n", amd_pmf_source_as_str(in->ev_info.power_source)); + dev_dbg(dev->dev, "Battery Percentage: %u\n", in->ev_info.bat_percentage); + dev_dbg(dev->dev, "Designed Battery Capacity: %u\n", in->ev_info.bat_design); + dev_dbg(dev->dev, "Fully Charged Capacity: %u\n", in->ev_info.full_charge_capacity); + dev_dbg(dev->dev, "Drain Rate: %d\n", in->ev_info.drain_rate); + dev_dbg(dev->dev, "Socket Power: %u\n", in->ev_info.socket_power); + dev_dbg(dev->dev, "Skin Temperature: %u\n", in->ev_info.skin_temperature); + dev_dbg(dev->dev, "Avg C0 Residency: %u\n", in->ev_info.avg_c0residency); + dev_dbg(dev->dev, "Max C0 Residency: %u\n", in->ev_info.max_c0residency); + dev_dbg(dev->dev, "GFX Busy: %u\n", in->ev_info.gfx_busy); + dev_dbg(dev->dev, "LID State: %s\n", in->ev_info.lid_state ? "close" : "open"); + dev_dbg(dev->dev, "User Presence: %s\n", in->ev_info.user_present ? "Present" : "Away"); + dev_dbg(dev->dev, "Ambient Light: %d\n", in->ev_info.ambient_light); + dev_dbg(dev->dev, "Platform type: %s\n", platform_type_as_str(in->ev_info.platform_type)); + dev_dbg(dev->dev, "Laptop placement: %s\n", + laptop_placement_as_str(in->ev_info.device_state)); + for (i = 0; i < ARRAY_SIZE(custom_bios_inputs); i++) + dev_dbg(dev->dev, "Custom BIOS input%d: %u\n", i + 1, + amd_pmf_get_ta_custom_bios_inputs(in, i)); + dev_dbg(dev->dev, "==== TA inputs END ====\n"); +} +#else +void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in) {} +#endif + +/* + * This helper function sets the appropriate BIOS input value in the TA enact + * table based on the provided index. We need this approach because the custom + * BIOS input array is not continuous, due to the existing TA structure layout. 
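+ * For example, index 0 lands in bios_input_1[0], while index 5 lands in
+ * bios_input_2[3].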
+ */ +static void amd_pmf_set_ta_custom_bios_input(struct ta_pmf_enact_table *in, int index, u32 value) +{ + switch (index) { + case 0 ... 1: + in->ev_info.bios_input_1[index] = value; + break; + case 2 ... 9: + in->ev_info.bios_input_2[index - 2] = value; + break; + default: + return; + } +} + +static void amd_pmf_update_bios_inputs(struct amd_pmf_dev *pdev, u32 pending_req, + const struct amd_pmf_pb_bitmap *inputs, + const u32 *custom_policy, struct ta_pmf_enact_table *in) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(custom_bios_inputs); i++) { + if (!(pending_req & inputs[i].bit_mask)) + continue; + amd_pmf_set_ta_custom_bios_input(in, i, custom_policy[i]); + pdev->cb_prev.custom_bios_inputs[i] = custom_policy[i]; + dev_dbg(pdev->dev, "Custom BIOS Input[%d]: %u\n", i, custom_policy[i]); + } +} + +static void amd_pmf_get_custom_bios_inputs(struct amd_pmf_dev *pdev, + struct ta_pmf_enact_table *in) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(custom_bios_inputs); i++) + amd_pmf_set_ta_custom_bios_input(in, i, pdev->cb_prev.custom_bios_inputs[i]); + + if (!(pdev->req.pending_req || pdev->req1.pending_req)) + return; + + if (!pdev->smart_pc_enabled) + return; + + switch (pdev->pmf_if_version) { + case PMF_IF_V1: + if (!is_apmf_bios_input_notifications_supported(pdev)) + return; + amd_pmf_update_bios_inputs(pdev, pdev->req1.pending_req, custom_bios_inputs_v1, + pdev->req1.custom_policy, in); + break; + case PMF_IF_V2: + amd_pmf_update_bios_inputs(pdev, pdev->req.pending_req, custom_bios_inputs, + pdev->req.custom_policy, in); + break; + default: + break; + } + + /* Clear pending requests after handling */ + memset(&pdev->req, 0, sizeof(pdev->req)); + memset(&pdev->req1, 0, sizeof(pdev->req1)); +} + +static void amd_pmf_get_c0_residency(u16 *core_res, size_t size, struct ta_pmf_enact_table *in) +{ + u16 max, avg = 0; + int i; + + /* Get the avg and max C0 residency of all the cores */ + max = *core_res; + for (i = 0; i < size; i++) { + avg += core_res[i]; + if (core_res[i] > max) + max = core_res[i]; + } + avg = DIV_ROUND_CLOSEST(avg, size); + in->ev_info.avg_c0residency = avg; + in->ev_info.max_c0residency = max; +} + +static void amd_pmf_get_smu_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in) +{ + /* Get the updated metrics table data */ + memset(dev->buf, 0, dev->mtable_size); + amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, SET_CMD, METRICS_TABLE_ID, NULL); + + switch (dev->cpu_id) { + case AMD_CPU_ID_PS: + memcpy(&dev->m_table, dev->buf, dev->mtable_size); + in->ev_info.socket_power = dev->m_table.apu_power + dev->m_table.dgpu_power; + in->ev_info.skin_temperature = dev->m_table.skin_temp; + in->ev_info.gfx_busy = dev->m_table.avg_gfx_activity; + amd_pmf_get_c0_residency(dev->m_table.avg_core_c0residency, + ARRAY_SIZE(dev->m_table.avg_core_c0residency), in); + break; + case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT: + case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT: + memcpy(&dev->m_table_v2, dev->buf, dev->mtable_size); + in->ev_info.socket_power = dev->m_table_v2.apu_power + dev->m_table_v2.dgpu_power; + in->ev_info.skin_temperature = dev->m_table_v2.skin_temp; + in->ev_info.gfx_busy = dev->m_table_v2.gfx_activity; + amd_pmf_get_c0_residency(dev->m_table_v2.core_c0residency, + ARRAY_SIZE(dev->m_table_v2.core_c0residency), in); + break; + default: + dev_err(dev->dev, "Unsupported CPU id: 0x%x", dev->cpu_id); + } +} + +static const char * const pmf_battery_supply_name[] = { + "BATT", + "BAT0", +}; + +static int amd_pmf_get_battery_prop(enum power_supply_property prop) +{ + union 
power_supply_propval value;
+	struct power_supply *psy;
+	int i, ret;
+
+	for (i = 0; i < ARRAY_SIZE(pmf_battery_supply_name); i++) {
+		psy = power_supply_get_by_name(pmf_battery_supply_name[i]);
+		if (!psy)
+			continue;
+
+		ret = power_supply_get_property(psy, prop, &value);
+		power_supply_put(psy);
+		if (ret)
+			return ret;
+
+		return value.intval;
+	}
+
+	return -ENODEV;
+}
+
+static int amd_pmf_get_battery_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in)
+{
+	int val;
+
+	val = amd_pmf_get_battery_prop(POWER_SUPPLY_PROP_PRESENT);
+	if (val < 0)
+		return val;
+	if (val != 1)
+		return -ENODEV;
+
+	in->ev_info.bat_percentage = amd_pmf_get_battery_prop(POWER_SUPPLY_PROP_CAPACITY);
+	/* energy values converted to mWh, drain rate to mW */
+	in->ev_info.bat_design = amd_pmf_get_battery_prop(POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN) /
+		MILLIWATT_PER_WATT;
+	in->ev_info.full_charge_capacity = amd_pmf_get_battery_prop(POWER_SUPPLY_PROP_ENERGY_FULL) /
+		MILLIWATT_PER_WATT;
+	in->ev_info.drain_rate = amd_pmf_get_battery_prop(POWER_SUPPLY_PROP_POWER_NOW) /
+		MILLIWATT_PER_WATT;
+
+	return 0;
+}
+
+static int amd_pmf_get_slider_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in)
+{
+	int val;
+
+	switch (dev->current_profile) {
+	case PLATFORM_PROFILE_PERFORMANCE:
+	case PLATFORM_PROFILE_BALANCED_PERFORMANCE:
+		val = TA_BEST_PERFORMANCE;
+		break;
+	case PLATFORM_PROFILE_BALANCED:
+		val = TA_BETTER_PERFORMANCE;
+		break;
+	case PLATFORM_PROFILE_LOW_POWER:
+	case PLATFORM_PROFILE_QUIET:
+		val = TA_BEST_BATTERY;
+		break;
+	default:
+		dev_err(dev->dev, "Unknown Platform Profile.\n");
+		return -EOPNOTSUPP;
+	}
+	in->ev_info.power_slider = val;
+
+	return 0;
+}
+
+static void amd_pmf_get_sensor_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in)
+{
+	struct amd_sfh_info sfh_info;
+
+	/* Get the latest information from SFH */
+	in->ev_info.user_present = false;
+
+	/* Get ALS data */
+	if (!amd_get_sfh_info(&sfh_info, MT_ALS))
+		in->ev_info.ambient_light = sfh_info.ambient_light;
+	else
+		dev_dbg(dev->dev, "ALS is not enabled/detected\n");
+
+	/* Get HPD data */
+	if (!amd_get_sfh_info(&sfh_info, MT_HPD)) {
+		if (sfh_info.user_present == SFH_USER_PRESENT)
+			in->ev_info.user_present = true;
+	} else {
+		dev_dbg(dev->dev, "HPD is not enabled/detected\n");
+	}
+
+	/* Get SRA (Secondary Accelerometer) data */
+	if (!amd_get_sfh_info(&sfh_info, MT_SRA)) {
+		in->ev_info.platform_type = sfh_info.platform_type;
+		in->ev_info.device_state = sfh_info.laptop_placement;
+	} else {
+		dev_dbg(dev->dev, "SRA is not enabled/detected\n");
+	}
+}
+
+void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in)
+{
+	/* The TA uses 1 for lid closed and 0 for lid open, hence the ! here */
+	in->ev_info.lid_state = !acpi_lid_open();
+	in->ev_info.power_source = amd_pmf_get_power_source();
+	amd_pmf_get_smu_info(dev, in);
+	amd_pmf_get_battery_info(dev, in);
+	amd_pmf_get_slider_info(dev, in);
+	amd_pmf_get_sensor_info(dev, in);
+	amd_pmf_get_custom_bios_inputs(dev, in);
+}
diff --git a/drivers/platform/x86/amd/pmf/sps.c b/drivers/platform/x86/amd/pmf/sps.c
new file mode 100644
index 000000000000..0b70a5153f46
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/sps.c
@@ -0,0 +1,444 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Platform Management Framework (PMF) Driver
+ *
+ * Copyright (c) 2022, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ * + * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com> + */ + +#include "pmf.h" + +static struct amd_pmf_static_slider_granular_v2 config_store_v2; +static struct amd_pmf_static_slider_granular config_store; +static struct amd_pmf_apts_granular apts_config_store; + +#ifdef CONFIG_AMD_PMF_DEBUG +static const char *slider_v2_as_str(unsigned int state) +{ + switch (state) { + case POWER_MODE_BEST_PERFORMANCE: + return "Best Performance"; + case POWER_MODE_BALANCED: + return "Balanced"; + case POWER_MODE_BEST_POWER_EFFICIENCY: + return "Best Power Efficiency"; + case POWER_MODE_ENERGY_SAVE: + return "Energy Save"; + default: + return "Unknown Power Mode"; + } +} + +static const char *slider_as_str(unsigned int state) +{ + switch (state) { + case POWER_MODE_PERFORMANCE: + return "PERFORMANCE"; + case POWER_MODE_BALANCED_POWER: + return "BALANCED_POWER"; + case POWER_MODE_POWER_SAVER: + return "POWER_SAVER"; + default: + return "Unknown Slider State"; + } +} + +const char *amd_pmf_source_as_str(unsigned int state) +{ + switch (state) { + case POWER_SOURCE_AC: + return "AC"; + case POWER_SOURCE_DC: + return "DC"; + default: + return "Unknown Power State"; + } +} + +static void amd_pmf_dump_sps_defaults(struct amd_pmf_static_slider_granular *data) +{ + int i, j; + + pr_debug("Static Slider Data - BEGIN\n"); + + for (i = 0; i < POWER_SOURCE_MAX; i++) { + for (j = 0; j < POWER_MODE_MAX; j++) { + pr_debug("--- Source:%s Mode:%s ---\n", amd_pmf_source_as_str(i), + slider_as_str(j)); + pr_debug("SPL: %u mW\n", data->prop[i][j].spl); + pr_debug("SPPT: %u mW\n", data->prop[i][j].sppt); + pr_debug("SPPT_ApuOnly: %u mW\n", data->prop[i][j].sppt_apu_only); + pr_debug("FPPT: %u mW\n", data->prop[i][j].fppt); + pr_debug("STTMinLimit: %u mW\n", data->prop[i][j].stt_min); + pr_debug("STT_SkinTempLimit_APU: %u C\n", + data->prop[i][j].stt_skin_temp[STT_TEMP_APU]); + pr_debug("STT_SkinTempLimit_HS2: %u C\n", + data->prop[i][j].stt_skin_temp[STT_TEMP_HS2]); + } + } + + pr_debug("Static Slider Data - END\n"); +} + +static void amd_pmf_dump_sps_defaults_v2(struct amd_pmf_static_slider_granular_v2 *data) +{ + unsigned int i, j; + + pr_debug("Static Slider APTS state index data - BEGIN"); + pr_debug("size: %u\n", data->size); + + for (i = 0; i < POWER_SOURCE_MAX; i++) + for (j = 0; j < POWER_MODE_V2_MAX; j++) + pr_debug("%s %s: %u\n", amd_pmf_source_as_str(i), slider_v2_as_str(j), + data->sps_idx.power_states[i][j]); + + pr_debug("Static Slider APTS state index data - END\n"); +} + +static void amd_pmf_dump_apts_sps_defaults(struct amd_pmf_apts_granular *info) +{ + int i; + + pr_debug("Static Slider APTS index default values data - BEGIN"); + + for (i = 0; i < APTS_MAX_STATES; i++) { + pr_debug("Table Version[%d] = %u\n", i, info->val[i].table_version); + pr_debug("Fan Index[%d] = %u\n", i, info->val[i].fan_table_idx); + pr_debug("PPT[%d] = %u\n", i, info->val[i].pmf_ppt); + pr_debug("PPT APU[%d] = %u\n", i, info->val[i].ppt_pmf_apu_only); + pr_debug("STT Min[%d] = %u\n", i, info->val[i].stt_min_limit); + pr_debug("STT APU[%d] = %u\n", i, info->val[i].stt_skin_temp_limit_apu); + pr_debug("STT HS2[%d] = %u\n", i, info->val[i].stt_skin_temp_limit_hs2); + } + + pr_debug("Static Slider APTS index default values data - END"); +} +#else +static void amd_pmf_dump_sps_defaults(struct amd_pmf_static_slider_granular *data) {} +static void amd_pmf_dump_sps_defaults_v2(struct amd_pmf_static_slider_granular_v2 *data) {} +static void amd_pmf_dump_apts_sps_defaults(struct amd_pmf_apts_granular *info) {} +#endif + +static void 
amd_pmf_load_apts_defaults_sps_v2(struct amd_pmf_dev *pdev) +{ + struct amd_pmf_apts_granular_output output; + struct amd_pmf_apts_output *ps; + int i; + + memset(&apts_config_store, 0, sizeof(apts_config_store)); + + ps = apts_config_store.val; + + for (i = 0; i < APTS_MAX_STATES; i++) { + apts_get_static_slider_granular_v2(pdev, &output, i); + ps[i].table_version = output.val.table_version; + ps[i].fan_table_idx = output.val.fan_table_idx; + ps[i].pmf_ppt = output.val.pmf_ppt; + ps[i].ppt_pmf_apu_only = output.val.ppt_pmf_apu_only; + ps[i].stt_min_limit = output.val.stt_min_limit; + ps[i].stt_skin_temp_limit_apu = output.val.stt_skin_temp_limit_apu; + ps[i].stt_skin_temp_limit_hs2 = output.val.stt_skin_temp_limit_hs2; + } + + amd_pmf_dump_apts_sps_defaults(&apts_config_store); +} + +static void amd_pmf_load_defaults_sps_v2(struct amd_pmf_dev *dev) +{ + struct apmf_static_slider_granular_output_v2 output; + unsigned int i, j; + + memset(&config_store_v2, 0, sizeof(config_store_v2)); + apmf_get_static_slider_granular_v2(dev, &output); + + config_store_v2.size = output.size; + + for (i = 0; i < POWER_SOURCE_MAX; i++) + for (j = 0; j < POWER_MODE_V2_MAX; j++) + config_store_v2.sps_idx.power_states[i][j] = + output.sps_idx.power_states[i][j]; + + amd_pmf_dump_sps_defaults_v2(&config_store_v2); +} + +static void amd_pmf_load_defaults_sps(struct amd_pmf_dev *dev) +{ + struct apmf_static_slider_granular_output output; + int i, j, idx = 0; + + memset(&config_store, 0, sizeof(config_store)); + apmf_get_static_slider_granular(dev, &output); + + for (i = 0; i < POWER_SOURCE_MAX; i++) { + for (j = 0; j < POWER_MODE_MAX; j++) { + config_store.prop[i][j].spl = output.prop[idx].spl; + config_store.prop[i][j].sppt = output.prop[idx].sppt; + config_store.prop[i][j].sppt_apu_only = + output.prop[idx].sppt_apu_only; + config_store.prop[i][j].fppt = output.prop[idx].fppt; + config_store.prop[i][j].stt_min = output.prop[idx].stt_min; + config_store.prop[i][j].stt_skin_temp[STT_TEMP_APU] = + output.prop[idx].stt_skin_temp[STT_TEMP_APU]; + config_store.prop[i][j].stt_skin_temp[STT_TEMP_HS2] = + output.prop[idx].stt_skin_temp[STT_TEMP_HS2]; + config_store.prop[i][j].fan_id = output.prop[idx].fan_id; + idx++; + } + } + amd_pmf_dump_sps_defaults(&config_store); +} + +static void amd_pmf_update_slider_v2(struct amd_pmf_dev *dev, int idx) +{ + amd_pmf_send_cmd(dev, SET_PMF_PPT, SET_CMD, apts_config_store.val[idx].pmf_ppt, NULL); + amd_pmf_send_cmd(dev, SET_PMF_PPT_APU_ONLY, SET_CMD, + apts_config_store.val[idx].ppt_pmf_apu_only, NULL); + amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, SET_CMD, + apts_config_store.val[idx].stt_min_limit, NULL); + amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, SET_CMD, + fixp_q88_fromint(apts_config_store.val[idx].stt_skin_temp_limit_apu), + NULL); + amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, SET_CMD, + fixp_q88_fromint(apts_config_store.val[idx].stt_skin_temp_limit_hs2), + NULL); +} + +void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx, + struct amd_pmf_static_slider_granular *table) +{ + int src = amd_pmf_get_power_source(); + + if (op == SLIDER_OP_SET) { + amd_pmf_send_cmd(dev, SET_SPL, SET_CMD, config_store.prop[src][idx].spl, NULL); + amd_pmf_send_cmd(dev, SET_FPPT, SET_CMD, config_store.prop[src][idx].fppt, NULL); + amd_pmf_send_cmd(dev, SET_SPPT, SET_CMD, config_store.prop[src][idx].sppt, NULL); + amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, SET_CMD, + config_store.prop[src][idx].sppt_apu_only, NULL); + amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, SET_CMD, + 
config_store.prop[src][idx].stt_min, NULL); + amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, SET_CMD, + fixp_q88_fromint(config_store.prop[src][idx].stt_skin_temp[STT_TEMP_APU]), + NULL); + amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, SET_CMD, + fixp_q88_fromint(config_store.prop[src][idx].stt_skin_temp[STT_TEMP_HS2]), + NULL); + } else if (op == SLIDER_OP_GET) { + amd_pmf_send_cmd(dev, GET_SPL, GET_CMD, ARG_NONE, &table->prop[src][idx].spl); + amd_pmf_send_cmd(dev, GET_FPPT, GET_CMD, ARG_NONE, &table->prop[src][idx].fppt); + amd_pmf_send_cmd(dev, GET_SPPT, GET_CMD, ARG_NONE, &table->prop[src][idx].sppt); + amd_pmf_send_cmd(dev, GET_SPPT_APU_ONLY, GET_CMD, ARG_NONE, + &table->prop[src][idx].sppt_apu_only); + amd_pmf_send_cmd(dev, GET_STT_MIN_LIMIT, GET_CMD, ARG_NONE, + &table->prop[src][idx].stt_min); + amd_pmf_send_cmd(dev, GET_STT_LIMIT_APU, GET_CMD, ARG_NONE, + (u32 *)&table->prop[src][idx].stt_skin_temp[STT_TEMP_APU]); + amd_pmf_send_cmd(dev, GET_STT_LIMIT_HS2, GET_CMD, ARG_NONE, + (u32 *)&table->prop[src][idx].stt_skin_temp[STT_TEMP_HS2]); + } +} + +static int amd_pmf_update_sps_power_limits_v2(struct amd_pmf_dev *pdev, int pwr_mode) +{ + int src, index; + + src = amd_pmf_get_power_source(); + + switch (pwr_mode) { + case POWER_MODE_PERFORMANCE: + index = config_store_v2.sps_idx.power_states[src][POWER_MODE_BEST_PERFORMANCE]; + amd_pmf_update_slider_v2(pdev, index); + break; + case POWER_MODE_BALANCED_POWER: + index = config_store_v2.sps_idx.power_states[src][POWER_MODE_BALANCED]; + amd_pmf_update_slider_v2(pdev, index); + break; + case POWER_MODE_POWER_SAVER: + index = config_store_v2.sps_idx.power_states[src][POWER_MODE_BEST_POWER_EFFICIENCY]; + amd_pmf_update_slider_v2(pdev, index); + break; + default: + return -EINVAL; + } + + return 0; +} + +int amd_pmf_set_sps_power_limits(struct amd_pmf_dev *pmf) +{ + int mode; + + mode = amd_pmf_get_pprof_modes(pmf); + if (mode < 0) + return mode; + + if (pmf->pmf_if_version == PMF_IF_V2) + return amd_pmf_update_sps_power_limits_v2(pmf, mode); + + amd_pmf_update_slider(pmf, SLIDER_OP_SET, mode, NULL); + + return 0; +} + +bool is_pprof_balanced(struct amd_pmf_dev *pmf) +{ + return pmf->current_profile == PLATFORM_PROFILE_BALANCED; +} + +static int amd_pmf_profile_get(struct device *dev, + enum platform_profile_option *profile) +{ + struct amd_pmf_dev *pmf = dev_get_drvdata(dev); + + *profile = pmf->current_profile; + return 0; +} + +int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf) +{ + int mode; + + switch (pmf->current_profile) { + case PLATFORM_PROFILE_PERFORMANCE: + case PLATFORM_PROFILE_BALANCED_PERFORMANCE: + mode = POWER_MODE_PERFORMANCE; + break; + case PLATFORM_PROFILE_BALANCED: + mode = POWER_MODE_BALANCED_POWER; + break; + case PLATFORM_PROFILE_LOW_POWER: + case PLATFORM_PROFILE_QUIET: + mode = POWER_MODE_POWER_SAVER; + break; + default: + dev_err(pmf->dev, "Unknown Platform Profile.\n"); + return -EOPNOTSUPP; + } + + return mode; +} + +int amd_pmf_power_slider_update_event(struct amd_pmf_dev *dev) +{ + u8 flag = 0; + int mode; + int src; + + mode = amd_pmf_get_pprof_modes(dev); + if (mode < 0) + return mode; + + src = amd_pmf_get_power_source(); + + if (src == POWER_SOURCE_AC) { + switch (mode) { + case POWER_MODE_PERFORMANCE: + flag |= BIT(AC_BEST_PERF); + break; + case POWER_MODE_BALANCED_POWER: + flag |= BIT(AC_BETTER_PERF); + break; + case POWER_MODE_POWER_SAVER: + flag |= BIT(AC_BETTER_BATTERY); + break; + default: + dev_err(dev->dev, "unsupported platform profile\n"); + return -EOPNOTSUPP; + } + + } else if (src == POWER_SOURCE_DC) { 
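+        /* Same mapping as the AC case above, using the DC (battery) slider bit definitions */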
+ switch (mode) { + case POWER_MODE_PERFORMANCE: + flag |= BIT(DC_BEST_PERF); + break; + case POWER_MODE_BALANCED_POWER: + flag |= BIT(DC_BETTER_PERF); + break; + case POWER_MODE_POWER_SAVER: + flag |= BIT(DC_BATTERY_SAVER); + break; + default: + dev_err(dev->dev, "unsupported platform profile\n"); + return -EOPNOTSUPP; + } + } + + apmf_os_power_slider_update(dev, flag); + + return 0; +} + +static int amd_pmf_profile_set(struct device *dev, + enum platform_profile_option profile) +{ + struct amd_pmf_dev *pmf = dev_get_drvdata(dev); + int ret = 0; + + pmf->current_profile = profile; + + /* Notify EC about the slider position change */ + if (is_apmf_func_supported(pmf, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) { + ret = amd_pmf_power_slider_update_event(pmf); + if (ret) + return ret; + } + + if (is_apmf_func_supported(pmf, APMF_FUNC_STATIC_SLIDER_GRANULAR)) { + ret = amd_pmf_set_sps_power_limits(pmf); + if (ret) + return ret; + } + + return 0; +} + +static int amd_pmf_hidden_choices(void *drvdata, unsigned long *choices) +{ + set_bit(PLATFORM_PROFILE_QUIET, choices); + set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, choices); + + return 0; +} + +static int amd_pmf_profile_probe(void *drvdata, unsigned long *choices) +{ + set_bit(PLATFORM_PROFILE_LOW_POWER, choices); + set_bit(PLATFORM_PROFILE_BALANCED, choices); + set_bit(PLATFORM_PROFILE_PERFORMANCE, choices); + + return 0; +} + +static const struct platform_profile_ops amd_pmf_profile_ops = { + .probe = amd_pmf_profile_probe, + .hidden_choices = amd_pmf_hidden_choices, + .profile_get = amd_pmf_profile_get, + .profile_set = amd_pmf_profile_set, +}; + +int amd_pmf_init_sps(struct amd_pmf_dev *dev) +{ + dev->current_profile = PLATFORM_PROFILE_BALANCED; + + if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) { + if (dev->pmf_if_version == PMF_IF_V2) { + amd_pmf_load_defaults_sps_v2(dev); + amd_pmf_load_apts_defaults_sps_v2(dev); + } else { + amd_pmf_load_defaults_sps(dev); + } + + /* update SPS balanced power mode thermals */ + amd_pmf_set_sps_power_limits(dev); + } + + /* Create platform_profile structure and register */ + dev->ppdev = devm_platform_profile_register(dev->dev, "amd-pmf", dev, + &amd_pmf_profile_ops); + if (IS_ERR(dev->ppdev)) + dev_err(dev->dev, "Failed to register SPS support, this is most likely an SBIOS bug: %ld\n", + PTR_ERR(dev->ppdev)); + + return PTR_ERR_OR_ZERO(dev->ppdev); +} diff --git a/drivers/platform/x86/amd/pmf/tee-if.c b/drivers/platform/x86/amd/pmf/tee-if.c new file mode 100644 index 000000000000..0abce76f89ff --- /dev/null +++ b/drivers/platform/x86/amd/pmf/tee-if.c @@ -0,0 +1,629 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * AMD Platform Management Framework Driver - TEE Interface + * + * Copyright (c) 2023, Advanced Micro Devices, Inc. + * All Rights Reserved. 
+ * + * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com> + */ + +#include <linux/debugfs.h> +#include <linux/tee_drv.h> +#include <linux/uuid.h> +#include "pmf.h" + +#define MAX_TEE_PARAM 4 + +/* Policy binary actions sampling frequency (in ms) */ +static int pb_actions_ms = MSEC_PER_SEC; +/* Sideload policy binaries to debug policy failures */ +static bool pb_side_load; + +#ifdef CONFIG_AMD_PMF_DEBUG +module_param(pb_actions_ms, int, 0644); +MODULE_PARM_DESC(pb_actions_ms, "Policy binary actions sampling frequency (default = 1000ms)"); +module_param(pb_side_load, bool, 0444); +MODULE_PARM_DESC(pb_side_load, "Sideload policy binaries debug policy failures"); +#endif + +static const uuid_t amd_pmf_ta_uuid[] = { UUID_INIT(0xd9b39bf2, 0x66bd, 0x4154, 0xaf, 0xb8, 0x8a, + 0xcc, 0x2b, 0x2b, 0x60, 0xd6), + UUID_INIT(0x6fd93b77, 0x3fb8, 0x524d, 0xb1, 0x2d, 0xc5, + 0x29, 0xb1, 0x3d, 0x85, 0x43), + }; + +static const char *amd_pmf_uevent_as_str(unsigned int state) +{ + switch (state) { + case SYSTEM_STATE_S0i3: + return "S0i3"; + case SYSTEM_STATE_S4: + return "S4"; + case SYSTEM_STATE_SCREEN_LOCK: + return "SCREEN_LOCK"; + default: + return "Unknown Smart PC event"; + } +} + +static void amd_pmf_prepare_args(struct amd_pmf_dev *dev, int cmd, + struct tee_ioctl_invoke_arg *arg, + struct tee_param *param) +{ + memset(arg, 0, sizeof(*arg)); + memset(param, 0, MAX_TEE_PARAM * sizeof(*param)); + + arg->func = cmd; + arg->session = dev->session_id; + arg->num_params = MAX_TEE_PARAM; + + /* Fill invoke cmd params */ + param[0].u.memref.size = sizeof(struct ta_pmf_shared_memory); + param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT; + param[0].u.memref.shm = dev->fw_shm_pool; + param[0].u.memref.shm_offs = 0; +} + +static void amd_pmf_update_uevents(struct amd_pmf_dev *dev, u16 event) +{ + input_report_key(dev->pmf_idev, event, 1); /* key press */ + input_sync(dev->pmf_idev); + input_report_key(dev->pmf_idev, event, 0); /* key release */ + input_sync(dev->pmf_idev); +} + +static int amd_pmf_get_bios_output_idx(u32 action_idx) +{ + switch (action_idx) { + case PMF_POLICY_BIOS_OUTPUT_1: + return 0; + case PMF_POLICY_BIOS_OUTPUT_2: + return 1; + case PMF_POLICY_BIOS_OUTPUT_3: + return 2; + case PMF_POLICY_BIOS_OUTPUT_4: + return 3; + case PMF_POLICY_BIOS_OUTPUT_5: + return 4; + case PMF_POLICY_BIOS_OUTPUT_6: + return 5; + case PMF_POLICY_BIOS_OUTPUT_7: + return 6; + case PMF_POLICY_BIOS_OUTPUT_8: + return 7; + case PMF_POLICY_BIOS_OUTPUT_9: + return 8; + case PMF_POLICY_BIOS_OUTPUT_10: + return 9; + default: + return -EINVAL; + } +} + +static void amd_pmf_update_bios_output(struct amd_pmf_dev *pdev, struct ta_pmf_action *action) +{ + u32 bios_idx; + + bios_idx = amd_pmf_get_bios_output_idx(action->action_index); + + amd_pmf_smartpc_apply_bios_output(pdev, action->value, BIT(bios_idx), bios_idx); +} + +static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_result *out) +{ + struct ta_pmf_action *action; + u32 val; + int idx; + + for (idx = 0; idx < out->actions_count; idx++) { + action = &out->actions_list[idx]; + val = action->value; + switch (action->action_index) { + case PMF_POLICY_SPL: + if (dev->prev_data->spl != val) { + amd_pmf_send_cmd(dev, SET_SPL, SET_CMD, val, NULL); + dev_dbg(dev->dev, "update SPL: %u\n", val); + dev->prev_data->spl = val; + } + break; + + case PMF_POLICY_SPPT: + if (dev->prev_data->sppt != val) { + amd_pmf_send_cmd(dev, SET_SPPT, SET_CMD, val, NULL); + dev_dbg(dev->dev, "update SPPT: %u\n", val); + dev->prev_data->sppt = val; + } + break; + + case 
PMF_POLICY_FPPT: + if (dev->prev_data->fppt != val) { + amd_pmf_send_cmd(dev, SET_FPPT, SET_CMD, val, NULL); + dev_dbg(dev->dev, "update FPPT: %u\n", val); + dev->prev_data->fppt = val; + } + break; + + case PMF_POLICY_SPPT_APU_ONLY: + if (dev->prev_data->sppt_apuonly != val) { + amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, SET_CMD, val, NULL); + dev_dbg(dev->dev, "update SPPT_APU_ONLY: %u\n", val); + dev->prev_data->sppt_apuonly = val; + } + break; + + case PMF_POLICY_STT_MIN: + if (dev->prev_data->stt_minlimit != val) { + amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, SET_CMD, val, NULL); + dev_dbg(dev->dev, "update STT_MIN: %u\n", val); + dev->prev_data->stt_minlimit = val; + } + break; + + case PMF_POLICY_STT_SKINTEMP_APU: + if (dev->prev_data->stt_skintemp_apu != val) { + amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, SET_CMD, + fixp_q88_fromint(val), NULL); + dev_dbg(dev->dev, "update STT_SKINTEMP_APU: %u\n", val); + dev->prev_data->stt_skintemp_apu = val; + } + break; + + case PMF_POLICY_STT_SKINTEMP_HS2: + if (dev->prev_data->stt_skintemp_hs2 != val) { + amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, SET_CMD, + fixp_q88_fromint(val), NULL); + dev_dbg(dev->dev, "update STT_SKINTEMP_HS2: %u\n", val); + dev->prev_data->stt_skintemp_hs2 = val; + } + break; + + case PMF_POLICY_P3T: + if (dev->prev_data->p3t_limit != val) { + amd_pmf_send_cmd(dev, SET_P3T, SET_CMD, val, NULL); + dev_dbg(dev->dev, "update P3T: %u\n", val); + dev->prev_data->p3t_limit = val; + } + break; + + case PMF_POLICY_PMF_PPT: + if (dev->prev_data->pmf_ppt != val) { + amd_pmf_send_cmd(dev, SET_PMF_PPT, SET_CMD, val, NULL); + dev_dbg(dev->dev, "update PMF PPT: %u\n", val); + dev->prev_data->pmf_ppt = val; + } + break; + + case PMF_POLICY_PMF_PPT_APU_ONLY: + if (dev->prev_data->pmf_ppt_apu_only != val) { + amd_pmf_send_cmd(dev, SET_PMF_PPT_APU_ONLY, SET_CMD, val, NULL); + dev_dbg(dev->dev, "update PMF PPT APU ONLY: %u\n", val); + dev->prev_data->pmf_ppt_apu_only = val; + } + break; + + case PMF_POLICY_SYSTEM_STATE: + switch (val) { + case 0: + amd_pmf_update_uevents(dev, KEY_SLEEP); + break; + case 1: + amd_pmf_update_uevents(dev, KEY_SUSPEND); + break; + case 2: + amd_pmf_update_uevents(dev, KEY_SCREENLOCK); + break; + default: + dev_err(dev->dev, "Invalid PMF policy system state: %d\n", val); + } + + dev_dbg(dev->dev, "update SYSTEM_STATE: %s\n", + amd_pmf_uevent_as_str(val)); + break; + + case PMF_POLICY_BIOS_OUTPUT_1: + case PMF_POLICY_BIOS_OUTPUT_2: + case PMF_POLICY_BIOS_OUTPUT_3: + case PMF_POLICY_BIOS_OUTPUT_4: + case PMF_POLICY_BIOS_OUTPUT_5: + case PMF_POLICY_BIOS_OUTPUT_6: + case PMF_POLICY_BIOS_OUTPUT_7: + case PMF_POLICY_BIOS_OUTPUT_8: + case PMF_POLICY_BIOS_OUTPUT_9: + case PMF_POLICY_BIOS_OUTPUT_10: + amd_pmf_update_bios_output(dev, action); + break; + } + } +} + +int amd_pmf_invoke_cmd_enact(struct amd_pmf_dev *dev) +{ + struct ta_pmf_shared_memory *ta_sm = NULL; + struct ta_pmf_enact_result *out = NULL; + struct ta_pmf_enact_table *in = NULL; + struct tee_param param[MAX_TEE_PARAM]; + struct tee_ioctl_invoke_arg arg; + int ret = 0; + + if (!dev->tee_ctx) + return -ENODEV; + + memset(dev->shbuf, 0, dev->policy_sz); + ta_sm = dev->shbuf; + out = &ta_sm->pmf_output.policy_apply_table; + in = &ta_sm->pmf_input.enact_table; + + memset(ta_sm, 0, sizeof(*ta_sm)); + ta_sm->command_id = TA_PMF_COMMAND_POLICY_BUILDER_ENACT_POLICIES; + ta_sm->if_version = PMF_TA_IF_VERSION_MAJOR; + + amd_pmf_populate_ta_inputs(dev, in); + amd_pmf_prepare_args(dev, TA_PMF_COMMAND_POLICY_BUILDER_ENACT_POLICIES, &arg, param); + + ret = 
tee_client_invoke_func(dev->tee_ctx, &arg, param); + if (ret < 0 || arg.ret != 0) { + dev_err(dev->dev, "TEE enact cmd failed. err: %x, ret:%d\n", arg.ret, ret); + return ret; + } + + if (ta_sm->pmf_result == TA_PMF_TYPE_SUCCESS && out->actions_count) { + amd_pmf_dump_ta_inputs(dev, in); + dev_dbg(dev->dev, "action count:%u result:%x\n", out->actions_count, + ta_sm->pmf_result); + amd_pmf_apply_policies(dev, out); + } + + return 0; +} + +static int amd_pmf_invoke_cmd_init(struct amd_pmf_dev *dev) +{ + struct ta_pmf_shared_memory *ta_sm = NULL; + struct tee_param param[MAX_TEE_PARAM]; + struct ta_pmf_init_table *in = NULL; + struct tee_ioctl_invoke_arg arg; + int ret = 0; + + if (!dev->tee_ctx) { + dev_err(dev->dev, "Failed to get TEE context\n"); + return -ENODEV; + } + + dev_dbg(dev->dev, "Policy Binary size: %llu bytes\n", (unsigned long long)dev->policy_sz); + memset(dev->shbuf, 0, dev->policy_sz); + ta_sm = dev->shbuf; + in = &ta_sm->pmf_input.init_table; + + ta_sm->command_id = TA_PMF_COMMAND_POLICY_BUILDER_INITIALIZE; + ta_sm->if_version = PMF_TA_IF_VERSION_MAJOR; + + in->metadata_macrocheck = false; + in->sku_check = false; + in->validate = true; + in->frequency = pb_actions_ms; + in->policies_table.table_size = dev->policy_sz; + + memcpy(in->policies_table.table, dev->policy_buf, dev->policy_sz); + amd_pmf_prepare_args(dev, TA_PMF_COMMAND_POLICY_BUILDER_INITIALIZE, &arg, param); + + ret = tee_client_invoke_func(dev->tee_ctx, &arg, param); + if (ret < 0 || arg.ret != 0) { + dev_err(dev->dev, "Failed to invoke TEE init cmd. err: %x, ret:%d\n", arg.ret, ret); + return ret; + } + + return ta_sm->pmf_result; +} + +static void amd_pmf_invoke_cmd(struct work_struct *work) +{ + struct amd_pmf_dev *dev = container_of(work, struct amd_pmf_dev, pb_work.work); + + amd_pmf_invoke_cmd_enact(dev); + schedule_delayed_work(&dev->pb_work, msecs_to_jiffies(pb_actions_ms)); +} + +static int amd_pmf_start_policy_engine(struct amd_pmf_dev *dev) +{ + struct cookie_header *header; + int res; + + if (dev->policy_sz < POLICY_COOKIE_OFFSET + sizeof(*header)) + return -EINVAL; + + header = (struct cookie_header *)(dev->policy_buf + POLICY_COOKIE_OFFSET); + + if (header->sign != POLICY_SIGN_COOKIE || !header->length) { + dev_dbg(dev->dev, "cookie doesn't match\n"); + return -EINVAL; + } + + if (dev->policy_sz < header->length + 512) + return -EINVAL; + + /* Update the actual length */ + dev->policy_sz = header->length + 512; + res = amd_pmf_invoke_cmd_init(dev); + if (res == TA_PMF_TYPE_SUCCESS) { + /* Now its safe to announce that smart pc is enabled */ + dev->smart_pc_enabled = true; + /* + * Start collecting the data from TA FW after a small delay + * or else, we might end up getting stale values. 
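+     * The work is therefore first scheduled pb_actions_ms * 3 from now.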
+ */ + schedule_delayed_work(&dev->pb_work, msecs_to_jiffies(pb_actions_ms * 3)); + } else { + dev_dbg(dev->dev, "ta invoke cmd init failed err: %x\n", res); + dev->smart_pc_enabled = false; + return res; + } + + return 0; +} + +static inline bool amd_pmf_pb_valid(struct amd_pmf_dev *dev) +{ + return memchr_inv(dev->policy_buf, 0xff, dev->policy_sz); +} + +#ifdef CONFIG_AMD_PMF_DEBUG +static void amd_pmf_hex_dump_pb(struct amd_pmf_dev *dev) +{ + print_hex_dump_debug("(pb): ", DUMP_PREFIX_OFFSET, 16, 1, dev->policy_buf, + dev->policy_sz, false); +} + +static ssize_t amd_pmf_get_pb_data(struct file *filp, const char __user *buf, + size_t length, loff_t *pos) +{ + struct amd_pmf_dev *dev = filp->private_data; + unsigned char *new_policy_buf; + int ret; + + /* Policy binary size cannot exceed POLICY_BUF_MAX_SZ */ + if (length > POLICY_BUF_MAX_SZ || length == 0) + return -EINVAL; + + /* re-alloc to the new buffer length of the policy binary */ + new_policy_buf = devm_kzalloc(dev->dev, length, GFP_KERNEL); + if (!new_policy_buf) + return -ENOMEM; + + if (copy_from_user(new_policy_buf, buf, length)) { + devm_kfree(dev->dev, new_policy_buf); + return -EFAULT; + } + + devm_kfree(dev->dev, dev->policy_buf); + dev->policy_buf = new_policy_buf; + dev->policy_sz = length; + + if (!amd_pmf_pb_valid(dev)) + return -EINVAL; + + amd_pmf_hex_dump_pb(dev); + ret = amd_pmf_start_policy_engine(dev); + if (ret < 0) + return ret; + + return length; +} + +static const struct file_operations pb_fops = { + .write = amd_pmf_get_pb_data, + .open = simple_open, +}; + +static void amd_pmf_open_pb(struct amd_pmf_dev *dev, struct dentry *debugfs_root) +{ + dev->esbin = debugfs_create_dir("pb", debugfs_root); + debugfs_create_file("update_policy", 0644, dev->esbin, dev, &pb_fops); +} + +static void amd_pmf_remove_pb(struct amd_pmf_dev *dev) +{ + debugfs_remove_recursive(dev->esbin); +} +#else +static void amd_pmf_open_pb(struct amd_pmf_dev *dev, struct dentry *debugfs_root) {} +static void amd_pmf_remove_pb(struct amd_pmf_dev *dev) {} +static void amd_pmf_hex_dump_pb(struct amd_pmf_dev *dev) {} +#endif + +static int amd_pmf_amdtee_ta_match(struct tee_ioctl_version_data *ver, const void *data) +{ + return ver->impl_id == TEE_IMPL_ID_AMDTEE; +} + +static int amd_pmf_ta_open_session(struct tee_context *ctx, u32 *id, const uuid_t *uuid) +{ + struct tee_ioctl_open_session_arg sess_arg = {}; + int rc; + + export_uuid(sess_arg.uuid, uuid); + sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC; + sess_arg.num_params = 0; + + rc = tee_client_open_session(ctx, &sess_arg, NULL); + if (rc < 0 || sess_arg.ret != 0) { + pr_err("Failed to open TEE session err:%#x, rc:%d\n", sess_arg.ret, rc); + return rc ?: -EINVAL; + } + + *id = sess_arg.session; + + return 0; +} + +static int amd_pmf_register_input_device(struct amd_pmf_dev *dev) +{ + int err; + + dev->pmf_idev = devm_input_allocate_device(dev->dev); + if (!dev->pmf_idev) + return -ENOMEM; + + dev->pmf_idev->name = "PMF-TA output events"; + dev->pmf_idev->phys = "amd-pmf/input0"; + + input_set_capability(dev->pmf_idev, EV_KEY, KEY_SLEEP); + input_set_capability(dev->pmf_idev, EV_KEY, KEY_SCREENLOCK); + input_set_capability(dev->pmf_idev, EV_KEY, KEY_SUSPEND); + + err = input_register_device(dev->pmf_idev); + if (err) { + dev_err(dev->dev, "Failed to register input device: %d\n", err); + return err; + } + + return 0; +} + +static int amd_pmf_tee_init(struct amd_pmf_dev *dev, const uuid_t *uuid) +{ + u32 size; + int ret; + + dev->tee_ctx = tee_client_open_context(NULL, amd_pmf_amdtee_ta_match, 
NULL, NULL); + if (IS_ERR(dev->tee_ctx)) { + dev_err(dev->dev, "Failed to open TEE context\n"); + ret = PTR_ERR(dev->tee_ctx); + dev->tee_ctx = NULL; + return ret; + } + + ret = amd_pmf_ta_open_session(dev->tee_ctx, &dev->session_id, uuid); + if (ret) { + dev_err(dev->dev, "Failed to open TA session (%d)\n", ret); + ret = -EINVAL; + goto out_ctx; + } + + size = sizeof(struct ta_pmf_shared_memory) + dev->policy_sz; + dev->fw_shm_pool = tee_shm_alloc_kernel_buf(dev->tee_ctx, size); + if (IS_ERR(dev->fw_shm_pool)) { + dev_err(dev->dev, "Failed to alloc TEE shared memory\n"); + ret = PTR_ERR(dev->fw_shm_pool); + goto out_sess; + } + + dev->shbuf = tee_shm_get_va(dev->fw_shm_pool, 0); + if (IS_ERR(dev->shbuf)) { + dev_err(dev->dev, "Failed to get TEE virtual address\n"); + ret = PTR_ERR(dev->shbuf); + goto out_shm; + } + dev_dbg(dev->dev, "TEE init done\n"); + + return 0; + +out_shm: + tee_shm_free(dev->fw_shm_pool); +out_sess: + tee_client_close_session(dev->tee_ctx, dev->session_id); +out_ctx: + tee_client_close_context(dev->tee_ctx); + + return ret; +} + +static void amd_pmf_tee_deinit(struct amd_pmf_dev *dev) +{ + if (!dev->tee_ctx) + return; + tee_shm_free(dev->fw_shm_pool); + tee_client_close_session(dev->tee_ctx, dev->session_id); + tee_client_close_context(dev->tee_ctx); + dev->tee_ctx = NULL; +} + +int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev) +{ + bool status; + int ret, i; + + ret = apmf_check_smart_pc(dev); + if (ret) { + /* + * Lets not return from here if Smart PC bit is not advertised in + * the BIOS. This way, there will be some amount of power savings + * to the user with static slider (if enabled). + */ + dev_info(dev->dev, "PMF Smart PC not advertised in BIOS!:%d\n", ret); + return -ENODEV; + } + + INIT_DELAYED_WORK(&dev->pb_work, amd_pmf_invoke_cmd); + + ret = amd_pmf_set_dram_addr(dev, true); + if (ret) + return ret; + + dev->policy_base = devm_ioremap_resource(dev->dev, dev->res); + if (IS_ERR(dev->policy_base)) + return PTR_ERR(dev->policy_base); + + dev->policy_buf = devm_kzalloc(dev->dev, dev->policy_sz, GFP_KERNEL); + if (!dev->policy_buf) + return -ENOMEM; + + memcpy_fromio(dev->policy_buf, dev->policy_base, dev->policy_sz); + + if (!amd_pmf_pb_valid(dev)) { + dev_info(dev->dev, "No Smart PC policy present\n"); + return -EINVAL; + } + + amd_pmf_hex_dump_pb(dev); + + dev->prev_data = devm_kzalloc(dev->dev, sizeof(*dev->prev_data), GFP_KERNEL); + if (!dev->prev_data) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(amd_pmf_ta_uuid); i++) { + ret = amd_pmf_tee_init(dev, &amd_pmf_ta_uuid[i]); + if (ret) + return ret; + + ret = amd_pmf_start_policy_engine(dev); + dev_dbg(dev->dev, "start policy engine ret: %d\n", ret); + status = ret == TA_PMF_TYPE_SUCCESS; + if (status) { + dev->cb_flag = true; + break; + } + amd_pmf_tee_deinit(dev); + } + + if (!status && !pb_side_load) { + ret = -EINVAL; + goto err; + } + + if (pb_side_load) + amd_pmf_open_pb(dev, dev->dbgfs_dir); + + ret = amd_pmf_register_input_device(dev); + if (ret) + goto err; + + return 0; + +err: + amd_pmf_deinit_smart_pc(dev); + + return ret; +} + +void amd_pmf_deinit_smart_pc(struct amd_pmf_dev *dev) +{ + if (dev->pmf_idev) + input_unregister_device(dev->pmf_idev); + + if (pb_side_load && dev->esbin) + amd_pmf_remove_pb(dev); + + cancel_delayed_work_sync(&dev->pb_work); + amd_pmf_tee_deinit(dev); +} diff --git a/drivers/platform/x86/amd/wbrf.c b/drivers/platform/x86/amd/wbrf.c new file mode 100644 index 000000000000..dd197b3aebe0 --- /dev/null +++ b/drivers/platform/x86/amd/wbrf.c @@ -0,0 +1,317 @@ +// 
SPDX-License-Identifier: GPL-2.0 +/* + * Wifi Frequency Band Manage Interface + * Copyright (C) 2023 Advanced Micro Devices + */ + +#include <linux/acpi.h> +#include <linux/acpi_amd_wbrf.h> + +/* + * Functions bit vector for WBRF method + * + * Bit 0: WBRF supported. + * Bit 1: Function 1 (Add / Remove frequency) is supported. + * Bit 2: Function 2 (Get frequency list) is supported. + */ +#define WBRF_ENABLED 0x0 +#define WBRF_RECORD 0x1 +#define WBRF_RETRIEVE 0x2 + +#define WBRF_REVISION 0x1 + +/* + * The data structure used for WBRF_RETRIEVE is not naturally aligned. + * And unfortunately the design has been settled down. + */ +struct amd_wbrf_ranges_out { + u32 num_of_ranges; + struct freq_band_range band_list[MAX_NUM_OF_WBRF_RANGES]; +} __packed; + +static const guid_t wifi_acpi_dsm_guid = + GUID_INIT(0x7b7656cf, 0xdc3d, 0x4c1c, + 0x83, 0xe9, 0x66, 0xe7, 0x21, 0xde, 0x30, 0x70); + +/* + * Used to notify consumer (amdgpu driver currently) about + * the wifi frequency is change. + */ +static BLOCKING_NOTIFIER_HEAD(wbrf_chain_head); + +static int wbrf_record(struct acpi_device *adev, uint8_t action, struct wbrf_ranges_in_out *in) +{ + union acpi_object argv4; + union acpi_object *tmp; + union acpi_object *obj; + u32 num_of_ranges = 0; + u32 num_of_elements; + u32 arg_idx = 0; + int ret; + u32 i; + + if (!in) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(in->band_list); i++) { + if (in->band_list[i].start && in->band_list[i].end) + num_of_ranges++; + } + + /* + * The num_of_ranges value in the "in" object supplied by + * the caller is required to be equal to the number of + * entries in the band_list array in there. + */ + if (num_of_ranges != in->num_of_ranges) + return -EINVAL; + + /* + * Every input frequency band comes with two end points(start/end) + * and each is accounted as an element. Meanwhile the range count + * and action type are accounted as an element each. + * So, the total element count = 2 * num_of_ranges + 1 + 1. + */ + num_of_elements = 2 * num_of_ranges + 2; + + tmp = kcalloc(num_of_elements, sizeof(*tmp), GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + argv4.package.type = ACPI_TYPE_PACKAGE; + argv4.package.count = num_of_elements; + argv4.package.elements = tmp; + + /* save the number of ranges*/ + tmp[0].integer.type = ACPI_TYPE_INTEGER; + tmp[0].integer.value = num_of_ranges; + + /* save the action(WBRF_RECORD_ADD/REMOVE/RETRIEVE) */ + tmp[1].integer.type = ACPI_TYPE_INTEGER; + tmp[1].integer.value = action; + + arg_idx = 2; + for (i = 0; i < ARRAY_SIZE(in->band_list); i++) { + if (!in->band_list[i].start || !in->band_list[i].end) + continue; + + tmp[arg_idx].integer.type = ACPI_TYPE_INTEGER; + tmp[arg_idx++].integer.value = in->band_list[i].start; + tmp[arg_idx].integer.type = ACPI_TYPE_INTEGER; + tmp[arg_idx++].integer.value = in->band_list[i].end; + } + + obj = acpi_evaluate_dsm(adev->handle, &wifi_acpi_dsm_guid, + WBRF_REVISION, WBRF_RECORD, &argv4); + + if (!obj) + return -EINVAL; + + if (obj->type != ACPI_TYPE_INTEGER) { + ret = -EINVAL; + goto out; + } + + ret = obj->integer.value; + if (ret) + ret = -EINVAL; + +out: + ACPI_FREE(obj); + kfree(tmp); + + return ret; +} + +/** + * acpi_amd_wbrf_add_remove - add or remove the frequency band the device is using + * + * @dev: device pointer + * @action: remove or add the frequency band into bios + * @in: input structure containing the frequency band the device is using + * + * Broadcast to other consumers the frequency band the device starts + * to use. 
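 * Callers pass WBRF_RECORD_ADD or WBRF_RECORD_REMOVE in @action.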
Underneath the surface the information is cached into an + * internal buffer first. Then a notification is sent to all those + * registered consumers. So then they can retrieve that buffer to + * know the latest active frequency bands. Consumers that haven't + * yet been registered can retrieve the information from the cache + * when they register. + * + * Return: + * 0 for success add/remove wifi frequency band. + * Returns a negative error code for failure. + */ +int acpi_amd_wbrf_add_remove(struct device *dev, uint8_t action, struct wbrf_ranges_in_out *in) +{ + struct acpi_device *adev; + int ret; + + adev = ACPI_COMPANION(dev); + if (!adev) + return -ENODEV; + + ret = wbrf_record(adev, action, in); + if (ret) + return ret; + + blocking_notifier_call_chain(&wbrf_chain_head, WBRF_CHANGED, NULL); + + return 0; +} +EXPORT_SYMBOL_GPL(acpi_amd_wbrf_add_remove); + +/** + * acpi_amd_wbrf_supported_producer - determine if the WBRF can be enabled + * for the device as a producer + * + * @dev: device pointer + * + * Check if the platform equipped with necessary implementations to + * support WBRF for the device as a producer. + * + * Return: + * true if WBRF is supported, otherwise returns false + */ +bool acpi_amd_wbrf_supported_producer(struct device *dev) +{ + struct acpi_device *adev; + + adev = ACPI_COMPANION(dev); + if (!adev) + return false; + + return acpi_check_dsm(adev->handle, &wifi_acpi_dsm_guid, + WBRF_REVISION, BIT(WBRF_RECORD)); +} +EXPORT_SYMBOL_GPL(acpi_amd_wbrf_supported_producer); + +/** + * acpi_amd_wbrf_supported_consumer - determine if the WBRF can be enabled + * for the device as a consumer + * + * @dev: device pointer + * + * Determine if the platform equipped with necessary implementations to + * support WBRF for the device as a consumer. + * + * Return: + * true if WBRF is supported, otherwise returns false. + */ +bool acpi_amd_wbrf_supported_consumer(struct device *dev) +{ + struct acpi_device *adev; + + adev = ACPI_COMPANION(dev); + if (!adev) + return false; + + return acpi_check_dsm(adev->handle, &wifi_acpi_dsm_guid, + WBRF_REVISION, BIT(WBRF_RETRIEVE)); +} +EXPORT_SYMBOL_GPL(acpi_amd_wbrf_supported_consumer); + +/** + * amd_wbrf_retrieve_freq_band - retrieve current active frequency bands + * + * @dev: device pointer + * @out: output structure containing all the active frequency bands + * + * Retrieve the current active frequency bands which were broadcasted + * by other producers. The consumer who calls this API should take + * proper actions if any of the frequency band may cause RFI with its + * own frequency band used. + * + * Return: + * 0 for getting wifi freq band successfully. + * Returns a negative error code for failure. + */ +int amd_wbrf_retrieve_freq_band(struct device *dev, struct wbrf_ranges_in_out *out) +{ + struct amd_wbrf_ranges_out acpi_out = {0}; + struct acpi_device *adev; + union acpi_object *obj; + union acpi_object param; + int ret = 0; + + adev = ACPI_COMPANION(dev); + if (!adev) + return -ENODEV; + + param.type = ACPI_TYPE_STRING; + param.string.length = 0; + param.string.pointer = NULL; + + obj = acpi_evaluate_dsm(adev->handle, &wifi_acpi_dsm_guid, + WBRF_REVISION, WBRF_RETRIEVE, ¶m); + if (!obj) + return -EINVAL; + + /* + * The return buffer is with variable length and the format below: + * number_of_entries(1 DWORD): Number of entries + * start_freq of 1st entry(1 QWORD): Start frequency of the 1st entry + * end_freq of 1st entry(1 QWORD): End frequency of the 1st entry + * ... + * ... 
+ * start_freq of the last entry(1 QWORD)
+ * end_freq of the last entry(1 QWORD)
+ *
+ * Thus the buffer length is determined by the number of entries.
+ * - For the zero-entry scenario, the buffer length will be 4 bytes.
+ * - For the one-entry scenario, the buffer length will be 20 bytes.
+ */
+    if (obj->buffer.length > sizeof(acpi_out) || obj->buffer.length < 4) {
+        dev_err(dev, "Wrong sized WBRF information\n");
+        ret = -EINVAL;
+        goto out;
+    }
+    memcpy(&acpi_out, obj->buffer.pointer, obj->buffer.length);
+
+    out->num_of_ranges = acpi_out.num_of_ranges;
+    memcpy(out->band_list, acpi_out.band_list, sizeof(acpi_out.band_list));
+
+out:
+    ACPI_FREE(obj);
+    return ret;
+}
+EXPORT_SYMBOL_GPL(amd_wbrf_retrieve_freq_band);
+
+/**
+ * amd_wbrf_register_notifier - register for notifications of frequency
+ * band update
+ *
+ * @nb: driver notifier block
+ *
+ * The consumer should register itself via this API so that it can get
+ * notified on the frequency band updates from other producers.
+ *
+ * Return:
+ * 0 for registering a consumer driver successfully.
+ * Returns a negative error code for failure.
+ */
+int amd_wbrf_register_notifier(struct notifier_block *nb)
+{
+    return blocking_notifier_chain_register(&wbrf_chain_head, nb);
+}
+EXPORT_SYMBOL_GPL(amd_wbrf_register_notifier);
+
+/**
+ * amd_wbrf_unregister_notifier - unregister for notifications of
+ * frequency band update
+ *
+ * @nb: driver notifier block
+ *
+ * The consumer should call this API when it is no longer interested in
+ * the frequency band updates from other producers. Usually, this should
+ * be performed during driver cleanup.
+ *
+ * Return:
+ * 0 for unregistering a consumer driver.
+ * Returns a negative error code for failure.
+ */
+int amd_wbrf_unregister_notifier(struct notifier_block *nb)
+{
+    return blocking_notifier_chain_unregister(&wbrf_chain_head, nb);
+}
+EXPORT_SYMBOL_GPL(amd_wbrf_unregister_notifier);
diff --git a/drivers/platform/x86/amd/x3d_vcache.c b/drivers/platform/x86/amd/x3d_vcache.c
new file mode 100644
index 000000000000..0f6d3c54d879
--- /dev/null
+++ b/drivers/platform/x86/amd/x3d_vcache.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD 3D V-Cache Performance Optimizer Driver
+ *
+ * Copyright (c) 2024, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ * + * Authors: Basavaraj Natikar <Basavaraj.Natikar@amd.com> + * Perry Yuan <perry.yuan@amd.com> + * Mario Limonciello <mario.limonciello@amd.com> + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/acpi.h> +#include <linux/array_size.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/pm.h> +#include <linux/sysfs.h> +#include <linux/uuid.h> + +static char *x3d_mode = "frequency"; +module_param(x3d_mode, charp, 0); +MODULE_PARM_DESC(x3d_mode, "Initial 3D-VCache mode; 'frequency' (default) or 'cache'"); + +#define DSM_REVISION_ID 0 +#define DSM_SET_X3D_MODE 1 + +static guid_t x3d_guid = GUID_INIT(0xdff8e55f, 0xbcfd, 0x46fb, 0xba, 0x0a, + 0xef, 0xd0, 0x45, 0x0f, 0x34, 0xee); + +enum amd_x3d_mode_type { + MODE_INDEX_FREQ, + MODE_INDEX_CACHE, +}; + +static const char * const amd_x3d_mode_strings[] = { + [MODE_INDEX_FREQ] = "frequency", + [MODE_INDEX_CACHE] = "cache", +}; + +struct amd_x3d_dev { + struct device *dev; + acpi_handle ahandle; + /* To protect x3d mode setting */ + struct mutex lock; + enum amd_x3d_mode_type curr_mode; +}; + +static int amd_x3d_get_mode(struct amd_x3d_dev *data) +{ + guard(mutex)(&data->lock); + + return data->curr_mode; +} + +static int amd_x3d_mode_switch(struct amd_x3d_dev *data, int new_state) +{ + union acpi_object *out, argv; + + guard(mutex)(&data->lock); + argv.type = ACPI_TYPE_INTEGER; + argv.integer.value = new_state; + + out = acpi_evaluate_dsm(data->ahandle, &x3d_guid, DSM_REVISION_ID, + DSM_SET_X3D_MODE, &argv); + if (!out) { + dev_err(data->dev, "failed to evaluate _DSM\n"); + return -EINVAL; + } + + data->curr_mode = new_state; + + kfree(out); + + return 0; +} + +static ssize_t amd_x3d_mode_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct amd_x3d_dev *data = dev_get_drvdata(dev); + int ret; + + ret = sysfs_match_string(amd_x3d_mode_strings, buf); + if (ret < 0) + return ret; + + ret = amd_x3d_mode_switch(data, ret); + if (ret < 0) + return ret; + + return count; +} + +static ssize_t amd_x3d_mode_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct amd_x3d_dev *data = dev_get_drvdata(dev); + int mode = amd_x3d_get_mode(data); + + return sysfs_emit(buf, "%s\n", amd_x3d_mode_strings[mode]); +} +static DEVICE_ATTR_RW(amd_x3d_mode); + +static struct attribute *amd_x3d_attrs[] = { + &dev_attr_amd_x3d_mode.attr, + NULL +}; +ATTRIBUTE_GROUPS(amd_x3d); + +static int amd_x3d_resume_handler(struct device *dev) +{ + struct amd_x3d_dev *data = dev_get_drvdata(dev); + int ret = amd_x3d_get_mode(data); + + return amd_x3d_mode_switch(data, ret); +} + +static DEFINE_SIMPLE_DEV_PM_OPS(amd_x3d_pm, NULL, amd_x3d_resume_handler); + +static const struct acpi_device_id amd_x3d_acpi_ids[] = { + {"AMDI0101"}, + { }, +}; +MODULE_DEVICE_TABLE(acpi, amd_x3d_acpi_ids); + +static int amd_x3d_probe(struct platform_device *pdev) +{ + struct amd_x3d_dev *data; + acpi_handle handle; + int ret; + + handle = ACPI_HANDLE(&pdev->dev); + if (!handle) + return -ENODEV; + + if (!acpi_check_dsm(handle, &x3d_guid, DSM_REVISION_ID, BIT(DSM_SET_X3D_MODE))) + return -ENODEV; + + data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->dev = &pdev->dev; + + ret = devm_mutex_init(data->dev, &data->lock); + if (ret) + return ret; + + data->ahandle = handle; + platform_set_drvdata(pdev, data); + + ret = match_string(amd_x3d_mode_strings, 
ARRAY_SIZE(amd_x3d_mode_strings), x3d_mode);
+    if (ret < 0)
+        return dev_err_probe(&pdev->dev, -EINVAL, "invalid mode %s\n", x3d_mode);
+
+    return amd_x3d_mode_switch(data, ret);
+}
+
+static struct platform_driver amd_3d_vcache_driver = {
+    .driver = {
+        .name = "amd_x3d_vcache",
+        .dev_groups = amd_x3d_groups,
+        .acpi_match_table = amd_x3d_acpi_ids,
+        .pm = pm_sleep_ptr(&amd_x3d_pm),
+    },
+    .probe = amd_x3d_probe,
+};
+module_platform_driver(amd_3d_vcache_driver);
+
+MODULE_DESCRIPTION("AMD 3D V-Cache Performance Optimizer Driver");
+MODULE_LICENSE("GPL");
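
As a usage reference (not part of this series): a minimal sketch of a producer and a consumer of the WBRF interface exported by wbrf.c above. The example_* names are hypothetical, the 2.4 GHz band values are placeholders, field layouts follow <linux/acpi_amd_wbrf.h>, and error handling is trimmed for brevity.

#include <linux/acpi_amd_wbrf.h>
#include <linux/container_of.h>
#include <linux/device.h>
#include <linux/notifier.h>

/* Producer side (e.g. a Wi-Fi driver): advertise one in-use band */
static int example_wifi_report_band(struct device *dev)
{
    struct wbrf_ranges_in_out in = {
        .num_of_ranges = 1,
        .band_list = {
            { .start = 2400000000ULL, .end = 2500000000ULL },
        },
    };

    if (!acpi_amd_wbrf_supported_producer(dev))
        return -ENODEV;

    return acpi_amd_wbrf_add_remove(dev, WBRF_RECORD_ADD, &in);
}

/* Consumer side (e.g. a GPU driver): re-read active bands when notified */
struct example_consumer {
    struct device *dev;
    struct notifier_block nb;
};

static int example_wbrf_notify(struct notifier_block *nb, unsigned long action, void *data)
{
    struct example_consumer *ec = container_of(nb, struct example_consumer, nb);
    struct wbrf_ranges_in_out out = {};

    if (amd_wbrf_retrieve_freq_band(ec->dev, &out))
        return NOTIFY_DONE;

    /* Reconfigure hardware against out.band_list[0 .. out.num_of_ranges - 1] */
    return NOTIFY_OK;
}

static int example_consumer_init(struct example_consumer *ec)
{
    if (!acpi_amd_wbrf_supported_consumer(ec->dev))
        return -ENODEV;

    ec->nb.notifier_call = example_wbrf_notify;
    return amd_wbrf_register_notifier(&ec->nb);
}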
