Diffstat (limited to 'drivers/platform/x86/intel')
87 files changed, 14561 insertions, 2301 deletions
diff --git a/drivers/platform/x86/intel/Kconfig b/drivers/platform/x86/intel/Kconfig
index d5a33473e838..2900407d6095 100644
--- a/drivers/platform/x86/intel/Kconfig
+++ b/drivers/platform/x86/intel/Kconfig
@@ -41,6 +41,19 @@ config INTEL_VBTN
 	  To compile this driver as a module, choose M here: the module will
 	  be called intel_vbtn.
 
+config INTEL_EHL_PSE_IO
+	tristate "Intel Elkhart Lake PSE I/O driver"
+	depends on PCI
+	select AUXILIARY_BUS
+	help
+	  Select this option to enable Intel Elkhart Lake PSE GPIO and Timed
+	  I/O support. This driver enumerates the PCI parent device and
+	  creates auxiliary child devices for these capabilities. The actual
+	  functionalities are provided by their respective auxiliary drivers.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called intel_ehl_pse_io.
+
 config INTEL_INT0002_VGPIO
 	tristate "Intel ACPI INT0002 Virtual GPIO driver"
 	depends on GPIOLIB && ACPI && PM_SLEEP
@@ -62,7 +75,7 @@ config INTEL_INT0002_VGPIO
 
 config INTEL_OAKTRAIL
 	tristate "Intel Oaktrail Platform Extras"
-	depends on ACPI
+	depends on ACPI_EC
 	depends on ACPI_VIDEO || ACPI_VIDEO=n
 	depends on RFKILL && BACKLIGHT_CLASS_DEVICE && ACPI
 	help
@@ -80,6 +93,17 @@ config INTEL_BXTWC_PMIC_TMU
 	  This driver enables the alarm wakeup functionality in the TMU unit
 	  of Whiskey Cove PMIC.
 
+config INTEL_BYTCRC_PWRSRC
+	tristate "Intel Bay Trail Crystal Cove power source driver"
+	depends on INTEL_SOC_PMIC
+	depends on POWER_SUPPLY
+	help
+	  This option adds a power source driver for Crystal Cove PMICs
+	  on Intel Bay Trail devices.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called intel_bytcrc_pwrsrc.
+
 config INTEL_CHTDC_TI_PWRBTN
 	tristate "Intel Cherry Trail Dollar Cove TI power button driver"
 	depends on INTEL_SOC_PMIC_CHTDC_TI
@@ -182,6 +206,30 @@ config INTEL_SMARTCONNECT
 	  This driver checks to determine whether the device has Intel Smart
 	  Connect enabled, and if so disables it.
 
+config INTEL_TPMI_POWER_DOMAINS
+	tristate
+
+config INTEL_TPMI
+	tristate "Intel Topology Aware Register and PM Capsule Interface (TPMI)"
+	depends on INTEL_VSEC
+	depends on X86_64
+	select INTEL_TPMI_POWER_DOMAINS
+	help
+	  The Intel Topology Aware Register and PM Capsule Interface (TPMI)
+	  provides an enumerable MMIO interface for power management features.
+	  This driver creates devices so that other PM feature drivers can
+	  be loaded for PM-specific feature operation.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called intel_vsec_tpmi.
+
+config INTEL_PLR_TPMI
+	tristate "Intel SoC TPMI Power Limit Reasons driver"
+	depends on INTEL_TPMI
+	help
+	  This driver provides the TPMI power limit reasons status information
+	  via debugfs files.
+
 config INTEL_TURBO_MAX_3
 	bool "Intel Turbo Boost Max Technology 3.0 enumeration driver"
 	depends on X86_64 && SCHED_MC_PRIO
diff --git a/drivers/platform/x86/intel/Makefile b/drivers/platform/x86/intel/Makefile
index 717933dd0cfd..138b13756158 100644
--- a/drivers/platform/x86/intel/Makefile
+++ b/drivers/platform/x86/intel/Makefile
@@ -17,40 +17,41 @@ obj-$(CONFIG_INTEL_UNCORE_FREQ_CONTROL)	+= uncore-frequency/
 
 # Intel input drivers
-intel-hid-y				:= hid.o
-obj-$(CONFIG_INTEL_HID_EVENT)		+= intel-hid.o
-intel-vbtn-y				:= vbtn.o
-obj-$(CONFIG_INTEL_VBTN)		+= intel-vbtn.o
+intel-target-$(CONFIG_INTEL_HID_EVENT)	+= hid.o
+intel-target-$(CONFIG_INTEL_VBTN)	+= vbtn.o
 
 # Intel miscellaneous drivers
-obj-$(CONFIG_INTEL_ISHTP_ECLITE)	+= ishtp_eclite.o
-intel_int0002_vgpio-y			:= int0002_vgpio.o
-obj-$(CONFIG_INTEL_INT0002_VGPIO)	+= intel_int0002_vgpio.o
-intel_oaktrail-y			:= oaktrail.o
-obj-$(CONFIG_INTEL_OAKTRAIL)		+= intel_oaktrail.o
-intel_sdsi-y				:= sdsi.o
-obj-$(CONFIG_INTEL_SDSI)		+= intel_sdsi.o
-intel_vsec-y				:= vsec.o
-obj-$(CONFIG_INTEL_VSEC)		+= intel_vsec.o
+intel-target-$(CONFIG_INTEL_EHL_PSE_IO)		+= ehl_pse_io.o
+intel-target-$(CONFIG_INTEL_INT0002_VGPIO)	+= int0002_vgpio.o
+intel-target-$(CONFIG_INTEL_ISHTP_ECLITE)	+= ishtp_eclite.o
+intel-target-$(CONFIG_INTEL_OAKTRAIL)		+= oaktrail.o
+intel-target-$(CONFIG_INTEL_SDSI)		+= sdsi.o
+intel-target-$(CONFIG_INTEL_VSEC)		+= vsec.o
 
 # Intel PMIC / PMC / P-Unit drivers
-intel_bxtwc_tmu-y			:= bxtwc_tmu.o
-obj-$(CONFIG_INTEL_BXTWC_PMIC_TMU)	+= intel_bxtwc_tmu.o
-intel_crystal_cove_charger-y		:= crystal_cove_charger.o
-obj-$(CONFIG_X86_ANDROID_TABLETS)	+= intel_crystal_cove_charger.o
-intel_chtdc_ti_pwrbtn-y			:= chtdc_ti_pwrbtn.o
-obj-$(CONFIG_INTEL_CHTDC_TI_PWRBTN)	+= intel_chtdc_ti_pwrbtn.o
-intel_chtwc_int33fe-y			:= chtwc_int33fe.o
-obj-$(CONFIG_INTEL_CHTWC_INT33FE)	+= intel_chtwc_int33fe.o
-intel_mrfld_pwrbtn-y			:= mrfld_pwrbtn.o
-obj-$(CONFIG_INTEL_MRFLD_PWRBTN)	+= intel_mrfld_pwrbtn.o
-intel_punit_ipc-y			:= punit_ipc.o
-obj-$(CONFIG_INTEL_PUNIT_IPC)		+= intel_punit_ipc.o
+intel-target-$(CONFIG_INTEL_BYTCRC_PWRSRC)	+= bytcrc_pwrsrc.o
+intel-target-$(CONFIG_INTEL_BXTWC_PMIC_TMU)	+= bxtwc_tmu.o
+intel-target-$(CONFIG_INTEL_CHTDC_TI_PWRBTN)	+= chtdc_ti_pwrbtn.o
+intel-target-$(CONFIG_INTEL_CHTWC_INT33FE)	+= chtwc_int33fe.o
+intel-target-$(CONFIG_X86_ANDROID_TABLETS)	+= crystal_cove_charger.o
+intel-target-$(CONFIG_INTEL_MRFLD_PWRBTN)	+= mrfld_pwrbtn.o
+intel-target-$(CONFIG_INTEL_PUNIT_IPC)		+= punit_ipc.o
+
+# TPMI drivers
+intel-target-$(CONFIG_INTEL_PLR_TPMI)		+= plr_tpmi.o
+intel-target-$(CONFIG_INTEL_TPMI_POWER_DOMAINS)	+= tpmi_power_domains.o
+intel-target-$(CONFIG_INTEL_TPMI)		+= vsec_tpmi.o
 
 # Intel Uncore drivers
-intel-rst-y				:= rst.o
-obj-$(CONFIG_INTEL_RST)			+= intel-rst.o
-intel-smartconnect-y			:= smartconnect.o
-obj-$(CONFIG_INTEL_SMARTCONNECT)	+= intel-smartconnect.o
-intel_turbo_max_3-y			:= turbo_max_3.o
-obj-$(CONFIG_INTEL_TURBO_MAX_3)		+= intel_turbo_max_3.o
+intel-target-$(CONFIG_INTEL_RST)		+= rst.o
+intel-target-$(CONFIG_INTEL_SMARTCONNECT)	+= smartconnect.o
+intel-target-$(CONFIG_INTEL_TURBO_MAX_3)	+= turbo_max_3.o
+
+# Add 'intel' prefix to each module listed in intel-target-*
+define INTEL_OBJ_TARGET
+intel-$(1)-y := $(1).o
+obj-$(2) += intel-$(1).o
+endef
+
+$(foreach target, $(basename $(intel-target-y)), $(eval $(call INTEL_OBJ_TARGET,$(target),y)))
+$(foreach target, $(basename $(intel-target-m)), $(eval $(call INTEL_OBJ_TARGET,$(target),m)))
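Several hunks below (bxtwc_tmu, chtdc_ti_pwrbtn, chtwc_int33fe, hid) drop the int return from platform driver .remove() callbacks. A minimal sketch of the resulting shape, using a hypothetical driver name:

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	/* acquire everything with devm_* so .remove() has little to do */
	return 0;
}

/*
 * The platform bus never acted on the old int return value, so the
 * callback now returns void: cleanup must either succeed or be logged.
 */
static void example_remove(struct platform_device *pdev)
{
}

static struct platform_driver example_driver = {
	.probe = example_probe,
	.remove = example_remove,
	.driver = {
		.name = "example",	/* hypothetical */
	},
};
module_platform_driver(example_driver);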
diff --git a/drivers/platform/x86/intel/bxtwc_tmu.c b/drivers/platform/x86/intel/bxtwc_tmu.c
index 7ccf583649e6..99437b2ccc25 100644
--- a/drivers/platform/x86/intel/bxtwc_tmu.c
+++ b/drivers/platform/x86/intel/bxtwc_tmu.c
@@ -48,9 +48,8 @@ static irqreturn_t bxt_wcove_tmu_irq_handler(int irq, void *data)
 static int bxt_wcove_tmu_probe(struct platform_device *pdev)
 {
 	struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
-	struct regmap_irq_chip_data *regmap_irq_chip;
 	struct wcove_tmu *wctmu;
-	int ret, virq, irq;
+	int ret;
 
 	wctmu = devm_kzalloc(&pdev->dev, sizeof(*wctmu), GFP_KERNEL);
 	if (!wctmu)
@@ -59,27 +58,18 @@ static int bxt_wcove_tmu_probe(struct platform_device *pdev)
 	wctmu->dev = &pdev->dev;
 	wctmu->regmap = pmic->regmap;
 
-	irq = platform_get_irq(pdev, 0);
-	if (irq < 0)
-		return irq;
+	wctmu->irq = platform_get_irq(pdev, 0);
+	if (wctmu->irq < 0)
+		return wctmu->irq;
 
-	regmap_irq_chip = pmic->irq_chip_data_tmu;
-	virq = regmap_irq_get_virq(regmap_irq_chip, irq);
-	if (virq < 0) {
-		dev_err(&pdev->dev,
-			"failed to get virtual interrupt=%d\n", irq);
-		return virq;
-	}
-
-	ret = devm_request_threaded_irq(&pdev->dev, virq,
+	ret = devm_request_threaded_irq(&pdev->dev, wctmu->irq,
					NULL, bxt_wcove_tmu_irq_handler,
					IRQF_ONESHOT, "bxt_wcove_tmu", wctmu);
 	if (ret) {
 		dev_err(&pdev->dev, "request irq failed: %d,virq: %d\n",
-			ret, virq);
+			ret, wctmu->irq);
 		return ret;
 	}
-	wctmu->irq = virq;
 
 	/* Unmask TMU second level Wake & System alarm */
 	regmap_update_bits(wctmu->regmap, BXTWC_MTMUIRQ_REG,
@@ -89,7 +79,7 @@ static int bxt_wcove_tmu_probe(struct platform_device *pdev)
 	return 0;
 }
 
-static int bxt_wcove_tmu_remove(struct platform_device *pdev)
+static void bxt_wcove_tmu_remove(struct platform_device *pdev)
 {
 	struct wcove_tmu *wctmu = platform_get_drvdata(pdev);
 	unsigned int val;
@@ -101,7 +91,6 @@ static int bxt_wcove_tmu_remove(struct platform_device *pdev)
 	regmap_read(wctmu->regmap, BXTWC_MTMUIRQ_REG, &val);
 	regmap_write(wctmu->regmap, BXTWC_MTMUIRQ_REG,
 		     val | BXTWC_TMU_ALRM_MASK);
-	return 0;
 }
 
 #ifdef CONFIG_PM_SLEEP
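The bxtwc_tmu change above works because the PMIC core now hands the driver an already-mapped virtual IRQ, so platform_get_irq() can feed devm_request_threaded_irq() directly. A minimal sketch of that pattern (handler and names are illustrative):

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t example_irq_thread(int irq, void *data)
{
	/* runs in process context; the line stays masked meanwhile */
	return IRQ_HANDLED;
}

static int example_request_irq(struct platform_device *pdev, void *data)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	/* IRQF_ONESHOT is required when no hard-irq handler is given */
	return devm_request_threaded_irq(&pdev->dev, irq, NULL,
					 example_irq_thread, IRQF_ONESHOT,
					 "example", data);
}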
diff --git a/drivers/platform/x86/intel/bytcrc_pwrsrc.c b/drivers/platform/x86/intel/bytcrc_pwrsrc.c
new file mode 100644
index 000000000000..68ac040082df
--- /dev/null
+++ b/drivers/platform/x86/intel/bytcrc_pwrsrc.c
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Power-source driver for Bay Trail Crystal Cove PMIC
+ *
+ * Copyright (c) 2023 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Based on intel_crystalcove_pwrsrc.c from Android kernel sources, which is:
+ * Copyright (C) 2013 Intel Corporation
+ */
+
+#include <linux/array_size.h>
+#include <linux/bits.h>
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+#define CRYSTALCOVE_PWRSRC_IRQ		0x03
+#define CRYSTALCOVE_SPWRSRC_REG		0x1E
+#define CRYSTALCOVE_SPWRSRC_USB		BIT(0)
+#define CRYSTALCOVE_SPWRSRC_DC		BIT(1)
+#define CRYSTALCOVE_SPWRSRC_BATTERY	BIT(2)
+#define CRYSTALCOVE_RESETSRC0_REG	0x20
+#define CRYSTALCOVE_RESETSRC1_REG	0x21
+#define CRYSTALCOVE_WAKESRC_REG		0x22
+
+struct crc_pwrsrc_data {
+	struct regmap *regmap;
+	struct dentry *debug_dentry;
+	struct power_supply *psy;
+	unsigned int resetsrc0;
+	unsigned int resetsrc1;
+	unsigned int wakesrc;
+};
+
+static const char * const pwrsrc_pwrsrc_info[] = {
+	/* bit 0 */ "USB",
+	/* bit 1 */ "DC in",
+	/* bit 2 */ "Battery",
+	NULL,
+};
+
+static const char * const pwrsrc_resetsrc0_info[] = {
+	/* bit 0 */ "SOC reporting a thermal event",
+	/* bit 1 */ "critical PMIC temperature",
+	/* bit 2 */ "critical system temperature",
+	/* bit 3 */ "critical battery temperature",
+	/* bit 4 */ "VSYS under voltage",
+	/* bit 5 */ "VSYS over voltage",
+	/* bit 6 */ "battery removal",
+	NULL,
+};
+
+static const char * const pwrsrc_resetsrc1_info[] = {
+	/* bit 0 */ "VCRIT threshold",
+	/* bit 1 */ "BATID reporting battery removal",
+	/* bit 2 */ "user pressing the power button",
+	NULL,
+};
+
+static const char * const pwrsrc_wakesrc_info[] = {
+	/* bit 0 */ "user pressing the power button",
+	/* bit 1 */ "a battery insertion",
+	/* bit 2 */ "a USB charger insertion",
+	/* bit 3 */ "an adapter insertion",
+	NULL,
+};
+
+static void crc_pwrsrc_log(struct seq_file *seq, const char *prefix,
+			   const char * const *info, unsigned int reg_val)
+{
+	int i;
+
+	for (i = 0; info[i]; i++) {
+		if (reg_val & BIT(i))
+			seq_printf(seq, "%s by %s\n", prefix, info[i]);
+	}
+}
+
+static int pwrsrc_show(struct seq_file *seq, void *unused)
+{
+	struct crc_pwrsrc_data *data = seq->private;
+	unsigned int reg_val;
+	int ret;
+
+	ret = regmap_read(data->regmap, CRYSTALCOVE_SPWRSRC_REG, &reg_val);
+	if (ret)
+		return ret;
+
+	crc_pwrsrc_log(seq, "System powered", pwrsrc_pwrsrc_info, reg_val);
+	return 0;
+}
+
+static int resetsrc_show(struct seq_file *seq, void *unused)
+{
+	struct crc_pwrsrc_data *data = seq->private;
+
+	crc_pwrsrc_log(seq, "Last shutdown caused", pwrsrc_resetsrc0_info, data->resetsrc0);
+	crc_pwrsrc_log(seq, "Last shutdown caused", pwrsrc_resetsrc1_info, data->resetsrc1);
+	return 0;
+}
+
+static int wakesrc_show(struct seq_file *seq, void *unused)
+{
+	struct crc_pwrsrc_data *data = seq->private;
+
+	crc_pwrsrc_log(seq, "Last wake caused", pwrsrc_wakesrc_info, data->wakesrc);
+	return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(pwrsrc);
+DEFINE_SHOW_ATTRIBUTE(resetsrc);
+DEFINE_SHOW_ATTRIBUTE(wakesrc);
+
+static int crc_pwrsrc_read_and_clear(struct crc_pwrsrc_data *data,
+				     unsigned int reg, unsigned int *val)
+{
+	int ret;
+
+	ret = regmap_read(data->regmap, reg, val);
+	if (ret)
+		return ret;
+
+	return regmap_write(data->regmap, reg, *val);
+}
+
+static irqreturn_t crc_pwrsrc_irq_handler(int irq, void *_data)
+{
+	struct crc_pwrsrc_data *data = _data;
+	unsigned int irq_mask;
+
+	if (regmap_read(data->regmap, CRYSTALCOVE_PWRSRC_IRQ, &irq_mask))
+		return IRQ_NONE;
+
+	regmap_write(data->regmap, CRYSTALCOVE_PWRSRC_IRQ, irq_mask);
+
+	power_supply_changed(data->psy);
+	return IRQ_HANDLED;
+}
+
+static int crc_pwrsrc_psy_get_property(struct power_supply *psy,
+				       enum power_supply_property psp,
+				       union power_supply_propval *val)
+{
+	struct crc_pwrsrc_data *data = power_supply_get_drvdata(psy);
+	unsigned int pwrsrc;
+	int ret;
+
+	if (psp != POWER_SUPPLY_PROP_ONLINE)
+		return -EINVAL;
+
+	ret = regmap_read(data->regmap, CRYSTALCOVE_SPWRSRC_REG, &pwrsrc);
+	if (ret)
+		return ret;
+
+	val->intval = !!(pwrsrc & (CRYSTALCOVE_SPWRSRC_USB |
+				   CRYSTALCOVE_SPWRSRC_DC));
+	return 0;
+}
+
+static const enum power_supply_property crc_pwrsrc_psy_props[] = {
+	POWER_SUPPLY_PROP_ONLINE,
+};
+
+static const struct power_supply_desc crc_pwrsrc_psy_desc = {
+	.name = "crystal_cove_pwrsrc",
+	.type = POWER_SUPPLY_TYPE_MAINS,
+	.properties = crc_pwrsrc_psy_props,
+	.num_properties = ARRAY_SIZE(crc_pwrsrc_psy_props),
+	.get_property = crc_pwrsrc_psy_get_property,
+};
+
+static int crc_pwrsrc_probe(struct platform_device *pdev)
+{
+	struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
+	struct device *dev = &pdev->dev;
+	struct crc_pwrsrc_data *data;
+	int irq, ret;
+
+	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->regmap = pmic->regmap;
+
+	/*
+	 * Read + clear resetsrc0/1 and wakesrc now, so that they get
+	 * cleared even if the debugfs interface is never used.
+	 *
+	 * Properly clearing the wakesrc is important: leaving bit 0 of it
+	 * set turns reboot into poweroff on some tablets.
+	 */
+	ret = crc_pwrsrc_read_and_clear(data, CRYSTALCOVE_RESETSRC0_REG, &data->resetsrc0);
+	if (ret)
+		return ret;
+
+	ret = crc_pwrsrc_read_and_clear(data, CRYSTALCOVE_RESETSRC1_REG, &data->resetsrc1);
+	if (ret)
+		return ret;
+
+	ret = crc_pwrsrc_read_and_clear(data, CRYSTALCOVE_WAKESRC_REG, &data->wakesrc);
+	if (ret)
+		return ret;
+
+	if (device_property_read_bool(dev->parent, "linux,register-pwrsrc-power_supply")) {
+		struct power_supply_config psy_cfg = { .drv_data = data };
+
+		irq = platform_get_irq(pdev, 0);
+		if (irq < 0)
+			return irq;
+
+		data->psy = devm_power_supply_register(dev, &crc_pwrsrc_psy_desc, &psy_cfg);
+		if (IS_ERR(data->psy))
+			return dev_err_probe(dev, PTR_ERR(data->psy), "registering power-supply\n");
+
+		ret = devm_request_threaded_irq(dev, irq, NULL,
+						crc_pwrsrc_irq_handler,
+						IRQF_ONESHOT, KBUILD_MODNAME, data);
+		if (ret)
+			return dev_err_probe(dev, ret, "requesting IRQ\n");
+	}
+
+	data->debug_dentry = debugfs_create_dir(KBUILD_MODNAME, NULL);
+	debugfs_create_file("pwrsrc", 0444, data->debug_dentry, data, &pwrsrc_fops);
+	debugfs_create_file("resetsrc", 0444, data->debug_dentry, data, &resetsrc_fops);
+	debugfs_create_file("wakesrc", 0444, data->debug_dentry, data, &wakesrc_fops);
+
+	platform_set_drvdata(pdev, data);
+	return 0;
+}
+
+static void crc_pwrsrc_remove(struct platform_device *pdev)
+{
+	struct crc_pwrsrc_data *data = platform_get_drvdata(pdev);
+
+	debugfs_remove_recursive(data->debug_dentry);
+}
+
+static struct platform_driver crc_pwrsrc_driver = {
+	.probe = crc_pwrsrc_probe,
+	.remove = crc_pwrsrc_remove,
+	.driver = {
+		.name = "crystal_cove_pwrsrc",
+	},
+};
+module_platform_driver(crc_pwrsrc_driver);
+
+MODULE_ALIAS("platform:crystal_cove_pwrsrc");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("Power-source driver for Bay Trail Crystal Cove PMIC");
+MODULE_LICENSE("GPL");
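The three debugfs files above lean on DEFINE_SHOW_ATTRIBUTE(), which wraps a single show() callback in the seq_file boilerplate and emits a matching <name>_fops. A self-contained sketch with illustrative names:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static char example_state[] = "ok";

static int example_show(struct seq_file *seq, void *unused)
{
	/* seq->private is the data pointer passed to debugfs_create_file() */
	seq_printf(seq, "%s\n", (char *)seq->private);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(example);	/* generates example_open() and example_fops */

static struct dentry *example_dir;

static int example_debugfs_init(void)
{
	example_dir = debugfs_create_dir("example", NULL);
	debugfs_create_file("state", 0444, example_dir, example_state,
			    &example_fops);
	return 0;
}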
diff --git a/drivers/platform/x86/intel/chtdc_ti_pwrbtn.c b/drivers/platform/x86/intel/chtdc_ti_pwrbtn.c
index 9606a994af22..53f01e198047 100644
--- a/drivers/platform/x86/intel/chtdc_ti_pwrbtn.c
+++ b/drivers/platform/x86/intel/chtdc_ti_pwrbtn.c
@@ -67,11 +67,10 @@ static int chtdc_ti_pwrbtn_probe(struct platform_device *pdev)
 	return 0;
 }
 
-static int chtdc_ti_pwrbtn_remove(struct platform_device *pdev)
+static void chtdc_ti_pwrbtn_remove(struct platform_device *pdev)
 {
 	dev_pm_clear_wake_irq(&pdev->dev);
 	device_init_wakeup(&pdev->dev, false);
-	return 0;
 }
 
 static const struct platform_device_id chtdc_ti_pwrbtn_id_table[] = {
diff --git a/drivers/platform/x86/intel/chtwc_int33fe.c b/drivers/platform/x86/intel/chtwc_int33fe.c
index 2c9a7d52be07..d183aa53c318 100644
--- a/drivers/platform/x86/intel/chtwc_int33fe.c
+++ b/drivers/platform/x86/intel/chtwc_int33fe.c
@@ -77,7 +77,7 @@ static const struct software_node max17047_node = {
  * software node.
  */
 static struct software_node_ref_args fusb302_mux_refs[] = {
-	{ .node = NULL },
+	SOFTWARE_NODE_REFERENCE(NULL),
 };
 
 static const struct property_entry fusb302_properties[] = {
@@ -136,7 +136,7 @@ static const struct software_node altmodes_node = {
 };
 
 static const struct property_entry dp_altmode_properties[] = {
-	PROPERTY_ENTRY_U32("svid", 0xff01),
+	PROPERTY_ENTRY_U16("svid", 0xff01),
 	PROPERTY_ENTRY_U32("vdo", 0x0c0086),
 	{ }
 };
@@ -190,11 +190,6 @@ static void cht_int33fe_remove_nodes(struct cht_int33fe_data *data)
 {
 	software_node_unregister_node_group(node_group);
 
-	if (fusb302_mux_refs[0].node) {
-		fwnode_handle_put(software_node_fwnode(fusb302_mux_refs[0].node));
-		fusb302_mux_refs[0].node = NULL;
-	}
-
 	if (data->dp) {
 		data->dp->secondary = NULL;
 		fwnode_handle_put(data->dp);
@@ -202,7 +197,15 @@ static void cht_int33fe_remove_nodes(struct cht_int33fe_data *data)
 	}
 }
 
-static int cht_int33fe_add_nodes(struct cht_int33fe_data *data)
+static void cht_int33fe_put_swnode(void *data)
+{
+	struct fwnode_handle *fwnode = data;
+
+	fwnode_handle_put(fwnode);
+	fusb302_mux_refs[0] = SOFTWARE_NODE_REFERENCE(NULL);
+}
+
+static int cht_int33fe_add_nodes(struct device *dev, struct cht_int33fe_data *data)
 {
 	const struct software_node *mux_ref_node;
 	int ret;
@@ -212,17 +215,25 @@ static int cht_int33fe_add_nodes(struct cht_int33fe_data *data)
 	 * until the mux driver has created software node for the mux device.
 	 * It means we depend on the mux driver. This function will return
 	 * -EPROBE_DEFER until the mux device is registered.
+	 *
+	 * FIXME: the relevant software node exists in intel-xhci-usb-role-switch
+	 * and - if exported - could be used to set up a static reference.
 	 */
 	mux_ref_node = software_node_find_by_name(NULL, "intel-xhci-usb-sw");
 	if (!mux_ref_node)
 		return -EPROBE_DEFER;
 
+	ret = devm_add_action_or_reset(dev, cht_int33fe_put_swnode,
+				       software_node_fwnode(mux_ref_node));
+	if (ret)
+		return ret;
+
 	/*
 	 * Update node used in "usb-role-switch" property. Note that we
 	 * rely on software_node_register_node_group() to use the original
 	 * instance of properties instead of copying them.
 	 */
-	fusb302_mux_refs[0].node = mux_ref_node;
+	fusb302_mux_refs[0] = SOFTWARE_NODE_REFERENCE(mux_ref_node);
 
 	ret = software_node_register_node_group(node_group);
 	if (ret)
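cht_int33fe_put_swnode() above is registered through devm_add_action_or_reset(), which ties the fwnode reference to the device's lifetime and removes the hand-rolled cleanup from cht_int33fe_remove_nodes(). The general pattern, sketched with illustrative names:

#include <linux/device.h>

static void example_release(void *res)
{
	/* undo the acquisition paired with this action */
}

static int example_acquire(struct device *dev, void *res)
{
	/*
	 * On success the action runs automatically at driver unbind, in
	 * reverse registration order; on allocation failure it runs
	 * immediately and the error is returned, so probe() needs no
	 * manual unwind path.
	 */
	return devm_add_action_or_reset(dev, example_release, res);
}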
@@ -270,7 +281,7 @@ cht_int33fe_register_max17047(struct device *dev, struct cht_int33fe_data *data)
 	}
 
 	memset(&board_info, 0, sizeof(board_info));
-	strscpy(board_info.type, "max17047", I2C_NAME_SIZE);
+	strscpy(board_info.type, "max17047");
 	board_info.dev_name = "max17047";
 	board_info.fwnode = fwnode;
 	data->battery_fg = i2c_acpi_new_device(dev, 1, &board_info);
@@ -345,7 +356,7 @@ static int cht_int33fe_typec_probe(struct platform_device *pdev)
 		return fusb302_irq;
 	}
 
-	ret = cht_int33fe_add_nodes(data);
+	ret = cht_int33fe_add_nodes(dev, data);
 	if (ret)
 		return ret;
 
@@ -361,7 +372,7 @@ static int cht_int33fe_typec_probe(struct platform_device *pdev)
 	}
 
 	memset(&board_info, 0, sizeof(board_info));
-	strscpy(board_info.type, "typec_fusb302", I2C_NAME_SIZE);
+	strscpy(board_info.type, "typec_fusb302");
 	board_info.dev_name = "fusb302";
 	board_info.fwnode = fwnode;
 	board_info.irq = fusb302_irq;
@@ -381,7 +392,7 @@ static int cht_int33fe_typec_probe(struct platform_device *pdev)
 	memset(&board_info, 0, sizeof(board_info));
 	board_info.dev_name = "pi3usb30532";
 	board_info.fwnode = fwnode;
-	strscpy(board_info.type, "pi3usb30532", I2C_NAME_SIZE);
+	strscpy(board_info.type, "pi3usb30532");
 
 	data->pi3usb30532 = i2c_acpi_new_device(dev, 3, &board_info);
 	if (IS_ERR(data->pi3usb30532)) {
@@ -405,7 +416,7 @@ out_remove_nodes:
 	return ret;
 }
 
-static int cht_int33fe_typec_remove(struct platform_device *pdev)
+static void cht_int33fe_typec_remove(struct platform_device *pdev)
 {
 	struct cht_int33fe_data *data = platform_get_drvdata(pdev);
 
@@ -414,8 +425,6 @@ static int cht_int33fe_typec_remove(struct platform_device *pdev)
 	i2c_unregister_device(data->battery_fg);
 
 	cht_int33fe_remove_nodes(data);
-
-	return 0;
 }
 
 static const struct acpi_device_id cht_int33fe_acpi_ids[] = {
diff --git a/drivers/platform/x86/intel/ehl_pse_io.c b/drivers/platform/x86/intel/ehl_pse_io.c
new file mode 100644
index 000000000000..861e14808b35
--- /dev/null
+++ b/drivers/platform/x86/intel/ehl_pse_io.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Elkhart Lake Programmable Service Engine (PSE) I/O
+ *
+ * Copyright (c) 2025 Intel Corporation.
+ *
+ * Author: Raag Jadav <raag.jadav@intel.com>
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/device/devres.h>
+#include <linux/errno.h>
+#include <linux/gfp_types.h>
+#include <linux/ioport.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+#include <linux/ehl_pse_io_aux.h>
+
+#define EHL_PSE_IO_DEV_SIZE	SZ_4K
+
+static int ehl_pse_io_dev_create(struct pci_dev *pci, const char *name, int idx)
+{
+	struct device *dev = &pci->dev;
+	struct auxiliary_device *adev;
+	struct ehl_pse_io_data *data;
+	resource_size_t start, offset;
+	u32 id;
+
+	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	id = (pci_domain_nr(pci->bus) << 16) | pci_dev_id(pci);
+	start = pci_resource_start(pci, 0);
+	offset = EHL_PSE_IO_DEV_SIZE * idx;
+
+	data->mem = DEFINE_RES_MEM(start + offset, EHL_PSE_IO_DEV_SIZE);
+	data->irq = pci_irq_vector(pci, idx);
+
+	adev = __devm_auxiliary_device_create(dev, EHL_PSE_IO_NAME, name, data, id);
+
+	return adev ? 0 : -ENODEV;
+}
+
+static int ehl_pse_io_probe(struct pci_dev *pci, const struct pci_device_id *id)
+{
+	int ret;
+
+	ret = pcim_enable_device(pci);
+	if (ret)
+		return ret;
+
+	pci_set_master(pci);
+
+	ret = pci_alloc_irq_vectors(pci, 2, 2, PCI_IRQ_MSI);
+	if (ret < 0)
+		return ret;
+
+	ret = ehl_pse_io_dev_create(pci, EHL_PSE_GPIO_NAME, 0);
+	if (ret)
+		return ret;
+
+	return ehl_pse_io_dev_create(pci, EHL_PSE_TIO_NAME, 1);
+}
+
+static const struct pci_device_id ehl_pse_io_ids[] = {
+	{ PCI_VDEVICE(INTEL, 0x4b88) },
+	{ PCI_VDEVICE(INTEL, 0x4b89) },
+	{ }
+};
+MODULE_DEVICE_TABLE(pci, ehl_pse_io_ids);
+
+static struct pci_driver ehl_pse_io_driver = {
+	.name = EHL_PSE_IO_NAME,
+	.id_table = ehl_pse_io_ids,
+	.probe = ehl_pse_io_probe,
+};
+module_pci_driver(ehl_pse_io_driver);
+
+MODULE_AUTHOR("Raag Jadav <raag.jadav@intel.com>");
+MODULE_DESCRIPTION("Intel Elkhart Lake PSE I/O driver");
+MODULE_LICENSE("GPL");
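The auxiliary devices created above are bound by separate GPIO and Timed I/O drivers. A minimal sketch of such a consumer, assuming the devm helper publishes the ehl_pse_io_data pointer as platform data (the name macros come from linux/ehl_pse_io_aux.h; everything else is illustrative):

#include <linux/auxiliary_bus.h>
#include <linux/module.h>

#include <linux/ehl_pse_io_aux.h>

static int example_gpio_probe(struct auxiliary_device *adev,
			      const struct auxiliary_device_id *id)
{
	/* assumption: the parent stored its data as platform data */
	struct ehl_pse_io_data *data = dev_get_platdata(&adev->dev);

	/* data->mem and data->irq describe this function's 4K slice */
	return data ? 0 : -ENODEV;
}

/* auxiliary match strings are "<modname>.<devname>" */
static const struct auxiliary_device_id example_gpio_ids[] = {
	{ .name = EHL_PSE_IO_NAME "." EHL_PSE_GPIO_NAME },
	{ }
};
MODULE_DEVICE_TABLE(auxiliary, example_gpio_ids);

static struct auxiliary_driver example_gpio_driver = {
	.probe = example_gpio_probe,
	.id_table = example_gpio_ids,
};
module_auxiliary_driver(example_gpio_driver);

MODULE_DESCRIPTION("Illustrative consumer of the PSE GPIO auxiliary device");
MODULE_LICENSE("GPL");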
"failed to %sable hotkeys\n", - enable ? "en" : "dis"); + if (!intel_hid_execute_method(handle, INTEL_HID_DSM_HDSM_FN, enable)) { + dev_warn(device, "failed to %s hotkeys\n", str_enable_disable(enable)); return -EIO; } @@ -498,6 +535,7 @@ static void notify_handler(acpi_handle handle, u32 event, void *context) struct platform_device *device = context; struct intel_hid_priv *priv = dev_get_drvdata(&device->dev); unsigned long long ev_index; + struct key_entry *ke; int err; /* @@ -539,11 +577,15 @@ static void notify_handler(acpi_handle handle, u32 event, void *context) if (event == 0xc0 || !priv->array) return; - if (!sparse_keymap_entry_from_scancode(priv->array, event)) { + ke = sparse_keymap_entry_from_scancode(priv->array, event); + if (!ke) { dev_info(&device->dev, "unknown event 0x%x\n", event); return; } + if (ke->type == KE_IGNORE) + return; + wakeup: pm_wakeup_hard_event(&device->dev); @@ -620,7 +662,7 @@ static bool button_array_present(struct platform_device *device) static int intel_hid_probe(struct platform_device *device) { acpi_handle handle = ACPI_HANDLE(&device->dev); - unsigned long long mode; + unsigned long long mode, dummy; struct intel_hid_priv *priv; acpi_status status; int err; @@ -692,18 +734,15 @@ static int intel_hid_probe(struct platform_device *device) if (err) goto err_remove_notify; - if (priv->array) { - unsigned long long dummy; - - intel_button_array_enable(&device->dev, true); + intel_button_array_enable(&device->dev, true); - /* Call button load method to enable HID power button */ - if (!intel_hid_evaluate_method(handle, INTEL_HID_DSM_BTNL_FN, - &dummy)) { - dev_warn(&device->dev, - "failed to enable HID power button\n"); - } - } + /* + * Call button load method to enable HID power button + * Always do this since it activates events on some devices without + * a button array too. + */ + if (!intel_hid_evaluate_method(handle, INTEL_HID_DSM_BTNL_FN, &dummy)) + dev_warn(&device->dev, "failed to enable HID power button\n"); device_init_wakeup(&device->dev, true); /* @@ -720,7 +759,7 @@ err_remove_notify: return err; } -static int intel_hid_remove(struct platform_device *device) +static void intel_hid_remove(struct platform_device *device) { acpi_handle handle = ACPI_HANDLE(&device->dev); @@ -728,12 +767,6 @@ static int intel_hid_remove(struct platform_device *device) acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler); intel_hid_set_enable(&device->dev, false); intel_button_array_enable(&device->dev, false); - - /* - * Even if we failed to shut off the event stream, we can still - * safely detach from the device. - */ - return 0; } static struct platform_driver intel_hid_pl_driver = { diff --git a/drivers/platform/x86/intel/ifs/Makefile b/drivers/platform/x86/intel/ifs/Makefile index 30f035ef5581..c3e417bce9b6 100644 --- a/drivers/platform/x86/intel/ifs/Makefile +++ b/drivers/platform/x86/intel/ifs/Makefile @@ -1,3 +1,3 @@ obj-$(CONFIG_INTEL_IFS) += intel_ifs.o -intel_ifs-objs := core.o load.o runtest.o sysfs.o +intel_ifs-y := core.o load.o runtest.o sysfs.o diff --git a/drivers/platform/x86/intel/ifs/core.c b/drivers/platform/x86/intel/ifs/core.c index 206a617c2e02..b73e582128c9 100644 --- a/drivers/platform/x86/intel/ifs/core.c +++ b/drivers/platform/x86/intel/ifs/core.c @@ -1,78 +1,154 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright(c) 2022 Intel Corporation. 
diff --git a/drivers/platform/x86/intel/ifs/Makefile b/drivers/platform/x86/intel/ifs/Makefile
index 30f035ef5581..c3e417bce9b6 100644
--- a/drivers/platform/x86/intel/ifs/Makefile
+++ b/drivers/platform/x86/intel/ifs/Makefile
@@ -1,3 +1,3 @@
 obj-$(CONFIG_INTEL_IFS)		+= intel_ifs.o
 
-intel_ifs-objs			:= core.o load.o runtest.o sysfs.o
+intel_ifs-y			:= core.o load.o runtest.o sysfs.o
diff --git a/drivers/platform/x86/intel/ifs/core.c b/drivers/platform/x86/intel/ifs/core.c
index 206a617c2e02..b73e582128c9 100644
--- a/drivers/platform/x86/intel/ifs/core.c
+++ b/drivers/platform/x86/intel/ifs/core.c
@@ -1,78 +1,154 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright(c) 2022 Intel Corporation. */
 
+#include <linux/bitfield.h>
 #include <linux/module.h>
 #include <linux/kdev_t.h>
 #include <linux/semaphore.h>
 #include <linux/slab.h>
 
 #include <asm/cpu_device_id.h>
+#include <asm/msr.h>
 
 #include "ifs.h"
 
-#define X86_MATCH(model)				\
-	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6,	\
-		INTEL_FAM6_##model, X86_FEATURE_CORE_CAPABILITIES, NULL)
+#define X86_MATCH(vfm, array_gen)				\
+	X86_MATCH_VFM_FEATURE(vfm, X86_FEATURE_CORE_CAPABILITIES, array_gen)
 
 static const struct x86_cpu_id ifs_cpu_ids[] __initconst = {
-	X86_MATCH(SAPPHIRERAPIDS_X),
+	X86_MATCH(INTEL_SAPPHIRERAPIDS_X, ARRAY_GEN0),
+	X86_MATCH(INTEL_EMERALDRAPIDS_X, ARRAY_GEN0),
+	X86_MATCH(INTEL_GRANITERAPIDS_X, ARRAY_GEN0),
+	X86_MATCH(INTEL_GRANITERAPIDS_D, ARRAY_GEN0),
+	X86_MATCH(INTEL_ATOM_CRESTMONT_X, ARRAY_GEN1),
+	X86_MATCH(INTEL_ATOM_DARKMONT_X, ARRAY_GEN1),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, ifs_cpu_ids);
 
-static struct ifs_device ifs_device = {
-	.data = {
-		.integrity_cap_bit = MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT,
-		.test_num = 0,
+ATTRIBUTE_GROUPS(plat_ifs);
+ATTRIBUTE_GROUPS(plat_ifs_array);
+
+bool *ifs_pkg_auth;
+
+static const struct ifs_test_caps scan_test = {
+	.integrity_cap_bit = MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT,
+	.test_num = IFS_TYPE_SAF,
+	.image_suffix = "scan",
+};
+
+static const struct ifs_test_caps array_test = {
+	.integrity_cap_bit = MSR_INTEGRITY_CAPS_ARRAY_BIST_BIT,
+	.test_num = IFS_TYPE_ARRAY_BIST,
+};
+
+static const struct ifs_test_msrs scan_msrs = {
+	.copy_hashes = MSR_COPY_SCAN_HASHES,
+	.copy_hashes_status = MSR_SCAN_HASHES_STATUS,
+	.copy_chunks = MSR_AUTHENTICATE_AND_COPY_CHUNK,
+	.copy_chunks_status = MSR_CHUNKS_AUTHENTICATION_STATUS,
+	.test_ctrl = MSR_SAF_CTRL,
+};
+
+static const struct ifs_test_msrs sbaf_msrs = {
+	.copy_hashes = MSR_COPY_SBAF_HASHES,
+	.copy_hashes_status = MSR_SBAF_HASHES_STATUS,
+	.copy_chunks = MSR_AUTHENTICATE_AND_COPY_SBAF_CHUNK,
+	.copy_chunks_status = MSR_SBAF_CHUNKS_AUTHENTICATION_STATUS,
+	.test_ctrl = MSR_SBAF_CTRL,
+};
+
+static const struct ifs_test_caps sbaf_test = {
+	.integrity_cap_bit = MSR_INTEGRITY_CAPS_SBAF_BIT,
+	.test_num = IFS_TYPE_SBAF,
+	.image_suffix = "sbft",
+};
+
+static struct ifs_device ifs_devices[] = {
+	[IFS_TYPE_SAF] = {
+		.test_caps = &scan_test,
+		.test_msrs = &scan_msrs,
+		.misc = {
+			.name = "intel_ifs_0",
+			.minor = MISC_DYNAMIC_MINOR,
+			.groups = plat_ifs_groups,
+		},
 	},
-	.misc = {
-		.name = "intel_ifs_0",
-		.nodename = "intel_ifs/0",
-		.minor = MISC_DYNAMIC_MINOR,
+	[IFS_TYPE_ARRAY_BIST] = {
+		.test_caps = &array_test,
+		.misc = {
+			.name = "intel_ifs_1",
+			.minor = MISC_DYNAMIC_MINOR,
+			.groups = plat_ifs_array_groups,
+		},
+	},
+	[IFS_TYPE_SBAF] = {
+		.test_caps = &sbaf_test,
+		.test_msrs = &sbaf_msrs,
+		.misc = {
+			.name = "intel_ifs_2",
+			.minor = MISC_DYNAMIC_MINOR,
+			.groups = plat_ifs_groups,
+		},
 	},
 };
 
+#define IFS_NUMTESTS ARRAY_SIZE(ifs_devices)
+
+static void ifs_cleanup(void)
+{
+	int i;
+
+	for (i = 0; i < IFS_NUMTESTS; i++) {
+		if (ifs_devices[i].misc.this_device)
+			misc_deregister(&ifs_devices[i].misc);
+	}
+	kfree(ifs_pkg_auth);
+}
+
 static int __init ifs_init(void)
 {
 	const struct x86_cpu_id *m;
 	u64 msrval;
-	int ret;
+	int i, ret;
 
 	m = x86_match_cpu(ifs_cpu_ids);
 	if (!m)
 		return -ENODEV;
 
-	if (rdmsrl_safe(MSR_IA32_CORE_CAPS, &msrval))
+	if (rdmsrq_safe(MSR_IA32_CORE_CAPS, &msrval))
 		return -ENODEV;
 
 	if (!(msrval & MSR_IA32_CORE_CAPS_INTEGRITY_CAPS))
 		return -ENODEV;
 
-	if (rdmsrl_safe(MSR_INTEGRITY_CAPS, &msrval))
-		return -ENODEV;
-
-	ifs_device.misc.groups = ifs_get_groups();
-
-	if (!(msrval & BIT(ifs_device.data.integrity_cap_bit)))
+	if (rdmsrq_safe(MSR_INTEGRITY_CAPS, &msrval))
 		return -ENODEV;
 
-	ifs_device.data.pkg_auth = kmalloc_array(topology_max_packages(), sizeof(bool), GFP_KERNEL);
-	if (!ifs_device.data.pkg_auth)
+	ifs_pkg_auth = kmalloc_array(topology_max_packages(), sizeof(bool), GFP_KERNEL);
+	if (!ifs_pkg_auth)
 		return -ENOMEM;
 
-	ret = misc_register(&ifs_device.misc);
-	if (ret) {
-		kfree(ifs_device.data.pkg_auth);
-		return ret;
+	for (i = 0; i < IFS_NUMTESTS; i++) {
+		if (!(msrval & BIT(ifs_devices[i].test_caps->integrity_cap_bit)))
+			continue;
+		ifs_devices[i].rw_data.generation = FIELD_GET(MSR_INTEGRITY_CAPS_SAF_GEN_MASK,
+							      msrval);
+		ifs_devices[i].rw_data.array_gen = (u32)m->driver_data;
+		ret = misc_register(&ifs_devices[i].misc);
+		if (ret)
+			goto err_exit;
 	}
-
 	return 0;
+
+err_exit:
+	ifs_cleanup();
+	return ret;
 }
 
 static void __exit ifs_exit(void)
 {
-	misc_deregister(&ifs_device.misc);
-	kfree(ifs_device.data.pkg_auth);
+	ifs_cleanup();
 }
 
 module_init(ifs_init);
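The ARRAY_GEN values above ride in the driver_data slot of the CPU match table and are read back from the entry that x86_match_cpu() returns. A condensed sketch of that mechanism, with illustrative identifiers:

#include <linux/mod_devicetable.h>
#include <asm/cpu_device_id.h>

static const struct x86_cpu_id example_ids[] = {
	X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, 0),	/* gen 0 */
	X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, 1),	/* gen 1 */
	{}
};

static long example_get_gen(void)
{
	const struct x86_cpu_id *m = x86_match_cpu(example_ids);

	/* driver_data is a kernel_ulong_t chosen by the matching entry */
	return m ? (long)m->driver_data : -ENODEV;
}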
diff --git a/drivers/platform/x86/intel/ifs/ifs.h b/drivers/platform/x86/intel/ifs/ifs.h
index 046e39304fd5..f369fb0d3d82 100644
--- a/drivers/platform/x86/intel/ifs/ifs.h
+++ b/drivers/platform/x86/intel/ifs/ifs.h
@@ -17,15 +17,20 @@
  * In Field Scan (IFS) is a hardware feature to run circuit level tests on
  * a CPU core to detect problems that are not caught by parity or ECC checks.
  * Future CPUs will support more than one type of test which will show up
- * with a new platform-device instance-id, for now only .0 is exposed.
+ * with a new platform-device instance-id.
  *
  *
  * IFS Image
  * ---------
  *
- * Intel provides a firmware file containing the scan tests via
- * github [#f1]_. Similar to microcode there is a separate file for each
- * family-model-stepping.
+ * Intel provides firmware files containing the scan tests via the webpage [#f1]_.
+ * Look under the "In-Field Scan Test Images Download" section towards the
+ * end of the page. Similar to microcode, there are separate files for each
+ * family-model-stepping. IFS images are not applicable for some test types.
+ * Wherever applicable, the sysfs directory provides a "current_batch" file
+ * (see below) for loading the image.
+ *
+ * .. [#f1] https://intel.com/InFieldScan
  *
  * IFS Image Loading
  * -----------------
@@ -35,7 +40,7 @@
  * SHA hashes for the test. Then the tests themselves. Status MSRs provide
  * feedback on the success/failure of these steps.
  *
- * The test files are kept in a fixed location: /lib/firmware/intel/ifs_0/
+ * The test files are kept in a fixed location: /lib/firmware/intel/ifs_<n>/
  * For e.g if there are 3 test files, they would be named in the following
  * fashion:
  * ff-mm-ss-01.scan
@@ -47,7 +52,7 @@
  * (e.g 1, 2 or 3 in the above scenario) into the current_batch file.
  * To load ff-mm-ss-02.scan, the following command can be used::
  *
- *   # echo 2 > /sys/devices/virtual/misc/intel_ifs_0/current_batch
+ *   # echo 2 > /sys/devices/virtual/misc/intel_ifs_<n>/current_batch
 *
 * The above file can also be read to know the currently loaded image.
 *
@@ -69,16 +74,16 @@
 * to migrate those applications to other cores before running a core test.
 * It may also be necessary to redirect interrupts to other CPUs.
 *
- * In all cases reading the SCAN_STATUS MSR provides details on what
+ * In all cases reading the corresponding test's STATUS MSR provides details on what
 * happened. The driver makes the value of this MSR visible to applications
 * via the "details" file (see below). Interrupted tests may be restarted.
 *
- * The IFS driver provides sysfs interfaces via /sys/devices/virtual/misc/intel_ifs_0/
+ * The IFS driver provides sysfs interfaces via /sys/devices/virtual/misc/intel_ifs_<n>/
 * to control execution:
 *
 * Test a specific core::
 *
- *   # echo <cpu#> > /sys/devices/virtual/misc/intel_ifs_0/run_test
+ *   # echo <cpu#> > /sys/devices/virtual/misc/intel_ifs_<n>/run_test
 *
 * when HT is enabled any of the sibling cpu# can be specified to test
 * its corresponding physical core. Since the tests are per physical core,
@@ -87,21 +92,21 @@
 *
 * For e.g. to test core corresponding to cpu5
 *
- *   # echo 5 > /sys/devices/virtual/misc/intel_ifs_0/run_test
+ *   # echo 5 > /sys/devices/virtual/misc/intel_ifs_<n>/run_test
 *
 * Results of the last test are provided in /sys::
 *
- *   $ cat /sys/devices/virtual/misc/intel_ifs_0/status
+ *   $ cat /sys/devices/virtual/misc/intel_ifs_<n>/status
 *   pass
 *
 * Status can be one of pass, fail, untested
 *
 * Additional details of the last test are provided by the details file::
 *
- *   $ cat /sys/devices/virtual/misc/intel_ifs_0/details
+ *   $ cat /sys/devices/virtual/misc/intel_ifs_<n>/details
 *   0x8081
 *
- * The details file reports the hex value of the SCAN_STATUS MSR.
+ * The details file reports the hex value of the test specific status MSR.
 * Hardware defined error codes are documented in volume 4 of the Intel
 * Software Developer's Manual but the error_code field may contain one of
 * the following driver defined software codes:
@@ -122,21 +127,60 @@
 * 2) Hardware allows for some number of cores to be tested in parallel.
 * The driver does not make use of this, it only tests one core at a time.
 *
- * .. [#f1] https://github.com/intel/TBD
+ * Structural Based Functional Test at Field (SBAF):
+ * -------------------------------------------------
+ *
+ * SBAF is a new type of testing that provides comprehensive core test
+ * coverage complementing Scan at Field (SAF) testing. SBAF mimics the
+ * manufacturing screening environment and leverages the same test suite.
+ * It makes use of Design For Test (DFT) observation sites and features
+ * to maximize coverage in minimum time.
+ *
+ * Similar to the SAF test, SBAF isolates the core under test from the
+ * rest of the system during execution. Upon completion, the core
+ * seamlessly resets to its pre-test state and resumes normal operation.
+ * Any machine checks or hangs encountered during the test are confined to
+ * the isolated core, preventing disruption to the overall system.
+ *
+ * Like the SAF test, the SBAF test is also divided into multiple batches,
+ * and each batch test can take hundreds of milliseconds (100-200 ms) to
+ * complete. If such a lengthy interruption is undesirable, it is
+ * recommended to relocate the time-sensitive applications to other cores.
 */
 #include <linux/device.h>
 #include <linux/miscdevice.h>
 
+#define MSR_ARRAY_BIST				0x00000105
+
+#define MSR_COPY_SBAF_HASHES			0x000002b8
+#define MSR_SBAF_HASHES_STATUS			0x000002b9
+#define MSR_AUTHENTICATE_AND_COPY_SBAF_CHUNK	0x000002ba
+#define MSR_SBAF_CHUNKS_AUTHENTICATION_STATUS	0x000002bb
+#define MSR_ACTIVATE_SBAF			0x000002bc
+#define MSR_SBAF_STATUS				0x000002bd
+
 #define MSR_COPY_SCAN_HASHES			0x000002c2
 #define MSR_SCAN_HASHES_STATUS			0x000002c3
 #define MSR_AUTHENTICATE_AND_COPY_CHUNK		0x000002c4
 #define MSR_CHUNKS_AUTHENTICATION_STATUS	0x000002c5
 #define MSR_ACTIVATE_SCAN			0x000002c6
 #define MSR_SCAN_STATUS				0x000002c7
+#define MSR_ARRAY_TRIGGER			0x000002d6
+#define MSR_ARRAY_STATUS			0x000002d7
+#define MSR_SAF_CTRL				0x000004f0
+#define MSR_SBAF_CTRL				0x000004f8
+
 #define SCAN_NOT_TESTED				0
 #define SCAN_TEST_PASS				1
 #define SCAN_TEST_FAIL				2
 
+#define IFS_TYPE_SAF			0
+#define IFS_TYPE_ARRAY_BIST		1
+#define IFS_TYPE_SBAF			2
+
+#define ARRAY_GEN0			0
+#define ARRAY_GEN1			1
+
 /* MSR_SCAN_HASHES_STATUS bit fields */
 union ifs_scan_hashes_status {
 	u64	data;
@@ -151,6 +195,19 @@ union ifs_scan_hashes_status {
 	};
 };
 
+union ifs_scan_hashes_status_gen2 {
+	u64	data;
+	struct {
+		u16	chunk_size;
+		u16	num_chunks;
+		u32	error_code		:8;
+		u32	chunks_in_stride	:9;
+		u32	rsvd			:2;
+		u32	max_core_limit		:12;
+		u32	valid			:1;
+	};
+};
+
 /* MSR_CHUNKS_AUTH_STATUS bit fields */
 union ifs_chunks_auth_status {
 	u64	data;
@@ -163,13 +220,32 @@ union ifs_chunks_auth_status {
 	};
 };
 
+union ifs_chunks_auth_status_gen2 {
+	u64	data;
+	struct {
+		u16	valid_chunks;
+		u16	total_chunks;
+		u32	error_code	:8;
+		u32	rsvd2		:8;
+		u32	max_bundle	:16;
+	};
+};
+
 /* MSR_ACTIVATE_SCAN bit fields */
 union ifs_scan {
 	u64	data;
 	struct {
-		u32	start	:8;
-		u32	stop	:8;
-		u32	rsvd	:16;
+		union {
+			struct {
+				u8	start;
+				u8	stop;
+				u16	rsvd;
+			} gen0;
+			struct {
+				u16	start;
+				u16	stop;
+			} gen2;
+		};
 		u32	delay	:31;
 		u32	sigmce	:1;
 	};
@@ -179,9 +255,17 @@ union ifs_scan {
 union ifs_status {
 	u64	data;
 	struct {
-		u32	chunk_num		:8;
-		u32	chunk_stop_index	:8;
-		u32	rsvd1			:16;
+		union {
+			struct {
+				u8	chunk_num;
+				u8	chunk_stop_index;
+				u16	rsvd1;
+			} gen0;
+			struct {
+				u16	chunk_num;
+				u16	chunk_stop_index;
+			} gen2;
+		};
 		u32	error_code		:8;
 		u32	rsvd2			:22;
 		u32	control_error		:1;
@@ -189,6 +273,45 @@ union ifs_status {
 	};
 };
 
+/* MSR_ARRAY_BIST bit fields */
+union ifs_array {
+	u64	data;
+	struct {
+		u32	array_bitmask;
+		u16	array_bank;
+		u16	rsvd		:15;
+		u16	ctrl_result	:1;
+	};
+};
+
+/* MSR_ACTIVATE_SBAF bit fields */
+union ifs_sbaf {
+	u64	data;
+	struct {
+		u32	bundle_idx	:9;
+		u32	rsvd1		:5;
+		u32	pgm_idx		:2;
+		u32	rsvd2		:16;
+		u32	delay		:31;
+		u32	sigmce		:1;
+	};
+};
+
+/* MSR_SBAF_STATUS bit fields */
+union ifs_sbaf_status {
+	u64	data;
+	struct {
+		u32	bundle_idx	:9;
+		u32	rsvd1		:5;
+		u32	pgm_idx		:2;
+		u32	rsvd2		:16;
+		u32	error_code	:8;
+		u32	rsvd3		:21;
+		u32	test_fail	:1;
+		u32	sbaf_status	:2;
+	};
+};
+
 /*
 * Driver populated error-codes
 * 0xFD: Test timed out before completing all the chunks.
@@ -197,22 +320,45 @@ union ifs_status {
 #define IFS_SW_TIMEOUT				0xFD
 #define IFS_SW_PARTIAL_COMPLETION		0xFE
 
+#define IFS_SUFFIX_SZ	5
+
+struct ifs_test_caps {
+	int	integrity_cap_bit;
+	int	test_num;
+	char	image_suffix[IFS_SUFFIX_SZ];
+};
+
+/**
+ * struct ifs_test_msrs - MSRs used in IFS tests
+ * @copy_hashes: Copy test hash data
+ * @copy_hashes_status: Status of copied test hash data
+ * @copy_chunks: Copy chunks of the test data
+ * @copy_chunks_status: Status of the copied test data chunks
+ * @test_ctrl: Control the test attributes
+ */
+struct ifs_test_msrs {
+	u32	copy_hashes;
+	u32	copy_hashes_status;
+	u32	copy_chunks;
+	u32	copy_chunks_status;
+	u32	test_ctrl;
+};
+
 /**
 * struct ifs_data - attributes related to intel IFS driver
- * @integrity_cap_bit: MSR_INTEGRITY_CAPS bit enumerating this test
 * @loaded_version: stores the currently loaded ifs image version.
- * @pkg_auth: array of bool storing per package auth status
 * @loaded: If a valid test binary has been loaded into the memory
 * @loading_error: Error occurred on another CPU while loading image
 * @valid_chunks: number of chunks which could be validated.
 * @status: it holds simple status pass/fail/untested
 * @scan_details: opaque scan status code from h/w
 * @cur_batch: number indicating the currently loaded test file
- * @test_num: number indicating the test type
+ * @generation: IFS test generation enumerated by hardware
+ * @chunk_size: size of a test chunk
+ * @array_gen: test generation of array test
+ * @max_bundle: maximum bundle index
 */
 struct ifs_data {
-	int	integrity_cap_bit;
-	bool	*pkg_auth;
 	int	loaded_version;
 	bool	loaded;
 	bool	loading_error;
@@ -220,7 +366,10 @@ struct ifs_data {
 	int	status;
 	u64	scan_details;
 	u32	cur_batch;
-	int	test_num;
+	u32	generation;
+	u32	chunk_size;
+	u32	array_gen;
+	u32	max_bundle;
 };
 
 struct ifs_work {
@@ -229,7 +378,9 @@ struct ifs_work {
 };
 
 struct ifs_device {
-	struct ifs_data data;
+	const struct ifs_test_caps *test_caps;
+	const struct ifs_test_msrs *test_msrs;
+	struct ifs_data rw_data;
 	struct miscdevice misc;
 };
 
@@ -238,11 +389,29 @@ static inline struct ifs_data *ifs_get_data(struct device *dev)
 	struct miscdevice *m = dev_get_drvdata(dev);
 	struct ifs_device *d = container_of(m, struct ifs_device, misc);
 
-	return &d->data;
+	return &d->rw_data;
+}
+
+static inline const struct ifs_test_caps *ifs_get_test_caps(struct device *dev)
+{
+	struct miscdevice *m = dev_get_drvdata(dev);
+	struct ifs_device *d = container_of(m, struct ifs_device, misc);
+
+	return d->test_caps;
+}
+
+static inline const struct ifs_test_msrs *ifs_get_test_msrs(struct device *dev)
+{
+	struct miscdevice *m = dev_get_drvdata(dev);
+	struct ifs_device *d = container_of(m, struct ifs_device, misc);
+
+	return d->test_msrs;
 }
 
+extern bool *ifs_pkg_auth;
+
 int ifs_load_firmware(struct device *dev);
 int do_core_test(int cpu, struct device *dev);
-const struct attribute_group **ifs_get_groups(void);
+
+extern struct attribute *plat_ifs_attrs[];
+extern struct attribute *plat_ifs_array_attrs[];
 
 #endif
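The ifs_get_data()/ifs_get_test_caps() accessors recover the enclosing ifs_device from the miscdevice stored in drvdata. A sketch of how a sysfs attribute (in the style of sysfs.c, names illustrative) would consume them:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_details_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct ifs_data *ifsd = ifs_get_data(dev);

	/* "details" exposes the raw per-test status MSR value */
	return sysfs_emit(buf, "%#llx\n", ifsd->scan_details);
}
static DEVICE_ATTR_RO(example_details);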
diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c
index c5c24e6fdc43..50f1fdf7dfed 100644
--- a/drivers/platform/x86/intel/ifs/load.c
+++ b/drivers/platform/x86/intel/ifs/load.c
@@ -2,8 +2,10 @@
 /* Copyright(c) 2022 Intel Corporation. */
 
 #include <linux/firmware.h>
+#include <linux/sizes.h>
 #include <asm/cpu.h>
-#include <asm/microcode_intel.h>
+#include <asm/microcode.h>
+#include <asm/msr.h>
 
 #include "ifs.h"
 
@@ -26,6 +28,11 @@ union meta_data {
 #define IFS_HEADER_SIZE	(sizeof(struct microcode_header_intel))
 #define META_TYPE_IFS	1
 
+#define INVALIDATE_STRIDE	0x1UL
+#define IFS_GEN_STRIDE_AWARE	2
+#define AUTH_INTERRUPTED_ERROR	5
+#define IFS_AUTH_RETRY_CT	10
+
 static struct microcode_header_intel *ifs_header_ptr;	/* pointer to the ifs image header */
 static u64 ifs_hash_ptr;			/* Address of ifs metadata (hash) */
 static u64 ifs_test_image_ptr;			/* 256B aligned address of test pattern */
@@ -44,7 +51,10 @@ static const char * const scan_hash_status[] = {
 static const char * const scan_authentication_status[] = {
 	[0] = "No error reported",
 	[1] = "Attempt to authenticate a chunk which is already marked as authentic",
-	[2] = "Chunk authentication error. The hash of chunk did not match expected value"
+	[2] = "Chunk authentication error. The hash of chunk did not match expected value",
+	[3] = "Reserved",
+	[4] = "Chunk outside the current stride",
+	[5] = "Authentication flow interrupted",
 };
 
 #define MC_HEADER_META_TYPE_END		(0)
@@ -56,12 +66,13 @@ struct metadata_header {
 
 static struct metadata_header *find_meta_data(void *ucode, unsigned int meta_type)
 {
+	struct microcode_header_intel *hdr = &((struct microcode_intel *)ucode)->hdr;
 	struct metadata_header *meta_header;
 	unsigned long data_size, total_meta;
 	unsigned long meta_size = 0;
 
-	data_size = get_datasize(ucode);
-	total_meta = ((struct microcode_intel *)ucode)->hdr.metasize;
+	data_size = intel_microcode_get_datasize(hdr);
+	total_meta = hdr->metasize;
 	if (!total_meta)
 		return NULL;
 
@@ -79,6 +90,23 @@ static struct metadata_header *find_meta_data(void *ucode, unsigned int meta_typ
 	return NULL;
 }
 
+static void hashcopy_err_message(struct device *dev, u32 err_code)
+{
+	if (err_code >= ARRAY_SIZE(scan_hash_status))
+		dev_err(dev, "invalid error code 0x%x for hash copy\n", err_code);
+	else
+		dev_err(dev, "Hash copy error : %s\n", scan_hash_status[err_code]);
+}
+
+static void auth_err_message(struct device *dev, u32 err_code)
+{
+	if (err_code >= ARRAY_SIZE(scan_authentication_status))
+		dev_err(dev, "invalid error code 0x%x for authentication\n", err_code);
+	else
+		dev_err(dev, "Chunk authentication error : %s\n",
+			scan_authentication_status[err_code]);
+}
+
 /*
 * To copy scan hashes and authenticate test chunks, the initiating cpu must
 * point EDX:EAX to the test image's linear address.
@@ -91,15 +119,17 @@ static void copy_hashes_authenticate_chunks(struct work_struct *work)
 	union ifs_scan_hashes_status hashes_status;
 	union ifs_chunks_auth_status chunk_status;
 	struct device *dev = local_work->dev;
+	const struct ifs_test_msrs *msrs;
 	int i, num_chunks, chunk_size;
 	struct ifs_data *ifsd;
 	u64 linear_addr, base;
 	u32 err_code;
 
 	ifsd = ifs_get_data(dev);
+	msrs = ifs_get_test_msrs(dev);
 	/* run scan hash copy */
-	wrmsrl(MSR_COPY_SCAN_HASHES, ifs_hash_ptr);
-	rdmsrl(MSR_SCAN_HASHES_STATUS, hashes_status.data);
+	wrmsrq(msrs->copy_hashes, ifs_hash_ptr);
+	rdmsrq(msrs->copy_hashes_status, hashes_status.data);
 
 	/* enumerate the scan image information */
 	num_chunks = hashes_status.num_chunks;
@@ -108,11 +138,7 @@ static void copy_hashes_authenticate_chunks(struct work_struct *work)
 
 	if (!hashes_status.valid) {
 		ifsd->loading_error = true;
-		if (err_code >= ARRAY_SIZE(scan_hash_status)) {
-			dev_err(dev, "invalid error code 0x%x for hash copy\n", err_code);
-			goto done;
-		}
-		dev_err(dev, "Hash copy error : %s", scan_hash_status[err_code]);
+		hashcopy_err_message(dev, err_code);
 		goto done;
 	}
 
@@ -124,21 +150,15 @@ static void copy_hashes_authenticate_chunks(struct work_struct *work)
 		linear_addr = base + i * chunk_size;
 		linear_addr |= i;
 
-		wrmsrl(MSR_AUTHENTICATE_AND_COPY_CHUNK, linear_addr);
-		rdmsrl(MSR_CHUNKS_AUTHENTICATION_STATUS, chunk_status.data);
+		wrmsrq(msrs->copy_chunks, linear_addr);
+		rdmsrq(msrs->copy_chunks_status, chunk_status.data);
 
 		ifsd->valid_chunks = chunk_status.valid_chunks;
 		err_code = chunk_status.error_code;
 
 		if (err_code) {
 			ifsd->loading_error = true;
-			if (err_code >= ARRAY_SIZE(scan_authentication_status)) {
-				dev_err(dev,
-					"invalid error code 0x%x for authentication\n", err_code);
-				goto done;
-			}
-			dev_err(dev, "Chunk authentication error %s\n",
-				scan_authentication_status[err_code]);
+			auth_err_message(dev, err_code);
 			goto done;
 		}
 	}
@@ -146,16 +166,118 @@ done:
 	complete(&ifs_done);
 }
 
+static int get_num_chunks(int gen, union ifs_scan_hashes_status_gen2 status)
+{
+	return gen >= IFS_GEN_STRIDE_AWARE ? status.chunks_in_stride : status.num_chunks;
+}
+
+static bool need_copy_scan_hashes(struct ifs_data *ifsd)
+{
+	return !ifsd->loaded ||
+	       ifsd->generation < IFS_GEN_STRIDE_AWARE ||
+	       ifsd->loaded_version != ifs_header_ptr->rev;
+}
+
+static int copy_hashes_authenticate_chunks_gen2(struct device *dev)
+{
+	union ifs_scan_hashes_status_gen2 hashes_status;
+	union ifs_chunks_auth_status_gen2 chunk_status;
+	u32 err_code, valid_chunks, total_chunks;
+	const struct ifs_test_msrs *msrs;
+	int i, num_chunks, chunk_size;
+	union meta_data *ifs_meta;
+	int starting_chunk_nr;
+	struct ifs_data *ifsd;
+	u64 linear_addr, base;
+	u64 chunk_table[2];
+	int retry_count;
+
+	ifsd = ifs_get_data(dev);
+	msrs = ifs_get_test_msrs(dev);
+
+	if (need_copy_scan_hashes(ifsd)) {
+		wrmsrq(msrs->copy_hashes, ifs_hash_ptr);
+		rdmsrq(msrs->copy_hashes_status, hashes_status.data);
+
+		/* enumerate the scan image information */
+		chunk_size = hashes_status.chunk_size * SZ_1K;
+		err_code = hashes_status.error_code;
+
+		num_chunks = get_num_chunks(ifsd->generation, hashes_status);
+
+		if (!hashes_status.valid) {
+			hashcopy_err_message(dev, err_code);
+			return -EIO;
+		}
+		ifsd->loaded_version = ifs_header_ptr->rev;
+		ifsd->chunk_size = chunk_size;
+	} else {
+		num_chunks = ifsd->valid_chunks;
+		chunk_size = ifsd->chunk_size;
+	}
+
+	if (ifsd->generation >= IFS_GEN_STRIDE_AWARE) {
+		wrmsrq(msrs->test_ctrl, INVALIDATE_STRIDE);
+		rdmsrq(msrs->copy_chunks_status, chunk_status.data);
+		if (chunk_status.valid_chunks != 0) {
+			dev_err(dev, "Couldn't invalidate installed stride - %d\n",
+				chunk_status.valid_chunks);
+			return -EIO;
+		}
+	}
+
+	base = ifs_test_image_ptr;
+	ifs_meta = (union meta_data *)find_meta_data(ifs_header_ptr, META_TYPE_IFS);
+	starting_chunk_nr = ifs_meta->starting_chunk;
+
+	/* scan data authentication and copy chunks to secured memory */
+	for (i = 0; i < num_chunks; i++) {
+		retry_count = IFS_AUTH_RETRY_CT;
+		linear_addr = base + i * chunk_size;
+
+		chunk_table[0] = starting_chunk_nr + i;
+		chunk_table[1] = linear_addr;
+		do {
+			local_irq_disable();
+			wrmsrq(msrs->copy_chunks, (u64)chunk_table);
+			local_irq_enable();
+			rdmsrq(msrs->copy_chunks_status, chunk_status.data);
+			err_code = chunk_status.error_code;
+		} while (err_code == AUTH_INTERRUPTED_ERROR && --retry_count);
+
+		if (err_code) {
+			ifsd->loading_error = true;
+			auth_err_message(dev, err_code);
+			return -EIO;
+		}
+	}
+
+	valid_chunks = chunk_status.valid_chunks;
+	total_chunks = chunk_status.total_chunks;
+
+	if (valid_chunks != total_chunks) {
+		ifsd->loading_error = true;
+		dev_err(dev, "Couldn't authenticate all the chunks. Authenticated %d total %d.\n",
+			valid_chunks, total_chunks);
+		return -EIO;
+	}
+
+	ifsd->valid_chunks = valid_chunks;
+	ifsd->max_bundle = chunk_status.max_bundle;
+
+	return 0;
+}
+
 static int validate_ifs_metadata(struct device *dev)
 {
+	const struct ifs_test_caps *test = ifs_get_test_caps(dev);
 	struct ifs_data *ifsd = ifs_get_data(dev);
 	union meta_data *ifs_meta;
 	char test_file[64];
 	int ret = -EINVAL;
 
-	snprintf(test_file, sizeof(test_file), "%02x-%02x-%02x-%02x.scan",
+	snprintf(test_file, sizeof(test_file), "%02x-%02x-%02x-%02x.%s",
 		 boot_cpu_data.x86, boot_cpu_data.x86_model,
-		 boot_cpu_data.x86_stepping, ifsd->cur_batch);
+		 boot_cpu_data.x86_stepping, ifsd->cur_batch, test->image_suffix);
 
 	ifs_meta = (union meta_data *)find_meta_data(ifs_header_ptr, META_TYPE_IFS);
 	if (!ifs_meta) {
@@ -178,6 +300,19 @@ static int validate_ifs_metadata(struct device *dev)
 		return ret;
 	}
 
+	if (ifs_meta->chunks_per_stride &&
+	    (ifs_meta->starting_chunk % ifs_meta->chunks_per_stride != 0)) {
+		dev_warn(dev, "Starting chunk num %u not a multiple of chunks_per_stride %u\n",
+			 ifs_meta->starting_chunk, ifs_meta->chunks_per_stride);
+		return ret;
+	}
+
+	if (ifs_meta->test_type != test->test_num) {
+		dev_warn(dev, "Metadata test_type %d mismatches with device type\n",
+			 ifs_meta->test_type);
+		return ret;
+	}
+
 	return 0;
 }
 
@@ -192,32 +327,35 @@ static int scan_chunks_sanity_check(struct device *dev)
 	struct ifs_work local_work;
 	int curr_pkg, cpu, ret;
 
-	memset(ifsd->pkg_auth, 0, (topology_max_packages() * sizeof(bool)));
+	memset(ifs_pkg_auth, 0, (topology_max_packages() * sizeof(bool)));
 	ret = validate_ifs_metadata(dev);
 	if (ret)
 		return ret;
 
 	ifsd->loading_error = false;
-	ifsd->loaded_version = ifs_header_ptr->rev;
+
+	if (ifsd->generation > 0)
+		return copy_hashes_authenticate_chunks_gen2(dev);
 
 	/* copy the scan hash and authenticate per package */
 	cpus_read_lock();
 	for_each_online_cpu(cpu) {
 		curr_pkg = topology_physical_package_id(cpu);
-		if (ifsd->pkg_auth[curr_pkg])
+		if (ifs_pkg_auth[curr_pkg])
 			continue;
 		reinit_completion(&ifs_done);
 		local_work.dev = dev;
-		INIT_WORK(&local_work.w, copy_hashes_authenticate_chunks);
+		INIT_WORK_ONSTACK(&local_work.w, copy_hashes_authenticate_chunks);
 		schedule_work_on(cpu, &local_work.w);
 		wait_for_completion(&ifs_done);
 		if (ifsd->loading_error) {
 			ret = -EIO;
 			goto out;
 		}
-		ifsd->pkg_auth[curr_pkg] = 1;
+		ifs_pkg_auth[curr_pkg] = 1;
 	}
 	ret = 0;
+	ifsd->loaded_version = ifs_header_ptr->rev;
 out:
 	cpus_read_unlock();
 
@@ -226,7 +364,7 @@ out:
 
 static int image_sanity_check(struct device *dev, const struct microcode_header_intel *data)
 {
-	struct ucode_cpu_info uci;
+	struct cpu_signature sig;
 
 	/* Provide a specific error message when loading an older/unsupported image */
 	if (data->hdrver != MC_HEADER_TYPE_IFS) {
@@ -239,11 +377,9 @@ static int image_sanity_check(struct device *dev, const struct microcode_header_
 		return -EINVAL;
 	}
 
-	intel_cpu_collect_info(&uci);
+	intel_collect_cpu_info(&sig);
 
-	if (!intel_find_matching_signature((void *)data,
-					   uci.cpu_sig.sig,
-					   uci.cpu_sig.pf)) {
+	if (!intel_find_matching_signature((void *)data, &sig)) {
 		dev_err(dev, "cpu signature, processor flags not matching\n");
 		return -EINVAL;
 	}
@@ -257,14 +393,16 @@ static int image_sanity_check(struct device *dev, const struct microcode_header_
 */
 int ifs_load_firmware(struct device *dev)
 {
+	const struct ifs_test_caps *test = ifs_get_test_caps(dev);
 	struct ifs_data *ifsd = ifs_get_data(dev);
+	unsigned int expected_size;
 	const struct firmware *fw;
 	char scan_path[64];
-	int ret = -EINVAL;
+	int ret;
 
-	snprintf(scan_path, sizeof(scan_path), "intel/ifs_%d/%02x-%02x-%02x-%02x.scan",
-		 ifsd->test_num, boot_cpu_data.x86, boot_cpu_data.x86_model,
-		 boot_cpu_data.x86_stepping, ifsd->cur_batch);
+	snprintf(scan_path, sizeof(scan_path), "intel/ifs_%d/%02x-%02x-%02x-%02x.%s",
+		 test->test_num, boot_cpu_data.x86, boot_cpu_data.x86_model,
+		 boot_cpu_data.x86_stepping, ifsd->cur_batch, test->image_suffix);
 
 	ret = request_firmware_direct(&fw, scan_path, dev);
 	if (ret) {
@@ -272,6 +410,14 @@ int ifs_load_firmware(struct device *dev)
 		goto done;
 	}
 
+	expected_size = ((struct microcode_header_intel *)fw->data)->totalsize;
+	if (fw->size != expected_size) {
+		dev_err(dev, "File size mismatch (expected %u, actual %zu). Corrupted IFS image.\n",
+			expected_size, fw->size);
+		ret = -EINVAL;
+		goto release;
+	}
+
 	ret = image_sanity_check(dev, (struct microcode_header_intel *)fw->data);
 	if (ret)
 		goto release;
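scan_chunks_sanity_check() above pins the hash copy to one CPU per package with an on-stack work item and a completion. The bare pattern, reduced to illustrative names:

#include <linux/completion.h>
#include <linux/workqueue.h>

struct example_work {
	struct work_struct w;
	struct completion done;
};

static void example_fn(struct work_struct *w)
{
	struct example_work *ew = container_of(w, struct example_work, w);

	/* executes on the CPU selected by schedule_work_on() */
	complete(&ew->done);
}

static void example_run_on(int cpu)
{
	struct example_work ew;

	init_completion(&ew.done);
	INIT_WORK_ONSTACK(&ew.w, example_fn);	/* on-stack works need this variant */
	schedule_work_on(cpu, &ew.w);
	wait_for_completion(&ew.done);
	destroy_work_on_stack(&ew.w);
}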
- */ - if (status.control_error) { - dev_err(dev, "CPU(s) %*pbl: could not execute from loaded scan image. Batch: %02x version: 0x%x\n", - cpumask_pr_args(cpu_smt_mask(cpu)), ifsd->cur_batch, ifsd->loaded_version); - } - - /* * signature_error is set when the output from the scan chains does not * match the expected signature. This might be a transient problem (e.g. * due to a bit flip from an alpha particle or neutron). If the problem @@ -123,24 +144,64 @@ static bool can_restart(union ifs_status status) case IFS_MISMATCH_ARGUMENTS_BETWEEN_THREADS: case IFS_CORE_NOT_CAPABLE_CURRENTLY: case IFS_UNASSIGNED_ERROR_CODE: + case IFS_UNASSIGNED_ERROR_CODE_0xA: + case IFS_CORRUPTED_CHUNK: break; } return false; } +#define SPINUNIT 100 /* 100 nsec */ +static atomic_t array_cpus_in; +static atomic_t scan_cpus_in; +static atomic_t sbaf_cpus_in; + +/* + * Simplified cpu sibling rendezvous loop based on microcode loader __wait_for_cpus() + */ +static void wait_for_sibling_cpu(atomic_t *t, long long timeout) +{ + int cpu = smp_processor_id(); + const struct cpumask *smt_mask = cpu_smt_mask(cpu); + int all_cpus = cpumask_weight(smt_mask); + + atomic_inc(t); + while (atomic_read(t) < all_cpus) { + if (timeout < SPINUNIT) + return; + ndelay(SPINUNIT); + timeout -= SPINUNIT; + touch_nmi_watchdog(); + } +} + /* * Execute the scan. Called "simultaneously" on all threads of a core * at high priority using the stop_cpus mechanism. */ static int doscan(void *data) { - int cpu = smp_processor_id(); - u64 *msrs = data; + int cpu = smp_processor_id(), start, stop; + struct run_params *params = data; + union ifs_status status; + struct ifs_data *ifsd; int first; + ifsd = params->ifsd; + + if (ifsd->generation) { + start = params->activate->gen2.start; + stop = params->activate->gen2.stop; + } else { + start = params->activate->gen0.start; + stop = params->activate->gen0.stop; + } + /* Only the first logical CPU on a core reports result */ first = cpumask_first(cpu_smt_mask(cpu)); + wait_for_sibling_cpu(&scan_cpus_in, NSEC_PER_SEC); + /* * This WRMSR will wait for other HT threads to also write * to this MSR (at most for activate.delay cycles). Then it @@ -149,12 +210,14 @@ static int doscan(void *data) * take up to 200 milliseconds (in the case where all chunks * are processed in a single pass) before it retires. 
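wait_for_sibling_cpu() above is the rendezvous that keeps HT siblings from starting a scan alone. The same pattern, modelled stand-alone with C11 atomics; the busy-wait stands in for ndelay() and the watchdog touch, and the boolean result is an illustration only (the kernel helper falls through either way):

#include <stdatomic.h>
#include <stdbool.h>

#define SPINUNIT 100	/* nsec per spin step, as in the driver */

static bool rendezvous(atomic_int *arrived, int all_cpus, long long timeout)
{
	/* Announce this thread, then wait for the rest of the core. */
	atomic_fetch_add(arrived, 1);
	while (atomic_load(arrived) < all_cpus) {
		if (timeout < SPINUNIT)
			return false;	/* timed out; caller proceeds anyway */
		timeout -= SPINUNIT;	/* ndelay(SPINUNIT) would sit here */
	}
	return true;
}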
*/ - wrmsrl(MSR_ACTIVATE_SCAN, msrs[0]); + wrmsrq(MSR_ACTIVATE_SCAN, params->activate->data); + rdmsrq(MSR_SCAN_STATUS, status.data); - if (cpu == first) { - /* Pass back the result of the scan */ - rdmsrl(MSR_SCAN_STATUS, msrs[1]); - } + trace_ifs_status(ifsd->cur_batch, start, stop, status.data); + + /* Pass back the result of the scan */ + if (cpu == first) + params->status = status; return 0; } @@ -167,42 +230,54 @@ static int doscan(void *data) */ static void ifs_test_core(int cpu, struct device *dev) { + union ifs_status status = {}; union ifs_scan activate; - union ifs_status status; unsigned long timeout; struct ifs_data *ifsd; - u64 msrvals[2]; + int to_start, to_stop; + int status_chunk; + struct run_params params; int retries; ifsd = ifs_get_data(dev); - activate.rsvd = 0; + activate.gen0.rsvd = 0; activate.delay = IFS_THREAD_WAIT; activate.sigmce = 0; - activate.start = 0; - activate.stop = ifsd->valid_chunks - 1; + to_start = 0; + to_stop = ifsd->valid_chunks - 1; + + params.ifsd = ifs_get_data(dev); + + if (ifsd->generation) { + activate.gen2.start = to_start; + activate.gen2.stop = to_stop; + } else { + activate.gen0.start = to_start; + activate.gen0.stop = to_stop; + } timeout = jiffies + HZ / 2; retries = MAX_IFS_RETRIES; - while (activate.start <= activate.stop) { + while (to_start <= to_stop) { if (time_after(jiffies, timeout)) { status.error_code = IFS_SW_TIMEOUT; break; } - msrvals[0] = activate.data; - stop_core_cpuslocked(cpu, doscan, msrvals); - - status.data = msrvals[1]; + params.activate = &activate; + atomic_set(&scan_cpus_in, 0); + stop_core_cpuslocked(cpu, doscan, &params); - trace_ifs_status(cpu, activate, status); + status = params.status; /* Some cases can be retried, give up for others */ if (!can_restart(status)) break; - if (status.chunk_num == activate.start) { + status_chunk = ifsd->generation ? status.gen2.chunk_num : status.gen0.chunk_num; + if (status_chunk == to_start) { /* Check for forward progress */ if (--retries == 0) { if (status.error_code == IFS_NO_ERROR) @@ -211,17 +286,21 @@ static void ifs_test_core(int cpu, struct device *dev) } } else { retries = MAX_IFS_RETRIES; - activate.start = status.chunk_num; + if (ifsd->generation) + activate.gen2.start = status_chunk; + else + activate.gen0.start = status_chunk; + to_start = status_chunk; } } /* Update status for this core */ ifsd->scan_details = status.data; - if (status.control_error || status.signature_error) { + if (status.signature_error) { ifsd->status = SCAN_TEST_FAIL; message_fail(dev, cpu, status); - } else if (status.error_code) { + } else if (status.control_error || status.error_code) { ifsd->status = SCAN_NOT_TESTED; message_not_tested(dev, cpu, status); } else { @@ -229,6 +308,313 @@ static void ifs_test_core(int cpu, struct device *dev) } } +static int do_array_test(void *data) +{ + union ifs_array *command = data; + int cpu = smp_processor_id(); + int first; + + wait_for_sibling_cpu(&array_cpus_in, NSEC_PER_SEC); + + /* + * Only one logical CPU on a core needs to trigger the Array test via MSR write.
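The chunk loop in ifs_test_core() above embodies a forward-progress rule: the same chunk may be retried at most MAX_IFS_RETRIES times, and any advance in the reported chunk number refills the budget. The control flow in isolation (run_chunk() is a hypothetical stand-in for the stop_core_cpuslocked()/doscan() pass and returns the first chunk still pending; the jiffies timeout is omitted for brevity):

#define MAX_IFS_RETRIES 5

extern int run_chunk(int start, int stop);	/* hypothetical scan pass */

static int scan_with_forward_progress(int start, int stop)
{
	int retries = MAX_IFS_RETRIES;

	while (start <= stop) {
		int next = run_chunk(start, stop);

		if (next == start) {
			/* No forward progress: spend one retry. */
			if (--retries == 0)
				return -1;	/* partial completion */
		} else {
			/* Progress: resume from the reported chunk. */
			retries = MAX_IFS_RETRIES;
			start = next;
		}
	}
	return 0;
}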
+ */ + first = cpumask_first(cpu_smt_mask(cpu)); + + if (cpu == first) { + wrmsrq(MSR_ARRAY_BIST, command->data); + /* Pass back the result of the test */ + rdmsrq(MSR_ARRAY_BIST, command->data); + } + + return 0; +} + +static void ifs_array_test_core(int cpu, struct device *dev) +{ + union ifs_array command = {}; + bool timed_out = false; + struct ifs_data *ifsd; + unsigned long timeout; + + ifsd = ifs_get_data(dev); + + command.array_bitmask = ~0U; + timeout = jiffies + HZ / 2; + + do { + if (time_after(jiffies, timeout)) { + timed_out = true; + break; + } + atomic_set(&array_cpus_in, 0); + stop_core_cpuslocked(cpu, do_array_test, &command); + + if (command.ctrl_result) + break; + } while (command.array_bitmask); + + ifsd->scan_details = command.data; + + if (command.ctrl_result) + ifsd->status = SCAN_TEST_FAIL; + else if (timed_out || command.array_bitmask) + ifsd->status = SCAN_NOT_TESTED; + else + ifsd->status = SCAN_TEST_PASS; +} + +#define ARRAY_GEN1_TEST_ALL_ARRAYS 0x0ULL +#define ARRAY_GEN1_STATUS_FAIL 0x1ULL + +static int do_array_test_gen1(void *status) +{ + int cpu = smp_processor_id(); + int first; + + first = cpumask_first(cpu_smt_mask(cpu)); + + if (cpu == first) { + wrmsrq(MSR_ARRAY_TRIGGER, ARRAY_GEN1_TEST_ALL_ARRAYS); + rdmsrq(MSR_ARRAY_STATUS, *((u64 *)status)); + } + + return 0; +} + +static void ifs_array_test_gen1(int cpu, struct device *dev) +{ + struct ifs_data *ifsd = ifs_get_data(dev); + u64 status = 0; + + stop_core_cpuslocked(cpu, do_array_test_gen1, &status); + ifsd->scan_details = status; + + if (status & ARRAY_GEN1_STATUS_FAIL) + ifsd->status = SCAN_TEST_FAIL; + else + ifsd->status = SCAN_TEST_PASS; +} + +#define SBAF_STATUS_PASS 0 +#define SBAF_STATUS_SIGN_FAIL 1 +#define SBAF_STATUS_INTR 2 +#define SBAF_STATUS_TEST_FAIL 3 + +enum sbaf_status_err_code { + IFS_SBAF_NO_ERROR = 0, + IFS_SBAF_OTHER_THREAD_COULD_NOT_JOIN = 1, + IFS_SBAF_INTERRUPTED_BEFORE_RENDEZVOUS = 2, + IFS_SBAF_UNASSIGNED_ERROR_CODE3 = 3, + IFS_SBAF_INVALID_BUNDLE_INDEX = 4, + IFS_SBAF_MISMATCH_ARGS_BETWEEN_THREADS = 5, + IFS_SBAF_CORE_NOT_CAPABLE_CURRENTLY = 6, + IFS_SBAF_UNASSIGNED_ERROR_CODE7 = 7, + IFS_SBAF_EXCEED_NUMBER_OF_THREADS_CONCURRENT = 8, + IFS_SBAF_INTERRUPTED_DURING_EXECUTION = 9, + IFS_SBAF_INVALID_PROGRAM_INDEX = 0xA, + IFS_SBAF_CORRUPTED_CHUNK = 0xB, + IFS_SBAF_DID_NOT_START = 0xC, +}; + +static const char * const sbaf_test_status[] = { + [IFS_SBAF_NO_ERROR] = "SBAF no error", + [IFS_SBAF_OTHER_THREAD_COULD_NOT_JOIN] = "Other thread could not join.", + [IFS_SBAF_INTERRUPTED_BEFORE_RENDEZVOUS] = "Interrupt occurred prior to SBAF coordination.", + [IFS_SBAF_UNASSIGNED_ERROR_CODE3] = "Unassigned error code 0x3", + [IFS_SBAF_INVALID_BUNDLE_INDEX] = "Non-valid sbaf bundles. 
Reload test image", + [IFS_SBAF_MISMATCH_ARGS_BETWEEN_THREADS] = "Mismatch in arguments between threads T0/T1.", + [IFS_SBAF_CORE_NOT_CAPABLE_CURRENTLY] = "Core not capable of performing SBAF currently", + [IFS_SBAF_UNASSIGNED_ERROR_CODE7] = "Unassigned error code 0x7", + [IFS_SBAF_EXCEED_NUMBER_OF_THREADS_CONCURRENT] = "Exceeded number of Logical Processors (LP) allowed to run Scan-At-Field concurrently", + [IFS_SBAF_INTERRUPTED_DURING_EXECUTION] = "Interrupt occurred prior to SBAF start", + [IFS_SBAF_INVALID_PROGRAM_INDEX] = "SBAF program index not valid", + [IFS_SBAF_CORRUPTED_CHUNK] = "SBAF operation aborted due to corrupted chunk", + [IFS_SBAF_DID_NOT_START] = "SBAF operation did not start", +}; + +static void sbaf_message_not_tested(struct device *dev, int cpu, u64 status_data) +{ + union ifs_sbaf_status status = (union ifs_sbaf_status)status_data; + + if (status.error_code < ARRAY_SIZE(sbaf_test_status)) { + dev_info(dev, "CPU(s) %*pbl: SBAF operation did not start. %s\n", + cpumask_pr_args(cpu_smt_mask(cpu)), + sbaf_test_status[status.error_code]); + } else if (status.error_code == IFS_SW_TIMEOUT) { + dev_info(dev, "CPU(s) %*pbl: software timeout during scan\n", + cpumask_pr_args(cpu_smt_mask(cpu))); + } else if (status.error_code == IFS_SW_PARTIAL_COMPLETION) { + dev_info(dev, "CPU(s) %*pbl: %s\n", + cpumask_pr_args(cpu_smt_mask(cpu)), + "Not all SBAF bundles executed. Maximum forward progress retries exceeded"); + } else { + dev_info(dev, "CPU(s) %*pbl: SBAF unknown status %llx\n", + cpumask_pr_args(cpu_smt_mask(cpu)), status.data); + } +} + +static void sbaf_message_fail(struct device *dev, int cpu, union ifs_sbaf_status status) +{ + /* Failed signature check is set when SBAF signature did not match the expected value */ + if (status.sbaf_status == SBAF_STATUS_SIGN_FAIL) { + dev_err(dev, "CPU(s) %*pbl: Failed signature check\n", + cpumask_pr_args(cpu_smt_mask(cpu))); + } + + /* Failed to reach end of test */ + if (status.sbaf_status == SBAF_STATUS_TEST_FAIL) { + dev_err(dev, "CPU(s) %*pbl: Failed to complete test\n", + cpumask_pr_args(cpu_smt_mask(cpu))); + } +} + +static bool sbaf_bundle_completed(union ifs_sbaf_status status) +{ + return !(status.sbaf_status || status.error_code); +} + +static bool sbaf_can_restart(union ifs_sbaf_status status) +{ + enum sbaf_status_err_code err_code = status.error_code; + + /* Signature for chunk is bad, or scan test failed */ + if (status.sbaf_status == SBAF_STATUS_SIGN_FAIL || + status.sbaf_status == SBAF_STATUS_TEST_FAIL) + return false; + + switch (err_code) { + case IFS_SBAF_NO_ERROR: + case IFS_SBAF_OTHER_THREAD_COULD_NOT_JOIN: + case IFS_SBAF_INTERRUPTED_BEFORE_RENDEZVOUS: + case IFS_SBAF_EXCEED_NUMBER_OF_THREADS_CONCURRENT: + case IFS_SBAF_INTERRUPTED_DURING_EXECUTION: + return true; + case IFS_SBAF_UNASSIGNED_ERROR_CODE3: + case IFS_SBAF_INVALID_BUNDLE_INDEX: + case IFS_SBAF_MISMATCH_ARGS_BETWEEN_THREADS: + case IFS_SBAF_CORE_NOT_CAPABLE_CURRENTLY: + case IFS_SBAF_UNASSIGNED_ERROR_CODE7: + case IFS_SBAF_INVALID_PROGRAM_INDEX: + case IFS_SBAF_CORRUPTED_CHUNK: + case IFS_SBAF_DID_NOT_START: + break; + } + return false; +} + +/* + * Execute the SBAF test. Called "simultaneously" on all threads of a core + * at high priority using the stop_cpus mechanism. 
+ */ +static int dosbaf(void *data) +{ + struct sbaf_run_params *run_params = data; + int cpu = smp_processor_id(); + union ifs_sbaf_status status; + struct ifs_data *ifsd; + int first; + + ifsd = run_params->ifsd; + + /* Only the first logical CPU on a core reports result */ + first = cpumask_first(cpu_smt_mask(cpu)); + wait_for_sibling_cpu(&sbaf_cpus_in, NSEC_PER_SEC); + + /* + * This WRMSR will wait for other HT threads to also write + * to this MSR (at most for activate.delay cycles). Then it + * starts scan of each requested bundle. The core test happens + * during the "execution" of the WRMSR. + */ + wrmsrq(MSR_ACTIVATE_SBAF, run_params->activate->data); + rdmsrq(MSR_SBAF_STATUS, status.data); + trace_ifs_sbaf(ifsd->cur_batch, *run_params->activate, status); + + /* Pass back the result of the test */ + if (cpu == first) + run_params->status = status; + + return 0; +} + +static void ifs_sbaf_test_core(int cpu, struct device *dev) +{ + struct sbaf_run_params run_params; + union ifs_sbaf_status status = {}; + union ifs_sbaf activate; + unsigned long timeout; + struct ifs_data *ifsd; + int stop_bundle; + int retries; + + ifsd = ifs_get_data(dev); + + activate.data = 0; + activate.delay = IFS_THREAD_WAIT; + + timeout = jiffies + 2 * HZ; + retries = MAX_IFS_RETRIES; + activate.bundle_idx = 0; + stop_bundle = ifsd->max_bundle; + + while (activate.bundle_idx <= stop_bundle) { + if (time_after(jiffies, timeout)) { + status.error_code = IFS_SW_TIMEOUT; + break; + } + + atomic_set(&sbaf_cpus_in, 0); + + run_params.ifsd = ifsd; + run_params.activate = &activate; + run_params.retry_cnt = &retries; + stop_core_cpuslocked(cpu, dosbaf, &run_params); + + status = run_params.status; + + if (sbaf_bundle_completed(status)) { + activate.bundle_idx = status.bundle_idx + 1; + activate.pgm_idx = 0; + retries = MAX_IFS_RETRIES; + continue; + } + + /* Some cases can be retried, give up for others */ + if (!sbaf_can_restart(status)) + break; + + if (status.pgm_idx == activate.pgm_idx) { + /* If no progress retry */ + if (--retries == 0) { + if (status.error_code == IFS_NO_ERROR) + status.error_code = IFS_SW_PARTIAL_COMPLETION; + break; + } + } else { + /* if some progress, more pgms remaining in bundle, reset retries */ + retries = MAX_IFS_RETRIES; + activate.bundle_idx = status.bundle_idx; + activate.pgm_idx = status.pgm_idx; + } + } + + /* Update status for this core */ + ifsd->scan_details = status.data; + + if (status.sbaf_status == SBAF_STATUS_SIGN_FAIL || + status.sbaf_status == SBAF_STATUS_TEST_FAIL) { + ifsd->status = SCAN_TEST_FAIL; + sbaf_message_fail(dev, cpu, status); + } else if (status.error_code || status.sbaf_status == SBAF_STATUS_INTR || + (activate.bundle_idx < stop_bundle)) { + ifsd->status = SCAN_NOT_TESTED; + sbaf_message_not_tested(dev, cpu, status.data); + } else { + ifsd->status = SCAN_TEST_PASS; + } +} + /* * Initiate per core test. It wakes up work queue threads on the target cpu and * its sibling cpu. 
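ifs_sbaf_test_core() above applies the same forward-progress idea at two levels: a completed bundle advances bundle_idx and resets pgm_idx, while progress inside a bundle only refreshes the retry budget. A schematic of that walk, under the assumption that the hardware reports positions monotonically (dosbaf_once() is a hypothetical stand-in for the stop-machine WRMSR pass and reports the next position to run; the 2-second timeout is omitted):

struct sbaf_pos { int bundle; int pgm; };

extern struct sbaf_pos dosbaf_once(struct sbaf_pos at);	/* hypothetical */

static int sbaf_walk(int stop_bundle)
{
	struct sbaf_pos at = { 0, 0 };
	int retries = 5;	/* MAX_IFS_RETRIES */

	while (at.bundle <= stop_bundle) {
		struct sbaf_pos got = dosbaf_once(at);

		if (got.bundle > at.bundle) {
			/* Bundle finished: move on, restart program index. */
			at.bundle = got.bundle;
			at.pgm = 0;
			retries = 5;
		} else if (got.pgm != at.pgm) {
			/* Progress within the bundle: refresh the budget. */
			at = got;
			retries = 5;
		} else if (--retries == 0) {
			return -1;	/* stuck on the same program */
		}
	}
	return 0;
}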
Once all sibling threads wake up, the scan test gets executed and @@ -236,6 +622,8 @@ static void ifs_test_core(int cpu, struct device *dev) */ int do_core_test(int cpu, struct device *dev) { + const struct ifs_test_caps *test = ifs_get_test_caps(dev); + struct ifs_data *ifsd = ifs_get_data(dev); int ret = 0; /* Prevent CPUs from being taken offline during the scan test */ @@ -247,7 +635,28 @@ int do_core_test(int cpu, struct device *dev) goto out; } - ifs_test_core(cpu, dev); + switch (test->test_num) { + case IFS_TYPE_SAF: + if (!ifsd->loaded) + ret = -EPERM; + else + ifs_test_core(cpu, dev); + break; + case IFS_TYPE_ARRAY_BIST: + if (ifsd->array_gen == ARRAY_GEN0) + ifs_array_test_core(cpu, dev); + else + ifs_array_test_gen1(cpu, dev); + break; + case IFS_TYPE_SBAF: + if (!ifsd->loaded) + ret = -EPERM; + else + ifs_sbaf_test_core(cpu, dev); + break; + default: + ret = -EINVAL; + } out: cpus_read_unlock(); return ret; } diff --git a/drivers/platform/x86/intel/ifs/sysfs.c b/drivers/platform/x86/intel/ifs/sysfs.c index ee636a76b083..01b7502f46b0 100644 --- a/drivers/platform/x86/intel/ifs/sysfs.c +++ b/drivers/platform/x86/intel/ifs/sysfs.c @@ -13,7 +13,7 @@ * Protects against simultaneous tests on multiple cores, or * reloading the scan file while a test is in progress */ -static DEFINE_SEMAPHORE(ifs_sem); +static DEFINE_SEMAPHORE(ifs_sem, 1); /* * The sysfs interface to check additional details of last test @@ -64,7 +64,6 @@ static ssize_t run_test_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct ifs_data *ifsd = ifs_get_data(dev); unsigned int cpu; int rc; @@ -75,10 +74,7 @@ static ssize_t run_test_store(struct device *dev, if (down_interruptible(&ifs_sem)) return -EINTR; - if (!ifsd->loaded) - rc = -EPERM; - else - rc = do_core_test(cpu, dev); + rc = do_core_test(cpu, dev); up(&ifs_sem); @@ -141,7 +137,7 @@ static ssize_t image_version_show(struct device *dev, static DEVICE_ATTR_RO(image_version); /* global scan sysfs attributes */ -static struct attribute *plat_ifs_attrs[] = { +struct attribute *plat_ifs_attrs[] = { &dev_attr_details.attr, &dev_attr_status.attr, &dev_attr_run_test.attr, @@ -150,9 +146,10 @@ static struct attribute *plat_ifs_attrs[] = { NULL }; -ATTRIBUTE_GROUPS(plat_ifs); - -const struct attribute_group **ifs_get_groups(void) -{ - return plat_ifs_groups; -} +/* global array sysfs attributes */ +struct attribute *plat_ifs_array_attrs[] = { + &dev_attr_details.attr, + &dev_attr_status.attr, + &dev_attr_run_test.attr, + NULL +}; diff --git a/drivers/platform/x86/intel/int0002_vgpio.c b/drivers/platform/x86/intel/int0002_vgpio.c index 97cfbc520a02..6f5629dc3f8d 100644 --- a/drivers/platform/x86/intel/int0002_vgpio.c +++ b/drivers/platform/x86/intel/int0002_vgpio.c @@ -23,7 +23,7 @@ * ACPI mechanisms, this is not a real GPIO at all. * * This driver will bind to the INT0002 device, and register as a GPIO - * controller, letting gpiolib-acpi.c call the _L02 handler as it would + * controller, letting gpiolib-acpi call the _L02 handler as it would * for a real GPIO controller.
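With the switch in do_core_test() above, the image-loaded check moved out of run_test_store() and became a per-test-type concern (array BIST needs no image at all). The dispatch reduced to its essentials, with hypothetical run_*() helpers standing in for the per-type test routines:

#include <errno.h>
#include <stdbool.h>

enum test_type { TYPE_SAF, TYPE_ARRAY_BIST, TYPE_SBAF };

extern int run_saf(void), run_array_gen0(void), run_array_gen1(void), run_sbaf(void);

static int dispatch_test(enum test_type type, bool loaded, int array_gen)
{
	switch (type) {
	case TYPE_SAF:
		return loaded ? run_saf() : -EPERM;	/* needs a scan image */
	case TYPE_ARRAY_BIST:
		return array_gen == 0 ? run_array_gen0() : run_array_gen1();
	case TYPE_SBAF:
		return loaded ? run_sbaf() : -EPERM;	/* needs an SBAF image */
	default:
		return -EINVAL;
	}
}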
*/ @@ -65,9 +65,10 @@ static int int0002_gpio_get(struct gpio_chip *chip, unsigned int offset) return 0; } -static void int0002_gpio_set(struct gpio_chip *chip, unsigned int offset, - int value) +static int int0002_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) { + return 0; } static int int0002_gpio_direction_output(struct gpio_chip *chip, @@ -83,8 +84,12 @@ static void int0002_irq_ack(struct irq_data *data) static void int0002_irq_unmask(struct irq_data *data) { + struct gpio_chip *gc = irq_data_get_irq_chip_data(data); + irq_hw_number_t hwirq = irqd_to_hwirq(data); u32 gpe_en_reg; + gpiochip_enable_irq(gc, hwirq); + gpe_en_reg = inl(GPE0A_EN_PORT); gpe_en_reg |= GPE0A_PME_B0_EN_BIT; outl(gpe_en_reg, GPE0A_EN_PORT); @@ -92,11 +97,15 @@ static void int0002_irq_unmask(struct irq_data *data) static void int0002_irq_mask(struct irq_data *data) { + struct gpio_chip *gc = irq_data_get_irq_chip_data(data); + irq_hw_number_t hwirq = irqd_to_hwirq(data); u32 gpe_en_reg; gpe_en_reg = inl(GPE0A_EN_PORT); gpe_en_reg &= ~GPE0A_PME_B0_EN_BIT; outl(gpe_en_reg, GPE0A_EN_PORT); + + gpiochip_disable_irq(gc, hwirq); } static int int0002_irq_set_wake(struct irq_data *data, unsigned int on) @@ -140,12 +149,14 @@ static bool int0002_check_wake(void *data) return (gpe_sts_reg & GPE0A_PME_B0_STS_BIT); } -static struct irq_chip int0002_irqchip = { +static const struct irq_chip int0002_irqchip = { .name = DRV_NAME, .irq_ack = int0002_irq_ack, .irq_mask = int0002_irq_mask, .irq_unmask = int0002_irq_unmask, .irq_set_wake = int0002_irq_set_wake, + .flags = IRQCHIP_IMMUTABLE, + GPIOCHIP_IRQ_RESOURCE_HELPERS, }; static void int0002_init_irq_valid_mask(struct gpio_chip *chip, @@ -196,14 +207,14 @@ static int int0002_probe(struct platform_device *pdev) * IRQs into gpiolib. 
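The mask/unmask changes above follow the immutable-irqchip pattern: the chip struct is const with IRQCHIP_IMMUTABLE set, and the callbacks bracket the hardware access with gpiochip_enable_irq()/gpiochip_disable_irq(). A skeleton of that pattern, with the GPE0A port read-modify-write replaced by a hypothetical helper:

#include <linux/gpio/driver.h>
#include <linux/irq.h>
#include <linux/types.h>

/* Hypothetical device-specific enable bit, cf. the GPE0A_EN_PORT
 * inl()/outl() sequence in the hunk above. */
static void hw_set_irq_enable(irq_hw_number_t hwirq, bool enable) { }

static void example_irq_unmask(struct irq_data *data)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(data);

	/* Reclaim the line for IRQ use before enabling it. */
	gpiochip_enable_irq(gc, irqd_to_hwirq(data));
	hw_set_irq_enable(irqd_to_hwirq(data), true);
}

static void example_irq_mask(struct irq_data *data)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(data);

	hw_set_irq_enable(irqd_to_hwirq(data), false);
	/* Release the line only after the hardware is quiesced. */
	gpiochip_disable_irq(gc, irqd_to_hwirq(data));
}

static const struct irq_chip example_irqchip = {
	.name		= "example",
	.irq_mask	= example_irq_mask,
	.irq_unmask	= example_irq_unmask,
	.flags		= IRQCHIP_IMMUTABLE,
	GPIOCHIP_IRQ_RESOURCE_HELPERS,
};

The probe side then hands the const chip to gpiolib with gpio_irq_chip_set_chip() instead of assigning girq->chip directly, as the int0002 hunk below does.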
*/ ret = devm_request_irq(dev, irq, int0002_irq, - IRQF_SHARED, "INT0002", chip); + IRQF_ONESHOT | IRQF_SHARED, "INT0002", chip); if (ret) { dev_err(dev, "Error requesting IRQ %d: %d\n", irq, ret); return ret; } girq = &chip->irq; - girq->chip = &int0002_irqchip; + gpio_irq_chip_set_chip(girq, &int0002_irqchip); /* This lets us handle the parent IRQ in the driver */ girq->parent_handler = NULL; girq->num_parents = 0; @@ -223,11 +234,10 @@ static int int0002_probe(struct platform_device *pdev) return 0; } -static int int0002_remove(struct platform_device *pdev) +static void int0002_remove(struct platform_device *pdev) { device_init_wakeup(&pdev->dev, false); acpi_unregister_wakeup_handler(int0002_check_wake, NULL); - return 0; } static int int0002_suspend(struct device *dev) @@ -267,7 +277,7 @@ static const struct acpi_device_id int0002_acpi_ids[] = { MODULE_DEVICE_TABLE(acpi, int0002_acpi_ids); static struct platform_driver int0002_driver = { - .driver = { + .driver = { .name = DRV_NAME, .acpi_match_table = int0002_acpi_ids, .pm = &int0002_pm_ops, diff --git a/drivers/platform/x86/intel/int1092/intel_sar.c b/drivers/platform/x86/intel/int1092/intel_sar.c index e03943e6380a..e526841aff60 100644 --- a/drivers/platform/x86/intel/int1092/intel_sar.c +++ b/drivers/platform/x86/intel/int1092/intel_sar.c @@ -131,16 +131,15 @@ static acpi_status sar_get_device_mode(struct platform_device *device) acpi_status status = AE_OK; union acpi_object *out; u32 rev = 0; - int value; - out = acpi_evaluate_dsm(context->handle, &context->guid, rev, - COMMAND_ID_DEV_MODE, NULL); - if (get_int_value(out, &value)) { + out = acpi_evaluate_dsm_typed(context->handle, &context->guid, rev, + COMMAND_ID_DEV_MODE, NULL, ACPI_TYPE_INTEGER); + if (!out) { dev_err(&device->dev, "DSM cmd:%d Failed to retrieve value\n", COMMAND_ID_DEV_MODE); status = AE_ERROR; goto dev_mode_error; } - context->sar_data.device_mode = value; + context->sar_data.device_mode = out->integer.value; update_sar_data(context); sysfs_notify(&device->dev.kobj, NULL, SYSFS_DATANAME); @@ -221,11 +220,11 @@ static void sar_get_data(int reg, struct wwan_sar_context *context) req.type = ACPI_TYPE_INTEGER; req.integer.value = reg; - out = acpi_evaluate_dsm(context->handle, &context->guid, rev, - COMMAND_ID_CONFIG_TABLE, &req); + out = acpi_evaluate_dsm_typed(context->handle, &context->guid, rev, + COMMAND_ID_CONFIG_TABLE, &req, ACPI_TYPE_PACKAGE); if (!out) return; - if (out->type == ACPI_TYPE_PACKAGE && out->package.count >= 3 && + if (out->package.count >= 3 && out->package.elements[0].type == ACPI_TYPE_INTEGER && out->package.elements[1].type == ACPI_TYPE_INTEGER && out->package.elements[2].type == ACPI_TYPE_PACKAGE && @@ -293,7 +292,7 @@ r_free: return result; } -static int sar_remove(struct platform_device *device) +static void sar_remove(struct platform_device *device) { struct wwan_sar_context *context = dev_get_drvdata(&device->dev); int reg; @@ -305,7 +304,6 @@ static void sar_remove(struct platform_device *device) kfree(context->config_data[reg].device_mode_info); kfree(context); - return 0; } static struct platform_driver sar_driver = { diff --git a/drivers/platform/x86/intel/int3472/Kconfig b/drivers/platform/x86/intel/int3472/Kconfig index 62e5d4cf9ee5..17ae997f93ea 100644 --- a/drivers/platform/x86/intel/int3472/Kconfig +++ b/drivers/platform/x86/intel/int3472/Kconfig @@ -4,6 +4,7 @@ config INTEL_SKL_INT3472 depends on COMMON_CLK depends on I2C depends on GPIOLIB + depends on LEDS_CLASS depends on REGULATOR select MFD_CORE select REGMAP_I2C diff
--git a/drivers/platform/x86/intel/int3472/Makefile b/drivers/platform/x86/intel/int3472/Makefile index cfec7784c5c9..103661e6685d 100644 --- a/drivers/platform/x86/intel/int3472/Makefile +++ b/drivers/platform/x86/intel/int3472/Makefile @@ -1,4 +1,8 @@ obj-$(CONFIG_INTEL_SKL_INT3472) += intel_skl_int3472_discrete.o \ - intel_skl_int3472_tps68470.o -intel_skl_int3472_discrete-y := discrete.o clk_and_regulator.o common.o -intel_skl_int3472_tps68470-y := tps68470.o tps68470_board_data.o common.o + intel_skl_int3472_tps68470.o \ + intel_skl_int3472_common.o +intel_skl_int3472_discrete-y := discrete.o discrete_quirks.o \ + clk_and_regulator.o led.o +intel_skl_int3472_tps68470-y := tps68470.o tps68470_board_data.o + +intel_skl_int3472_common-y += common.o diff --git a/drivers/platform/x86/intel/int3472/clk_and_regulator.c b/drivers/platform/x86/intel/int3472/clk_and_regulator.c index 74dc2cff799e..9e052b164a1a 100644 --- a/drivers/platform/x86/intel/int3472/clk_and_regulator.c +++ b/drivers/platform/x86/intel/int3472/clk_and_regulator.c @@ -6,10 +6,44 @@ #include <linux/clk-provider.h> #include <linux/device.h> #include <linux/gpio/consumer.h> +#include <linux/platform_data/x86/int3472.h> #include <linux/regulator/driver.h> #include <linux/slab.h> -#include "common.h" +/* + * 82c0d13a-78c5-4244-9bb1-eb8b539a8d11 + * This _DSM GUID allows controlling the sensor clk when it is not controlled + * through a GPIO. + */ +static const guid_t img_clk_guid = + GUID_INIT(0x82c0d13a, 0x78c5, 0x4244, + 0x9b, 0xb1, 0xeb, 0x8b, 0x53, 0x9a, 0x8d, 0x11); + +static void skl_int3472_enable_clk(struct int3472_clock *clk, int enable) +{ + struct int3472_discrete_device *int3472 = to_int3472_device(clk); + union acpi_object args[3]; + union acpi_object argv4; + + if (clk->ena_gpio) { + gpiod_set_value_cansleep(clk->ena_gpio, enable); + return; + } + + args[0].integer.type = ACPI_TYPE_INTEGER; + args[0].integer.value = clk->imgclk_index; + args[1].integer.type = ACPI_TYPE_INTEGER; + args[1].integer.value = enable; + args[2].integer.type = ACPI_TYPE_INTEGER; + args[2].integer.value = 1; + + argv4.type = ACPI_TYPE_PACKAGE; + argv4.package.count = 3; + argv4.package.elements = args; + + acpi_evaluate_dsm(acpi_device_handle(int3472->adev), &img_clk_guid, + 0, 1, &argv4); +} /* * The regulators have to have .ops to be valid, but the only ops we actually @@ -20,20 +54,13 @@ static const struct regulator_ops int3472_gpio_regulator_ops; static int skl_int3472_clk_prepare(struct clk_hw *hw) { - struct int3472_gpio_clock *clk = to_int3472_clk(hw); - - gpiod_set_value_cansleep(clk->ena_gpio, 1); - gpiod_set_value_cansleep(clk->led_gpio, 1); - + skl_int3472_enable_clk(to_int3472_clk(hw), 1); return 0; } static void skl_int3472_clk_unprepare(struct clk_hw *hw) { - struct int3472_gpio_clock *clk = to_int3472_clk(hw); - - gpiod_set_value_cansleep(clk->ena_gpio, 0); - gpiod_set_value_cansleep(clk->led_gpio, 0); + skl_int3472_enable_clk(to_int3472_clk(hw), 0); } static int skl_int3472_clk_enable(struct clk_hw *hw) @@ -76,7 +103,7 @@ static unsigned int skl_int3472_get_clk_frequency(struct int3472_discrete_device static unsigned long skl_int3472_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { - struct int3472_gpio_clock *clk = to_int3472_clk(hw); + struct int3472_clock *clk = to_int3472_clk(hw); return clk->frequency; } @@ -89,31 +116,28 @@ static const struct clk_ops skl_int3472_clock_ops = { .recalc_rate = skl_int3472_clk_recalc_rate, }; -int skl_int3472_register_clock(struct int3472_discrete_device *int3472) 
+static int skl_int3472_register_clock(struct int3472_discrete_device *int3472) { + struct acpi_device *adev = int3472->adev; struct clk_init_data init = { .ops = &skl_int3472_clock_ops, .flags = CLK_GET_RATE_NOCACHE, }; int ret; - init.name = kasprintf(GFP_KERNEL, "%s-clk", - acpi_dev_name(int3472->adev)); + init.name = kasprintf(GFP_KERNEL, "%s-clk", acpi_dev_name(adev)); if (!init.name) return -ENOMEM; int3472->clock.frequency = skl_int3472_get_clk_frequency(int3472); - int3472->clock.clk_hw.init = &init; - int3472->clock.clk = clk_register(&int3472->adev->dev, - &int3472->clock.clk_hw); + int3472->clock.clk = clk_register(&adev->dev, &int3472->clock.clk_hw); if (IS_ERR(int3472->clock.clk)) { ret = PTR_ERR(int3472->clock.clk); goto out_free_init_name; } - int3472->clock.cl = clkdev_create(int3472->clock.clk, NULL, - int3472->sensor_name); + int3472->clock.cl = clkdev_create(int3472->clock.clk, NULL, int3472->sensor_name); if (!int3472->clock.cl) { ret = -ENOMEM; goto err_unregister_clk; @@ -126,86 +150,107 @@ err_unregister_clk: clk_unregister(int3472->clock.clk); out_free_init_name: kfree(init.name); - return ret; } +int skl_int3472_register_dsm_clock(struct int3472_discrete_device *int3472) +{ + if (int3472->clock.cl) + return 0; /* A GPIO controlled clk has already been registered */ + + if (!acpi_check_dsm(int3472->adev->handle, &img_clk_guid, 0, BIT(1))) + return 0; /* DSM clock control is not available */ + + return skl_int3472_register_clock(int3472); +} + +int skl_int3472_register_gpio_clock(struct int3472_discrete_device *int3472, + struct gpio_desc *gpio) +{ + if (int3472->clock.cl) + return -EBUSY; + + int3472->clock.ena_gpio = gpio; + + return skl_int3472_register_clock(int3472); +} + void skl_int3472_unregister_clock(struct int3472_discrete_device *int3472) { + if (!int3472->clock.cl) + return; + clkdev_drop(int3472->clock.cl); clk_unregister(int3472->clock.clk); + gpiod_put(int3472->clock.ena_gpio); } int skl_int3472_register_regulator(struct int3472_discrete_device *int3472, - struct acpi_resource_gpio *agpio) + struct gpio_desc *gpio, + unsigned int enable_time, + const char *supply_name, + const char *second_sensor) { - const struct int3472_sensor_config *sensor_config; - char *path = agpio->resource_source.string_ptr; - struct regulator_consumer_supply supply_map; struct regulator_init_data init_data = { }; + struct int3472_gpio_regulator *regulator; struct regulator_config cfg = { }; - int ret; + int i, j; - sensor_config = int3472->sensor_config; - if (IS_ERR(sensor_config)) { - dev_err(int3472->dev, "No sensor module config\n"); - return PTR_ERR(sensor_config); + if (int3472->n_regulator_gpios >= INT3472_MAX_REGULATORS) { + dev_err(int3472->dev, "Too many regulators mapped\n"); + return -EINVAL; } - if (!sensor_config->supply_map.supply) { - dev_err(int3472->dev, "No supply name defined\n"); - return -ENODEV; + if (strlen(supply_name) >= GPIO_SUPPLY_NAME_LENGTH) { + dev_err(int3472->dev, "supply-name '%s' length too long\n", supply_name); + return -E2BIG; } - init_data.constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS; - init_data.num_consumer_supplies = 1; - supply_map = sensor_config->supply_map; - supply_map.dev_name = int3472->sensor_name; - init_data.consumer_supplies = &supply_map; - - snprintf(int3472->regulator.regulator_name, - sizeof(int3472->regulator.regulator_name), "%s-regulator", - acpi_dev_name(int3472->adev)); - snprintf(int3472->regulator.supply_name, - GPIO_REGULATOR_SUPPLY_NAME_LENGTH, "supply-0"); - - int3472->regulator.rdesc = 
INT3472_REGULATOR( - int3472->regulator.regulator_name, - int3472->regulator.supply_name, - &int3472_gpio_regulator_ops); - - int3472->regulator.gpio = acpi_get_and_request_gpiod(path, agpio->pin_table[0], - "int3472,regulator"); - if (IS_ERR(int3472->regulator.gpio)) { - dev_err(int3472->dev, "Failed to get regulator GPIO line\n"); - return PTR_ERR(int3472->regulator.gpio); + regulator = &int3472->regulators[int3472->n_regulator_gpios]; + string_upper(regulator->supply_name_upper, supply_name); + + /* The below code assumes that map-count is 2 (upper- and lower-case) */ + static_assert(GPIO_REGULATOR_SUPPLY_MAP_COUNT == 2); + + for (i = 0, j = 0; i < GPIO_REGULATOR_SUPPLY_MAP_COUNT; i++) { + const char *supply = i ? regulator->supply_name_upper : supply_name; + + regulator->supply_map[j].supply = supply; + regulator->supply_map[j].dev_name = int3472->sensor_name; + j++; + + if (second_sensor) { + regulator->supply_map[j].supply = supply; + regulator->supply_map[j].dev_name = second_sensor; + j++; + } } - /* Ensure the pin is in output mode and non-active state */ - gpiod_direction_output(int3472->regulator.gpio, 0); + init_data.constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS; + init_data.consumer_supplies = regulator->supply_map; + init_data.num_consumer_supplies = j; + + snprintf(regulator->regulator_name, sizeof(regulator->regulator_name), "%s-%s", + acpi_dev_name(int3472->adev), supply_name); + + regulator->rdesc = INT3472_REGULATOR(regulator->regulator_name, + &int3472_gpio_regulator_ops, + enable_time, GPIO_REGULATOR_OFF_ON_DELAY); cfg.dev = &int3472->adev->dev; cfg.init_data = &init_data; - cfg.ena_gpiod = int3472->regulator.gpio; - - int3472->regulator.rdev = regulator_register(int3472->dev, - &int3472->regulator.rdesc, - &cfg); - if (IS_ERR(int3472->regulator.rdev)) { - ret = PTR_ERR(int3472->regulator.rdev); - goto err_free_gpio; - } - - return 0; + cfg.ena_gpiod = gpio; -err_free_gpio: - gpiod_put(int3472->regulator.gpio); + regulator->rdev = regulator_register(int3472->dev, &regulator->rdesc, &cfg); + if (IS_ERR(regulator->rdev)) + return PTR_ERR(regulator->rdev); - return ret; + int3472->n_regulator_gpios++; + return 0; } void skl_int3472_unregister_regulator(struct int3472_discrete_device *int3472) { - regulator_unregister(int3472->regulator.rdev); - gpiod_put(int3472->regulator.gpio); + for (int i = 0; i < int3472->n_regulator_gpios; i++) + regulator_unregister(int3472->regulators[i].rdev); } diff --git a/drivers/platform/x86/intel/int3472/common.c b/drivers/platform/x86/intel/int3472/common.c index 9db2bb0bbba4..6dc38d5cbd0b 100644 --- a/drivers/platform/x86/intel/int3472/common.c +++ b/drivers/platform/x86/intel/int3472/common.c @@ -2,10 +2,9 @@ /* Author: Dan Scally <djrscally@gmail.com> */ #include <linux/acpi.h> +#include <linux/platform_data/x86/int3472.h> #include <linux/slab.h> -#include "common.h" - union acpi_object *skl_int3472_get_acpi_buffer(struct acpi_device *adev, char *id) { struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; @@ -29,6 +28,7 @@ union acpi_object *skl_int3472_get_acpi_buffer(struct acpi_device *adev, char *i return obj; } +EXPORT_SYMBOL_NS_GPL(skl_int3472_get_acpi_buffer, "INTEL_INT3472"); int skl_int3472_fill_cldb(struct acpi_device *adev, struct int3472_cldb *cldb) { @@ -52,6 +52,7 @@ out_free_obj: kfree(obj); return ret; } +EXPORT_SYMBOL_NS_GPL(skl_int3472_fill_cldb, "INTEL_INT3472"); /* sensor_adev_ret may be NULL, name_ret must not be NULL */ int skl_int3472_get_sensor_adev_and_name(struct device *dev, @@ -68,6 +69,8 @@ int
skl_int3472_get_sensor_adev_and_name(struct device *dev, return -ENODEV; } + dev_dbg(dev, "Sensor name %s\n", acpi_dev_name(sensor)); + *name_ret = devm_kasprintf(dev, GFP_KERNEL, I2C_DEV_NAME_FORMAT, acpi_dev_name(sensor)); if (!*name_ret) @@ -80,3 +83,8 @@ int skl_int3472_get_sensor_adev_and_name(struct device *dev, return ret; } +EXPORT_SYMBOL_NS_GPL(skl_int3472_get_sensor_adev_and_name, "INTEL_INT3472"); + +MODULE_DESCRIPTION("Intel SkyLake INT3472 ACPI Device Driver library"); +MODULE_AUTHOR("Daniel Scally <djrscally@gmail.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/intel/int3472/common.h b/drivers/platform/x86/intel/int3472/common.h deleted file mode 100644 index 53270d19c73a..000000000000 --- a/drivers/platform/x86/intel/int3472/common.h +++ /dev/null @@ -1,122 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Author: Dan Scally <djrscally@gmail.com> */ - -#ifndef _INTEL_SKL_INT3472_H -#define _INTEL_SKL_INT3472_H - -#include <linux/clk-provider.h> -#include <linux/gpio/machine.h> -#include <linux/regulator/driver.h> -#include <linux/regulator/machine.h> -#include <linux/types.h> - -/* FIXME drop this once the I2C_DEV_NAME_FORMAT macro has been added to include/linux/i2c.h */ -#ifndef I2C_DEV_NAME_FORMAT -#define I2C_DEV_NAME_FORMAT "i2c-%s" -#endif - -/* PMIC GPIO Types */ -#define INT3472_GPIO_TYPE_RESET 0x00 -#define INT3472_GPIO_TYPE_POWERDOWN 0x01 -#define INT3472_GPIO_TYPE_POWER_ENABLE 0x0b -#define INT3472_GPIO_TYPE_CLK_ENABLE 0x0c -#define INT3472_GPIO_TYPE_PRIVACY_LED 0x0d - -#define INT3472_PDEV_MAX_NAME_LEN 23 -#define INT3472_MAX_SENSOR_GPIOS 3 - -#define GPIO_REGULATOR_NAME_LENGTH 21 -#define GPIO_REGULATOR_SUPPLY_NAME_LENGTH 9 - -#define CIO2_SENSOR_SSDB_MCLKSPEED_OFFSET 86 - -#define INT3472_REGULATOR(_name, _supply, _ops) \ - (const struct regulator_desc) { \ - .name = _name, \ - .supply_name = _supply, \ - .type = REGULATOR_VOLTAGE, \ - .ops = _ops, \ - .owner = THIS_MODULE, \ - } - -#define to_int3472_clk(hw) \ - container_of(hw, struct int3472_gpio_clock, clk_hw) - -#define to_int3472_device(clk) \ - container_of(clk, struct int3472_discrete_device, clock) - -struct acpi_device; -struct i2c_client; -struct platform_device; - -struct int3472_cldb { - u8 version; - /* - * control logic type - * 0: UNKNOWN - * 1: DISCRETE(CRD-D) - * 2: PMIC TPS68470 - * 3: PMIC uP6641 - */ - u8 control_logic_type; - u8 control_logic_id; - u8 sensor_card_sku; - u8 reserved[28]; -}; - -struct int3472_gpio_function_remap { - const char *documented; - const char *actual; -}; - -struct int3472_sensor_config { - const char *sensor_module_name; - struct regulator_consumer_supply supply_map; - const struct int3472_gpio_function_remap *function_maps; -}; - -struct int3472_discrete_device { - struct acpi_device *adev; - struct device *dev; - struct acpi_device *sensor; - const char *sensor_name; - - const struct int3472_sensor_config *sensor_config; - - struct int3472_gpio_regulator { - char regulator_name[GPIO_REGULATOR_NAME_LENGTH]; - char supply_name[GPIO_REGULATOR_SUPPLY_NAME_LENGTH]; - struct gpio_desc *gpio; - struct regulator_dev *rdev; - struct regulator_desc rdesc; - } regulator; - - struct int3472_gpio_clock { - struct clk *clk; - struct clk_hw clk_hw; - struct clk_lookup *cl; - struct gpio_desc *ena_gpio; - struct gpio_desc *led_gpio; - u32 frequency; - } clock; - - unsigned int ngpios; /* how many GPIOs have we seen */ - unsigned int n_sensor_gpios; /* how many have we mapped to sensor */ - struct gpiod_lookup_table gpios; -}; - -union acpi_object 
*skl_int3472_get_acpi_buffer(struct acpi_device *adev, - char *id); -int skl_int3472_fill_cldb(struct acpi_device *adev, struct int3472_cldb *cldb); -int skl_int3472_get_sensor_adev_and_name(struct device *dev, - struct acpi_device **sensor_adev_ret, - const char **name_ret); - -int skl_int3472_register_clock(struct int3472_discrete_device *int3472); -void skl_int3472_unregister_clock(struct int3472_discrete_device *int3472); - -int skl_int3472_register_regulator(struct int3472_discrete_device *int3472, - struct acpi_resource_gpio *agpio); -void skl_int3472_unregister_regulator(struct int3472_discrete_device *int3472); - -#endif diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c index c42c3faa2c32..1505fc3ef7a8 100644 --- a/drivers/platform/x86/intel/int3472/discrete.c +++ b/drivers/platform/x86/intel/int3472/discrete.c @@ -2,20 +2,21 @@ /* Author: Dan Scally <djrscally@gmail.com> */ #include <linux/acpi.h> -#include <linux/clkdev.h> -#include <linux/clk-provider.h> +#include <linux/array_size.h> +#include <linux/bitfield.h> #include <linux/device.h> +#include <linux/dmi.h> #include <linux/gpio/consumer.h> #include <linux/gpio/machine.h> #include <linux/i2c.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/overflow.h> +#include <linux/platform_data/x86/int3472.h> #include <linux/platform_device.h> +#include <linux/string_choices.h> #include <linux/uuid.h> -#include "common.h" - /* * 79234640-9e10-4fea-a5c1-b5aa8b19756f * This _DSM GUID returns information about the GPIO lines mapped to a @@ -27,6 +28,10 @@ static const guid_t int3472_gpio_guid = GUID_INIT(0x79234640, 0x9e10, 0x4fea, 0xa5, 0xc1, 0xb5, 0xaa, 0x8b, 0x19, 0x75, 0x6f); +#define INT3472_GPIO_DSM_TYPE GENMASK(7, 0) +#define INT3472_GPIO_DSM_PIN GENMASK(15, 8) +#define INT3472_GPIO_DSM_SENSOR_ON_VAL GENMASK(31, 24) + /* * 822ace8f-2814-4174-a56b-5f029fe079ee * This _DSM GUID returns a string from the sensor device, which acts as a @@ -36,156 +41,199 @@ static const guid_t cio2_sensor_module_guid = GUID_INIT(0x822ace8f, 0x2814, 0x4174, 0xa5, 0x6b, 0x5f, 0x02, 0x9f, 0xe0, 0x79, 0xee); -/* - * Here follows platform specific mapping information that we can pass to - * the functions mapping resources to the sensors. Where the sensors have - * a power enable pin defined in DSDT we need to provide a supply name so - * the sensor drivers can find the regulator. The device name will be derived - * from the sensor's ACPI device within the code. Optionally, we can provide a - * NULL terminated array of function name mappings to deal with any platform - * specific deviations from the documented behaviour of GPIOs. - * - * Map a GPIO function name to NULL to prevent the driver from mapping that - * GPIO at all. 
- */ - -static const struct int3472_gpio_function_remap ov2680_gpio_function_remaps[] = { - { "reset", NULL }, - { "powerdown", "reset" }, - { } -}; - -static const struct int3472_sensor_config int3472_sensor_configs[] = { - /* Lenovo Miix 510-12ISK - OV2680, Front */ - { "GNDF140809R", { 0 }, ov2680_gpio_function_remaps }, - /* Lenovo Miix 510-12ISK - OV5648, Rear */ - { "GEFF150023R", REGULATOR_SUPPLY("avdd", NULL), NULL }, - /* Surface Go 1&2 - OV5693, Front */ - { "YHCU", REGULATOR_SUPPLY("avdd", NULL), NULL }, -}; - -static const struct int3472_sensor_config * -skl_int3472_get_sensor_module_config(struct int3472_discrete_device *int3472) +static void skl_int3472_log_sensor_module_name(struct int3472_discrete_device *int3472) { union acpi_object *obj; - unsigned int i; obj = acpi_evaluate_dsm_typed(int3472->sensor->handle, &cio2_sensor_module_guid, 0x00, 0x01, NULL, ACPI_TYPE_STRING); - - if (!obj) { - dev_err(int3472->dev, - "Failed to get sensor module string from _DSM\n"); - return ERR_PTR(-ENODEV); - } - - if (obj->string.type != ACPI_TYPE_STRING) { - dev_err(int3472->dev, - "Sensor _DSM returned a non-string value\n"); - + if (obj) { + dev_dbg(int3472->dev, "Sensor module id: '%s'\n", obj->string.pointer); ACPI_FREE(obj); - return ERR_PTR(-EINVAL); } +} - for (i = 0; i < ARRAY_SIZE(int3472_sensor_configs); i++) { - if (!strcmp(int3472_sensor_configs[i].sensor_module_name, - obj->string.pointer)) - break; - } +static int skl_int3472_fill_gpiod_lookup(struct gpiod_lookup *table_entry, + struct acpi_resource_gpio *agpio, + const char *con_id, unsigned long gpio_flags) +{ + char *path = agpio->resource_source.string_ptr; + struct acpi_device *adev; + acpi_handle handle; + acpi_status status; - ACPI_FREE(obj); + status = acpi_get_handle(NULL, path, &handle); + if (ACPI_FAILURE(status)) + return -EINVAL; + + adev = acpi_fetch_acpi_dev(handle); + if (!adev) + return -ENODEV; - if (i >= ARRAY_SIZE(int3472_sensor_configs)) - return ERR_PTR(-EINVAL); + *table_entry = GPIO_LOOKUP(acpi_dev_name(adev), agpio->pin_table[0], con_id, gpio_flags); - return &int3472_sensor_configs[i]; + return 0; } static int skl_int3472_map_gpio_to_sensor(struct int3472_discrete_device *int3472, struct acpi_resource_gpio *agpio, - const char *func, u32 polarity) + const char *con_id, unsigned long gpio_flags) { - const struct int3472_sensor_config *sensor_config; - char *path = agpio->resource_source.string_ptr; - struct gpiod_lookup *table_entry; - struct acpi_device *adev; - acpi_handle handle; - acpi_status status; + int ret; if (int3472->n_sensor_gpios >= INT3472_MAX_SENSOR_GPIOS) { dev_warn(int3472->dev, "Too many GPIOs mapped\n"); return -EINVAL; } - sensor_config = int3472->sensor_config; - if (!IS_ERR(sensor_config) && sensor_config->function_maps) { - const struct int3472_gpio_function_remap *remap; + ret = skl_int3472_fill_gpiod_lookup(&int3472->gpios.table[int3472->n_sensor_gpios], + agpio, con_id, gpio_flags); + if (ret) + return ret; - for (remap = sensor_config->function_maps; remap->documented; remap++) { - if (!strcmp(func, remap->documented)) { - func = remap->actual; - break; - } - } - } + int3472->n_sensor_gpios++; - /* Functions mapped to NULL should not be mapped to the sensor */ - if (!func) - return 0; + return 0; +} - status = acpi_get_handle(NULL, path, &handle); - if (ACPI_FAILURE(status)) - return -EINVAL; +/* This should *really* only be used when there's no other way... 
*/ +static struct gpio_desc * +skl_int3472_gpiod_get_from_temp_lookup(struct int3472_discrete_device *int3472, + struct acpi_resource_gpio *agpio, + const char *con_id, unsigned long gpio_flags) +{ + struct gpio_desc *desc; + int ret; - adev = acpi_fetch_acpi_dev(handle); - if (!adev) - return -ENODEV; + struct gpiod_lookup_table *lookup __free(kfree) = + kzalloc(struct_size(lookup, table, 2), GFP_KERNEL); + if (!lookup) + return ERR_PTR(-ENOMEM); - table_entry = &int3472->gpios.table[int3472->n_sensor_gpios]; - table_entry->key = acpi_dev_name(adev); - table_entry->chip_hwnum = agpio->pin_table[0]; - table_entry->con_id = func; - table_entry->idx = 0; - table_entry->flags = polarity; + lookup->dev_id = dev_name(int3472->dev); + ret = skl_int3472_fill_gpiod_lookup(&lookup->table[0], agpio, con_id, gpio_flags); + if (ret) + return ERR_PTR(ret); - int3472->n_sensor_gpios++; + gpiod_add_lookup_table(lookup); + desc = gpiod_get(int3472->dev, con_id, GPIOD_OUT_LOW); + gpiod_remove_lookup_table(lookup); - return 0; + return desc; } -static int skl_int3472_map_gpio_to_clk(struct int3472_discrete_device *int3472, - struct acpi_resource_gpio *agpio, u8 type) +/** + * struct int3472_gpio_map - Map GPIOs to whatever is expected by the + * sensor driver (as in DT bindings) + * @hid: The ACPI HID of the device without the instance number, e.g. INT347E + * @type_from: The GPIO type from ACPI ?SDT + * @type_to: The assigned GPIO type, typically same as @type_from + * @enable_time_us: Enable time in usec for GPIOs mapped to regulators + * @con_id: The name of the GPIO for the device + * @polarity_low: True if the GPIO is active-low, in which case GPIO_ACTIVE_LOW + * is used; GPIO_ACTIVE_HIGH otherwise + */ +struct int3472_gpio_map { + const char *hid; + u8 type_from; + u8 type_to; + bool polarity_low; + unsigned int enable_time_us; + const char *con_id; +}; + +static const struct int3472_gpio_map int3472_gpio_map[] = { + { /* mt9m114 designs declare a powerdown pin which controls the regulators */ + .hid = "INT33F0", + .type_from = INT3472_GPIO_TYPE_POWERDOWN, + .type_to = INT3472_GPIO_TYPE_POWER_ENABLE, + .con_id = "vdd", + .enable_time_us = GPIO_REGULATOR_ENABLE_TIME, + }, + { /* ov7251 driver / DT-bindings expect "enable" as con_id for reset */ + .hid = "INT347E", + .type_from = INT3472_GPIO_TYPE_RESET, + .type_to = INT3472_GPIO_TYPE_RESET, + .con_id = "enable", + }, + { /* ov08x40's handshake pin needs a 45 ms delay on some HP laptops */ + .hid = "OVTI08F4", + .type_from = INT3472_GPIO_TYPE_HANDSHAKE, + .type_to = INT3472_GPIO_TYPE_HANDSHAKE, + .con_id = "dvdd", + .enable_time_us = 45 * USEC_PER_MSEC, + }, +}; + +static void int3472_get_con_id_and_polarity(struct int3472_discrete_device *int3472, u8 *type, + const char **con_id, unsigned long *gpio_flags, + unsigned int *enable_time_us) { - char *path = agpio->resource_source.string_ptr; - u16 pin = agpio->pin_table[0]; - struct gpio_desc *gpio; + struct acpi_device *adev = int3472->sensor; + unsigned int i; - switch (type) { - case INT3472_GPIO_TYPE_CLK_ENABLE: - gpio = acpi_get_and_request_gpiod(path, pin, "int3472,clk-enable"); - if (IS_ERR(gpio)) - return (PTR_ERR(gpio)); + for (i = 0; i < ARRAY_SIZE(int3472_gpio_map); i++) { + /* + * Map the firmware-provided GPIO to whatever a driver expects + * (as in DT bindings). First check if the type matches with the + * GPIO map, then further check that the device _HID matches.
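The int3472_gpio_map[] table above is a small policy layer: a (firmware GPIO type, sensor _HID) pair selects the con_id, type, and enable time a sensor driver actually expects. Its lookup, reduced to portable C; strcmp() stands in for acpi_dev_hid_uid_match(), and the type values are the INT3472_GPIO_TYPE_* constants from the (now removed) common.h:

#include <stddef.h>
#include <string.h>

struct gpio_map {
	const char *hid;
	int type_from;
	int type_to;
	const char *con_id;
};

static const struct gpio_map map[] = {
	{ "INT33F0", 0x01 /* powerdown */, 0x0b /* power enable */, "vdd" },
	{ "INT347E", 0x00 /* reset */, 0x00 /* reset */, "enable" },
};

/* Returns the remapped con_id, or NULL to fall back to the default
 * per-type switch. */
static const char *remap_con_id(const char *hid, int *type)
{
	for (size_t i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
		if (*type == map[i].type_from && !strcmp(hid, map[i].hid)) {
			*type = map[i].type_to;
			return map[i].con_id;
		}
	}
	return NULL;
}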
+ */ + if (*type != int3472_gpio_map[i].type_from) + continue; + + if (!acpi_dev_hid_uid_match(adev, int3472_gpio_map[i].hid, NULL)) + continue; + + dev_dbg(int3472->dev, "mapping type 0x%02x pin to 0x%02x %s\n", + *type, int3472_gpio_map[i].type_to, int3472_gpio_map[i].con_id); + + *type = int3472_gpio_map[i].type_to; + *gpio_flags = int3472_gpio_map[i].polarity_low ? + GPIO_ACTIVE_LOW : GPIO_ACTIVE_HIGH; + *con_id = int3472_gpio_map[i].con_id; + *enable_time_us = int3472_gpio_map[i].enable_time_us; + return; + } - int3472->clock.ena_gpio = gpio; - /* Ensure the pin is in output mode and non-active state */ - gpiod_direction_output(int3472->clock.ena_gpio, 0); + *enable_time_us = GPIO_REGULATOR_ENABLE_TIME; + + switch (*type) { + case INT3472_GPIO_TYPE_RESET: + *con_id = "reset"; + *gpio_flags = GPIO_ACTIVE_LOW; + break; + case INT3472_GPIO_TYPE_POWERDOWN: + *con_id = "powerdown"; + *gpio_flags = GPIO_ACTIVE_LOW; + break; + case INT3472_GPIO_TYPE_CLK_ENABLE: + *con_id = "clk-enable"; + *gpio_flags = GPIO_ACTIVE_HIGH; break; case INT3472_GPIO_TYPE_PRIVACY_LED: - gpio = acpi_get_and_request_gpiod(path, pin, "int3472,privacy-led"); - if (IS_ERR(gpio)) - return (PTR_ERR(gpio)); - - int3472->clock.led_gpio = gpio; - /* Ensure the pin is in output mode and non-active state */ - gpiod_direction_output(int3472->clock.led_gpio, 0); + *con_id = "privacy-led"; + *gpio_flags = GPIO_ACTIVE_HIGH; + break; + case INT3472_GPIO_TYPE_HOTPLUG_DETECT: + *con_id = "hpd"; + *gpio_flags = GPIO_ACTIVE_HIGH; + break; + case INT3472_GPIO_TYPE_POWER_ENABLE: + *con_id = "avdd"; + *gpio_flags = GPIO_ACTIVE_HIGH; + break; + case INT3472_GPIO_TYPE_HANDSHAKE: + *con_id = "dvdd"; + *gpio_flags = GPIO_ACTIVE_HIGH; + /* Setups using a handshake pin need 25 ms enable delay */ + *enable_time_us = 25 * USEC_PER_MSEC; break; default: - dev_err(int3472->dev, "Invalid GPIO type 0x%02x for clock\n", type); + *con_id = "unknown"; + *gpio_flags = GPIO_ACTIVE_HIGH; break; } - - return 0; } /** @@ -203,6 +251,7 @@ static int skl_int3472_map_gpio_to_clk(struct int3472_discrete_device *int3472, * 0x0b Power enable * 0x0c Clock enable * 0x0d Privacy LED + * 0x13 Hotplug detect * * There are some known platform specific quirks where that does not quite * hold up; for example where a pin with type 0x01 (Power down) is mapped to @@ -214,21 +263,26 @@ static int skl_int3472_map_gpio_to_clk(struct int3472_discrete_device *int3472, * to create clocks and regulators via the usual frameworks. * * Return: - * * 1 - To continue the loop - * * 0 - When all resources found are handled properly. - * * -EINVAL - If the resource is not a GPIO IO resource - * * -ENODEV - If the resource has no corresponding _DSM entry - * * -Other - Errors propagated from one of the sub-functions. 
+ * * 1 - Continue the loop without adding a copy of the resource to + * * the list passed to acpi_dev_get_resources() + * * 0 - Continue the loop after adding a copy of the resource to + * * the list passed to acpi_dev_get_resources() + * * -errno - Error, break loop */ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares, void *data) { struct int3472_discrete_device *int3472 = data; + const char *second_sensor = NULL; struct acpi_resource_gpio *agpio; + unsigned int enable_time_us; + u8 active_value, pin, type; + unsigned long gpio_flags; union acpi_object *obj; + struct gpio_desc *gpio; const char *err_msg; + const char *con_id; int ret; - u8 type; if (!acpi_gpio_get_io_resource(ares, &agpio)) return 1; @@ -248,34 +302,74 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares, return 1; } - type = obj->integer.value & 0xff; + type = FIELD_GET(INT3472_GPIO_DSM_TYPE, obj->integer.value); + + int3472_get_con_id_and_polarity(int3472, &type, &con_id, &gpio_flags, &enable_time_us); + + pin = FIELD_GET(INT3472_GPIO_DSM_PIN, obj->integer.value); + /* Pin field is not really used under Windows and wraps around at 8 bits */ + if (pin != (agpio->pin_table[0] & 0xff)) + dev_dbg(int3472->dev, FW_BUG "%s %s pin number mismatch _DSM %d resource %d\n", + con_id, agpio->resource_source.string_ptr, pin, agpio->pin_table[0]); + + active_value = FIELD_GET(INT3472_GPIO_DSM_SENSOR_ON_VAL, obj->integer.value); + if (!active_value) + gpio_flags ^= GPIO_ACTIVE_LOW; + + dev_dbg(int3472->dev, "%s %s pin %d active-%s\n", con_id, + agpio->resource_source.string_ptr, agpio->pin_table[0], + str_high_low(gpio_flags == GPIO_ACTIVE_HIGH)); switch (type) { case INT3472_GPIO_TYPE_RESET: - ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, "reset", - GPIO_ACTIVE_LOW); - if (ret) - err_msg = "Failed to map reset pin to sensor\n"; - - break; case INT3472_GPIO_TYPE_POWERDOWN: - ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, "powerdown", - GPIO_ACTIVE_LOW); + case INT3472_GPIO_TYPE_HOTPLUG_DETECT: + ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, con_id, gpio_flags); if (ret) - err_msg = "Failed to map powerdown pin to sensor\n"; + err_msg = "Failed to map GPIO pin to sensor\n"; break; case INT3472_GPIO_TYPE_CLK_ENABLE: case INT3472_GPIO_TYPE_PRIVACY_LED: - ret = skl_int3472_map_gpio_to_clk(int3472, agpio, type); - if (ret) - err_msg = "Failed to map GPIO to clock\n"; - - break; case INT3472_GPIO_TYPE_POWER_ENABLE: - ret = skl_int3472_register_regulator(int3472, agpio); + case INT3472_GPIO_TYPE_HANDSHAKE: + gpio = skl_int3472_gpiod_get_from_temp_lookup(int3472, agpio, con_id, gpio_flags); + if (IS_ERR(gpio)) { + ret = PTR_ERR(gpio); + err_msg = "Failed to get GPIO\n"; + break; + } + + switch (type) { + case INT3472_GPIO_TYPE_CLK_ENABLE: + ret = skl_int3472_register_gpio_clock(int3472, gpio); + if (ret) + err_msg = "Failed to register clock\n"; + + break; + case INT3472_GPIO_TYPE_PRIVACY_LED: + ret = skl_int3472_register_pled(int3472, gpio); + if (ret) + err_msg = "Failed to register LED\n"; + + break; + case INT3472_GPIO_TYPE_POWER_ENABLE: + second_sensor = int3472->quirks.avdd_second_sensor; + fallthrough; + case INT3472_GPIO_TYPE_HANDSHAKE: + ret = skl_int3472_register_regulator(int3472, gpio, enable_time_us, + con_id, second_sensor); + if (ret) + err_msg = "Failed to register regulator\n"; + + break; + default: /* Never reached */ + ret = -EINVAL; + break; + } + if (ret) - err_msg = "Failed to map regulator to sensor\n"; + gpiod_put(gpio); break; default: @@ -292,19 
+386,16 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares, if (ret < 0) return dev_err_probe(int3472->dev, ret, err_msg); - return ret; + /* Tell acpi_dev_get_resources() to not make a copy of the resource */ + return 1; } -static int skl_int3472_parse_crs(struct int3472_discrete_device *int3472) +int int3472_discrete_parse_crs(struct int3472_discrete_device *int3472) { LIST_HEAD(resource_list); int ret; - /* - * No error check, because not having a sensor config is not necessarily - * a failure mode. - */ - int3472->sensor_config = skl_int3472_get_sensor_module_config(int3472); + skl_int3472_log_sensor_module_name(int3472); ret = acpi_dev_get_resources(int3472->adev, &resource_list, skl_int3472_handle_gpio_resources, @@ -314,51 +405,49 @@ static int skl_int3472_parse_crs(struct int3472_discrete_device *int3472) acpi_dev_free_resource_list(&resource_list); - /* - * If we find no clock enable GPIO pin then the privacy LED won't work. - * We've never seen that situation, but it's possible. Warn the user so - * it's clear what's happened. - */ - if (int3472->clock.ena_gpio) { - ret = skl_int3472_register_clock(int3472); - if (ret) - return ret; - } else { - if (int3472->clock.led_gpio) - dev_warn(int3472->dev, - "No clk GPIO. The privacy LED won't work\n"); - } + /* Register _DSM based clock (no-op if a GPIO clock was already registered) */ + ret = skl_int3472_register_dsm_clock(int3472); + if (ret < 0) + return ret; int3472->gpios.dev_id = int3472->sensor_name; gpiod_add_lookup_table(&int3472->gpios); return 0; } +EXPORT_SYMBOL_NS_GPL(int3472_discrete_parse_crs, "INTEL_INT3472_DISCRETE"); -static int skl_int3472_discrete_remove(struct platform_device *pdev) +void int3472_discrete_cleanup(struct int3472_discrete_device *int3472) { - struct int3472_discrete_device *int3472 = platform_get_drvdata(pdev); - gpiod_remove_lookup_table(&int3472->gpios); - if (int3472->clock.cl) - skl_int3472_unregister_clock(int3472); - - gpiod_put(int3472->clock.ena_gpio); - gpiod_put(int3472->clock.led_gpio); - + skl_int3472_unregister_clock(int3472); + skl_int3472_unregister_pled(int3472); skl_int3472_unregister_regulator(int3472); +} +EXPORT_SYMBOL_NS_GPL(int3472_discrete_cleanup, "INTEL_INT3472_DISCRETE"); - return 0; +static void skl_int3472_discrete_remove(struct platform_device *pdev) +{ + int3472_discrete_cleanup(platform_get_drvdata(pdev)); } static int skl_int3472_discrete_probe(struct platform_device *pdev) { struct acpi_device *adev = ACPI_COMPANION(&pdev->dev); + const struct int3472_discrete_quirks *quirks = NULL; struct int3472_discrete_device *int3472; + const struct dmi_system_id *id; struct int3472_cldb cldb; int ret; + if (!adev) + return -ENODEV; + + id = dmi_first_match(skl_int3472_discrete_quirks); + if (id) + quirks = id->driver_data; + ret = skl_int3472_fill_cldb(adev, &cldb); if (ret) { dev_err(&pdev->dev, "Couldn't fill CLDB structure\n"); @@ -380,6 +469,10 @@ static int skl_int3472_discrete_probe(struct platform_device *pdev) int3472->adev = adev; int3472->dev = &pdev->dev; platform_set_drvdata(pdev, int3472); + int3472->clock.imgclk_index = cldb.clock_source; + + if (quirks) + int3472->quirks = *quirks; ret = skl_int3472_get_sensor_adev_and_name(&pdev->dev, &int3472->sensor, &int3472->sensor_name); @@ -392,7 +485,7 @@ static int skl_int3472_discrete_probe(struct platform_device *pdev) */ INIT_LIST_HEAD(&int3472->gpios.list); - ret = skl_int3472_parse_crs(int3472); + ret = int3472_discrete_parse_crs(int3472); if (ret) { skl_int3472_discrete_remove(pdev); return 
ret; @@ -421,3 +514,4 @@ module_platform_driver(int3472_discrete); MODULE_DESCRIPTION("Intel SkyLake INT3472 ACPI Discrete Device Driver"); MODULE_AUTHOR("Daniel Scally <djrscally@gmail.com>"); MODULE_LICENSE("GPL v2"); +MODULE_IMPORT_NS("INTEL_INT3472"); diff --git a/drivers/platform/x86/intel/int3472/discrete_quirks.c b/drivers/platform/x86/intel/int3472/discrete_quirks.c new file mode 100644 index 000000000000..552869ef91ab --- /dev/null +++ b/drivers/platform/x86/intel/int3472/discrete_quirks.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Author: Hans de Goede <hansg@kernel.org> */ + +#include <linux/dmi.h> +#include <linux/platform_data/x86/int3472.h> + +static const struct int3472_discrete_quirks lenovo_miix_510_quirks = { + .avdd_second_sensor = "i2c-OVTI2680:00", +}; + +const struct dmi_system_id skl_int3472_discrete_quirks[] = { + { + /* Lenovo Miix 510-12IKB */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "MIIX 510-12IKB"), + }, + .driver_data = (void *)&lenovo_miix_510_quirks, + }, + { } +}; diff --git a/drivers/platform/x86/intel/int3472/led.c b/drivers/platform/x86/intel/int3472/led.c new file mode 100644 index 000000000000..b1d84b968112 --- /dev/null +++ b/drivers/platform/x86/intel/int3472/led.c @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Author: Hans de Goede <hdegoede@redhat.com> */ + +#include <linux/acpi.h> +#include <linux/gpio/consumer.h> +#include <linux/leds.h> +#include <linux/platform_data/x86/int3472.h> + +static int int3472_pled_set(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct int3472_discrete_device *int3472 = + container_of(led_cdev, struct int3472_discrete_device, pled.classdev); + + gpiod_set_value_cansleep(int3472->pled.gpio, brightness); + return 0; +} + +int skl_int3472_register_pled(struct int3472_discrete_device *int3472, struct gpio_desc *gpio) +{ + char *p; + int ret; + + if (int3472->pled.classdev.dev) + return -EBUSY; + + int3472->pled.gpio = gpio; + + /* Generate the name, replacing the ':' in the ACPI devname with '_' */ + snprintf(int3472->pled.name, sizeof(int3472->pled.name), + "%s::privacy_led", acpi_dev_name(int3472->sensor)); + p = strchr(int3472->pled.name, ':'); + if (p) + *p = '_'; + + int3472->pled.classdev.name = int3472->pled.name; + int3472->pled.classdev.max_brightness = 1; + int3472->pled.classdev.brightness_set_blocking = int3472_pled_set; + + ret = led_classdev_register(int3472->dev, &int3472->pled.classdev); + if (ret) + return ret; + + int3472->pled.lookup.provider = int3472->pled.name; + int3472->pled.lookup.dev_id = int3472->sensor_name; + int3472->pled.lookup.con_id = "privacy"; + led_add_lookup(&int3472->pled.lookup); + + return 0; +} + +void skl_int3472_unregister_pled(struct int3472_discrete_device *int3472) +{ + if (IS_ERR_OR_NULL(int3472->pled.classdev.dev)) + return; + + led_remove_lookup(&int3472->pled.lookup); + led_classdev_unregister(&int3472->pled.classdev); + gpiod_put(int3472->pled.gpio); +} diff --git a/drivers/platform/x86/intel/int3472/tps68470.c b/drivers/platform/x86/intel/int3472/tps68470.c index 5b8d1a9620a5..0133405697dc 100644 --- a/drivers/platform/x86/intel/int3472/tps68470.c +++ b/drivers/platform/x86/intel/int3472/tps68470.c @@ -8,10 +8,10 @@ #include <linux/mfd/tps68470.h> #include <linux/platform_device.h> #include <linux/platform_data/tps68470.h> +#include <linux/platform_data/x86/int3472.h> #include <linux/regmap.h> #include <linux/string.h> -#include "common.h" #include "tps68470.h" #define 
DESIGNED_FOR_CHROMEOS 1 @@ -152,6 +152,9 @@ static int skl_int3472_tps68470_probe(struct i2c_client *client) int ret; int i; + if (!adev) + return -ENODEV; + n_consumers = skl_int3472_fill_clk_pdata(&client->dev, &clk_pdata); if (n_consumers < 0) return n_consumers; @@ -250,7 +253,7 @@ static struct i2c_driver int3472_tps68470 = { .name = "int3472-tps68470", .acpi_match_table = int3472_device_id, }, - .probe_new = skl_int3472_tps68470_probe, + .probe = skl_int3472_tps68470_probe, .remove = skl_int3472_tps68470_remove, }; module_i2c_driver(int3472_tps68470); @@ -258,4 +261,5 @@ module_i2c_driver(int3472_tps68470); MODULE_DESCRIPTION("Intel SkyLake INT3472 ACPI TPS68470 Device Driver"); MODULE_AUTHOR("Daniel Scally <djrscally@gmail.com>"); MODULE_LICENSE("GPL v2"); +MODULE_IMPORT_NS("INTEL_INT3472"); MODULE_SOFTDEP("pre: clk-tps68470 tps68470-regulator"); diff --git a/drivers/platform/x86/intel/int3472/tps68470_board_data.c b/drivers/platform/x86/intel/int3472/tps68470_board_data.c index 309eab9c0558..71357a036292 100644 --- a/drivers/platform/x86/intel/int3472/tps68470_board_data.c +++ b/drivers/platform/x86/intel/int3472/tps68470_board_data.c @@ -129,6 +129,109 @@ static const struct tps68470_regulator_platform_data surface_go_tps68470_pdata = }, }; +/* Settings for Dell 7212 Tablet */ + +static struct regulator_consumer_supply int3479_vsio_consumer_supplies[] = { + REGULATOR_SUPPLY("avdd", "i2c-INT3479:00"), +}; + +static struct regulator_consumer_supply int3479_aux1_consumer_supplies[] = { + REGULATOR_SUPPLY("dvdd", "i2c-INT3479:00"), +}; + +static struct regulator_consumer_supply int3479_aux2_consumer_supplies[] = { + REGULATOR_SUPPLY("dovdd", "i2c-INT3479:00"), +}; + +static const struct regulator_init_data dell_7212_tps68470_core_reg_init_data = { + .constraints = { + .min_uV = 1200000, + .max_uV = 1200000, + .apply_uV = 1, + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + .num_consumer_supplies = 0, + .consumer_supplies = NULL, +}; + +static const struct regulator_init_data dell_7212_tps68470_ana_reg_init_data = { + .constraints = { + .min_uV = 2815200, + .max_uV = 2815200, + .apply_uV = 1, + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + .num_consumer_supplies = 0, + .consumer_supplies = NULL, +}; + +static const struct regulator_init_data dell_7212_tps68470_vcm_reg_init_data = { + .constraints = { + .min_uV = 2815200, + .max_uV = 2815200, + .apply_uV = 1, + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + .num_consumer_supplies = 0, + .consumer_supplies = NULL, +}; + +static const struct regulator_init_data dell_7212_tps68470_vio_reg_init_data = { + .constraints = { + .min_uV = 1800600, + .max_uV = 1800600, + .apply_uV = 1, + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + .num_consumer_supplies = 0, + .consumer_supplies = NULL, +}; + +static const struct regulator_init_data dell_7212_tps68470_vsio_reg_init_data = { + .constraints = { + .min_uV = 1800600, + .max_uV = 1800600, + .apply_uV = 1, + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + .num_consumer_supplies = ARRAY_SIZE(int3479_vsio_consumer_supplies), + .consumer_supplies = int3479_vsio_consumer_supplies, +}; + +static const struct regulator_init_data dell_7212_tps68470_aux1_reg_init_data = { + .constraints = { + .min_uV = 1213200, + .max_uV = 1213200, + .apply_uV = 1, + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + .num_consumer_supplies = ARRAY_SIZE(int3479_aux1_consumer_supplies), + .consumer_supplies = int3479_aux1_consumer_supplies, +}; + +static const struct regulator_init_data 
dell_7212_tps68470_aux2_reg_init_data = { + .constraints = { + .min_uV = 1800600, + .max_uV = 1800600, + .apply_uV = 1, + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + .num_consumer_supplies = ARRAY_SIZE(int3479_aux2_consumer_supplies), + .consumer_supplies = int3479_aux2_consumer_supplies, +}; + +static const struct tps68470_regulator_platform_data dell_7212_tps68470_pdata = { + .reg_init_data = { + [TPS68470_CORE] = &dell_7212_tps68470_core_reg_init_data, + [TPS68470_ANA] = &dell_7212_tps68470_ana_reg_init_data, + [TPS68470_VCM] = &dell_7212_tps68470_vcm_reg_init_data, + [TPS68470_VIO] = &dell_7212_tps68470_vio_reg_init_data, + [TPS68470_VSIO] = &dell_7212_tps68470_vsio_reg_init_data, + [TPS68470_AUX1] = &dell_7212_tps68470_aux1_reg_init_data, + [TPS68470_AUX2] = &dell_7212_tps68470_aux2_reg_init_data, + }, +}; + static struct gpiod_lookup_table surface_go_int347a_gpios = { .dev_id = "i2c-INT347A:00", .table = { @@ -146,6 +249,15 @@ static struct gpiod_lookup_table surface_go_int347e_gpios = { } }; +static struct gpiod_lookup_table dell_7212_int3479_gpios = { + .dev_id = "i2c-INT3479:00", + .table = { + GPIO_LOOKUP("tps68470-gpio", 3, "reset", GPIO_ACTIVE_LOW), + GPIO_LOOKUP("tps68470-gpio", 4, "powerdown", GPIO_ACTIVE_LOW), + { } + } +}; + static const struct int3472_tps68470_board_data surface_go_tps68470_board_data = { .dev_name = "i2c-INT3472:05", .tps68470_regulator_pdata = &surface_go_tps68470_pdata, @@ -159,9 +271,19 @@ static const struct int3472_tps68470_board_data surface_go_tps68470_board_data = static const struct int3472_tps68470_board_data surface_go3_tps68470_board_data = { .dev_name = "i2c-INT3472:01", .tps68470_regulator_pdata = &surface_go_tps68470_pdata, + .n_gpiod_lookups = 2, + .tps68470_gpio_lookup_tables = { + &surface_go_int347a_gpios, + &surface_go_int347e_gpios, + }, +}; + +static const struct int3472_tps68470_board_data dell_7212_tps68470_board_data = { + .dev_name = "i2c-INT3472:05", + .tps68470_regulator_pdata = &dell_7212_tps68470_pdata, .n_gpiod_lookups = 1, .tps68470_gpio_lookup_tables = { - &surface_go_int347a_gpios + &dell_7212_int3479_gpios, }, }; @@ -187,6 +309,13 @@ static const struct dmi_system_id int3472_tps68470_board_data_table[] = { }, .driver_data = (void *)&surface_go3_tps68470_board_data, }, + { + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Latitude 7212 Rugged Extreme Tablet"), + }, + .driver_data = (void *)&dell_7212_tps68470_board_data, + }, { } }; diff --git a/drivers/platform/x86/intel/mrfld_pwrbtn.c b/drivers/platform/x86/intel/mrfld_pwrbtn.c index d58fea51747e..6c43f801c467 100644 --- a/drivers/platform/x86/intel/mrfld_pwrbtn.c +++ b/drivers/platform/x86/intel/mrfld_pwrbtn.c @@ -78,13 +78,12 @@ static int mrfld_pwrbtn_probe(struct platform_device *pdev) return 0; } -static int mrfld_pwrbtn_remove(struct platform_device *pdev) +static void mrfld_pwrbtn_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; dev_pm_clear_wake_irq(dev); device_init_wakeup(dev, false); - return 0; } static const struct platform_device_id mrfld_pwrbtn_id_table[] = { diff --git a/drivers/platform/x86/intel/oaktrail.c b/drivers/platform/x86/intel/oaktrail.c index 7c5c623630c1..265cef327b4f 100644 --- a/drivers/platform/x86/intel/oaktrail.c +++ b/drivers/platform/x86/intel/oaktrail.c @@ -28,7 +28,6 @@ #include <linux/backlight.h> #include <linux/dmi.h> #include <linux/err.h> -#include <linux/fb.h> #include <linux/i2c.h> #include <linux/kernel.h> #include <linux/module.h> @@ -250,7 +249,7 
@@ static int oaktrail_backlight_init(void) oaktrail_bl_device = bd; bd->props.brightness = get_backlight_brightness(bd); - bd->props.power = FB_BLANK_UNBLANK; + bd->props.power = BACKLIGHT_POWER_ON; backlight_update_status(bd); return 0; @@ -266,17 +265,11 @@ static int oaktrail_probe(struct platform_device *pdev) return 0; } -static int oaktrail_remove(struct platform_device *pdev) -{ - return 0; -} - static struct platform_driver oaktrail_driver = { .driver = { .name = DRIVER_NAME, }, .probe = oaktrail_probe, - .remove = oaktrail_remove, }; static int dmi_check_cb(const struct dmi_system_id *id) @@ -371,7 +364,7 @@ static void __exit oaktrail_cleanup(void) module_init(oaktrail_init); module_exit(oaktrail_cleanup); -MODULE_AUTHOR("Yin Kangkai (kangkai.yin@intel.com)"); +MODULE_AUTHOR("Yin Kangkai <kangkai.yin@intel.com>"); MODULE_DESCRIPTION("Intel Oaktrail Platform ACPI Extras"); MODULE_VERSION(DRIVER_VERSION); MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/intel/plr_tpmi.c b/drivers/platform/x86/intel/plr_tpmi.c new file mode 100644 index 000000000000..58132da47745 --- /dev/null +++ b/drivers/platform/x86/intel/plr_tpmi.c @@ -0,0 +1,355 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Performance Limit Reasons via TPMI + * + * Copyright (c) 2024, Intel Corporation. + */ + +#include <linux/array_size.h> +#include <linux/auxiliary_bus.h> +#include <linux/bitfield.h> +#include <linux/bitmap.h> +#include <linux/debugfs.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/gfp_types.h> +#include <linux/intel_tpmi.h> +#include <linux/intel_vsec.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/kstrtox.h> +#include <linux/lockdep.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/mutex.h> +#include <linux/seq_file.h> +#include <linux/sprintf.h> +#include <linux/types.h> + +#include "tpmi_power_domains.h" + +#define PLR_HEADER 0x00 +#define PLR_MAILBOX_INTERFACE 0x08 +#define PLR_MAILBOX_DATA 0x10 +#define PLR_DIE_LEVEL 0x18 + +#define PLR_MODULE_ID_MASK GENMASK_ULL(19, 12) +#define PLR_RUN_BUSY BIT_ULL(63) + +#define PLR_COMMAND_WRITE 1 + +#define PLR_INVALID GENMASK_ULL(63, 0) + +#define PLR_TIMEOUT_US 5 +#define PLR_TIMEOUT_MAX_US 1000 + +#define PLR_COARSE_REASON_BITS 32 + +struct tpmi_plr; + +struct tpmi_plr_die { + void __iomem *base; + struct mutex lock; /* Protect access to PLR mailbox */ + int package_id; + int die_id; + struct tpmi_plr *plr; +}; + +struct tpmi_plr { + struct dentry *dbgfs_dir; + struct tpmi_plr_die *die_info; + int num_dies; + struct auxiliary_device *auxdev; +}; + +static const char * const plr_coarse_reasons[] = { + "FREQUENCY", + "CURRENT", + "POWER", + "THERMAL", + "PLATFORM", + "MCP", + "RAS", + "MISC", + "QOS", + "DFC", +}; + +static const char * const plr_fine_reasons[] = { + "FREQUENCY_CDYN0", + "FREQUENCY_CDYN1", + "FREQUENCY_CDYN2", + "FREQUENCY_CDYN3", + "FREQUENCY_CDYN4", + "FREQUENCY_CDYN5", + "FREQUENCY_FCT", + "FREQUENCY_PCS_TRL", + "CURRENT_MTPMAX", + "POWER_FAST_RAPL", + "POWER_PKG_PL1_MSR_TPMI", + "POWER_PKG_PL1_MMIO", + "POWER_PKG_PL1_PCS", + "POWER_PKG_PL2_MSR_TPMI", + "POWER_PKG_PL2_MMIO", + "POWER_PKG_PL2_PCS", + "POWER_PLATFORM_PL1_MSR_TPMI", + "POWER_PLATFORM_PL1_MMIO", + "POWER_PLATFORM_PL1_PCS", + "POWER_PLATFORM_PL2_MSR_TPMI", + "POWER_PLATFORM_PL2_MMIO", + "POWER_PLATFORM_PL2_PCS", + "UNKNOWN(22)", + "THERMAL_PER_CORE", + "DFC_UFS", + "PLATFORM_PROCHOT", + "PLATFORM_HOT_VR", + "UNKNOWN(27)", + "UNKNOWN(28)", + "MISC_PCS_PSTATE", +}; + +static u64 
plr_read(struct tpmi_plr_die *plr_die, int offset) +{ + return readq(plr_die->base + offset); +} + +static void plr_write(u64 val, struct tpmi_plr_die *plr_die, int offset) +{ + writeq(val, plr_die->base + offset); +} + +static int plr_read_cpu_status(struct tpmi_plr_die *plr_die, int cpu, + u64 *status) +{ + u64 regval; + int ret; + + lockdep_assert_held(&plr_die->lock); + + regval = FIELD_PREP(PLR_MODULE_ID_MASK, tpmi_get_punit_core_number(cpu)); + regval |= PLR_RUN_BUSY; + + plr_write(regval, plr_die, PLR_MAILBOX_INTERFACE); + + ret = readq_poll_timeout(plr_die->base + PLR_MAILBOX_INTERFACE, regval, + !(regval & PLR_RUN_BUSY), PLR_TIMEOUT_US, + PLR_TIMEOUT_MAX_US); + if (ret) + return ret; + + *status = plr_read(plr_die, PLR_MAILBOX_DATA); + + return 0; +} + +static int plr_clear_cpu_status(struct tpmi_plr_die *plr_die, int cpu) +{ + u64 regval; + + lockdep_assert_held(&plr_die->lock); + + regval = FIELD_PREP(PLR_MODULE_ID_MASK, tpmi_get_punit_core_number(cpu)); + regval |= PLR_RUN_BUSY | PLR_COMMAND_WRITE; + + plr_write(0, plr_die, PLR_MAILBOX_DATA); + + plr_write(regval, plr_die, PLR_MAILBOX_INTERFACE); + + return readq_poll_timeout(plr_die->base + PLR_MAILBOX_INTERFACE, regval, + !(regval & PLR_RUN_BUSY), PLR_TIMEOUT_US, + PLR_TIMEOUT_MAX_US); +} + +static void plr_print_bits(struct seq_file *s, u64 val, int bits) +{ + const unsigned long mask[] = { BITMAP_FROM_U64(val) }; + int bit, index; + + for_each_set_bit(bit, mask, bits) { + const char *str = NULL; + + if (bit < PLR_COARSE_REASON_BITS) { + if (bit < ARRAY_SIZE(plr_coarse_reasons)) + str = plr_coarse_reasons[bit]; + } else { + index = bit - PLR_COARSE_REASON_BITS; + if (index < ARRAY_SIZE(plr_fine_reasons)) + str = plr_fine_reasons[index]; + } + + if (str) + seq_printf(s, " %s", str); + else + seq_printf(s, " UNKNOWN(%d)", bit); + } + + if (!val) + seq_puts(s, " none"); + + seq_putc(s, '\n'); +} + +static int plr_status_show(struct seq_file *s, void *unused) +{ + struct tpmi_plr_die *plr_die = s->private; + int ret; + u64 val; + + val = plr_read(plr_die, PLR_DIE_LEVEL); + seq_puts(s, "cpus"); + plr_print_bits(s, val, 32); + + guard(mutex)(&plr_die->lock); + + for (int cpu = 0; cpu < nr_cpu_ids; cpu++) { + if (plr_die->die_id != tpmi_get_power_domain_id(cpu)) + continue; + + if (plr_die->package_id != topology_physical_package_id(cpu)) + continue; + + seq_printf(s, "cpu%d", cpu); + ret = plr_read_cpu_status(plr_die, cpu, &val); + if (ret) { + dev_err(&plr_die->plr->auxdev->dev, "Failed to read PLR for cpu %d, ret=%d\n", + cpu, ret); + return ret; + } + + plr_print_bits(s, val, 64); + } + + return 0; +} + +static ssize_t plr_status_write(struct file *filp, const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = filp->private_data; + struct tpmi_plr_die *plr_die = s->private; + bool val; + int ret; + + ret = kstrtobool_from_user(ubuf, count, &val); + if (ret) + return ret; + + if (val != 0) + return -EINVAL; + + plr_write(0, plr_die, PLR_DIE_LEVEL); + + guard(mutex)(&plr_die->lock); + + for (int cpu = 0; cpu < nr_cpu_ids; cpu++) { + if (plr_die->die_id != tpmi_get_power_domain_id(cpu)) + continue; + + if (plr_die->package_id != topology_physical_package_id(cpu)) + continue; + + plr_clear_cpu_status(plr_die, cpu); + } + + return count; +} +DEFINE_SHOW_STORE_ATTRIBUTE(plr_status); + +static int intel_plr_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id) +{ + struct oobmsm_plat_info *plat_info; + struct dentry *dentry; + int i, num_resources; + struct resource *res; + struct 
tpmi_plr *plr; + void __iomem *base; + char name[17]; + int err; + + plat_info = tpmi_get_platform_data(auxdev); + if (!plat_info) + return dev_err_probe(&auxdev->dev, -EINVAL, "No platform info\n"); + + dentry = tpmi_get_debugfs_dir(auxdev); + if (!dentry) + return dev_err_probe(&auxdev->dev, -ENODEV, "No TPMI debugfs directory.\n"); + + num_resources = tpmi_get_resource_count(auxdev); + if (!num_resources) + return -EINVAL; + + plr = devm_kzalloc(&auxdev->dev, sizeof(*plr), GFP_KERNEL); + if (!plr) + return -ENOMEM; + + plr->die_info = devm_kcalloc(&auxdev->dev, num_resources, sizeof(*plr->die_info), + GFP_KERNEL); + if (!plr->die_info) + return -ENOMEM; + + plr->num_dies = num_resources; + plr->dbgfs_dir = debugfs_create_dir("plr", dentry); + plr->auxdev = auxdev; + + for (i = 0; i < num_resources; i++) { + res = tpmi_get_resource_at_index(auxdev, i); + if (!res) { + err = dev_err_probe(&auxdev->dev, -EINVAL, "No resource\n"); + goto err; + } + + base = devm_ioremap_resource(&auxdev->dev, res); + if (IS_ERR(base)) { + err = PTR_ERR(base); + goto err; + } + + plr->die_info[i].base = base; + plr->die_info[i].package_id = plat_info->package_id; + plr->die_info[i].die_id = i; + plr->die_info[i].plr = plr; + mutex_init(&plr->die_info[i].lock); + + if (plr_read(&plr->die_info[i], PLR_HEADER) == PLR_INVALID) + continue; + + snprintf(name, sizeof(name), "domain%d", i); + + dentry = debugfs_create_dir(name, plr->dbgfs_dir); + debugfs_create_file("status", 0444, dentry, &plr->die_info[i], + &plr_status_fops); + } + + auxiliary_set_drvdata(auxdev, plr); + + return 0; + +err: + debugfs_remove_recursive(plr->dbgfs_dir); + return err; +} + +static void intel_plr_remove(struct auxiliary_device *auxdev) +{ + struct tpmi_plr *plr = auxiliary_get_drvdata(auxdev); + + debugfs_remove_recursive(plr->dbgfs_dir); +} + +static const struct auxiliary_device_id intel_plr_id_table[] = { + { .name = "intel_vsec.tpmi-plr" }, + {} +}; +MODULE_DEVICE_TABLE(auxiliary, intel_plr_id_table); + +static struct auxiliary_driver intel_plr_aux_driver = { + .id_table = intel_plr_id_table, + .remove = intel_plr_remove, + .probe = intel_plr_probe, +}; +module_auxiliary_driver(intel_plr_aux_driver); + +MODULE_IMPORT_NS("INTEL_TPMI"); +MODULE_IMPORT_NS("INTEL_TPMI_POWER_DOMAIN"); +MODULE_DESCRIPTION("Intel TPMI PLR Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/intel/pmc/Kconfig b/drivers/platform/x86/intel/pmc/Kconfig index b526597e4deb..c6ef0bcf76af 100644 --- a/drivers/platform/x86/intel/pmc/Kconfig +++ b/drivers/platform/x86/intel/pmc/Kconfig @@ -7,6 +7,8 @@ config INTEL_PMC_CORE tristate "Intel PMC Core driver" depends on PCI depends on ACPI + depends on INTEL_PMT_TELEMETRY + select INTEL_PMC_SSRAM_TELEMETRY help The Intel Platform Controller Hub for Intel Core SoCs provides access to Power Management Controller registers via various interfaces. 
This @@ -23,3 +25,6 @@ config INTEL_PMC_CORE - SLPS0 Debug registers (Cannonlake/Icelake PCH) - Low Power Mode registers (Tigerlake and beyond) - PMC quirks as needed to enable SLPS0/S0ix + +config INTEL_PMC_SSRAM_TELEMETRY + tristate diff --git a/drivers/platform/x86/intel/pmc/Makefile b/drivers/platform/x86/intel/pmc/Makefile index f96bc2e19503..bb960c8721d7 100644 --- a/drivers/platform/x86/intel/pmc/Makefile +++ b/drivers/platform/x86/intel/pmc/Makefile @@ -3,8 +3,12 @@ # Intel x86 Platform-Specific Drivers # -intel_pmc_core-y := core.o spt.o cnp.o icl.o tgl.o \ - adl.o mtl.o +intel_pmc_core-y := core.o spt.o cnp.o icl.o \ + tgl.o adl.o mtl.o arl.o lnl.o ptl.o wcl.o obj-$(CONFIG_INTEL_PMC_CORE) += intel_pmc_core.o intel_pmc_core_pltdrv-y := pltdrv.o obj-$(CONFIG_INTEL_PMC_CORE) += intel_pmc_core_pltdrv.o + +# Intel PMC SSRAM driver +intel_pmc_ssram_telemetry-y += ssram_telemetry.o +obj-$(CONFIG_INTEL_PMC_SSRAM_TELEMETRY) += intel_pmc_ssram_telemetry.o diff --git a/drivers/platform/x86/intel/pmc/adl.c b/drivers/platform/x86/intel/pmc/adl.c index 5cbd40979f2a..9e7dfd6e3310 100644 --- a/drivers/platform/x86/intel/pmc/adl.c +++ b/drivers/platform/x86/intel/pmc/adl.c @@ -11,7 +11,7 @@ #include "core.h" /* Alder Lake: PGD PFET Enable Ack Status Register(s) bitmap */ -const struct pmc_bit_map adl_pfear_map[] = { +static const struct pmc_bit_map adl_pfear_map[] = { {"SPI/eSPI", BIT(2)}, {"XHCI", BIT(3)}, {"SPA", BIT(4)}, @@ -54,7 +54,7 @@ const struct pmc_bit_map adl_pfear_map[] = { {} }; -const struct pmc_bit_map *ext_adl_pfear_map[] = { +static const struct pmc_bit_map *ext_adl_pfear_map[] = { /* * Check intel_pmc_core_ids[] users of cnp_reg_map for * a list of core SoCs using this. @@ -63,7 +63,7 @@ const struct pmc_bit_map *ext_adl_pfear_map[] = { NULL }; -const struct pmc_bit_map adl_ltr_show_map[] = { +static const struct pmc_bit_map adl_ltr_show_map[] = { {"SOUTHPORT_A", CNP_PMC_LTR_SPA}, {"SOUTHPORT_B", CNP_PMC_LTR_SPB}, {"SATA", CNP_PMC_LTR_SATA}, @@ -100,7 +100,7 @@ const struct pmc_bit_map adl_ltr_show_map[] = { {} }; -const struct pmc_bit_map adl_clocksource_status_map[] = { +static const struct pmc_bit_map adl_clocksource_status_map[] = { {"CLKPART1_OFF_STS", BIT(0)}, {"CLKPART2_OFF_STS", BIT(1)}, {"CLKPART3_OFF_STS", BIT(2)}, @@ -128,7 +128,7 @@ const struct pmc_bit_map adl_clocksource_status_map[] = { {} }; -const struct pmc_bit_map adl_power_gating_status_0_map[] = { +static const struct pmc_bit_map adl_power_gating_status_0_map[] = { {"PMC_PGD0_PG_STS", BIT(0)}, {"DMI_PGD0_PG_STS", BIT(1)}, {"ESPISPI_PGD0_PG_STS", BIT(2)}, @@ -158,7 +158,7 @@ const struct pmc_bit_map adl_power_gating_status_0_map[] = { {} }; -const struct pmc_bit_map adl_power_gating_status_1_map[] = { +static const struct pmc_bit_map adl_power_gating_status_1_map[] = { {"USBR0_PGD0_PG_STS", BIT(0)}, {"SMT1_PGD0_PG_STS", BIT(2)}, {"CSMERTC_PGD0_PG_STS", BIT(6)}, @@ -170,14 +170,14 @@ const struct pmc_bit_map adl_power_gating_status_1_map[] = { {} }; -const struct pmc_bit_map adl_power_gating_status_2_map[] = { +static const struct pmc_bit_map adl_power_gating_status_2_map[] = { {"THC0_PGD0_PG_STS", BIT(7)}, {"THC1_PGD0_PG_STS", BIT(8)}, {"SPF_PGD0_PG_STS", BIT(14)}, {} }; -const struct pmc_bit_map adl_d3_status_0_map[] = { +static const struct pmc_bit_map adl_d3_status_0_map[] = { {"ISH_D3_STS", BIT(2)}, {"LPSS_D3_STS", BIT(3)}, {"XDCI_D3_STS", BIT(4)}, @@ -193,13 +193,13 @@ const struct pmc_bit_map adl_d3_status_0_map[] = { {} }; -const struct pmc_bit_map adl_d3_status_1_map[] = { +static const struct 
pmc_bit_map adl_d3_status_1_map[] = { {"GBE_D3_STS", BIT(19)}, {"CNVI_D3_STS", BIT(27)}, {} }; -const struct pmc_bit_map adl_d3_status_2_map[] = { +static const struct pmc_bit_map adl_d3_status_2_map[] = { {"CSMERTC_D3_STS", BIT(1)}, {"CSE_D3_STS", BIT(4)}, {"KVMCC_D3_STS", BIT(5)}, @@ -210,20 +210,20 @@ const struct pmc_bit_map adl_d3_status_2_map[] = { {} }; -const struct pmc_bit_map adl_d3_status_3_map[] = { +static const struct pmc_bit_map adl_d3_status_3_map[] = { {"THC0_D3_STS", BIT(14)}, {"THC1_D3_STS", BIT(15)}, {} }; -const struct pmc_bit_map adl_vnn_req_status_0_map[] = { +static const struct pmc_bit_map adl_vnn_req_status_0_map[] = { {"ISH_VNN_REQ_STS", BIT(2)}, {"ESPISPI_VNN_REQ_STS", BIT(18)}, {"DSP_VNN_REQ_STS", BIT(19)}, {} }; -const struct pmc_bit_map adl_vnn_req_status_1_map[] = { +static const struct pmc_bit_map adl_vnn_req_status_1_map[] = { {"NPK_VNN_REQ_STS", BIT(4)}, {"EXI_VNN_REQ_STS", BIT(9)}, {"GBE_VNN_REQ_STS", BIT(19)}, @@ -232,7 +232,7 @@ const struct pmc_bit_map adl_vnn_req_status_1_map[] = { {} }; -const struct pmc_bit_map adl_vnn_req_status_2_map[] = { +static const struct pmc_bit_map adl_vnn_req_status_2_map[] = { {"CSMERTC_VNN_REQ_STS", BIT(1)}, {"CSE_VNN_REQ_STS", BIT(4)}, {"SMT1_VNN_REQ_STS", BIT(8)}, @@ -245,12 +245,12 @@ const struct pmc_bit_map adl_vnn_req_status_2_map[] = { {} }; -const struct pmc_bit_map adl_vnn_req_status_3_map[] = { +static const struct pmc_bit_map adl_vnn_req_status_3_map[] = { {"GPIOCOM5_VNN_REQ_STS", BIT(11)}, {} }; -const struct pmc_bit_map adl_vnn_misc_status_map[] = { +static const struct pmc_bit_map adl_vnn_misc_status_map[] = { {"CPU_C10_REQ_STS", BIT(0)}, {"PCIe_LPM_En_REQ_STS", BIT(3)}, {"ITH_REQ_STS", BIT(5)}, @@ -265,7 +265,7 @@ const struct pmc_bit_map adl_vnn_misc_status_map[] = { {} }; -const struct pmc_bit_map *adl_lpm_maps[] = { +static const struct pmc_bit_map *adl_lpm_maps[] = { adl_clocksource_status_map, adl_power_gating_status_0_map, adl_power_gating_status_1_map, @@ -307,19 +307,12 @@ const struct pmc_reg_map adl_reg_map = { .lpm_sts = adl_lpm_maps, .lpm_status_offset = ADL_LPM_STATUS_OFFSET, .lpm_live_status_offset = ADL_LPM_LIVE_STATUS_OFFSET, + .pson_residency_offset = TGL_PSON_RESIDENCY_OFFSET, + .pson_residency_counter_step = TGL_PSON_RES_COUNTER_STEP, }; -void adl_core_configure(struct pmc_dev *pmcdev) -{ - /* Due to a hardware limitation, the GBE LTR blocks PC10 - * when a cable is attached. Tell the PMC to ignore it. - */ - dev_dbg(&pmcdev->pdev->dev, "ignoring GBE LTR\n"); - pmc_core_send_ltr_ignore(pmcdev, 3); -} - -void adl_core_init(struct pmc_dev *pmcdev) -{ - pmcdev->map = &adl_reg_map; - pmcdev->core_configure = adl_core_configure; -} +struct pmc_dev_info adl_pmc_dev = { + .map = &adl_reg_map, + .suspend = cnl_suspend, + .resume = cnl_resume, +}; diff --git a/drivers/platform/x86/intel/pmc/arl.c b/drivers/platform/x86/intel/pmc/arl.c new file mode 100644 index 000000000000..eb23bc68340a --- /dev/null +++ b/drivers/platform/x86/intel/pmc/arl.c @@ -0,0 +1,745 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This file contains platform specific structure definitions + * and init function used by Arrow Lake PCH. + * + * Copyright (c) 2022, Intel Corporation. + * All Rights Reserved. 
+ * + */ + +#include <linux/pci.h> +#include "core.h" + +/* PMC SSRAM PMT Telemetry GUID */ +#define IOEP_LPM_REQ_GUID 0x5077612 +#define SOCS_LPM_REQ_GUID 0x8478657 +#define PCHS_LPM_REQ_GUID 0x9684572 +#define SOCM_LPM_REQ_GUID 0x2625030 + +static const u8 ARL_LPM_REG_INDEX[] = {0, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20}; + +static const struct pmc_bit_map arl_socs_ltr_show_map[] = { + {"SOUTHPORT_A", CNP_PMC_LTR_SPA}, + {"SOUTHPORT_B", CNP_PMC_LTR_SPB}, + {"SATA", CNP_PMC_LTR_SATA}, + {"GIGABIT_ETHERNET", CNP_PMC_LTR_GBE}, + {"XHCI", CNP_PMC_LTR_XHCI}, + {"SOUTHPORT_F", ADL_PMC_LTR_SPF}, + {"ME", CNP_PMC_LTR_ME}, + /* EVA is Enterprise Value Add, doesn't really exist on PCH */ + {"SATA1", CNP_PMC_LTR_EVA}, + {"SOUTHPORT_C", CNP_PMC_LTR_SPC}, + {"HD_AUDIO", CNP_PMC_LTR_AZ}, + {"CNV", CNP_PMC_LTR_CNV}, + {"LPSS", CNP_PMC_LTR_LPSS}, + {"SOUTHPORT_D", CNP_PMC_LTR_SPD}, + {"SOUTHPORT_E", CNP_PMC_LTR_SPE}, + {"SATA2", CNP_PMC_LTR_CAM}, + {"ESPI", CNP_PMC_LTR_ESPI}, + {"SCC", CNP_PMC_LTR_SCC}, + {"ISH", CNP_PMC_LTR_ISH}, + {"UFSX2", CNP_PMC_LTR_UFSX2}, + {"EMMC", CNP_PMC_LTR_EMMC}, + /* + * Check intel_pmc_core_ids[] users of cnp_reg_map for + * a list of core SoCs using this. + */ + {"WIGIG", ICL_PMC_LTR_WIGIG}, + {"THC0", TGL_PMC_LTR_THC0}, + {"THC1", TGL_PMC_LTR_THC1}, + {"SOUTHPORT_G", MTL_PMC_LTR_SPG}, + {"Reserved", ARL_SOCS_PMC_LTR_RESERVED}, + {"IOE_PMC", MTL_PMC_LTR_IOE_PMC}, + {"DMI3", ARL_PMC_LTR_DMI3}, + + /* Below two cannot be used for LTR_IGNORE */ + {"CURRENT_PLATFORM", CNP_PMC_LTR_CUR_PLT}, + {"AGGREGATED_SYSTEM", CNP_PMC_LTR_CUR_ASLT}, + {} +}; + +static const struct pmc_bit_map arl_socs_clocksource_status_map[] = { + {"AON2_OFF_STS", BIT(0)}, + {"AON3_OFF_STS", BIT(1)}, + {"AON4_OFF_STS", BIT(2)}, + {"AON5_OFF_STS", BIT(3)}, + {"AON1_OFF_STS", BIT(4)}, + {"XTAL_LVM_OFF_STS", BIT(5)}, + {"AON3_SPL_OFF_STS", BIT(9)}, + {"DMI3FPW_0_PLL_OFF_STS", BIT(10)}, + {"DMI3FPW_1_PLL_OFF_STS", BIT(11)}, + {"G5X16FPW_0_PLL_OFF_STS", BIT(14)}, + {"G5X16FPW_1_PLL_OFF_STS", BIT(15)}, + {"G5X16FPW_2_PLL_OFF_STS", BIT(16)}, + {"XTAL_AGGR_OFF_STS", BIT(17)}, + {"USB2_PLL_OFF_STS", BIT(18)}, + {"G5X16FPW_3_PLL_OFF_STS", BIT(19)}, + {"BCLK_EXT_INJ_CLK_OFF_STS", BIT(20)}, + {"PHY_OC_EXT_INJ_CLK_OFF_STS", BIT(21)}, + {"FILTER_PLL_OFF_STS", BIT(22)}, + {"FABRIC_PLL_OFF_STS", BIT(25)}, + {"SOC_PLL_OFF_STS", BIT(26)}, + {"PCIEFAB_PLL_OFF_STS", BIT(27)}, + {"REF_PLL_OFF_STS", BIT(28)}, + {"GENLOCK_FILTER_PLL_OFF_STS", BIT(30)}, + {"RTC_PLL_OFF_STS", BIT(31)}, + {} +}; + +static const struct pmc_bit_map arl_socs_power_gating_status_0_map[] = { + {"PMC_PGD0_PG_STS", BIT(0)}, + {"DMI_PGD0_PG_STS", BIT(1)}, + {"ESPISPI_PGD0_PG_STS", BIT(2)}, + {"XHCI_PGD0_PG_STS", BIT(3)}, + {"SPA_PGD0_PG_STS", BIT(4)}, + {"SPB_PGD0_PG_STS", BIT(5)}, + {"SPC_PGD0_PG_STS", BIT(6)}, + {"GBE_PGD0_PG_STS", BIT(7)}, + {"SATA_PGD0_PG_STS", BIT(8)}, + {"FIACPCB_P5x16_PGD0_PG_STS", BIT(9)}, + {"G5x16FPW_PGD0_PG_STS", BIT(10)}, + {"FIA_D_PGD0_PG_STS", BIT(11)}, + {"MPFPW2_PGD0_PG_STS", BIT(12)}, + {"SPD_PGD0_PG_STS", BIT(13)}, + {"LPSS_PGD0_PG_STS", BIT(14)}, + {"LPC_PGD0_PG_STS", BIT(15)}, + {"SMB_PGD0_PG_STS", BIT(16)}, + {"ISH_PGD0_PG_STS", BIT(17)}, + {"P2S_PGD0_PG_STS", BIT(18)}, + {"NPK_PGD0_PG_STS", BIT(19)}, + {"DMI3FPW_PGD0_PG_STS", BIT(20)}, + {"GBETSN1_PGD0_PG_STS", BIT(21)}, + {"FUSE_PGD0_PG_STS", BIT(22)}, + {"FIACPCB_D_PGD0_PG_STS", BIT(23)}, + {"FUSEGPSB_PGD0_PG_STS", BIT(24)}, + {"XDCI_PGD0_PG_STS", BIT(25)}, + {"EXI_PGD0_PG_STS", BIT(26)}, + {"CSE_PGD0_PG_STS", BIT(27)}, + {"KVMCC_PGD0_PG_STS", BIT(28)}, + 
{"PMT_PGD0_PG_STS", BIT(29)}, + {"CLINK_PGD0_PG_STS", BIT(30)}, + {"PTIO_PGD0_PG_STS", BIT(31)}, + {} +}; + +static const struct pmc_bit_map arl_socs_power_gating_status_1_map[] = { + {"USBR0_PGD0_PG_STS", BIT(0)}, + {"SUSRAM_PGD0_PG_STS", BIT(1)}, + {"SMT1_PGD0_PG_STS", BIT(2)}, + {"FIACPCB_U_PGD0_PG_STS", BIT(3)}, + {"SMS2_PGD0_PG_STS", BIT(4)}, + {"SMS1_PGD0_PG_STS", BIT(5)}, + {"CSMERTC_PGD0_PG_STS", BIT(6)}, + {"CSMEPSF_PGD0_PG_STS", BIT(7)}, + {"SBR0_PGD0_PG_STS", BIT(8)}, + {"SBR1_PGD0_PG_STS", BIT(9)}, + {"SBR2_PGD0_PG_STS", BIT(10)}, + {"SBR3_PGD0_PG_STS", BIT(11)}, + {"MPFPW1_PGD0_PG_STS", BIT(12)}, + {"SBR5_PGD0_PG_STS", BIT(13)}, + {"FIA_X_PGD0_PG_STS", BIT(14)}, + {"FIACPCB_X_PGD0_PG_STS", BIT(15)}, + {"SBRG_PGD0_PG_STS", BIT(16)}, + {"SOC_D2D_PGD1_PG_STS", BIT(17)}, + {"PSF4_PGD0_PG_STS", BIT(18)}, + {"CNVI_PGD0_PG_STS", BIT(19)}, + {"UFSX2_PGD0_PG_STS", BIT(20)}, + {"ENDBG_PGD0_PG_STS", BIT(21)}, + {"DBG_PSF_PGD0_PG_STS", BIT(22)}, + {"SBR6_PGD0_PG_STS", BIT(23)}, + {"SOC_D2D_PGD2_PG_STS", BIT(24)}, + {"NPK_PGD1_PG_STS", BIT(25)}, + {"DMI3_PGD0_PG_STS", BIT(26)}, + {"DBG_SBR_PGD0_PG_STS", BIT(27)}, + {"SOC_D2D_PGD0_PG_STS", BIT(28)}, + {"PSF6_PGD0_PG_STS", BIT(29)}, + {"PSF7_PGD0_PG_STS", BIT(30)}, + {"MPFPW3_PGD0_PG_STS", BIT(31)}, + {} +}; + +static const struct pmc_bit_map arl_socs_power_gating_status_2_map[] = { + {"PSF8_PGD0_PG_STS", BIT(0)}, + {"FIA_PGD0_PG_STS", BIT(1)}, + {"SOC_D2D_PGD3_PG_STS", BIT(2)}, + {"FIA_U_PGD0_PG_STS", BIT(3)}, + {"TAM_PGD0_PG_STS", BIT(4)}, + {"GBETSN_PGD0_PG_STS", BIT(5)}, + {"TBTLSX_PGD0_PG_STS", BIT(6)}, + {"THC0_PGD0_PG_STS", BIT(7)}, + {"THC1_PGD0_PG_STS", BIT(8)}, + {"PMC_PGD1_PG_STS", BIT(9)}, + {"FIA_P5x16_PGD0_PG_STS", BIT(10)}, + {"GNA_PGD0_PG_STS", BIT(11)}, + {"ACE_PGD0_PG_STS", BIT(12)}, + {"ACE_PGD1_PG_STS", BIT(13)}, + {"ACE_PGD2_PG_STS", BIT(14)}, + {"ACE_PGD3_PG_STS", BIT(15)}, + {"ACE_PGD4_PG_STS", BIT(16)}, + {"ACE_PGD5_PG_STS", BIT(17)}, + {"ACE_PGD6_PG_STS", BIT(18)}, + {"ACE_PGD7_PG_STS", BIT(19)}, + {"ACE_PGD8_PG_STS", BIT(20)}, + {"FIA_PGS_PGD0_PG_STS", BIT(21)}, + {"FIACPCB_PGS_PGD0_PG_STS", BIT(22)}, + {"FUSEPMSB_PGD0_PG_STS", BIT(23)}, + {} +}; + +static const struct pmc_bit_map arl_socs_d3_status_2_map[] = { + {"CSMERTC_D3_STS", BIT(1)}, + {"SUSRAM_D3_STS", BIT(2)}, + {"CSE_D3_STS", BIT(4)}, + {"KVMCC_D3_STS", BIT(5)}, + {"USBR0_D3_STS", BIT(6)}, + {"ISH_D3_STS", BIT(7)}, + {"SMT1_D3_STS", BIT(8)}, + {"SMT2_D3_STS", BIT(9)}, + {"SMT3_D3_STS", BIT(10)}, + {"GNA_D3_STS", BIT(12)}, + {"CLINK_D3_STS", BIT(14)}, + {"PTIO_D3_STS", BIT(16)}, + {"PMT_D3_STS", BIT(17)}, + {"SMS1_D3_STS", BIT(18)}, + {"SMS2_D3_STS", BIT(19)}, + {} +}; + +static const struct pmc_bit_map arl_socs_d3_status_3_map[] = { + {"GBETSN_D3_STS", BIT(13)}, + {"THC0_D3_STS", BIT(14)}, + {"THC1_D3_STS", BIT(15)}, + {"ACE_D3_STS", BIT(23)}, + {} +}; + +static const struct pmc_bit_map arl_socs_vnn_req_status_3_map[] = { + {"DTS0_VNN_REQ_STS", BIT(7)}, + {"GPIOCOM5_VNN_REQ_STS", BIT(11)}, + {} +}; + +static const struct pmc_bit_map *arl_socs_lpm_maps[] = { + arl_socs_clocksource_status_map, + arl_socs_power_gating_status_0_map, + arl_socs_power_gating_status_1_map, + arl_socs_power_gating_status_2_map, + mtl_socm_d3_status_0_map, + mtl_socm_d3_status_1_map, + arl_socs_d3_status_2_map, + arl_socs_d3_status_3_map, + mtl_socm_vnn_req_status_0_map, + mtl_socm_vnn_req_status_1_map, + mtl_socm_vnn_req_status_2_map, + arl_socs_vnn_req_status_3_map, + mtl_socm_vnn_misc_status_map, + mtl_socm_signal_status_map, + NULL +}; + +static const struct pmc_bit_map 
arl_socs_pfear_map[] = { + {"RSVD64", BIT(0)}, + {"RSVD65", BIT(1)}, + {"RSVD66", BIT(2)}, + {"RSVD67", BIT(3)}, + {"RSVD68", BIT(4)}, + {"GBETSN", BIT(5)}, + {"TBTLSX", BIT(6)}, + {} +}; + +static const struct pmc_bit_map *ext_arl_socs_pfear_map[] = { + mtl_socm_pfear_map, + arl_socs_pfear_map, + NULL +}; + +static const struct pmc_reg_map arl_socs_reg_map = { + .pfear_sts = ext_arl_socs_pfear_map, + .ppfear_buckets = ARL_SOCS_PPFEAR_NUM_ENTRIES, + .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT, + .lpm_sts = arl_socs_lpm_maps, + .ltr_ignore_max = ARL_SOCS_NUM_IP_IGN_ALLOWED, + .ltr_show_sts = arl_socs_ltr_show_map, + .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET, + .slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP, + .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2, + .msr_sts = msr_map, + .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET, + .regmap_length = MTL_SOC_PMC_MMIO_REG_LEN, + .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A, + .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET, + .lpm_priority_offset = MTL_LPM_PRI_OFFSET, + .lpm_en_offset = MTL_LPM_EN_OFFSET, + .lpm_residency_offset = MTL_LPM_RESIDENCY_OFFSET, + .lpm_status_offset = MTL_LPM_STATUS_OFFSET, + .lpm_sts_latch_en_offset = MTL_LPM_STATUS_LATCH_EN_OFFSET, + .lpm_live_status_offset = MTL_LPM_LIVE_STATUS_OFFSET, + .lpm_num_maps = ADL_LPM_NUM_MAPS, + .lpm_reg_index = ARL_LPM_REG_INDEX, + .etr3_offset = ETR3_OFFSET, + .pson_residency_offset = TGL_PSON_RESIDENCY_OFFSET, + .pson_residency_counter_step = TGL_PSON_RES_COUNTER_STEP, + .lpm_req_guid = SOCS_LPM_REQ_GUID, +}; + +static const struct pmc_bit_map arl_pchs_ltr_show_map[] = { + {"SOUTHPORT_A", CNP_PMC_LTR_SPA}, + {"SOUTHPORT_B", CNP_PMC_LTR_SPB}, + {"SATA", CNP_PMC_LTR_SATA}, + {"GIGABIT_ETHERNET", CNP_PMC_LTR_GBE}, + {"XHCI", CNP_PMC_LTR_XHCI}, + {"SOUTHPORT_F", ADL_PMC_LTR_SPF}, + {"ME", CNP_PMC_LTR_ME}, + /* EVA is Enterprise Value Add, doesn't really exist on PCH */ + {"SATA1", CNP_PMC_LTR_EVA}, + {"SOUTHPORT_C", CNP_PMC_LTR_SPC}, + {"HD_AUDIO", CNP_PMC_LTR_AZ}, + {"CNV", CNP_PMC_LTR_CNV}, + {"LPSS", CNP_PMC_LTR_LPSS}, + {"SOUTHPORT_D", CNP_PMC_LTR_SPD}, + {"SOUTHPORT_E", CNP_PMC_LTR_SPE}, + {"SATA2", CNP_PMC_LTR_CAM}, + {"ESPI", CNP_PMC_LTR_ESPI}, + {"SCC", CNP_PMC_LTR_SCC}, + {"ISH", CNP_PMC_LTR_ISH}, + {"UFSX2", CNP_PMC_LTR_UFSX2}, + {"EMMC", CNP_PMC_LTR_EMMC}, + /* + * Check intel_pmc_core_ids[] users of cnp_reg_map for + * a list of core SoCs using this. 
+ */ + {"WIGIG", ICL_PMC_LTR_WIGIG}, + {"THC0", TGL_PMC_LTR_THC0}, + {"THC1", TGL_PMC_LTR_THC1}, + {"SOUTHPORT_G", MTL_PMC_LTR_SPG}, + {"ESE", MTL_PMC_LTR_ESE}, + {"IOE_PMC", MTL_PMC_LTR_IOE_PMC}, + {"DMI3", ARL_PMC_LTR_DMI3}, + + /* Below two cannot be used for LTR_IGNORE */ + {"CURRENT_PLATFORM", CNP_PMC_LTR_CUR_PLT}, + {"AGGREGATED_SYSTEM", CNP_PMC_LTR_CUR_ASLT}, + {} +}; + +static const struct pmc_bit_map arl_pchs_clocksource_status_map[] = { + {"AON2_OFF_STS", BIT(0)}, + {"AON3_OFF_STS", BIT(1)}, + {"AON4_OFF_STS", BIT(2)}, + {"AON2_SPL_OFF_STS", BIT(3)}, + {"AONL_OFF_STS", BIT(4)}, + {"XTAL_LVM_OFF_STS", BIT(5)}, + {"AON5_ACRO_OFF_STS", BIT(6)}, + {"AON6_ACRO_OFF_STS", BIT(7)}, + {"USB3_PLL_OFF_STS", BIT(8)}, + {"ACRO_OFF_STS", BIT(9)}, + {"AUDIO_PLL_OFF_STS", BIT(10)}, + {"MAIN_CRO_OFF_STS", BIT(11)}, + {"MAIN_DIVIDER_OFF_STS", BIT(12)}, + {"REF_PLL_NON_OC_OFF_STS", BIT(13)}, + {"DMI_PLL_OFF_STS", BIT(14)}, + {"PHY_EXT_INJ_OFF_STS", BIT(15)}, + {"AON6_MCRO_OFF_STS", BIT(16)}, + {"XTAL_AGGR_OFF_STS", BIT(17)}, + {"USB2_PLL_OFF_STS", BIT(18)}, + {"TSN0_PLL_OFF_STS", BIT(19)}, + {"TSN1_PLL_OFF_STS", BIT(20)}, + {"GBE_PLL_OFF_STS", BIT(21)}, + {"SATA_PLL_OFF_STS", BIT(22)}, + {"PCIE0_PLL_OFF_STS", BIT(23)}, + {"PCIE1_PLL_OFF_STS", BIT(24)}, + {"PCIE2_PLL_OFF_STS", BIT(26)}, + {"PCIE3_PLL_OFF_STS", BIT(27)}, + {"REF_PLL_OFF_STS", BIT(28)}, + {"PCIE4_PLL_OFF_STS", BIT(29)}, + {"PCIE5_PLL_OFF_STS", BIT(30)}, + {"REF38P4_PLL_OFF_STS", BIT(31)}, + {} +}; + +static const struct pmc_bit_map arl_pchs_power_gating_status_0_map[] = { + {"PMC_PGD0_PG_STS", BIT(0)}, + {"DMI_PGD0_PG_STS", BIT(1)}, + {"ESPISPI_PGD0_PG_STS", BIT(2)}, + {"XHCI_PGD0_PG_STS", BIT(3)}, + {"SPA_PGD0_PG_STS", BIT(4)}, + {"SPB_PGD0_PG_STS", BIT(5)}, + {"SPC_PGD0_PG_STS", BIT(6)}, + {"GBE_PGD0_PG_STS", BIT(7)}, + {"SATA_PGD0_PG_STS", BIT(8)}, + {"FIA_X_PGD0_PG_STS", BIT(9)}, + {"MPFPW4_PGD0_PG_STS", BIT(10)}, + {"EAH_PGD0_PG_STS", BIT(11)}, + {"MPFPW1_PGD0_PG_STS", BIT(12)}, + {"SPD_PGD0_PG_STS", BIT(13)}, + {"LPSS_PGD0_PG_STS", BIT(14)}, + {"LPC_PGD0_PG_STS", BIT(15)}, + {"SMB_PGD0_PG_STS", BIT(16)}, + {"ISH_PGD0_PG_STS", BIT(17)}, + {"P2S_PGD0_PG_STS", BIT(18)}, + {"NPK_PGD0_PG_STS", BIT(19)}, + {"U3FPW1_PGD0_PG_STS", BIT(20)}, + {"PECI_PGD0_PG_STS", BIT(21)}, + {"FUSE_PGD0_PG_STS", BIT(22)}, + {"SBR8_PGD0_PG_STS", BIT(23)}, + {"EXE_PGD0_PG_STS", BIT(24)}, + {"XDCI_PGD0_PG_STS", BIT(25)}, + {"EXI_PGD0_PG_STS", BIT(26)}, + {"CSE_PGD0_PG_STS", BIT(27)}, + {"KVMCC_PGD0_PG_STS", BIT(28)}, + {"PMT_PGD0_PG_STS", BIT(29)}, + {"CLINK_PGD0_PG_STS", BIT(30)}, + {"PTIO_PGD0_PG_STS", BIT(31)}, + {} +}; + +static const struct pmc_bit_map arl_pchs_power_gating_status_1_map[] = { + {"USBR0_PGD0_PG_STS", BIT(0)}, + {"SUSRAM_PGD0_PG_STS", BIT(1)}, + {"SMT1_PGD0_PG_STS", BIT(2)}, + {"SMT4_PGD0_PG_STS", BIT(3)}, + {"SMS2_PGD0_PG_STS", BIT(4)}, + {"SMS1_PGD0_PG_STS", BIT(5)}, + {"CSMERTC_PGD0_PG_STS", BIT(6)}, + {"CSMEPSF_PGD0_PG_STS", BIT(7)}, + {"SBR0_PGD0_PG_STS", BIT(8)}, + {"SBR1_PGD0_PG_STS", BIT(9)}, + {"SBR2_PGD0_PG_STS", BIT(10)}, + {"SBR3_PGD0_PG_STS", BIT(11)}, + {"SBR4_PGD0_PG_STS", BIT(12)}, + {"SBR5_PGD0_PG_STS", BIT(13)}, + {"MPFPW3_PGD0_PG_STS", BIT(14)}, + {"PSF1_PGD0_PG_STS", BIT(15)}, + {"PSF2_PGD0_PG_STS", BIT(16)}, + {"PSF3_PGD0_PG_STS", BIT(17)}, + {"PSF4_PGD0_PG_STS", BIT(18)}, + {"CNVI_PGD0_PG_STS", BIT(19)}, + {"DMI3_PGD0_PG_STS", BIT(20)}, + {"ENDBG_PGD0_PG_STS", BIT(21)}, + {"DBG_SBR_PGD0_PG_STS", BIT(22)}, + {"SBR6_PGD0_PG_STS", BIT(23)}, + {"SBR7_PGD0_PG_STS", BIT(24)}, + {"NPK_PGD1_PG_STS", BIT(25)}, + 
{"U3FPW3_PGD0_PG_STS", BIT(26)}, + {"MPFPW2_PGD0_PG_STS", BIT(27)}, + {"MPFPW7_PGD0_PG_STS", BIT(28)}, + {"GBETSN1_PGD0_PG_STS", BIT(29)}, + {"PSF7_PGD0_PG_STS", BIT(30)}, + {"FIA2_PGD0_PG_STS", BIT(31)}, + {} +}; + +static const struct pmc_bit_map arl_pchs_power_gating_status_2_map[] = { + {"U3FPW2_PGD0_PG_STS", BIT(0)}, + {"FIA_PGD0_PG_STS", BIT(1)}, + {"FIACPCB_X_PGD0_PG_STS", BIT(2)}, + {"FIA1_PGD0_PG_STS", BIT(3)}, + {"TAM_PGD0_PG_STS", BIT(4)}, + {"GBETSN_PGD0_PG_STS", BIT(5)}, + {"SBR9_PGD0_PG_STS", BIT(6)}, + {"THC0_PGD0_PG_STS", BIT(7)}, + {"THC1_PGD0_PG_STS", BIT(8)}, + {"PMC_PGD1_PG_STS", BIT(9)}, + {"DBC_PGD0_PG_STS", BIT(10)}, + {"DBG_PSF_PGD0_PG_STS", BIT(11)}, + {"SPF_PGD0_PG_STS", BIT(12)}, + {"ACE_PGD0_PG_STS", BIT(13)}, + {"ACE_PGD1_PG_STS", BIT(14)}, + {"ACE_PGD2_PG_STS", BIT(15)}, + {"ACE_PGD3_PG_STS", BIT(16)}, + {"ACE_PGD4_PG_STS", BIT(17)}, + {"ACE_PGD5_PG_STS", BIT(18)}, + {"ACE_PGD6_PG_STS", BIT(19)}, + {"ACE_PGD7_PG_STS", BIT(20)}, + {"SPE_PGD0_PG_STS", BIT(21)}, + {"MPFPW5_PG_STS", BIT(22)}, + {} +}; + +static const struct pmc_bit_map arl_pchs_d3_status_0_map[] = { + {"SPF_D3_STS", BIT(0)}, + {"LPSS_D3_STS", BIT(3)}, + {"XDCI_D3_STS", BIT(4)}, + {"XHCI_D3_STS", BIT(5)}, + {"SPA_D3_STS", BIT(12)}, + {"SPB_D3_STS", BIT(13)}, + {"SPC_D3_STS", BIT(14)}, + {"SPD_D3_STS", BIT(15)}, + {"SPE_D3_STS", BIT(16)}, + {"ESPISPI_D3_STS", BIT(18)}, + {"SATA_D3_STS", BIT(20)}, + {"PSTH_D3_STS", BIT(21)}, + {"DMI_D3_STS", BIT(22)}, + {} +}; + +static const struct pmc_bit_map arl_pchs_d3_status_1_map[] = { + {"GBETSN1_D3_STS", BIT(14)}, + {"GBE_D3_STS", BIT(19)}, + {"ITSS_D3_STS", BIT(23)}, + {"P2S_D3_STS", BIT(24)}, + {"CNVI_D3_STS", BIT(27)}, + {} +}; + +static const struct pmc_bit_map arl_pchs_d3_status_2_map[] = { + {"CSMERTC_D3_STS", BIT(1)}, + {"SUSRAM_D3_STS", BIT(2)}, + {"CSE_D3_STS", BIT(4)}, + {"KVMCC_D3_STS", BIT(5)}, + {"USBR0_D3_STS", BIT(6)}, + {"ISH_D3_STS", BIT(7)}, + {"SMT1_D3_STS", BIT(8)}, + {"SMT2_D3_STS", BIT(9)}, + {"SMT3_D3_STS", BIT(10)}, + {"SMT4_D3_STS", BIT(11)}, + {"SMT5_D3_STS", BIT(12)}, + {"SMT6_D3_STS", BIT(13)}, + {"CLINK_D3_STS", BIT(14)}, + {"PTIO_D3_STS", BIT(16)}, + {"PMT_D3_STS", BIT(17)}, + {"SMS1_D3_STS", BIT(18)}, + {"SMS2_D3_STS", BIT(19)}, + {} +}; + +static const struct pmc_bit_map arl_pchs_d3_status_3_map[] = { + {"ESE_D3_STS", BIT(3)}, + {"GBETSN_D3_STS", BIT(13)}, + {"THC0_D3_STS", BIT(14)}, + {"THC1_D3_STS", BIT(15)}, + {"ACE_D3_STS", BIT(23)}, + {} +}; + +static const struct pmc_bit_map arl_pchs_vnn_req_status_0_map[] = { + {"FIA_VNN_REQ_STS", BIT(17)}, + {"ESPISPI_VNN_REQ_STS", BIT(18)}, + {} +}; + +static const struct pmc_bit_map arl_pchs_vnn_req_status_1_map[] = { + {"NPK_VNN_REQ_STS", BIT(4)}, + {"DFXAGG_VNN_REQ_STS", BIT(8)}, + {"EXI_VNN_REQ_STS", BIT(9)}, + {"GBE_VNN_REQ_STS", BIT(19)}, + {"SMB_VNN_REQ_STS", BIT(25)}, + {"LPC_VNN_REQ_STS", BIT(26)}, + {"CNVI_VNN_REQ_STS", BIT(27)}, + {} +}; + +static const struct pmc_bit_map arl_pchs_vnn_req_status_2_map[] = { + {"FIA2_VNN_REQ_STS", BIT(0)}, + {"CSMERTC_VNN_REQ_STS", BIT(1)}, + {"CSE_VNN_REQ_STS", BIT(4)}, + {"ISH_VNN_REQ_STS", BIT(7)}, + {"SMT1_VNN_REQ_STS", BIT(8)}, + {"SMT4_VNN_REQ_STS", BIT(11)}, + {"CLINK_VNN_REQ_STS", BIT(14)}, + {"SMS1_VNN_REQ_STS", BIT(18)}, + {"SMS2_VNN_REQ_STS", BIT(19)}, + {"GPIOCOM4_VNN_REQ_STS", BIT(20)}, + {"GPIOCOM3_VNN_REQ_STS", BIT(21)}, + {"GPIOCOM2_VNN_REQ_STS", BIT(22)}, + {"GPIOCOM1_VNN_REQ_STS", BIT(23)}, + {"GPIOCOM0_VNN_REQ_STS", BIT(24)}, + {} +}; + +static const struct pmc_bit_map arl_pchs_vnn_req_status_3_map[] = { + 
{"ESE_VNN_REQ_STS", BIT(3)}, + {"DTS0_VNN_REQ_STS", BIT(7)}, + {"GPIOCOM5_VNN_REQ_STS", BIT(11)}, + {"FIA1_VNN_REQ_STS", BIT(12)}, + {} +}; + +static const struct pmc_bit_map arl_pchs_vnn_misc_status_map[] = { + {"CPU_C10_REQ_STS", BIT(0)}, + {"TS_OFF_REQ_STS", BIT(1)}, + {"PNDE_MET_REQ_STS", BIT(2)}, + {"PCIE_DEEP_PM_REQ_STS", BIT(3)}, + {"FW_THROTTLE_ALLOWED_REQ_STS", BIT(4)}, + {"ISH_VNNAON_REQ_STS", BIT(7)}, + {"IOE_COND_MET_S02I2_0_REQ_STS", BIT(8)}, + {"IOE_COND_MET_S02I2_1_REQ_STS", BIT(9)}, + {"IOE_COND_MET_S02I2_2_REQ_STS", BIT(10)}, + {"PLT_GREATER_REQ_STS", BIT(11)}, + {"PMC_IDLE_FB_OCP_REQ_STS", BIT(13)}, + {"PM_SYNC_STATES_REQ_STS", BIT(14)}, + {"EA_REQ_STS", BIT(15)}, + {"DMI_CLKREQ_B_REQ_STS", BIT(16)}, + {"BRK_EV_EN_REQ_STS", BIT(17)}, + {"AUTO_DEMO_EN_REQ_STS", BIT(18)}, + {"ITSS_CLK_SRC_REQ_STS", BIT(19)}, + {"ARC_IDLE_REQ_STS", BIT(21)}, + {"DMI_IN_REQ_STS", BIT(22)}, + {"FIA_DEEP_PM_REQ_STS", BIT(23)}, + {"XDCI_ATTACHED_REQ_STS", BIT(24)}, + {"ARC_INTERRUPT_WAKE_REQ_STS", BIT(25)}, + {"PRE_WAKE0_REQ_STS", BIT(27)}, + {"PRE_WAKE1_REQ_STS", BIT(28)}, + {"PRE_WAKE2_EN_REQ_STS", BIT(29)}, + {"CNVI_V1P05_REQ_STS", BIT(31)}, + {} +}; + +static const struct pmc_bit_map arl_pchs_signal_status_map[] = { + {"LSX_Wake0_STS", BIT(0)}, + {"LSX_Wake1_STS", BIT(1)}, + {"LSX_Wake2_STS", BIT(2)}, + {"LSX_Wake3_STS", BIT(3)}, + {"LSX_Wake4_STS", BIT(4)}, + {"LSX_Wake5_STS", BIT(5)}, + {"LSX_Wake6_STS", BIT(6)}, + {"LSX_Wake7_STS", BIT(7)}, + {"Int_Timer_SS_Wake0_STS", BIT(8)}, + {"Int_Timer_SS_Wake1_STS", BIT(9)}, + {"Int_Timer_SS_Wake0_STS", BIT(10)}, + {"Int_Timer_SS_Wake1_STS", BIT(11)}, + {"Int_Timer_SS_Wake2_STS", BIT(12)}, + {"Int_Timer_SS_Wake3_STS", BIT(13)}, + {"Int_Timer_SS_Wake4_STS", BIT(14)}, + {"Int_Timer_SS_Wake5_STS", BIT(15)}, + {} +}; + +static const struct pmc_bit_map *arl_pchs_lpm_maps[] = { + arl_pchs_clocksource_status_map, + arl_pchs_power_gating_status_0_map, + arl_pchs_power_gating_status_1_map, + arl_pchs_power_gating_status_2_map, + arl_pchs_d3_status_0_map, + arl_pchs_d3_status_1_map, + arl_pchs_d3_status_2_map, + arl_pchs_d3_status_3_map, + arl_pchs_vnn_req_status_0_map, + arl_pchs_vnn_req_status_1_map, + arl_pchs_vnn_req_status_2_map, + arl_pchs_vnn_req_status_3_map, + arl_pchs_vnn_misc_status_map, + arl_pchs_signal_status_map, + NULL +}; + +static const struct pmc_reg_map arl_pchs_reg_map = { + .pfear_sts = ext_arl_socs_pfear_map, + .ppfear_buckets = ARL_SOCS_PPFEAR_NUM_ENTRIES, + .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT, + .ltr_ignore_max = ARL_SOCS_NUM_IP_IGN_ALLOWED, + .lpm_sts = arl_pchs_lpm_maps, + .ltr_show_sts = arl_pchs_ltr_show_map, + .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET, + .slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP, + .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2, + .msr_sts = msr_map, + .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET, + .regmap_length = ARL_PCH_PMC_MMIO_REG_LEN, + .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A, + .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET, + .lpm_priority_offset = MTL_LPM_PRI_OFFSET, + .lpm_en_offset = MTL_LPM_EN_OFFSET, + .lpm_residency_offset = MTL_LPM_RESIDENCY_OFFSET, + .lpm_status_offset = MTL_LPM_STATUS_OFFSET, + .lpm_sts_latch_en_offset = MTL_LPM_STATUS_LATCH_EN_OFFSET, + .lpm_live_status_offset = MTL_LPM_LIVE_STATUS_OFFSET, + .lpm_num_maps = ADL_LPM_NUM_MAPS, + .lpm_reg_index = ARL_LPM_REG_INDEX, + .etr3_offset = ETR3_OFFSET, + .lpm_req_guid = PCHS_LPM_REQ_GUID, +}; + +static struct pmc_info arl_pmc_info_list[] = { + { + .devid = PMC_DEVID_ARL_IOEP, + .map = 
&mtl_ioep_reg_map, + }, + { + .devid = PMC_DEVID_ARL_SOCS, + .map = &arl_socs_reg_map, + }, + { + .devid = PMC_DEVID_ARL_PCHS, + .map = &arl_pchs_reg_map, + }, + { + .devid = PMC_DEVID_ARL_SOCM, + .map = &mtl_socm_reg_map, + }, + {} +}; + +#define ARL_NPU_PCI_DEV 0xad1d +#define ARL_GNA_PCI_DEV 0xae4c +#define ARL_H_NPU_PCI_DEV 0x7d1d +#define ARL_H_GNA_PCI_DEV 0x774c +/* + * Set power state of select devices that do not have drivers to D3 + * so that they do not block Package C entry. + */ +static void arl_d3_fixup(void) +{ + pmc_core_set_device_d3(ARL_NPU_PCI_DEV); + pmc_core_set_device_d3(ARL_GNA_PCI_DEV); +} + +static void arl_h_d3_fixup(void) +{ + pmc_core_set_device_d3(ARL_H_NPU_PCI_DEV); + pmc_core_set_device_d3(ARL_H_GNA_PCI_DEV); +} + +static int arl_resume(struct pmc_dev *pmcdev) +{ + arl_d3_fixup(); + + return cnl_resume(pmcdev); +} + +static int arl_h_resume(struct pmc_dev *pmcdev) +{ + arl_h_d3_fixup(); + + return cnl_resume(pmcdev); +} + +static int arl_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info) +{ + arl_d3_fixup(); + return generic_core_init(pmcdev, pmc_dev_info); +} + +static int arl_h_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info) +{ + arl_h_d3_fixup(); + return generic_core_init(pmcdev, pmc_dev_info); +} + +static u32 ARL_PMT_DMU_GUIDS[] = {ARL_PMT_DMU_GUID, 0x0}; +struct pmc_dev_info arl_pmc_dev = { + .pci_func = 0, + .dmu_guids = ARL_PMT_DMU_GUIDS, + .regmap_list = arl_pmc_info_list, + .map = &arl_socs_reg_map, + .sub_req_show = &pmc_core_substate_req_regs_fops, + .suspend = cnl_suspend, + .resume = arl_resume, + .init = arl_core_init, + .sub_req = pmc_core_pmt_get_lpm_req, +}; + +static u32 ARL_H_PMT_DMU_GUIDS[] = {ARL_PMT_DMU_GUID, ARL_H_PMT_DMU_GUID, 0x0}; +struct pmc_dev_info arl_h_pmc_dev = { + .pci_func = 2, + .dmu_guids = ARL_H_PMT_DMU_GUIDS, + .regmap_list = arl_pmc_info_list, + .map = &mtl_socm_reg_map, + .sub_req_show = &pmc_core_substate_req_regs_fops, + .suspend = cnl_suspend, + .resume = arl_h_resume, + .init = arl_h_core_init, + .sub_req = pmc_core_pmt_get_lpm_req, +}; diff --git a/drivers/platform/x86/intel/pmc/cnp.c b/drivers/platform/x86/intel/pmc/cnp.c index 7fb38815c4eb..efea4e1ba52b 100644 --- a/drivers/platform/x86/intel/pmc/cnp.c +++ b/drivers/platform/x86/intel/pmc/cnp.c @@ -8,6 +8,9 @@ * */ +#include <linux/smp.h> +#include <linux/suspend.h> +#include <asm/msr.h> #include "core.h" /* Cannon Lake: PGD PFET Enable Ack Status Register(s) bitmap */ @@ -86,7 +89,7 @@ const struct pmc_bit_map cnp_pfear_map[] = { {} }; -const struct pmc_bit_map *ext_cnp_pfear_map[] = { +static const struct pmc_bit_map *ext_cnp_pfear_map[] = { /* * Check intel_pmc_core_ids[] users of cnp_reg_map for * a list of core SoCs using this. 
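The *_map tables throughout these PMC hunks all follow the pmc_bit_map convention: an array of {name, bit_mask} pairs terminated by an empty entry, often grouped further into a NULL-terminated array of maps (ext_*_pfear_map, *_lpm_maps) that the core decodes one 32-bit register at a time. A minimal standalone sketch of that decode loop follows — an illustration, not the driver's code: stdint/printf stand in for the kernel types and seq_file output, and example_d3_map and decode_map are hypothetical names reusing entries from arl_socs_d3_status_3_map above.

#include <stdint.h>
#include <stdio.h>

struct pmc_bit_map {
	const char *name;
	uint32_t bit_mask;
};

/* Hypothetical table in the same shape as the ARL maps in this patch */
static const struct pmc_bit_map example_d3_map[] = {
	{"GBETSN_D3_STS", 1u << 13},
	{"THC0_D3_STS",   1u << 14},
	{"THC1_D3_STS",   1u << 15},
	{}	/* empty sentinel (NULL name) ends the walk */
};

/* Decode one register value against a map, as pmc_core_lpm_display() does */
static void decode_map(const struct pmc_bit_map *map, uint32_t regval)
{
	for (; map->name; map++)
		printf("%-30s %d\n", map->name, regval & map->bit_mask ? 1 : 0);
}

int main(void)
{
	decode_map(example_d3_map, 1u << 14);	/* only THC0_D3_STS reads 1 */
	return 0;
}

The same sentinel idea applies one level up: pmc_core_lpm_get_arr_size() simply counts map pointers until the NULL terminator, so no separate length field has to be kept in sync with the tables.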
@@ -95,7 +98,7 @@ const struct pmc_bit_map *ext_cnp_pfear_map[] = { NULL }; -const struct pmc_bit_map cnp_slps0_dbg0_map[] = { +static const struct pmc_bit_map cnp_slps0_dbg0_map[] = { {"AUDIO_D3", BIT(0)}, {"OTG_D3", BIT(1)}, {"XHCI_D3", BIT(2)}, @@ -108,7 +111,7 @@ const struct pmc_bit_map cnp_slps0_dbg0_map[] = { {} }; -const struct pmc_bit_map cnp_slps0_dbg1_map[] = { +static const struct pmc_bit_map cnp_slps0_dbg1_map[] = { {"SDIO_PLL_OFF", BIT(0)}, {"USB2_PLL_OFF", BIT(1)}, {"AUDIO_PLL_OFF", BIT(2)}, @@ -125,7 +128,7 @@ const struct pmc_bit_map cnp_slps0_dbg1_map[] = { {} }; -const struct pmc_bit_map cnp_slps0_dbg2_map[] = { +static const struct pmc_bit_map cnp_slps0_dbg2_map[] = { {"MPHY_CORE_GATED", BIT(0)}, {"CSME_GATED", BIT(1)}, {"USB2_SUS_GATED", BIT(2)}, @@ -204,7 +207,77 @@ const struct pmc_reg_map cnp_reg_map = { .etr3_offset = ETR3_OFFSET, }; -void cnp_core_init(struct pmc_dev *pmcdev) + +/* + * Disable C1 auto-demotion + * + * Aggressive C1 auto-demotion may lead to failure to enter the deepest C-state + * during suspend-to-idle, causing high power consumption. To prevent this, we + * disable C1 auto-demotion during suspend and re-enable on resume. + * + * Note that, although MSR_PKG_CST_CONFIG_CONTROL has 'package' in its name, it + * is actually a per-core MSR on client platforms, affecting only a single CPU. + * Therefore, it must be configured on all online CPUs. The online cpu mask is + * unchanged during the phase of suspend/resume as user space is frozen. + */ + +static DEFINE_PER_CPU(u64, pkg_cst_config); + +static void disable_c1_auto_demote(void *unused) +{ + int cpunum = smp_processor_id(); + u64 val; + + rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, val); + per_cpu(pkg_cst_config, cpunum) = val; + val &= ~NHM_C1_AUTO_DEMOTE; + wrmsrq(MSR_PKG_CST_CONFIG_CONTROL, val); + + pr_debug("%s: cpu:%d cst %llx\n", __func__, cpunum, val); +} + +static void restore_c1_auto_demote(void *unused) +{ + int cpunum = smp_processor_id(); + + wrmsrq(MSR_PKG_CST_CONFIG_CONTROL, per_cpu(pkg_cst_config, cpunum)); + + pr_debug("%s: cpu:%d cst %llx\n", __func__, cpunum, + per_cpu(pkg_cst_config, cpunum)); +} + +static void s2idle_cpu_quirk(smp_call_func_t func) +{ + if (pm_suspend_via_firmware()) + return; + + on_each_cpu(func, NULL, true); +} + +void cnl_suspend(struct pmc_dev *pmcdev) +{ + s2idle_cpu_quirk(disable_c1_auto_demote); + + /* + * Due to a hardware limitation, the GBE LTR blocks PC10 + * when a cable is attached. To unblock PC10 during suspend, + * tell the PMC to ignore it. 
+ */ + pmc_core_send_ltr_ignore(pmcdev, 3, 1); +} + +int cnl_resume(struct pmc_dev *pmcdev) { - pmcdev->map = &cnp_reg_map; + s2idle_cpu_quirk(restore_c1_auto_demote); + + pmc_core_send_ltr_ignore(pmcdev, 3, 0); + + return pmc_core_resume_common(pmcdev); } + +struct pmc_dev_info cnp_pmc_dev = { + .map = &cnp_reg_map, + .suspend = cnl_suspend, + .resume = cnl_resume, +}; + diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c index 3a15d32d7644..7d7ae8a40b0e 100644 --- a/drivers/platform/x86/intel/pmc/core.c +++ b/drivers/platform/x86/intel/pmc/core.c @@ -11,22 +11,32 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +enum header_type { + HEADER_STATUS, + HEADER_VALUE, +}; + #include <linux/bitfield.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/dmi.h> +#include <linux/err.h> #include <linux/io.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/suspend.h> +#include <linux/units.h> +#include <asm/cpuid/api.h> #include <asm/cpu_device_id.h> #include <asm/intel-family.h> #include <asm/msr.h> #include <asm/tsc.h> #include "core.h" +#include "ssram_telemetry.h" +#include "../pmt/telemetry.h" /* Maximum number of modes supported by platforms that have low power mode capability */ const char *pmc_lpm_modes[] = { @@ -53,55 +63,58 @@ const struct pmc_bit_map msr_map[] = { {} }; -static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset) +static inline u32 pmc_core_reg_read(struct pmc *pmc, int reg_offset) { - return readl(pmcdev->regbase + reg_offset); + return readl(pmc->regbase + reg_offset); } -static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int reg_offset, +static inline void pmc_core_reg_write(struct pmc *pmc, int reg_offset, u32 val) { - writel(val, pmcdev->regbase + reg_offset); + writel(val, pmc->regbase + reg_offset); } -static inline u64 pmc_core_adjust_slp_s0_step(struct pmc_dev *pmcdev, u32 value) +static inline u64 pmc_core_adjust_slp_s0_step(struct pmc *pmc, u32 value) { - return (u64)value * pmcdev->map->slp_s0_res_counter_step; + /* + * ADL PCH does not have the SLP_S0 counter and LPM Residency counters are + * used as a workaround which uses 30.5 usec tick. All other client + * programs have the legacy SLP_S0 residency counter that is using the 122
+ */ + const int lpm_adj_x2 = pmc->map->lpm_res_counter_step_x2; + + if (pmc->map == &adl_reg_map) + return (u64)value * GET_X2_COUNTER((u64)lpm_adj_x2); + else + return (u64)value * pmc->map->slp_s0_res_counter_step; } static int set_etr3(struct pmc_dev *pmcdev) { - const struct pmc_reg_map *map = pmcdev->map; + struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN]; + const struct pmc_reg_map *map = pmc->map; u32 reg; - int err; if (!map->etr3_offset) return -EOPNOTSUPP; - mutex_lock(&pmcdev->lock); + guard(mutex)(&pmcdev->lock); /* check if CF9 is locked */ - reg = pmc_core_reg_read(pmcdev, map->etr3_offset); - if (reg & ETR3_CF9LOCK) { - err = -EACCES; - goto out_unlock; - } + reg = pmc_core_reg_read(pmc, map->etr3_offset); + if (reg & ETR3_CF9LOCK) + return -EACCES; /* write CF9 global reset bit */ reg |= ETR3_CF9GR; - pmc_core_reg_write(pmcdev, map->etr3_offset, reg); - - reg = pmc_core_reg_read(pmcdev, map->etr3_offset); - if (!(reg & ETR3_CF9GR)) { - err = -EIO; - goto out_unlock; - } + pmc_core_reg_write(pmc, map->etr3_offset, reg); - err = 0; + reg = pmc_core_reg_read(pmc, map->etr3_offset); + if (!(reg & ETR3_CF9GR)) + return -EIO; -out_unlock: - mutex_unlock(&pmcdev->lock); - return err; + return 0; } static umode_t etr3_is_visible(struct kobject *kobj, struct attribute *attr, @@ -109,12 +122,12 @@ static umode_t etr3_is_visible(struct kobject *kobj, { struct device *dev = kobj_to_dev(kobj); struct pmc_dev *pmcdev = dev_get_drvdata(dev); - const struct pmc_reg_map *map = pmcdev->map; + struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN]; + const struct pmc_reg_map *map = pmc->map; u32 reg; - mutex_lock(&pmcdev->lock); - reg = pmc_core_reg_read(pmcdev, map->etr3_offset); - mutex_unlock(&pmcdev->lock); + scoped_guard(mutex, &pmcdev->lock) + reg = pmc_core_reg_read(pmc, map->etr3_offset); return reg & ETR3_CF9LOCK ? 
attr->mode & (SYSFS_PREALLOC | 0444) : attr->mode; } @@ -123,18 +136,17 @@ static ssize_t etr3_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pmc_dev *pmcdev = dev_get_drvdata(dev); - const struct pmc_reg_map *map = pmcdev->map; + struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN]; + const struct pmc_reg_map *map = pmc->map; u32 reg; if (!map->etr3_offset) return -EOPNOTSUPP; - mutex_lock(&pmcdev->lock); - - reg = pmc_core_reg_read(pmcdev, map->etr3_offset); - reg &= ETR3_CF9GR | ETR3_CF9LOCK; - - mutex_unlock(&pmcdev->lock); + scoped_guard(mutex, &pmcdev->lock) { + reg = pmc_core_reg_read(pmc, map->etr3_offset); + reg &= ETR3_CF9GR | ETR3_CF9LOCK; + } return sysfs_emit(buf, "0x%08x", reg); } @@ -180,37 +192,51 @@ static const struct attribute_group *pmc_dev_groups[] = { static int pmc_core_dev_state_get(void *data, u64 *val) { - struct pmc_dev *pmcdev = data; - const struct pmc_reg_map *map = pmcdev->map; + struct pmc *pmc = data; + const struct pmc_reg_map *map = pmc->map; u32 value; - value = pmc_core_reg_read(pmcdev, map->slp_s0_offset); - *val = pmc_core_adjust_slp_s0_step(pmcdev, value); + value = pmc_core_reg_read(pmc, map->slp_s0_offset); + *val = pmc_core_adjust_slp_s0_step(pmc, value); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(pmc_core_dev_state, pmc_core_dev_state_get, NULL, "%llu\n"); -static int pmc_core_check_read_lock_bit(struct pmc_dev *pmcdev) +static int pmc_core_pson_residency_get(void *data, u64 *val) +{ + struct pmc *pmc = data; + const struct pmc_reg_map *map = pmc->map; + u32 value; + + value = pmc_core_reg_read(pmc, map->pson_residency_offset); + *val = (u64)value * map->pson_residency_counter_step; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(pmc_core_pson_residency, pmc_core_pson_residency_get, NULL, "%llu\n"); + +static int pmc_core_check_read_lock_bit(struct pmc *pmc) { u32 value; - value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_cfg_offset); - return value & BIT(pmcdev->map->pm_read_disable_bit); + value = pmc_core_reg_read(pmc, pmc->map->pm_cfg_offset); + return value & BIT(pmc->map->pm_read_disable_bit); } -static void pmc_core_slps0_display(struct pmc_dev *pmcdev, struct device *dev, +static void pmc_core_slps0_display(struct pmc *pmc, struct device *dev, struct seq_file *s) { - const struct pmc_bit_map **maps = pmcdev->map->slps0_dbg_maps; + const struct pmc_bit_map **maps = pmc->map->slps0_dbg_maps; const struct pmc_bit_map *map; - int offset = pmcdev->map->slps0_dbg_offset; + int offset = pmc->map->slps0_dbg_offset; u32 data; while (*maps) { map = *maps; - data = pmc_core_reg_read(pmcdev, offset); + data = pmc_core_reg_read(pmc, offset); offset += 4; while (map->name) { if (dev) @@ -227,9 +253,9 @@ static void pmc_core_slps0_display(struct pmc_dev *pmcdev, struct device *dev, } } -static int pmc_core_lpm_get_arr_size(const struct pmc_bit_map **maps) +static unsigned int pmc_core_lpm_get_arr_size(const struct pmc_bit_map **maps) { - int idx; + unsigned int idx; for (idx = 0; maps[idx]; idx++) ;/* Nothing */ @@ -237,13 +263,13 @@ static int pmc_core_lpm_get_arr_size(const struct pmc_bit_map **maps) return idx; } -static void pmc_core_lpm_display(struct pmc_dev *pmcdev, struct device *dev, - struct seq_file *s, u32 offset, +static void pmc_core_lpm_display(struct pmc *pmc, struct device *dev, + struct seq_file *s, u32 offset, int pmc_index, const char *str, const struct pmc_bit_map **maps) { - int index, idx, len = 32, bit_mask, arr_size; - u32 *lpm_regs; + unsigned int index, idx, len = 32, arr_size; + u32 bit_mask, *lpm_regs; arr_size = 
pmc_core_lpm_get_arr_size(maps); lpm_regs = kmalloc_array(arr_size, sizeof(*lpm_regs), GFP_KERNEL); @@ -251,25 +277,25 @@ static void pmc_core_lpm_display(struct pmc_dev *pmcdev, struct device *dev, return; for (index = 0; index < arr_size; index++) { - lpm_regs[index] = pmc_core_reg_read(pmcdev, offset); + lpm_regs[index] = pmc_core_reg_read(pmc, offset); offset += 4; } for (idx = 0; idx < arr_size; idx++) { if (dev) - dev_info(dev, "\nLPM_%s_%d:\t0x%x\n", str, idx, + dev_info(dev, "\nPMC%d:LPM_%s_%d:\t0x%x\n", pmc_index, str, idx, lpm_regs[idx]); if (s) - seq_printf(s, "\nLPM_%s_%d:\t0x%x\n", str, idx, + seq_printf(s, "\nPMC%d:LPM_%s_%d:\t0x%x\n", pmc_index, str, idx, lpm_regs[idx]); for (index = 0; maps[idx][index].name && index < len; index++) { bit_mask = maps[idx][index].bit_mask; if (dev) - dev_info(dev, "%-30s %-30d\n", + dev_info(dev, "PMC%d:%-30s %-30d\n", pmc_index, maps[idx][index].name, lpm_regs[idx] & bit_mask ? 1 : 0); if (s) - seq_printf(s, "%-30s %-30d\n", + seq_printf(s, "PMC%d:%-30s %-30d\n", pmc_index, maps[idx][index].name, lpm_regs[idx] & bit_mask ? 1 : 0); } @@ -280,37 +306,46 @@ static void pmc_core_lpm_display(struct pmc_dev *pmcdev, struct device *dev, static bool slps0_dbg_latch; -static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset) +static inline u8 pmc_core_reg_read_byte(struct pmc *pmc, int offset) { - return readb(pmcdev->regbase + offset); + return readb(pmc->regbase + offset); } static void pmc_core_display_map(struct seq_file *s, int index, int idx, int ip, - u8 pf_reg, const struct pmc_bit_map **pf_map) + int pmc_idx, u8 pf_reg, const struct pmc_bit_map **pf_map) { - seq_printf(s, "PCH IP: %-2d - %-32s\tState: %s\n", - ip, pf_map[idx][index].name, + seq_printf(s, "PMC%d:PCH IP: %-2d - %-32s\tState: %s\n", + pmc_idx, ip, pf_map[idx][index].name, pf_map[idx][index].bit_mask & pf_reg ? 
"Off" : "On"); } static int pmc_core_ppfear_show(struct seq_file *s, void *unused) { struct pmc_dev *pmcdev = s->private; - const struct pmc_bit_map **maps = pmcdev->map->pfear_sts; - u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES]; - int index, iter, idx, ip = 0; + unsigned int pmc_idx; + + for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) { + struct pmc *pmc = pmcdev->pmcs[pmc_idx]; + const struct pmc_bit_map **maps; + u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES]; + unsigned int index, iter, idx, ip = 0; - iter = pmcdev->map->ppfear0_offset; + if (!pmc) + continue; - for (index = 0; index < pmcdev->map->ppfear_buckets && - index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++) - pf_regs[index] = pmc_core_reg_read_byte(pmcdev, iter); + maps = pmc->map->pfear_sts; + iter = pmc->map->ppfear0_offset; - for (idx = 0; maps[idx]; idx++) { - for (index = 0; maps[idx][index].name && - index < pmcdev->map->ppfear_buckets * 8; ip++, index++) - pmc_core_display_map(s, index, idx, ip, - pf_regs[index / 8], maps); + for (index = 0; index < pmc->map->ppfear_buckets && + index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++) + pf_regs[index] = pmc_core_reg_read_byte(pmc, iter); + + for (idx = 0; maps[idx]; idx++) { + for (index = 0; maps[idx][index].name && + index < pmc->map->ppfear_buckets * 8; ip++, index++) + pmc_core_display_map(s, index, idx, ip, pmc_idx, + pf_regs[index / 8], maps); + } } return 0; @@ -318,40 +353,42 @@ static int pmc_core_ppfear_show(struct seq_file *s, void *unused) DEFINE_SHOW_ATTRIBUTE(pmc_core_ppfear); /* This function should return link status, 0 means ready */ -static int pmc_core_mtpmc_link_status(struct pmc_dev *pmcdev) +static int pmc_core_mtpmc_link_status(struct pmc *pmc) { u32 value; - value = pmc_core_reg_read(pmcdev, SPT_PMC_PM_STS_OFFSET); + value = pmc_core_reg_read(pmc, SPT_PMC_PM_STS_OFFSET); return value & BIT(SPT_PMC_MSG_FULL_STS_BIT); } -static int pmc_core_send_msg(struct pmc_dev *pmcdev, u32 *addr_xram) +static int pmc_core_send_msg(struct pmc *pmc, u32 *addr_xram) { u32 dest; int timeout; for (timeout = NUM_RETRIES; timeout > 0; timeout--) { - if (pmc_core_mtpmc_link_status(pmcdev) == 0) + if (pmc_core_mtpmc_link_status(pmc) == 0) break; msleep(5); } - if (timeout <= 0 && pmc_core_mtpmc_link_status(pmcdev)) + if (timeout <= 0 && pmc_core_mtpmc_link_status(pmc)) return -EBUSY; dest = (*addr_xram & MTPMC_MASK) | (1U << 1); - pmc_core_reg_write(pmcdev, SPT_PMC_MTPMC_OFFSET, dest); + pmc_core_reg_write(pmc, SPT_PMC_MTPMC_OFFSET, dest); return 0; } static int pmc_core_mphy_pg_show(struct seq_file *s, void *unused) { struct pmc_dev *pmcdev = s->private; - const struct pmc_bit_map *map = pmcdev->map->mphy_sts; + struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN]; + const struct pmc_bit_map *map = pmc->map->mphy_sts; u32 mphy_core_reg_low, mphy_core_reg_high; u32 val_low, val_high; - int index, err = 0; + unsigned int index; + int err = 0; if (pmcdev->pmc_xram_read_bit) { seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS."); @@ -361,23 +398,21 @@ static int pmc_core_mphy_pg_show(struct seq_file *s, void *unused) mphy_core_reg_low = (SPT_PMC_MPHY_CORE_STS_0 << 16); mphy_core_reg_high = (SPT_PMC_MPHY_CORE_STS_1 << 16); - mutex_lock(&pmcdev->lock); + guard(mutex)(&pmcdev->lock); - if (pmc_core_send_msg(pmcdev, &mphy_core_reg_low) != 0) { - err = -EBUSY; - goto out_unlock; - } + err = pmc_core_send_msg(pmc, &mphy_core_reg_low); + if (err) + return err; msleep(10); - val_low = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET); + val_low = pmc_core_reg_read(pmc, 
SPT_PMC_MFPMC_OFFSET); - if (pmc_core_send_msg(pmcdev, &mphy_core_reg_high) != 0) { - err = -EBUSY; - goto out_unlock; - } + err = pmc_core_send_msg(pmc, &mphy_core_reg_high); + if (err) + return err; msleep(10); - val_high = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET); + val_high = pmc_core_reg_read(pmc, SPT_PMC_MFPMC_OFFSET); for (index = 0; index < 8 && map[index].name; index++) { seq_printf(s, "%-32s\tState: %s\n", @@ -393,18 +428,18 @@ static int pmc_core_mphy_pg_show(struct seq_file *s, void *unused) "Power gated"); } -out_unlock: - mutex_unlock(&pmcdev->lock); - return err; + return 0; } DEFINE_SHOW_ATTRIBUTE(pmc_core_mphy_pg); static int pmc_core_pll_show(struct seq_file *s, void *unused) { struct pmc_dev *pmcdev = s->private; - const struct pmc_bit_map *map = pmcdev->map->pll_sts; + struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN]; + const struct pmc_bit_map *map = pmc->map->pll_sts; u32 mphy_common_reg, val; - int index, err = 0; + unsigned int index; + int err = 0; if (pmcdev->pmc_xram_read_bit) { seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS."); @@ -412,16 +447,15 @@ static int pmc_core_pll_show(struct seq_file *s, void *unused) } mphy_common_reg = (SPT_PMC_MPHY_COM_STS_0 << 16); - mutex_lock(&pmcdev->lock); + guard(mutex)(&pmcdev->lock); - if (pmc_core_send_msg(pmcdev, &mphy_common_reg) != 0) { - err = -EBUSY; - goto out_unlock; - } + err = pmc_core_send_msg(pmc, &mphy_common_reg); + if (err) + return err; /* Observed PMC HW response latency for MTPMC-MFPMC is ~10 ms */ msleep(10); - val = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET); + val = pmc_core_reg_read(pmc, SPT_PMC_MFPMC_OFFSET); for (index = 0; map[index].name ; index++) { seq_printf(s, "%-32s\tState: %s\n", @@ -429,94 +463,125 @@ static int pmc_core_pll_show(struct seq_file *s, void *unused) map[index].bit_mask & val ? "Active" : "Idle"); } -out_unlock: - mutex_unlock(&pmcdev->lock); - return err; + return 0; } DEFINE_SHOW_ATTRIBUTE(pmc_core_pll); -int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value) +int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore) { - const struct pmc_reg_map *map = pmcdev->map; + struct pmc *pmc; + const struct pmc_reg_map *map; u32 reg; - int err = 0; + unsigned int pmc_idx; + int ltr_index; + + ltr_index = value; + /* For platforms with multiple pmcs, ltr index value given by user + * is based on the contiguous indexes from ltr_show output. + * pmc index and ltr index need to be calculated from it. + */ + for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs) && ltr_index >= 0; pmc_idx++) { + pmc = pmcdev->pmcs[pmc_idx]; - mutex_lock(&pmcdev->lock); + if (!pmc) + continue; + + map = pmc->map; + if (ltr_index <= map->ltr_ignore_max) + break; - if (value > map->ltr_ignore_max) { - err = -EINVAL; - goto out_unlock; + /* Along with IP names, ltr_show map includes CURRENT_PLATFORM + * and AGGREGATED_SYSTEM values per PMC. Take these two index + * values into account in the ltr_index calculation. Also, to + * start the ltr index from zero for the next pmc, subtract 1. 
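+ * For example (hypothetical sizes): if PMC0 had an ltr_ignore_max of 22, + * its ltr_show block would cover indexes 0-24 (23 IP rows plus the two + * extra rows), so a user-supplied index of 25 maps to bit 0 of the next + * PMC: 25 - (22 + 2) - 1 = 0.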
+ */ + ltr_index = ltr_index - (map->ltr_ignore_max + 2) - 1; } - reg = pmc_core_reg_read(pmcdev, map->ltr_ignore_offset); - reg |= BIT(value); - pmc_core_reg_write(pmcdev, map->ltr_ignore_offset, reg); + if (pmc_idx >= ARRAY_SIZE(pmcdev->pmcs) || ltr_index < 0) + return -EINVAL; + + pr_debug("ltr_ignore for pmc%d: ltr_index:%d\n", pmc_idx, ltr_index); -out_unlock: - mutex_unlock(&pmcdev->lock); + guard(mutex)(&pmcdev->lock); - return err; + reg = pmc_core_reg_read(pmc, map->ltr_ignore_offset); + if (ignore) + reg |= BIT(ltr_index); + else + reg &= ~BIT(ltr_index); + pmc_core_reg_write(pmc, map->ltr_ignore_offset, reg); + + return 0; } -static ssize_t pmc_core_ltr_ignore_write(struct file *file, - const char __user *userbuf, - size_t count, loff_t *ppos) +static ssize_t pmc_core_ltr_write(struct pmc_dev *pmcdev, + const char __user *userbuf, + size_t count, int ignore) { - struct seq_file *s = file->private_data; - struct pmc_dev *pmcdev = s->private; - u32 buf_size, value; + u32 value; int err; - buf_size = min_t(u32, count, 64); - - err = kstrtou32_from_user(userbuf, buf_size, 10, &value); + err = kstrtou32_from_user(userbuf, count, 10, &value); if (err) return err; - err = pmc_core_send_ltr_ignore(pmcdev, value); + err = pmc_core_send_ltr_ignore(pmcdev, value, ignore); + + return err ?: count; +} + +static ssize_t pmc_core_ltr_ignore_write(struct file *file, + const char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct pmc_dev *pmcdev = s->private; - return err == 0 ? count : err; + return pmc_core_ltr_write(pmcdev, userbuf, count, 1); } static int pmc_core_ltr_ignore_show(struct seq_file *s, void *unused) { return 0; } +DEFINE_SHOW_STORE_ATTRIBUTE(pmc_core_ltr_ignore); -static int pmc_core_ltr_ignore_open(struct inode *inode, struct file *file) +static ssize_t pmc_core_ltr_restore_write(struct file *file, + const char __user *userbuf, + size_t count, loff_t *ppos) { - return single_open(file, pmc_core_ltr_ignore_show, inode->i_private); + struct seq_file *s = file->private_data; + struct pmc_dev *pmcdev = s->private; + + return pmc_core_ltr_write(pmcdev, userbuf, count, 0); } -static const struct file_operations pmc_core_ltr_ignore_ops = { - .open = pmc_core_ltr_ignore_open, - .read = seq_read, - .write = pmc_core_ltr_ignore_write, - .llseek = seq_lseek, - .release = single_release, -}; +static int pmc_core_ltr_restore_show(struct seq_file *s, void *unused) +{ + return 0; +} +DEFINE_SHOW_STORE_ATTRIBUTE(pmc_core_ltr_restore); static void pmc_core_slps0_dbg_latch(struct pmc_dev *pmcdev, bool reset) { - const struct pmc_reg_map *map = pmcdev->map; + struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN]; + const struct pmc_reg_map *map = pmc->map; u32 fd; - mutex_lock(&pmcdev->lock); + guard(mutex)(&pmcdev->lock); if (!reset && !slps0_dbg_latch) - goto out_unlock; + return; - fd = pmc_core_reg_read(pmcdev, map->slps0_dbg_offset); + fd = pmc_core_reg_read(pmc, map->slps0_dbg_offset); if (reset) fd &= ~CNP_PMC_LATCH_SLPS0_EVENTS; else fd |= CNP_PMC_LATCH_SLPS0_EVENTS; - pmc_core_reg_write(pmcdev, map->slps0_dbg_offset, fd); + pmc_core_reg_write(pmc, map->slps0_dbg_offset, fd); slps0_dbg_latch = false; - -out_unlock: - mutex_unlock(&pmcdev->lock); } static int pmc_core_slps0_dbg_show(struct seq_file *s, void *unused) @@ -524,7 +589,7 @@ static int pmc_core_slps0_dbg_show(struct seq_file *s, void *unused) struct pmc_dev *pmcdev = s->private; pmc_core_slps0_dbg_latch(pmcdev, false); - pmc_core_slps0_display(pmcdev, NULL, s); + 
pmc_core_slps0_display(pmcdev->pmcs[PMC_IDX_MAIN], NULL, s); pmc_core_slps0_dbg_latch(pmcdev, true); return 0; @@ -568,44 +633,142 @@ static u32 convert_ltr_scale(u32 val) static int pmc_core_ltr_show(struct seq_file *s, void *unused) { struct pmc_dev *pmcdev = s->private; - const struct pmc_bit_map *map = pmcdev->map->ltr_show_sts; - u64 decoded_snoop_ltr, decoded_non_snoop_ltr; - u32 ltr_raw_data, scale, val; + u64 decoded_snoop_ltr, decoded_non_snoop_ltr, val; + u32 ltr_raw_data, scale; u16 snoop_ltr, nonsnoop_ltr; - int index; + unsigned int pmc_idx, index, ltr_index = 0; - for (index = 0; map[index].name ; index++) { - decoded_snoop_ltr = decoded_non_snoop_ltr = 0; - ltr_raw_data = pmc_core_reg_read(pmcdev, - map[index].bit_mask); - snoop_ltr = ltr_raw_data & ~MTPMC_MASK; - nonsnoop_ltr = (ltr_raw_data >> 0x10) & ~MTPMC_MASK; - - if (FIELD_GET(LTR_REQ_NONSNOOP, ltr_raw_data)) { - scale = FIELD_GET(LTR_DECODED_SCALE, nonsnoop_ltr); - val = FIELD_GET(LTR_DECODED_VAL, nonsnoop_ltr); - decoded_non_snoop_ltr = val * convert_ltr_scale(scale); - } + for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) { + struct pmc *pmc; + const struct pmc_bit_map *map; + u32 ltr_ign_reg; - if (FIELD_GET(LTR_REQ_SNOOP, ltr_raw_data)) { - scale = FIELD_GET(LTR_DECODED_SCALE, snoop_ltr); - val = FIELD_GET(LTR_DECODED_VAL, snoop_ltr); - decoded_snoop_ltr = val * convert_ltr_scale(scale); - } + pmc = pmcdev->pmcs[pmc_idx]; + if (!pmc) + continue; + + scoped_guard(mutex, &pmcdev->lock) + ltr_ign_reg = pmc_core_reg_read(pmc, pmc->map->ltr_ignore_offset); + + map = pmc->map->ltr_show_sts; + for (index = 0; map[index].name; index++) { + bool ltr_ign_data; + + if (index > pmc->map->ltr_ignore_max) + ltr_ign_data = false; + else + ltr_ign_data = ltr_ign_reg & BIT(index); + + decoded_snoop_ltr = decoded_non_snoop_ltr = 0; + ltr_raw_data = pmc_core_reg_read(pmc, + map[index].bit_mask); + snoop_ltr = ltr_raw_data & ~MTPMC_MASK; + nonsnoop_ltr = (ltr_raw_data >> 0x10) & ~MTPMC_MASK; + + if (FIELD_GET(LTR_REQ_NONSNOOP, ltr_raw_data)) { + scale = FIELD_GET(LTR_DECODED_SCALE, nonsnoop_ltr); + val = FIELD_GET(LTR_DECODED_VAL, nonsnoop_ltr); + decoded_non_snoop_ltr = val * convert_ltr_scale(scale); + } + if (FIELD_GET(LTR_REQ_SNOOP, ltr_raw_data)) { + scale = FIELD_GET(LTR_DECODED_SCALE, snoop_ltr); + val = FIELD_GET(LTR_DECODED_VAL, snoop_ltr); + decoded_snoop_ltr = val * convert_ltr_scale(scale); + } - seq_printf(s, "%-32s\tLTR: RAW: 0x%-16x\tNon-Snoop(ns): %-16llu\tSnoop(ns): %-16llu\n", - map[index].name, ltr_raw_data, - decoded_non_snoop_ltr, - decoded_snoop_ltr); + seq_printf(s, "%d\tPMC%d:%-32s\tLTR: RAW: 0x%-16x\tNon-Snoop(ns): %-16llu\tSnoop(ns): %-16llu\tLTR_IGNORE: %d\n", + ltr_index, pmc_idx, map[index].name, ltr_raw_data, + decoded_non_snoop_ltr, + decoded_snoop_ltr, ltr_ign_data); + ltr_index++; + } } return 0; } DEFINE_SHOW_ATTRIBUTE(pmc_core_ltr); -static inline u64 adjust_lpm_residency(struct pmc_dev *pmcdev, u32 offset, +static int pmc_core_s0ix_blocker_show(struct seq_file *s, void *unused) +{ + struct pmc_dev *pmcdev = s->private; + unsigned int pmc_idx; + + for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); pmc_idx++) { + const struct pmc_bit_map **maps; + unsigned int arr_size, r_idx; + u32 offset, counter; + struct pmc *pmc; + + pmc = pmcdev->pmcs[pmc_idx]; + if (!pmc) + continue; + maps = pmc->map->s0ix_blocker_maps; + offset = pmc->map->s0ix_blocker_offset; + arr_size = pmc_core_lpm_get_arr_size(maps); + + for (r_idx = 0; r_idx < arr_size; r_idx++) { + const struct pmc_bit_map *map; + + 
for (map = maps[r_idx]; map->name; map++) { + if (!map->blk) + continue; + counter = pmc_core_reg_read(pmc, offset); + seq_printf(s, "PMC%d:%-30s %-30d\n", pmc_idx, + map->name, counter); + offset += map->blk * S0IX_BLK_SIZE; + } + } + } + return 0; +} +DEFINE_SHOW_ATTRIBUTE(pmc_core_s0ix_blocker); + +static void pmc_core_ltr_ignore_all(struct pmc_dev *pmcdev) +{ + unsigned int pmc_idx; + + for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); pmc_idx++) { + struct pmc *pmc; + u32 ltr_ign; + + pmc = pmcdev->pmcs[pmc_idx]; + if (!pmc) + continue; + + guard(mutex)(&pmcdev->lock); + pmc->ltr_ign = pmc_core_reg_read(pmc, pmc->map->ltr_ignore_offset); + + /* ltr_ignore_max is the max index value for LTR ignore register */ + ltr_ign = pmc->ltr_ign | GENMASK(pmc->map->ltr_ignore_max, 0); + pmc_core_reg_write(pmc, pmc->map->ltr_ignore_offset, ltr_ign); + } + + /* + * Ignoring the ME during suspend blocks platforms with an ADL PCH from + * reaching deeper S0ix substates, so keep its LTR un-ignored. + */ + pmc_core_send_ltr_ignore(pmcdev, 6, 0); +} + +static void pmc_core_ltr_restore_all(struct pmc_dev *pmcdev) +{ + unsigned int pmc_idx; + + for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); pmc_idx++) { + struct pmc *pmc; + + pmc = pmcdev->pmcs[pmc_idx]; + if (!pmc) + continue; + + guard(mutex)(&pmcdev->lock); + pmc_core_reg_write(pmc, pmc->map->ltr_ignore_offset, pmc->ltr_ign); + } +} + +static inline u64 adjust_lpm_residency(struct pmc *pmc, u32 offset, const int lpm_adj_x2) { - u64 lpm_res = pmc_core_reg_read(pmcdev, offset); + u64 lpm_res = pmc_core_reg_read(pmc, offset); return GET_X2_COUNTER((u64)lpm_adj_x2 * lpm_res); } @@ -613,15 +776,16 @@ static inline u64 adjust_lpm_residency(struct pmc_dev *pmcdev, u32 offset, static int pmc_core_substate_res_show(struct seq_file *s, void *unused) { struct pmc_dev *pmcdev = s->private; - const int lpm_adj_x2 = pmcdev->map->lpm_res_counter_step_x2; - u32 offset = pmcdev->map->lpm_residency_offset; - int i, mode; + struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN]; + const int lpm_adj_x2 = pmc->map->lpm_res_counter_step_x2; + u32 offset = pmc->map->lpm_residency_offset; + int mode; seq_printf(s, "%-10s %-15s\n", "Substate", "Residency"); - pmc_for_each_mode(i, mode, pmcdev) { + pmc_for_each_mode(mode, pmcdev) { seq_printf(s, "%-10s %-15llu\n", pmc_lpm_modes[mode], - adjust_lpm_residency(pmcdev, offset + (4 * mode), lpm_adj_x2)); + adjust_lpm_residency(pmc, offset + (4 * mode), lpm_adj_x2)); } return 0; @@ -631,10 +795,19 @@ DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_res); static int pmc_core_substate_sts_regs_show(struct seq_file *s, void *unused) { struct pmc_dev *pmcdev = s->private; - const struct pmc_bit_map **maps = pmcdev->map->lpm_sts; - u32 offset = pmcdev->map->lpm_status_offset; + unsigned int pmc_idx; - pmc_core_lpm_display(pmcdev, NULL, s, offset, "STATUS", maps); + for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) { + struct pmc *pmc = pmcdev->pmcs[pmc_idx]; + const struct pmc_bit_map **maps; + u32 offset; + + if (!pmc) + continue; + maps = pmc->map->lpm_sts; + offset = pmc->map->lpm_status_offset; + pmc_core_lpm_display(pmc, NULL, s, offset, pmc_idx, "STATUS", maps); + } return 0; } @@ -643,103 +816,258 @@ DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_sts_regs); static int pmc_core_substate_l_sts_regs_show(struct seq_file *s, void *unused) { struct pmc_dev *pmcdev = s->private; - const struct pmc_bit_map **maps = pmcdev->map->lpm_sts; - u32 offset = pmcdev->map->lpm_live_status_offset; + unsigned int pmc_idx; - pmc_core_lpm_display(pmcdev, NULL, s, offset, "LIVE_STATUS", 
maps); + for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) { + struct pmc *pmc = pmcdev->pmcs[pmc_idx]; + const struct pmc_bit_map **maps; + u32 offset; + + if (!pmc) + continue; + maps = pmc->map->lpm_sts; + offset = pmc->map->lpm_live_status_offset; + pmc_core_lpm_display(pmc, NULL, s, offset, pmc_idx, "LIVE_STATUS", maps); + } return 0; } DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_l_sts_regs); -static void pmc_core_substate_req_header_show(struct seq_file *s) +static void pmc_core_substate_req_header_show(struct seq_file *s, int pmc_index, + enum header_type type) { struct pmc_dev *pmcdev = s->private; - int i, mode; + int mode; - seq_printf(s, "%30s |", "Element"); - pmc_for_each_mode(i, mode, pmcdev) + seq_printf(s, "%40s |", "Element"); + pmc_for_each_mode(mode, pmcdev) seq_printf(s, " %9s |", pmc_lpm_modes[mode]); - seq_printf(s, " %9s |\n", "Status"); + if (type == HEADER_STATUS) { + seq_printf(s, " %9s |", "Status"); + seq_printf(s, " %11s |\n", "Live Status"); + } else { + seq_printf(s, " %9s |\n", "Value"); + } +} + +static int pmc_core_substate_blk_req_show(struct seq_file *s, void *unused) +{ + struct pmc_dev *pmcdev = s->private; + unsigned int pmc_idx; + + for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); pmc_idx++) { + const struct pmc_bit_map **maps; + unsigned int arr_size, r_idx; + u32 offset, counter; + u32 *lpm_req_regs; + struct pmc *pmc; + + pmc = pmcdev->pmcs[pmc_idx]; + if (!pmc || !pmc->lpm_req_regs) + continue; + + lpm_req_regs = pmc->lpm_req_regs; + maps = pmc->map->s0ix_blocker_maps; + offset = pmc->map->s0ix_blocker_offset; + arr_size = pmc_core_lpm_get_arr_size(maps); + + /* Display the header */ + pmc_core_substate_req_header_show(s, pmc_idx, HEADER_VALUE); + + for (r_idx = 0; r_idx < arr_size; r_idx++) { + const struct pmc_bit_map *map; + + for (map = maps[r_idx]; map->name; map++) { + int mode; + + if (!map->blk) + continue; + + counter = pmc_core_reg_read(pmc, offset); + seq_printf(s, "pmc%u: %34s |", pmc_idx, map->name); + pmc_for_each_mode(mode, pmcdev) { + bool required = *lpm_req_regs & BIT(mode); + + seq_printf(s, " %9s |", required ? 
"Required" : " "); + } + seq_printf(s, " %9u |\n", counter); + offset += map->blk * S0IX_BLK_SIZE; + lpm_req_regs++; + } + } + } + return 0; } +static int pmc_core_substate_blk_req_open(struct inode *inode, struct file *file) +{ + return single_open(file, pmc_core_substate_blk_req_show, inode->i_private); +} + +const struct file_operations pmc_core_substate_blk_req_fops = { + .owner = THIS_MODULE, + .open = pmc_core_substate_blk_req_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused) { struct pmc_dev *pmcdev = s->private; - const struct pmc_bit_map **maps = pmcdev->map->lpm_sts; - const struct pmc_bit_map *map; - const int num_maps = pmcdev->map->lpm_num_maps; - u32 sts_offset = pmcdev->map->lpm_status_offset; - u32 *lpm_req_regs = pmcdev->lpm_req_regs; - int mp; + u32 sts_offset; + u32 sts_offset_live; + u32 *lpm_req_regs; + unsigned int mp, pmc_idx; + int num_maps; + + for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) { + struct pmc *pmc = pmcdev->pmcs[pmc_idx]; + const struct pmc_bit_map **maps; - /* Display the header */ - pmc_core_substate_req_header_show(s); + if (!pmc) + continue; - /* Loop over maps */ - for (mp = 0; mp < num_maps; mp++) { - u32 req_mask = 0; - u32 lpm_status; - int mode, idx, i, len = 32; + maps = pmc->map->lpm_sts; + num_maps = pmc->map->lpm_num_maps; + sts_offset = pmc->map->lpm_status_offset; + sts_offset_live = pmc->map->lpm_live_status_offset; + lpm_req_regs = pmc->lpm_req_regs; /* - * Capture the requirements and create a mask so that we only - * show an element if it's required for at least one of the - * enabled low power modes + * When there are multiple PMCs, though the PMC may exist, the + * requirement register discovery could have failed so check + * before accessing. 
*/ - pmc_for_each_mode(idx, mode, pmcdev) - req_mask |= lpm_req_regs[mp + (mode * num_maps)]; - - /* Get the last latched status for this map */ - lpm_status = pmc_core_reg_read(pmcdev, sts_offset + (mp * 4)); - - /* Loop over elements in this map */ - map = maps[mp]; - for (i = 0; map[i].name && i < len; i++) { - u32 bit_mask = map[i].bit_mask; - - if (!(bit_mask & req_mask)) - /* - * Not required for any enabled states - * so don't display - */ - continue; - - /* Display the element name in the first column */ - seq_printf(s, "%30s |", map[i].name); - - /* Loop over the enabled states and display if required */ - pmc_for_each_mode(idx, mode, pmcdev) { - if (lpm_req_regs[mp + (mode * num_maps)] & bit_mask) - seq_printf(s, " %9s |", - "Required"); - else - seq_printf(s, " %9s |", " "); + if (!lpm_req_regs) + continue; + + /* Display the header */ + pmc_core_substate_req_header_show(s, pmc_idx, HEADER_STATUS); + + /* Loop over maps */ + for (mp = 0; mp < num_maps; mp++) { + u32 req_mask = 0; + u32 lpm_status; + u32 lpm_status_live; + const struct pmc_bit_map *map; + int mode, i, len = 32; + + /* + * Capture the requirements and create a mask so that we only + * show an element if it's required for at least one of the + * enabled low power modes + */ + pmc_for_each_mode(mode, pmcdev) + req_mask |= lpm_req_regs[mp + (mode * num_maps)]; + + /* Get the last latched status for this map */ + lpm_status = pmc_core_reg_read(pmc, sts_offset + (mp * 4)); + + /* Get the runtime status for this map */ + lpm_status_live = pmc_core_reg_read(pmc, sts_offset_live + (mp * 4)); + + /* Loop over elements in this map */ + map = maps[mp]; + for (i = 0; map[i].name && i < len; i++) { + u32 bit_mask = map[i].bit_mask; + + if (!(bit_mask & req_mask)) { + /* + * Not required for any enabled states + * so don't display + */ + continue; + } + + /* Display the element name in the first column */ + seq_printf(s, "pmc%d: %34s |", pmc_idx, map[i].name); + + /* Loop over the enabled states and display if required */ + pmc_for_each_mode(mode, pmcdev) { + bool required = lpm_req_regs[mp + (mode * num_maps)] & + bit_mask; + seq_printf(s, " %9s |", required ? "Required" : " "); + } + + /* In Status column, show the last captured state of this agent */ + seq_printf(s, " %9s |", lpm_status & bit_mask ? "Yes" : " "); + + /* In Live status column, show the live state of this agent */ + seq_printf(s, " %11s |", lpm_status_live & bit_mask ? 
"Yes" : " "); + + seq_puts(s, "\n"); } + } + } + return 0; +} - /* In Status column, show the last captured state of this agent */ - if (lpm_status & bit_mask) - seq_printf(s, " %9s |", "Yes"); - else - seq_printf(s, " %9s |", " "); +static int pmc_core_substate_req_regs_open(struct inode *inode, struct file *file) +{ + return single_open(file, pmc_core_substate_req_regs_show, inode->i_private); +} - seq_puts(s, "\n"); - } +const struct file_operations pmc_core_substate_req_regs_fops = { + .owner = THIS_MODULE, + .open = pmc_core_substate_req_regs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static unsigned int pmc_core_get_crystal_freq(void) +{ + unsigned int eax_denominator, ebx_numerator, ecx_hz, edx; + + if (boot_cpu_data.cpuid_level < CPUID_LEAF_TSC) + return 0; + + eax_denominator = ebx_numerator = ecx_hz = edx = 0; + + /* TSC/Crystal ratio, plus optionally Crystal Hz */ + cpuid(CPUID_LEAF_TSC, &eax_denominator, &ebx_numerator, &ecx_hz, &edx); + + if (ebx_numerator == 0 || eax_denominator == 0) + return 0; + + return ecx_hz; +} + +static int pmc_core_die_c6_us_show(struct seq_file *s, void *unused) +{ + struct pmc_dev *pmcdev = s->private; + u64 die_c6_res, count; + int ret; + + if (!pmcdev->crystal_freq) { + dev_warn_once(&pmcdev->pdev->dev, "Crystal frequency unavailable\n"); + return -ENXIO; } + ret = pmt_telem_read(pmcdev->punit_ep, pmcdev->die_c6_offset, + &count, 1); + if (ret) + return ret; + + die_c6_res = div64_u64(count * HZ_PER_MHZ, pmcdev->crystal_freq); + seq_printf(s, "%llu\n", die_c6_res); + return 0; } -DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_req_regs); +DEFINE_SHOW_ATTRIBUTE(pmc_core_die_c6_us); static int pmc_core_lpm_latch_mode_show(struct seq_file *s, void *unused) { struct pmc_dev *pmcdev = s->private; + struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN]; bool c10; u32 reg; - int idx, mode; + int mode; - reg = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_sts_latch_en_offset); + reg = pmc_core_reg_read(pmc, pmc->map->lpm_sts_latch_en_offset); if (reg & LPM_STS_LATCH_MODE) { seq_puts(s, "c10"); c10 = false; @@ -748,7 +1076,7 @@ static int pmc_core_lpm_latch_mode_show(struct seq_file *s, void *unused) c10 = true; } - pmc_for_each_mode(idx, mode, pmcdev) { + pmc_for_each_mode(mode, pmcdev) { if ((BIT(mode) & reg) && !c10) seq_printf(s, " [%s]", pmc_lpm_modes[mode]); else @@ -766,9 +1094,10 @@ static ssize_t pmc_core_lpm_latch_mode_write(struct file *file, { struct seq_file *s = file->private_data; struct pmc_dev *pmcdev = s->private; + struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN]; bool clear = false, c10 = false; unsigned char buf[8]; - int idx, m, mode; + int m, mode; u32 reg; if (count > sizeof(buf) - 1) @@ -786,7 +1115,7 @@ static ssize_t pmc_core_lpm_latch_mode_write(struct file *file, mode = sysfs_match_string(pmc_lpm_modes, buf); /* Check string matches enabled mode */ - pmc_for_each_mode(idx, m, pmcdev) + pmc_for_each_mode(m, pmcdev) if (mode == m) break; @@ -800,25 +1129,21 @@ static ssize_t pmc_core_lpm_latch_mode_write(struct file *file, } if (clear) { - mutex_lock(&pmcdev->lock); + guard(mutex)(&pmcdev->lock); - reg = pmc_core_reg_read(pmcdev, pmcdev->map->etr3_offset); + reg = pmc_core_reg_read(pmc, pmc->map->etr3_offset); reg |= ETR3_CLEAR_LPM_EVENTS; - pmc_core_reg_write(pmcdev, pmcdev->map->etr3_offset, reg); - - mutex_unlock(&pmcdev->lock); + pmc_core_reg_write(pmc, pmc->map->etr3_offset, reg); return count; } if (c10) { - mutex_lock(&pmcdev->lock); + guard(mutex)(&pmcdev->lock); - reg = pmc_core_reg_read(pmcdev, 
pmcdev->map->lpm_sts_latch_en_offset); + reg = pmc_core_reg_read(pmc, pmc->map->lpm_sts_latch_en_offset); reg &= ~LPM_STS_LATCH_MODE; - pmc_core_reg_write(pmcdev, pmcdev->map->lpm_sts_latch_en_offset, reg); - - mutex_unlock(&pmcdev->lock); + pmc_core_reg_write(pmc, pmc->map->lpm_sts_latch_en_offset, reg); return count; } @@ -828,9 +1153,8 @@ static ssize_t pmc_core_lpm_latch_mode_write(struct file *file, * and clear everything else. */ reg = LPM_STS_LATCH_MODE | BIT(mode); - mutex_lock(&pmcdev->lock); - pmc_core_reg_write(pmcdev, pmcdev->map->lpm_sts_latch_en_offset, reg); - mutex_unlock(&pmcdev->lock); + guard(mutex)(&pmcdev->lock); + pmc_core_reg_write(pmc, pmc->map->lpm_sts_latch_en_offset, reg); return count; } @@ -838,13 +1162,13 @@ DEFINE_PMC_CORE_ATTR_WRITE(pmc_core_lpm_latch_mode); static int pmc_core_pkgc_show(struct seq_file *s, void *unused) { - struct pmc_dev *pmcdev = s->private; - const struct pmc_bit_map *map = pmcdev->map->msr_sts; + struct pmc *pmc = s->private; + const struct pmc_bit_map *map = pmc->map->msr_sts; u64 pcstate_count; - int index; + unsigned int index; for (index = 0; map[index].name ; index++) { - if (rdmsrl_safe(map[index].bit_mask, &pcstate_count)) + if (rdmsrq_safe(map[index].bit_mask, &pcstate_count)) continue; pcstate_count *= 1000; @@ -859,7 +1183,7 @@ DEFINE_SHOW_ATTRIBUTE(pmc_core_pkgc); static bool pmc_core_pri_verify(u32 lpm_pri, u8 *mode_order) { - int i, j; + unsigned int i, j; if (!lpm_pri) return false; @@ -887,20 +1211,21 @@ static bool pmc_core_pri_verify(u32 lpm_pri, u8 *mode_order) return true; } -static void pmc_core_get_low_power_modes(struct platform_device *pdev) +void pmc_core_get_low_power_modes(struct pmc_dev *pmcdev) { - struct pmc_dev *pmcdev = platform_get_drvdata(pdev); + struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN]; u8 pri_order[LPM_MAX_NUM_MODES] = LPM_DEFAULT_PRI; u8 mode_order[LPM_MAX_NUM_MODES]; u32 lpm_pri; u32 lpm_en; - int mode, i, p; + unsigned int i; + int mode, p; /* Use LPM Maps to indicate support for substates */ - if (!pmcdev->map->lpm_num_maps) + if (!pmc->map->lpm_num_maps) return; - lpm_en = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_en_offset); + lpm_en = pmc_core_reg_read(pmc, pmc->map->lpm_en_offset); /* For MTL, BIT 31 is not an lpm mode but an enable bit. * Lower byte is enough to cover the number of lpm modes for all * platforms and hence mask the upper 3 bytes. 
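+ * For instance, a (hypothetical) lpm_en value of 0x80000033 is masked + * down to 0x33, i.e. hweight32(0x33) = 4 enabled modes.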
@@ -908,7 +1233,7 @@ static void pmc_core_get_low_power_modes(struct platform_device *pdev) pmcdev->num_lpm_modes = hweight32(lpm_en & 0xFF); /* Read 32 bit LPM_PRI register */ - lpm_pri = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_priority_offset); + lpm_pri = pmc_core_reg_read(pmc, pmc->map->lpm_priority_offset); /* @@ -920,7 +1245,8 @@ static void pmc_core_get_low_power_modes(struct platform_device *pdev) for (mode = 0; mode < LPM_MAX_NUM_MODES; mode++) pri_order[mode_order[mode]] = mode; else - dev_warn(&pdev->dev, "Assuming a default substate order for this platform\n"); + dev_dbg(&pmcdev->pdev->dev, + "Assuming a default substate order for this platform\n"); /* * Loop through all modes from lowest to highest priority, @@ -937,43 +1263,141 @@ static void pmc_core_get_low_power_modes(struct platform_device *pdev) } } +int get_primary_reg_base(struct pmc *pmc) +{ + u64 slp_s0_addr; + + if (lpit_read_residency_count_address(&slp_s0_addr)) { + pmc->base_addr = PMC_BASE_ADDR_DEFAULT; + + if (page_is_ram(PHYS_PFN(pmc->base_addr))) + return -ENODEV; + } else { + pmc->base_addr = slp_s0_addr - pmc->map->slp_s0_offset; + } + + pmc->regbase = ioremap(pmc->base_addr, pmc->map->regmap_length); + if (!pmc->regbase) + return -ENOMEM; + return 0; +} + +static struct telem_endpoint *pmc_core_register_endpoint(struct pci_dev *pcidev, u32 *guids) +{ + struct telem_endpoint *ep; + unsigned int i; + + for (i = 0; guids[i]; i++) { + ep = pmt_telem_find_and_register_endpoint(pcidev, guids[i], 0); + if (!IS_ERR(ep)) + return ep; + } + return ERR_PTR(-ENODEV); +} + +void pmc_core_punit_pmt_init(struct pmc_dev *pmcdev, u32 *guids) +{ + struct telem_endpoint *ep; + struct pci_dev *pcidev; + + pcidev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(10, 0)); + if (!pcidev) { + dev_err(&pmcdev->pdev->dev, "PUNIT PMT device not found."); + return; + } + + ep = pmc_core_register_endpoint(pcidev, guids); + pci_dev_put(pcidev); + if (IS_ERR(ep)) { + dev_err(&pmcdev->pdev->dev, + "pmc_core: couldn't get DMU telem endpoint %ld", + PTR_ERR(ep)); + return; + } + + pmcdev->punit_ep = ep; + pmcdev->die_c6_offset = MTL_PMT_DMU_DIE_C6_OFFSET; +} + +void pmc_core_set_device_d3(unsigned int device) +{ + struct pci_dev *pcidev; + + pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL); + if (pcidev) { + if (!device_trylock(&pcidev->dev)) { + pci_dev_put(pcidev); + return; + } + if (!pcidev->dev.driver) { + dev_info(&pcidev->dev, "Setting to D3hot\n"); + pci_set_power_state(pcidev, PCI_D3hot); + } + device_unlock(&pcidev->dev); + pci_dev_put(pcidev); + } +} + +static bool pmc_core_is_pson_residency_enabled(struct pmc_dev *pmcdev) +{ + struct platform_device *pdev = pmcdev->pdev; + struct acpi_device *adev = ACPI_COMPANION(&pdev->dev); + u8 val; + + if (!adev) + return false; + + if (fwnode_property_read_u8(acpi_fwnode_handle(adev), + "intel-cec-pson-switching-enabled-in-s0", + &val)) + return false; + + return val == 1; +} + static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev) { debugfs_remove_recursive(pmcdev->dbgfs_dir); } -static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev) +static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info) { + struct pmc *primary_pmc = pmcdev->pmcs[PMC_IDX_MAIN]; struct dentry *dir; dir = debugfs_create_dir("pmc_core", NULL); pmcdev->dbgfs_dir = dir; - debugfs_create_file("slp_s0_residency_usec", 0444, dir, pmcdev, + debugfs_create_file("slp_s0_residency_usec", 0444, dir, primary_pmc, &pmc_core_dev_state); - if (pmcdev->map->pfear_sts) + if 
(primary_pmc->map->pfear_sts) debugfs_create_file("pch_ip_power_gating_status", 0444, dir, pmcdev, &pmc_core_ppfear_fops); debugfs_create_file("ltr_ignore", 0644, dir, pmcdev, - &pmc_core_ltr_ignore_ops); + &pmc_core_ltr_ignore_fops); + + debugfs_create_file("ltr_restore", 0200, dir, pmcdev, &pmc_core_ltr_restore_fops); debugfs_create_file("ltr_show", 0444, dir, pmcdev, &pmc_core_ltr_fops); - debugfs_create_file("package_cstate_show", 0444, dir, pmcdev, + if (primary_pmc->map->s0ix_blocker_maps) + debugfs_create_file("s0ix_blocker", 0444, dir, pmcdev, &pmc_core_s0ix_blocker_fops); + + debugfs_create_file("package_cstate_show", 0444, dir, primary_pmc, &pmc_core_pkgc_fops); - if (pmcdev->map->pll_sts) + if (primary_pmc->map->pll_sts) debugfs_create_file("pll_status", 0444, dir, pmcdev, &pmc_core_pll_fops); - if (pmcdev->map->mphy_sts) + if (primary_pmc->map->mphy_sts) debugfs_create_file("mphy_core_lanes_power_gating_status", 0444, dir, pmcdev, &pmc_core_mphy_pg_fops); - if (pmcdev->map->slps0_dbg_maps) { + if (primary_pmc->map->slps0_dbg_maps) { debugfs_create_file("slp_s0_debug_status", 0444, dir, pmcdev, &pmc_core_slps0_dbg_fops); @@ -982,13 +1406,13 @@ static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev) dir, &slps0_dbg_latch); } - if (pmcdev->map->lpm_en_offset) { + if (primary_pmc->map->lpm_en_offset) { debugfs_create_file("substate_residencies", 0444, pmcdev->dbgfs_dir, pmcdev, &pmc_core_substate_res_fops); } - if (pmcdev->map->lpm_status_offset) { + if (primary_pmc->map->lpm_status_offset) { debugfs_create_file("substate_status_registers", 0444, pmcdev->dbgfs_dir, pmcdev, &pmc_core_substate_sts_regs_fops); @@ -1000,46 +1424,331 @@ static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev) &pmc_core_lpm_latch_mode_fops); } - if (pmcdev->lpm_req_regs) { + if (primary_pmc->lpm_req_regs) { debugfs_create_file("substate_requirements", 0444, pmcdev->dbgfs_dir, pmcdev, - &pmc_core_substate_req_regs_fops); + pmc_dev_info->sub_req_show); + } + + if (primary_pmc->map->pson_residency_offset && pmc_core_is_pson_residency_enabled(pmcdev)) { + debugfs_create_file("pson_residency_usec", 0444, + pmcdev->dbgfs_dir, primary_pmc, &pmc_core_pson_residency); + } + + if (pmcdev->punit_ep) { + debugfs_create_file("die_c6_us_show", 0444, + pmcdev->dbgfs_dir, pmcdev, + &pmc_core_die_c6_us_fops); } } +/* + * This function retrieves low power mode requirement data from PMC Low + * Power Mode (LPM) table. + * + * In telemetry space, the LPM table contains a 4 byte header followed + * by 8 consecutive mode blocks (one for each LPM mode). Each block + * has a 4 byte header followed by a set of registers that describe the + * IP state requirements for the given mode. The IP mapping is platform + * specific but the same for each block, making for easy analysis. + * Platforms only use a subset of the space to track the requirements + * for their IPs. Callers provide the requirement registers they use as + * a list of indices. Each requirement register is associated with an + * IP map that's maintained by the caller. 
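+ * With the LPM_HEADER_OFFSET, LPM_MODE_OFFSET and LPM_REG_COUNT values + * of 1, 1 and 28 used here, requirement register m of the n-th enabled + * mode is read at telemetry sample id 2 + (29 * n) + lpm_reg_index[m].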
+ * + * Header + * +----+----------------------------+----------------------------+ + * | 0 | REVISION | ENABLED MODES | + * +----+--------------+-------------+-------------+--------------+ + * + * Low Power Mode 0 Block + * +----+--------------+-------------+-------------+--------------+ + * | 1 | SUB ID | SIZE | MAJOR | MINOR | + * +----+--------------+-------------+-------------+--------------+ + * | 2 | LPM0 Requirements 0 | + * +----+---------------------------------------------------------+ + * | | ... | + * +----+---------------------------------------------------------+ + * | 29 | LPM0 Requirements 27 | + * +----+---------------------------------------------------------+ + * + * ... + * + * Low Power Mode 7 Block + * +----+--------------+-------------+-------------+--------------+ + * | | SUB ID | SIZE | MAJOR | MINOR | + * +----+--------------+-------------+-------------+--------------+ + * | 60 | LPM7 Requirements 0 | + * +----+---------------------------------------------------------+ + * | | ... | + * +----+---------------------------------------------------------+ + * | 87 | LPM7 Requirements 27 | + * +----+---------------------------------------------------------+ + * + */ +int pmc_core_pmt_get_lpm_req(struct pmc_dev *pmcdev, struct pmc *pmc, struct telem_endpoint *ep) +{ + const u8 *lpm_indices; + int num_maps, mode_offset = 0; + int ret, mode; + int lpm_size; + + lpm_indices = pmc->map->lpm_reg_index; + num_maps = pmc->map->lpm_num_maps; + lpm_size = LPM_MAX_NUM_MODES * num_maps; + + pmc->lpm_req_regs = devm_kzalloc(&pmcdev->pdev->dev, + lpm_size * sizeof(u32), + GFP_KERNEL); + if (!pmc->lpm_req_regs) + return -ENOMEM; + + mode_offset = LPM_HEADER_OFFSET + LPM_MODE_OFFSET; + pmc_for_each_mode(mode, pmcdev) { + u32 *req_offset = pmc->lpm_req_regs + (mode * num_maps); + int m; + + for (m = 0; m < num_maps; m++) { + u8 sample_id = lpm_indices[m] + mode_offset; + + ret = pmt_telem_read32(ep, sample_id, req_offset, 1); + if (ret) { + dev_err(&pmcdev->pdev->dev, + "couldn't read Low Power Mode requirements: %d\n", ret); + return ret; + } + ++req_offset; + } + mode_offset += LPM_REG_COUNT + LPM_MODE_OFFSET; + } + return ret; +} + +int pmc_core_pmt_get_blk_sub_req(struct pmc_dev *pmcdev, struct pmc *pmc, + struct telem_endpoint *ep) +{ + u32 num_blocker, sample_offset; + unsigned int index; + u32 *req_offset; + int ret; + + num_blocker = pmc->map->num_s0ix_blocker; + sample_offset = pmc->map->blocker_req_offset; + + pmc->lpm_req_regs = devm_kcalloc(&pmcdev->pdev->dev, num_blocker, + sizeof(u32), GFP_KERNEL); + if (!pmc->lpm_req_regs) + return -ENOMEM; + + req_offset = pmc->lpm_req_regs; + for (index = 0; index < num_blocker; index++, req_offset++) { + ret = pmt_telem_read32(ep, index + sample_offset, req_offset, 1); + if (ret) { + dev_err(&pmcdev->pdev->dev, + "couldn't read Low Power Mode requirements: %d\n", ret); + return ret; + } + } + return 0; +} + +static int pmc_core_get_telem_info(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info) +{ + struct pci_dev *pcidev __free(pci_dev_put) = NULL; + struct telem_endpoint *ep; + unsigned int pmc_idx; + int ret; + + pcidev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(20, pmc_dev_info->pci_func)); + if (!pcidev) + return -ENODEV; + + for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) { + struct pmc *pmc; + + pmc = pmcdev->pmcs[pmc_idx]; + if (!pmc) + continue; + + if (!pmc->map->lpm_req_guid) + return -ENXIO; + + ep = pmt_telem_find_and_register_endpoint(pcidev, pmc->map->lpm_req_guid, 0); + if (IS_ERR(ep)) { + 
dev_dbg(&pmcdev->pdev->dev, "couldn't get telem endpoint %pe", ep); + return -EPROBE_DEFER; + } + + ret = pmc_dev_info->sub_req(pmcdev, pmc, ep); + pmt_telem_unregister_endpoint(ep); + if (ret) + return ret; + } + + return 0; +} + +static const struct pmc_reg_map *pmc_core_find_regmap(struct pmc_info *list, u16 devid) +{ + for (; list->map; ++list) + if (devid == list->devid) + return list->map; + + return NULL; +} + +static int pmc_core_pmc_add(struct pmc_dev *pmcdev, unsigned int pmc_idx) +{ + struct pmc_ssram_telemetry pmc_ssram_telemetry; + const struct pmc_reg_map *map; + struct pmc *pmc; + int ret; + + ret = pmc_ssram_telemetry_get_pmc_info(pmc_idx, &pmc_ssram_telemetry); + if (ret) + return ret; + + map = pmc_core_find_regmap(pmcdev->regmap_list, pmc_ssram_telemetry.devid); + if (!map) + return -ENODEV; + + pmc = pmcdev->pmcs[pmc_idx]; + /* Memory for the primary PMC has already been allocated */ + if (!pmc) { + pmc = devm_kzalloc(&pmcdev->pdev->dev, sizeof(*pmc), GFP_KERNEL); + if (!pmc) + return -ENOMEM; + } + + pmc->map = map; + pmc->base_addr = pmc_ssram_telemetry.base_addr; + pmc->regbase = ioremap(pmc->base_addr, pmc->map->regmap_length); + + if (!pmc->regbase) { + devm_kfree(&pmcdev->pdev->dev, pmc); + return -ENOMEM; + } + + pmcdev->pmcs[pmc_idx] = pmc; + + return 0; +} + +static int pmc_core_ssram_get_reg_base(struct pmc_dev *pmcdev) +{ + int ret; + + ret = pmc_core_pmc_add(pmcdev, PMC_IDX_MAIN); + if (ret) + return ret; + + pmc_core_pmc_add(pmcdev, PMC_IDX_IOE); + pmc_core_pmc_add(pmcdev, PMC_IDX_PCH); + + return 0; +} + +/* + * When supported, ssram init is used to discover all available PMCs. + * If ssram init fails, this function uses the legacy method to at least get + * the primary PMC. + */ +int generic_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info) +{ + struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN]; + bool ssram; + int ret; + + pmcdev->suspend = pmc_dev_info->suspend; + pmcdev->resume = pmc_dev_info->resume; + + ssram = pmc_dev_info->regmap_list != NULL; + if (ssram) { + pmcdev->regmap_list = pmc_dev_info->regmap_list; + ret = pmc_core_ssram_get_reg_base(pmcdev); + /* + * An EAGAIN error code indicates the Intel PMC SSRAM Telemetry + * driver has not finished probing and PMC info is not available + * yet. Try + * again later. 
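+ * Any other non-zero code makes this function fall back to the + * legacy, primary-PMC-only init path below.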
+ */ + if (ret == -EAGAIN) + return -EPROBE_DEFER; + + if (ret) { + dev_warn(&pmcdev->pdev->dev, + "Failed to get PMC info from SSRAM, %d, using legacy init\n", ret); + ssram = false; + } + } + + if (!ssram) { + pmc->map = pmc_dev_info->map; + ret = get_primary_reg_base(pmc); + if (ret) + return ret; + } + + pmc_core_get_low_power_modes(pmcdev); + if (pmc_dev_info->dmu_guids) + pmc_core_punit_pmt_init(pmcdev, pmc_dev_info->dmu_guids); + + if (ssram) { + ret = pmc_core_get_telem_info(pmcdev, pmc_dev_info); + if (ret) + goto unmap_regbase; + } + + return 0; + +unmap_regbase: + for (unsigned int pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) { + struct pmc *pmc = pmcdev->pmcs[pmc_idx]; + + if (pmc && pmc->regbase) + iounmap(pmc->regbase); + } + + if (pmcdev->punit_ep) + pmt_telem_unregister_endpoint(pmcdev->punit_ep); + + return ret; +} + static const struct x86_cpu_id intel_pmc_core_ids[] = { - X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, spt_core_init), - X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, spt_core_init), - X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, spt_core_init), - X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, spt_core_init), - X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, cnp_core_init), - X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, icl_core_init), - X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_NNPI, icl_core_init), - X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, cnp_core_init), - X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, cnp_core_init), - X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, tgl_core_init), - X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, tgl_core_init), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, tgl_core_init), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, icl_core_init), - X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, tgl_core_init), - X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, tgl_core_init), - X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, tgl_core_init), - X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, adl_core_init), - X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, tgl_core_init), - X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, adl_core_init), - X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, adl_core_init), - X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, mtl_core_init), - X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, mtl_core_init), + X86_MATCH_VFM(INTEL_SKYLAKE_L, &spt_pmc_dev), + X86_MATCH_VFM(INTEL_SKYLAKE, &spt_pmc_dev), + X86_MATCH_VFM(INTEL_KABYLAKE_L, &spt_pmc_dev), + X86_MATCH_VFM(INTEL_KABYLAKE, &spt_pmc_dev), + X86_MATCH_VFM(INTEL_CANNONLAKE_L, &cnp_pmc_dev), + X86_MATCH_VFM(INTEL_ICELAKE_L, &icl_pmc_dev), + X86_MATCH_VFM(INTEL_ICELAKE_NNPI, &icl_pmc_dev), + X86_MATCH_VFM(INTEL_COMETLAKE, &cnp_pmc_dev), + X86_MATCH_VFM(INTEL_COMETLAKE_L, &cnp_pmc_dev), + X86_MATCH_VFM(INTEL_TIGERLAKE_L, &tgl_l_pmc_dev), + X86_MATCH_VFM(INTEL_TIGERLAKE, &tgl_pmc_dev), + X86_MATCH_VFM(INTEL_ATOM_TREMONT, &tgl_l_pmc_dev), + X86_MATCH_VFM(INTEL_ATOM_TREMONT_L, &icl_pmc_dev), + X86_MATCH_VFM(INTEL_ROCKETLAKE, &tgl_pmc_dev), + X86_MATCH_VFM(INTEL_ALDERLAKE_L, &tgl_l_pmc_dev), + X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, &tgl_l_pmc_dev), + X86_MATCH_VFM(INTEL_ALDERLAKE, &adl_pmc_dev), + X86_MATCH_VFM(INTEL_RAPTORLAKE_P, &tgl_l_pmc_dev), + X86_MATCH_VFM(INTEL_RAPTORLAKE, &adl_pmc_dev), + X86_MATCH_VFM(INTEL_RAPTORLAKE_S, &adl_pmc_dev), + X86_MATCH_VFM(INTEL_BARTLETTLAKE, &adl_pmc_dev), + X86_MATCH_VFM(INTEL_METEORLAKE_L, &mtl_pmc_dev), + X86_MATCH_VFM(INTEL_ARROWLAKE, &arl_pmc_dev), + X86_MATCH_VFM(INTEL_ARROWLAKE_H, &arl_h_pmc_dev), + X86_MATCH_VFM(INTEL_ARROWLAKE_U, &arl_h_pmc_dev), + X86_MATCH_VFM(INTEL_LUNARLAKE_M, &lnl_pmc_dev), + X86_MATCH_VFM(INTEL_PANTHERLAKE_L, &ptl_pmc_dev), + 
X86_MATCH_VFM(INTEL_WILDCATLAKE_L, &wcl_pmc_dev), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_ids); -static const struct pci_device_id pmc_pci_ids[] = { - { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID) }, - { } -}; /* * This quirk can be used on those platforms where * the platform BIOS enforces 24MHz crystal to shutdown @@ -1052,16 +1761,16 @@ static int quirk_xtal_ignore(const struct dmi_system_id *id) return 0; } -static void pmc_core_xtal_ignore(struct pmc_dev *pmcdev) +static void pmc_core_xtal_ignore(struct pmc *pmc) { u32 value; - value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_vric1_offset); + value = pmc_core_reg_read(pmc, pmc->map->pm_vric1_offset); /* 24MHz Crystal Shutdown Qualification Disable */ value |= SPT_PMC_VRIC1_XTALSDQDIS; /* Low Voltage Mode Enable */ value &= ~SPT_PMC_VRIC1_SLPS0LVEN; - pmc_core_reg_write(pmcdev, pmcdev->map->pm_vric1_offset, value); + pmc_core_reg_write(pmc, pmc->map->pm_vric1_offset, value); } static const struct dmi_system_id pmc_core_dmi_table[] = { @@ -1076,12 +1785,30 @@ static const struct dmi_system_id pmc_core_dmi_table[] = { {} }; -static void pmc_core_do_dmi_quirks(struct pmc_dev *pmcdev) +static void pmc_core_do_dmi_quirks(struct pmc *pmc) { dmi_check_system(pmc_core_dmi_table); if (xtal_ignore) - pmc_core_xtal_ignore(pmcdev); + pmc_core_xtal_ignore(pmc); +} + +static void pmc_core_clean_structure(struct platform_device *pdev) +{ + struct pmc_dev *pmcdev = platform_get_drvdata(pdev); + unsigned int pmc_idx; + + for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) { + struct pmc *pmc = pmcdev->pmcs[pmc_idx]; + + if (pmc && pmc->regbase) + iounmap(pmc->regbase); + } + + if (pmcdev->punit_ep) + pmt_telem_unregister_endpoint(pmcdev->punit_ep); + + platform_set_drvdata(pdev, NULL); } static int pmc_core_probe(struct platform_device *pdev) @@ -1089,8 +1816,9 @@ static int pmc_core_probe(struct platform_device *pdev) static bool device_initialized; struct pmc_dev *pmcdev; const struct x86_cpu_id *cpu_id; - void (*core_init)(struct pmc_dev *pmcdev); - u64 slp_s0_addr; + struct pmc_dev_info *pmc_dev_info; + struct pmc *primary_pmc; + int ret; if (device_initialized) return -ENODEV; @@ -1099,6 +1827,8 @@ static int pmc_core_probe(struct platform_device *pdev) if (!pmcdev) return -ENOMEM; + pmcdev->crystal_freq = pmc_core_get_crystal_freq(); + platform_set_drvdata(pdev, pmcdev); pmcdev->pdev = pdev; @@ -1106,42 +1836,43 @@ static int pmc_core_probe(struct platform_device *pdev) if (!cpu_id) return -ENODEV; - core_init = (void (*)(struct pmc_dev *))cpu_id->driver_data; - - /* - * Coffee Lake has CPU ID of Kaby Lake and Cannon Lake PCH. So here - * Sunrisepoint PCH regmap can't be used. Use Cannon Lake PCH regmap - * in this case. 
- */ - if (core_init == spt_core_init && !pci_dev_present(pmc_pci_ids)) - core_init = cnp_core_init; + pmc_dev_info = (struct pmc_dev_info *)cpu_id->driver_data; - mutex_init(&pmcdev->lock); - core_init(pmcdev); + /* Primary PMC */ + primary_pmc = devm_kzalloc(&pdev->dev, sizeof(*primary_pmc), GFP_KERNEL); + if (!primary_pmc) + return -ENOMEM; + pmcdev->pmcs[PMC_IDX_MAIN] = primary_pmc; + + /* The last element in msr_map is empty */ + pmcdev->num_of_pkgc = ARRAY_SIZE(msr_map) - 1; + pmcdev->pkgc_res_cnt = devm_kcalloc(&pdev->dev, + pmcdev->num_of_pkgc, + sizeof(*pmcdev->pkgc_res_cnt), + GFP_KERNEL); + if (!pmcdev->pkgc_res_cnt) + return -ENOMEM; + ret = devm_mutex_init(&pdev->dev, &pmcdev->lock); + if (ret) + return ret; - if (lpit_read_residency_count_address(&slp_s0_addr)) { - pmcdev->base_addr = PMC_BASE_ADDR_DEFAULT; + if (pmc_dev_info->init) + ret = pmc_dev_info->init(pmcdev, pmc_dev_info); + else + ret = generic_core_init(pmcdev, pmc_dev_info); - if (page_is_ram(PHYS_PFN(pmcdev->base_addr))) - return -ENODEV; - } else { - pmcdev->base_addr = slp_s0_addr - pmcdev->map->slp_s0_offset; + if (ret) { + platform_set_drvdata(pdev, NULL); + return ret; } - pmcdev->regbase = ioremap(pmcdev->base_addr, - pmcdev->map->regmap_length); - if (!pmcdev->regbase) - return -ENOMEM; - - if (pmcdev->core_configure) - pmcdev->core_configure(pmcdev); + pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit(primary_pmc); + pmc_core_do_dmi_quirks(primary_pmc); - pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit(pmcdev); - pmc_core_get_low_power_modes(pdev); - pmc_core_do_dmi_quirks(pmcdev); - - pmc_core_dbgfs_register(pmcdev); + pmc_core_dbgfs_register(pmcdev, pmc_dev_info); + pm_report_max_hw_sleep(FIELD_MAX(SLP_S0_RES_COUNTER_MASK) * + pmc_core_adjust_slp_s0_step(primary_pmc, 1)); device_initialized = true; dev_info(&pdev->dev, " initialized\n"); @@ -1149,55 +1880,59 @@ static int pmc_core_probe(struct platform_device *pdev) return 0; } -static int pmc_core_remove(struct platform_device *pdev) +static void pmc_core_remove(struct platform_device *pdev) { struct pmc_dev *pmcdev = platform_get_drvdata(pdev); - pmc_core_dbgfs_unregister(pmcdev); - platform_set_drvdata(pdev, NULL); - mutex_destroy(&pmcdev->lock); - iounmap(pmcdev->regbase); - return 0; + pmc_core_clean_structure(pdev); } static bool warn_on_s0ix_failures; module_param(warn_on_s0ix_failures, bool, 0644); MODULE_PARM_DESC(warn_on_s0ix_failures, "Check and warn for S0ix failures"); +static bool ltr_ignore_all_suspend = true; +module_param(ltr_ignore_all_suspend, bool, 0644); +MODULE_PARM_DESC(ltr_ignore_all_suspend, "Ignore all LTRs during suspend"); + static __maybe_unused int pmc_core_suspend(struct device *dev) { struct pmc_dev *pmcdev = dev_get_drvdata(dev); + struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN]; + unsigned int i; - pmcdev->check_counters = false; + if (pmcdev->suspend) + pmcdev->suspend(pmcdev); - /* No warnings on S0ix failures */ - if (!warn_on_s0ix_failures) - return 0; + if (ltr_ignore_all_suspend) + pmc_core_ltr_ignore_all(pmcdev); /* Check if the suspend will actually use S0ix */ if (pm_suspend_via_firmware()) return 0; - /* Save PC10 residency for checking later */ - if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pmcdev->pc10_counter)) - return -EIO; + /* Save PKGC residency for checking later */ + for (i = 0; i < pmcdev->num_of_pkgc; i++) { + if (rdmsrq_safe(msr_map[i].bit_mask, &pmcdev->pkgc_res_cnt[i])) + return -EIO; + } /* Save S0ix residency for checking later */ - if (pmc_core_dev_state_get(pmcdev, 
&pmcdev->s0ix_counter)) + if (pmc_core_dev_state_get(pmc, &pmcdev->s0ix_counter)) return -EIO; - pmcdev->check_counters = true; return 0; } -static inline bool pmc_core_is_pc10_failed(struct pmc_dev *pmcdev) +static inline bool pmc_core_is_deepest_pkgc_failed(struct pmc_dev *pmcdev) { - u64 pc10_counter; + u32 deepest_pkgc_msr = msr_map[pmcdev->num_of_pkgc - 1].bit_mask; + u64 deepest_pkgc_residency; - if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pc10_counter)) + if (rdmsrq_safe(deepest_pkgc_msr, &deepest_pkgc_residency)) return false; - if (pc10_counter == pmcdev->pc10_counter) + if (deepest_pkgc_residency == pmcdev->pkgc_res_cnt[pmcdev->num_of_pkgc - 1]) return true; return false; @@ -1207,45 +1942,86 @@ static inline bool pmc_core_is_s0ix_failed(struct pmc_dev *pmcdev) { u64 s0ix_counter; - if (pmc_core_dev_state_get(pmcdev, &s0ix_counter)) + if (pmc_core_dev_state_get(pmcdev->pmcs[PMC_IDX_MAIN], &s0ix_counter)) return false; + pm_report_hw_sleep_time((u32)(s0ix_counter - pmcdev->s0ix_counter)); + if (s0ix_counter == pmcdev->s0ix_counter) return true; return false; } -static __maybe_unused int pmc_core_resume(struct device *dev) +int pmc_core_resume_common(struct pmc_dev *pmcdev) { - struct pmc_dev *pmcdev = dev_get_drvdata(dev); - const struct pmc_bit_map **maps = pmcdev->map->lpm_sts; - int offset = pmcdev->map->lpm_status_offset; + struct device *dev = &pmcdev->pdev->dev; + struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN]; + const struct pmc_bit_map **maps = pmc->map->lpm_sts; + int offset = pmc->map->lpm_status_offset; + unsigned int pmc_idx, i; - if (!pmcdev->check_counters) + /* Check if the suspend used S0ix */ + if (pm_suspend_via_firmware()) return 0; if (!pmc_core_is_s0ix_failed(pmcdev)) return 0; - if (pmc_core_is_pc10_failed(pmcdev)) { - /* S0ix failed because of PC10 entry failure */ - dev_info(dev, "CPU did not enter PC10!!! (PC10 cnt=0x%llx)\n", - pmcdev->pc10_counter); + if (!warn_on_s0ix_failures) + return 0; + + if (pmc_core_is_deepest_pkgc_failed(pmcdev)) { + /* S0ix failed because of deepest PKGC entry failure */ + dev_info(dev, "CPU did not enter %s!!! (%s cnt=0x%llx)\n", + msr_map[pmcdev->num_of_pkgc - 1].name, + msr_map[pmcdev->num_of_pkgc - 1].name, + pmcdev->pkgc_res_cnt[pmcdev->num_of_pkgc - 1]); + + for (i = 0; i < pmcdev->num_of_pkgc; i++) { + u64 pc_cnt; + + if (!rdmsrq_safe(msr_map[i].bit_mask, &pc_cnt)) { + dev_info(dev, "Prev %s cnt = 0x%llx, Current %s cnt = 0x%llx\n", + msr_map[i].name, pmcdev->pkgc_res_cnt[i], + msr_map[i].name, pc_cnt); + } + } return 0; } /* The really interesting case - S0ix failed - let's ask the PMC why. */ dev_warn(dev, "CPU did not enter SLP_S0!!! 
(S0ix cnt=%llu)\n", pmcdev->s0ix_counter); - if (pmcdev->map->slps0_dbg_maps) - pmc_core_slps0_display(pmcdev, dev, NULL); - if (pmcdev->map->lpm_sts) - pmc_core_lpm_display(pmcdev, dev, NULL, offset, "STATUS", maps); + + if (pmc->map->slps0_dbg_maps) + pmc_core_slps0_display(pmc, dev, NULL); + + for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) { + struct pmc *pmc = pmcdev->pmcs[pmc_idx]; + + if (!pmc) + continue; + if (pmc->map->lpm_sts) + pmc_core_lpm_display(pmc, dev, NULL, offset, pmc_idx, "STATUS", maps); + } return 0; } +static __maybe_unused int pmc_core_resume(struct device *dev) +{ + struct pmc_dev *pmcdev = dev_get_drvdata(dev); + + if (ltr_ignore_all_suspend) + pmc_core_ltr_restore_all(pmcdev); + + if (pmcdev->resume) + return pmcdev->resume(pmcdev); + + return pmc_core_resume_common(pmcdev); +} + static const struct dev_pm_ops pmc_core_pm_ops = { SET_LATE_SYSTEM_SLEEP_PM_OPS(pmc_core_suspend, pmc_core_resume) }; @@ -1269,5 +2045,6 @@ static struct platform_driver pmc_core_driver = { module_platform_driver(pmc_core_driver); +MODULE_IMPORT_NS("INTEL_PMT_TELEMETRY"); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Intel PMC Core Driver"); diff --git a/drivers/platform/x86/intel/pmc/core.h b/drivers/platform/x86/intel/pmc/core.h index 810204d758ab..272fb4f57f34 100644 --- a/drivers/platform/x86/intel/pmc/core.h +++ b/drivers/platform/x86/intel/pmc/core.h @@ -16,7 +16,18 @@ #include <linux/bits.h> #include <linux/platform_device.h> +struct telem_endpoint; + +#define SLP_S0_RES_COUNTER_MASK GENMASK(31, 0) + #define PMC_BASE_ADDR_DEFAULT 0xFE000000 +#define MAX_NUM_PMC 3 +#define S0IX_BLK_SIZE 4 + +/* PCH query */ +#define LPM_HEADER_OFFSET 1 +#define LPM_REG_COUNT 28 +#define LPM_MODE_OFFSET 1 /* Sunrise Point Power Management Controller PCI Device ID */ #define SPT_PMC_PCI_DEVICE_ID 0x9d21 @@ -218,6 +229,10 @@ enum ppfear_regs { #define TGL_LPM_PRI_OFFSET 0x1C7C #define TGL_LPM_NUM_MAPS 6 +/* Tigerlake PSON residency register */ +#define TGL_PSON_RESIDENCY_OFFSET 0x18f8 +#define TGL_PSON_RES_COUNTER_STEP 0x7A + /* Extended Test Mode Register 3 (CNL and later) */ #define ETR3_OFFSET 0x1048 #define ETR3_CF9GR BIT(20) @@ -247,12 +262,79 @@ enum ppfear_regs { #define MTL_LPM_STATUS_LATCH_EN_OFFSET 0x16F8 #define MTL_LPM_STATUS_OFFSET 0x1700 #define MTL_LPM_LIVE_STATUS_OFFSET 0x175C +#define MTL_PMC_LTR_IOE_PMC 0x1C0C +#define MTL_PMC_LTR_ESE 0x1BAC +#define MTL_PMC_LTR_RESERVED 0x1BA4 +#define MTL_IOE_PMC_MMIO_REG_LEN 0x23A4 +#define MTL_SOCM_NUM_IP_IGN_ALLOWED 25 +#define MTL_SOC_PMC_MMIO_REG_LEN 0x2708 +#define MTL_PMC_LTR_SPG 0x1B74 +#define ARL_SOCS_PMC_LTR_RESERVED 0x1B88 +#define ARL_SOCS_NUM_IP_IGN_ALLOWED 26 +#define ARL_PMC_LTR_DMI3 0x1BE4 +#define ARL_PCH_PMC_MMIO_REG_LEN 0x2720 + +/* Meteor Lake PGD PFET Enable Ack Status */ +#define MTL_SOCM_PPFEAR_NUM_ENTRIES 8 +#define MTL_IOE_PPFEAR_NUM_ENTRIES 10 +#define ARL_SOCS_PPFEAR_NUM_ENTRIES 9 + +/* Die C6 from PUNIT telemetry */ +#define MTL_PMT_DMU_DIE_C6_OFFSET 15 +#define MTL_PMT_DMU_GUID 0x1A067102 +#define ARL_PMT_DMU_GUID 0x1A06A102 +#define ARL_H_PMT_DMU_GUID 0x1A06A101 + +#define LNL_PMC_MMIO_REG_LEN 0x2708 +#define LNL_PMC_LTR_OSSE 0x1B88 +#define LNL_NUM_IP_IGN_ALLOWED 27 +#define LNL_PPFEAR_NUM_ENTRIES 12 +#define LNL_S0IX_BLOCKER_OFFSET 0x2004 + +/* Panther Lake Power Management Controller register offsets */ +#define PTL_LPM_NUM_MAPS 14 +#define PTL_PMC_LTR_SATA2 0x1B90 +#define PTL_PMC_LTR_PMC 0x1BA8 +#define PTL_PMC_LTR_CUR_ASLT 0x1C28 +#define PTL_PMC_LTR_CUR_PLT 0x1C2C +#define 
PTL_PCD_PMC_MMIO_REG_LEN 0x31A8 +#define PTL_NUM_S0IX_BLOCKER 106 +#define PTL_BLK_REQ_OFFSET 55 + +/* Wildcat Lake */ +#define WCL_PMC_LTR_RESERVED 0x1B64 +#define WCL_PCD_PMC_MMIO_REG_LEN 0x3178 +#define WCL_NUM_S0IX_BLOCKER 94 +#define WCL_BLK_REQ_OFFSET 50 + +/* SSRAM PMC Device ID */ +/* LNL */ +#define PMC_DEVID_LNL_SOCM 0xa87f + +/* PTL */ +#define PMC_DEVID_PTL_PCDH 0xe37f +#define PMC_DEVID_PTL_PCDP 0xe47f + +/* WCL */ +#define PMC_DEVID_WCL_PCDN 0x4d7f + +/* ARL */ +#define PMC_DEVID_ARL_SOCM 0x777f +#define PMC_DEVID_ARL_SOCS 0xae7f +#define PMC_DEVID_ARL_IOEP 0x7ecf +#define PMC_DEVID_ARL_PCHS 0x7f27 + +/* MTL */ +#define PMC_DEVID_MTL_SOCM 0x7e7f +#define PMC_DEVID_MTL_IOEP 0x7ecf +#define PMC_DEVID_MTL_IOEM 0x7ebf extern const char *pmc_lpm_modes[]; struct pmc_bit_map { const char *name; u32 bit_mask; + u8 blk; }; /** @@ -263,6 +345,7 @@ struct pmc_bit_map { * @pll_sts: Maps name of PLL to corresponding bit status * @slps0_dbg_maps: Array of SLP_S0_DBG* registers containing debug info * @ltr_show_sts: Maps PCH IP Names to their MMIO register offsets + * @s0ix_blocker_maps: Maps name of IP block to S0ix blocker counter * @slp_s0_offset: PWRMBASE offset to read SLP_S0 residency * @ltr_ignore_offset: PWRMBASE offset to read/write LTR ignore bit * @regmap_length: Length of memory to map from PWRMBASE address to access @@ -272,6 +355,10 @@ struct pmc_bit_map { * @pm_cfg_offset: PWRMBASE offset to PM_CFG register * @pm_read_disable_bit: Bit index to read PMC_READ_DISABLE * @slps0_dbg_offset: PWRMBASE offset to SLP_S0_DEBUG_REG* + * @s0ix_blocker_offset: PWRMBASE offset to S0ix blocker counter + * @num_s0ix_blocker: Number of S0ix blockers + * @blocker_req_offset: Telemetry offset to S0ix blocker low power mode substate requirement table + * @lpm_req_guid: Telemetry GUID to read low power mode substate requirement table * * Each PCH has a unique set of register offsets and bit indexes. This structure * captures them to have a common implementation. 
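The kernel-doc above captures the contract that every platform file in this series follows: each one fills in a pmc_reg_map with its own register offsets and NULL-terminated name maps, and the shared core code only ever dereferences pmc->map->*. A minimal sketch of the pattern, with hypothetical FOO_* constants and foo maps standing in for a real platform's values (illustrative only, not part of this patch):

static const struct pmc_bit_map foo_pfear_map[] = {
	{"PMC", BIT(0)},	/* one human-readable name per PFET enable-ack bit */
	{"XHCI", BIT(3)},
	{}			/* empty entry terminates every map */
};

static const struct pmc_bit_map *ext_foo_pfear_map[] = {
	foo_pfear_map,
	NULL			/* NULL-terminated list of per-register maps */
};

static const struct pmc_reg_map foo_reg_map = {
	.pfear_sts		 = ext_foo_pfear_map,
	.slp_s0_offset		 = FOO_PMC_SLP_S0_RES_COUNTER_OFFSET,
	.slp_s0_res_counter_step = FOO_PMC_SLP_S0_RES_COUNTER_STEP,
	.regmap_length		 = FOO_PMC_MMIO_REG_LEN,	/* bytes of PWRMBASE to map */
	.ppfear0_offset		 = FOO_PMC_HOST_PPFEAR0A,
	.ppfear_buckets		 = FOO_PPFEAR_NUM_ENTRIES,
};

Keeping every platform difference behind this one structure is what lets the new lnl.c, mtl.c and ptl.c files further down stay almost purely declarative.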
@@ -284,6 +371,7 @@ struct pmc_reg_map { const struct pmc_bit_map *ltr_show_sts; const struct pmc_bit_map *msr_sts; const struct pmc_bit_map **lpm_sts; + const struct pmc_bit_map **s0ix_blocker_maps; const u32 slp_s0_offset; const int slp_s0_res_counter_step; const u32 ltr_ignore_offset; @@ -295,6 +383,9 @@ struct pmc_reg_map { const u32 slps0_dbg_offset; const u32 ltr_ignore_max; const u32 pm_vric1_offset; + const u32 s0ix_blocker_offset; + const u32 num_s0ix_blocker; + const u32 blocker_req_offset; /* Low Power Mode registers */ const int lpm_num_maps; const int lpm_num_modes; @@ -306,112 +397,178 @@ struct pmc_reg_map { const u32 lpm_status_offset; const u32 lpm_live_status_offset; const u32 etr3_offset; + const u8 *lpm_reg_index; + const u32 pson_residency_offset; + const u32 pson_residency_counter_step; + /* GUID for telemetry regions */ + const u32 lpm_req_guid; }; /** - * struct pmc_dev - pmc device structure + * struct pmc_info - Structure to keep pmc info + * @devid: device id of the pmc device + * @map: pointer to a pmc_reg_map struct that contains platform + * specific attributes + */ +struct pmc_info { + u16 devid; + const struct pmc_reg_map *map; +}; + +/** + * struct pmc - pmc private info structure * @base_addr: contains pmc base address * @regbase: pointer to io-remapped memory location * @map: pointer to pmc_reg_map struct that contains platform * specific attributes + * @lpm_req_regs: List of substate requirements + * @ltr_ign: Holds LTR ignore data while suspended + * + * pmc contains info about one power management controller device. + */ +struct pmc { + u64 base_addr; + void __iomem *regbase; + const struct pmc_reg_map *map; + u32 *lpm_req_regs; + u32 ltr_ign; +}; + +/** + * struct pmc_dev - pmc device structure + * @pmcs: pointer to an array of pmc pointers * @pdev: pointer to platform_device struct + * @crystal_freq: crystal frequency from cpuid * @dbgfs_dir: path to debugfs interface * @pmc_xram_read_bit: flag to indicate whether PMC XRAM shadow registers * used to read MPHY PG and PLL status are available * @mutex_lock: mutex to complete one transaction - * @check_counters: On resume, check if counters are getting incremented - * @pc10_counter: PC10 residency counter + * @pkgc_res_cnt: Array of PKGC residency counters + * @num_of_pkgc: Number of PKGC * @s0ix_counter: S0ix residency (step adjusted) * @num_lpm_modes: Count of enabled modes * @lpm_en_modes: Array of enabled modes from lowest to highest priority - * @lpm_req_regs: List of substate requirements - * @core_configure: Function pointer to configure the platform + * @suspend: Function to perform platform specific suspend + * @resume: Function to perform platform specific resume * * pmc_dev contains info about power management controller device. 
*/ struct pmc_dev { - u32 base_addr; - void __iomem *regbase; - const struct pmc_reg_map *map; + struct pmc *pmcs[MAX_NUM_PMC]; struct dentry *dbgfs_dir; struct platform_device *pdev; + unsigned int crystal_freq; int pmc_xram_read_bit; struct mutex lock; /* generic mutex lock for PMC Core */ - bool check_counters; /* Check for counter increments on resume */ - u64 pc10_counter; u64 s0ix_counter; int num_lpm_modes; int lpm_en_modes[LPM_MAX_NUM_MODES]; - u32 *lpm_req_regs; - void (*core_configure)(struct pmc_dev *pmcdev); + void (*suspend)(struct pmc_dev *pmcdev); + int (*resume)(struct pmc_dev *pmcdev); + + u64 *pkgc_res_cnt; + u8 num_of_pkgc; + + u32 die_c6_offset; + struct telem_endpoint *punit_ep; + struct pmc_info *regmap_list; +}; + +enum pmc_index { + PMC_IDX_MAIN, + PMC_IDX_IOE, + PMC_IDX_PCH, + PMC_IDX_MAX +}; + +/** + * struct pmc_dev_info - Structure to keep PMC device info + * @pci_func: Function number of the primary PMC + * @dmu_guids: List of Die Management Unit GUIDs + * @regmap_list: Pointer to a list of pmc_info structures that could be + * available for the platform. When set, this field implies + * SSRAM support. + * @map: Pointer to a pmc_reg_map struct that contains platform + * specific attributes of the primary PMC + * @sub_req_show: File operations to show substate requirements + * @suspend: Function to perform platform specific suspend + * @resume: Function to perform platform specific resume + * @init: Function to perform platform specific init action + * @sub_req: Function to retrieve low power mode substate requirements + */ +struct pmc_dev_info { + u8 pci_func; + u32 *dmu_guids; + struct pmc_info *regmap_list; + const struct pmc_reg_map *map; + const struct file_operations *sub_req_show; + void (*suspend)(struct pmc_dev *pmcdev); + int (*resume)(struct pmc_dev *pmcdev); + int (*init)(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info); + int (*sub_req)(struct pmc_dev *pmcdev, struct pmc *pmc, struct telem_endpoint *ep); }; extern const struct pmc_bit_map msr_map[]; -extern const struct pmc_bit_map spt_pll_map[]; -extern const struct pmc_bit_map spt_mphy_map[]; -extern const struct pmc_bit_map spt_pfear_map[]; -extern const struct pmc_bit_map *ext_spt_pfear_map[]; -extern const struct pmc_bit_map spt_ltr_show_map[]; -extern const struct pmc_reg_map spt_reg_map; extern const struct pmc_bit_map cnp_pfear_map[]; -extern const struct pmc_bit_map *ext_cnp_pfear_map[]; -extern const struct pmc_bit_map cnp_slps0_dbg0_map[]; -extern const struct pmc_bit_map cnp_slps0_dbg1_map[]; -extern const struct pmc_bit_map cnp_slps0_dbg2_map[]; extern const struct pmc_bit_map *cnp_slps0_dbg_maps[]; extern const struct pmc_bit_map cnp_ltr_show_map[]; extern const struct pmc_reg_map cnp_reg_map; -extern const struct pmc_bit_map icl_pfear_map[]; -extern const struct pmc_bit_map *ext_icl_pfear_map[]; -extern const struct pmc_reg_map icl_reg_map; -extern const struct pmc_bit_map tgl_pfear_map[]; -extern const struct pmc_bit_map *ext_tgl_pfear_map[]; -extern const struct pmc_bit_map tgl_clocksource_status_map[]; -extern const struct pmc_bit_map tgl_power_gating_status_map[]; -extern const struct pmc_bit_map tgl_d3_status_map[]; -extern const struct pmc_bit_map tgl_vnn_req_status_map[]; -extern const struct pmc_bit_map tgl_vnn_misc_status_map[]; extern const struct pmc_bit_map tgl_signal_status_map[]; -extern const struct pmc_bit_map *tgl_lpm_maps[]; -extern const struct pmc_reg_map tgl_reg_map; -extern const struct pmc_bit_map adl_pfear_map[]; -extern const struct pmc_bit_map 
*ext_adl_pfear_map[]; -extern const struct pmc_bit_map adl_ltr_show_map[]; -extern const struct pmc_bit_map adl_clocksource_status_map[]; -extern const struct pmc_bit_map adl_power_gating_status_0_map[]; -extern const struct pmc_bit_map adl_power_gating_status_1_map[]; -extern const struct pmc_bit_map adl_power_gating_status_2_map[]; -extern const struct pmc_bit_map adl_d3_status_0_map[]; -extern const struct pmc_bit_map adl_d3_status_1_map[]; -extern const struct pmc_bit_map adl_d3_status_2_map[]; -extern const struct pmc_bit_map adl_d3_status_3_map[]; -extern const struct pmc_bit_map adl_vnn_req_status_0_map[]; -extern const struct pmc_bit_map adl_vnn_req_status_1_map[]; -extern const struct pmc_bit_map adl_vnn_req_status_2_map[]; -extern const struct pmc_bit_map adl_vnn_req_status_3_map[]; -extern const struct pmc_bit_map adl_vnn_misc_status_map[]; -extern const struct pmc_bit_map *adl_lpm_maps[]; extern const struct pmc_reg_map adl_reg_map; -extern const struct pmc_reg_map mtl_reg_map; - -extern void pmc_core_get_tgl_lpm_reqs(struct platform_device *pdev); -extern int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value); - -void spt_core_init(struct pmc_dev *pmcdev); -void cnp_core_init(struct pmc_dev *pmcdev); -void icl_core_init(struct pmc_dev *pmcdev); -void tgl_core_init(struct pmc_dev *pmcdev); -void adl_core_init(struct pmc_dev *pmcdev); -void mtl_core_init(struct pmc_dev *pmcdev); -void tgl_core_configure(struct pmc_dev *pmcdev); -void adl_core_configure(struct pmc_dev *pmcdev); -void mtl_core_configure(struct pmc_dev *pmcdev); - -#define pmc_for_each_mode(i, mode, pmcdev) \ - for (i = 0, mode = pmcdev->lpm_en_modes[i]; \ - i < pmcdev->num_lpm_modes; \ - i++, mode = pmcdev->lpm_en_modes[i]) +extern const struct pmc_bit_map mtl_socm_pfear_map[]; +extern const struct pmc_bit_map mtl_socm_d3_status_0_map[]; +extern const struct pmc_bit_map mtl_socm_d3_status_1_map[]; +extern const struct pmc_bit_map mtl_socm_vnn_req_status_0_map[]; +extern const struct pmc_bit_map mtl_socm_vnn_req_status_1_map[]; +extern const struct pmc_bit_map mtl_socm_vnn_req_status_2_map[]; +extern const struct pmc_bit_map mtl_socm_vnn_misc_status_map[]; +extern const struct pmc_bit_map mtl_socm_signal_status_map[]; +extern const struct pmc_reg_map mtl_socm_reg_map; +extern const struct pmc_reg_map mtl_ioep_reg_map; +extern const struct pmc_bit_map ptl_pcdp_clocksource_status_map[]; +extern const struct pmc_bit_map ptl_pcdp_vnn_req_status_3_map[]; +extern const struct pmc_bit_map ptl_pcdp_signal_status_map[]; + +void pmc_core_get_tgl_lpm_reqs(struct platform_device *pdev); +int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore); + +int pmc_core_resume_common(struct pmc_dev *pmcdev); +int get_primary_reg_base(struct pmc *pmc); +void pmc_core_get_low_power_modes(struct pmc_dev *pmcdev); +void pmc_core_punit_pmt_init(struct pmc_dev *pmcdev, u32 *guids); +void pmc_core_set_device_d3(unsigned int device); + +int generic_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info); + +extern struct pmc_dev_info spt_pmc_dev; +extern struct pmc_dev_info cnp_pmc_dev; +extern struct pmc_dev_info icl_pmc_dev; +extern struct pmc_dev_info tgl_l_pmc_dev; +extern struct pmc_dev_info tgl_pmc_dev; +extern struct pmc_dev_info adl_pmc_dev; +extern struct pmc_dev_info mtl_pmc_dev; +extern struct pmc_dev_info arl_pmc_dev; +extern struct pmc_dev_info arl_h_pmc_dev; +extern struct pmc_dev_info lnl_pmc_dev; +extern struct pmc_dev_info ptl_pmc_dev; +extern struct pmc_dev_info wcl_pmc_dev; + +void 
cnl_suspend(struct pmc_dev *pmcdev); +int cnl_resume(struct pmc_dev *pmcdev); +int pmc_core_pmt_get_lpm_req(struct pmc_dev *pmcdev, struct pmc *pmc, struct telem_endpoint *ep); +int pmc_core_pmt_get_blk_sub_req(struct pmc_dev *pmcdev, struct pmc *pmc, + struct telem_endpoint *ep); + +extern const struct file_operations pmc_core_substate_req_regs_fops; +extern const struct file_operations pmc_core_substate_blk_req_fops; + +#define pmc_for_each_mode(mode, pmcdev) \ + for (unsigned int __i = 0, __cond; \ + __cond = __i < (pmcdev)->num_lpm_modes, \ + __cond && ((mode) = (pmcdev)->lpm_en_modes[__i]), \ + __cond; \ + __i++) #define DEFINE_PMC_CORE_ATTR_WRITE(__name) \ static int __name ## _open(struct inode *inode, struct file *file) \ diff --git a/drivers/platform/x86/intel/pmc/icl.c b/drivers/platform/x86/intel/pmc/icl.c index 2f11b1a6daeb..db7ed15bf863 100644 --- a/drivers/platform/x86/intel/pmc/icl.c +++ b/drivers/platform/x86/intel/pmc/icl.c @@ -10,7 +10,7 @@ #include "core.h" -const struct pmc_bit_map icl_pfear_map[] = { +static const struct pmc_bit_map icl_pfear_map[] = { {"RES_65", BIT(0)}, {"RES_66", BIT(1)}, {"RES_67", BIT(2)}, @@ -22,7 +22,7 @@ const struct pmc_bit_map icl_pfear_map[] = { {} }; -const struct pmc_bit_map *ext_icl_pfear_map[] = { +static const struct pmc_bit_map *ext_icl_pfear_map[] = { /* * Check intel_pmc_core_ids[] users of icl_reg_map for * a list of core SoCs using this. @@ -32,7 +32,7 @@ const struct pmc_bit_map *ext_icl_pfear_map[] = { NULL }; -const struct pmc_reg_map icl_reg_map = { +static const struct pmc_reg_map icl_reg_map = { .pfear_sts = ext_icl_pfear_map, .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET, .slp_s0_res_counter_step = ICL_PMC_SLP_S0_RES_COUNTER_STEP, @@ -50,7 +50,6 @@ const struct pmc_reg_map icl_reg_map = { .etr3_offset = ETR3_OFFSET, }; -void icl_core_init(struct pmc_dev *pmcdev) -{ - pmcdev->map = &icl_reg_map; -} +struct pmc_dev_info icl_pmc_dev = { + .map = &icl_reg_map, +}; diff --git a/drivers/platform/x86/intel/pmc/lnl.c b/drivers/platform/x86/intel/pmc/lnl.c new file mode 100644 index 000000000000..1cd81ee54dcf --- /dev/null +++ b/drivers/platform/x86/intel/pmc/lnl.c @@ -0,0 +1,582 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This file contains platform specific structure definitions + * and init function used by Lunar Lake PCH. + * + * Copyright (c) 2022, Intel Corporation. + * All Rights Reserved. + * + */ + +#include <linux/cpu.h> +#include <linux/pci.h> + +#include "core.h" + +#define SOCM_LPM_REQ_GUID 0x15099748 + +static const u8 LNL_LPM_REG_INDEX[] = {0, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20}; + +static const struct pmc_bit_map lnl_ltr_show_map[] = { + {"SOUTHPORT_A", CNP_PMC_LTR_SPA}, + {"SOUTHPORT_B", CNP_PMC_LTR_SPB}, + {"SATA", CNP_PMC_LTR_SATA}, + {"GIGABIT_ETHERNET", CNP_PMC_LTR_GBE}, + {"XHCI", CNP_PMC_LTR_XHCI}, + {"SOUTHPORT_F", ADL_PMC_LTR_SPF}, + {"ME", CNP_PMC_LTR_ME}, + /* EVA is Enterprise Value Add, doesn't really exist on PCH */ + {"SATA1", CNP_PMC_LTR_EVA}, + {"SOUTHPORT_C", CNP_PMC_LTR_SPC}, + {"HD_AUDIO", CNP_PMC_LTR_AZ}, + {"CNV", CNP_PMC_LTR_CNV}, + {"LPSS", CNP_PMC_LTR_LPSS}, + {"SOUTHPORT_D", CNP_PMC_LTR_SPD}, + {"SOUTHPORT_E", CNP_PMC_LTR_SPE}, + {"SATA2", CNP_PMC_LTR_CAM}, + {"ESPI", CNP_PMC_LTR_ESPI}, + {"SCC", CNP_PMC_LTR_SCC}, + {"ISH", CNP_PMC_LTR_ISH}, + {"UFSX2", CNP_PMC_LTR_UFSX2}, + {"EMMC", CNP_PMC_LTR_EMMC}, + /* + * Check intel_pmc_core_ids[] users of cnp_reg_map for + * a list of core SoCs using this. 
+ */ + {"WIGIG", ICL_PMC_LTR_WIGIG}, + {"THC0", TGL_PMC_LTR_THC0}, + {"THC1", TGL_PMC_LTR_THC1}, + {"SOUTHPORT_G", CNP_PMC_LTR_RESERVED}, + + {"ESE", MTL_PMC_LTR_ESE}, + {"IOE_PMC", MTL_PMC_LTR_IOE_PMC}, + {"DMI3", ARL_PMC_LTR_DMI3}, + {"OSSE", LNL_PMC_LTR_OSSE}, + + /* Below two cannot be used for LTR_IGNORE */ + {"CURRENT_PLATFORM", CNP_PMC_LTR_CUR_PLT}, + {"AGGREGATED_SYSTEM", CNP_PMC_LTR_CUR_ASLT}, + {} +}; + +static const struct pmc_bit_map lnl_power_gating_status_0_map[] = { + {"PMC_PGD0_PG_STS", BIT(0), 0}, + {"FUSE_OSSE_PGD0_PG_STS", BIT(1), 0}, + {"ESPISPI_PGD0_PG_STS", BIT(2), 0}, + {"XHCI_PGD0_PG_STS", BIT(3), 1}, + {"SPA_PGD0_PG_STS", BIT(4), 1}, + {"SPB_PGD0_PG_STS", BIT(5), 1}, + {"SPR16B0_PGD0_PG_STS", BIT(6), 0}, + {"GBE_PGD0_PG_STS", BIT(7), 1}, + {"SBR8B7_PGD0_PG_STS", BIT(8), 0}, + {"SBR8B6_PGD0_PG_STS", BIT(9), 0}, + {"SBR16B1_PGD0_PG_STS", BIT(10), 0}, + {"SBR8B8_PGD0_PG_STS", BIT(11), 0}, + {"ESE_PGD3_PG_STS", BIT(12), 1}, + {"D2D_DISP_PGD0_PG_STS", BIT(13), 1}, + {"LPSS_PGD0_PG_STS", BIT(14), 1}, + {"LPC_PGD0_PG_STS", BIT(15), 0}, + {"SMB_PGD0_PG_STS", BIT(16), 0}, + {"ISH_PGD0_PG_STS", BIT(17), 0}, + {"SBR8B2_PGD0_PG_STS", BIT(18), 0}, + {"NPK_PGD0_PG_STS", BIT(19), 0}, + {"D2D_NOC_PGD0_PG_STS", BIT(20), 0}, + {"SAFSS_PGD0_PG_STS", BIT(21), 0}, + {"FUSE_PGD0_PG_STS", BIT(22), 0}, + {"D2D_DISP_PGD1_PG_STS", BIT(23), 1}, + {"MPFPW1_PGD0_PG_STS", BIT(24), 0}, + {"XDCI_PGD0_PG_STS", BIT(25), 1}, + {"EXI_PGD0_PG_STS", BIT(26), 0}, + {"CSE_PGD0_PG_STS", BIT(27), 1}, + {"KVMCC_PGD0_PG_STS", BIT(28), 1}, + {"PMT_PGD0_PG_STS", BIT(29), 1}, + {"CLINK_PGD0_PG_STS", BIT(30), 1}, + {"PTIO_PGD0_PG_STS", BIT(31), 1}, + {} +}; + +static const struct pmc_bit_map lnl_power_gating_status_1_map[] = { + {"USBR0_PGD0_PG_STS", BIT(0), 1}, + {"SUSRAM_PGD0_PG_STS", BIT(1), 1}, + {"SMT1_PGD0_PG_STS", BIT(2), 1}, + {"U3FPW1_PGD0_PG_STS", BIT(3), 0}, + {"SMS2_PGD0_PG_STS", BIT(4), 1}, + {"SMS1_PGD0_PG_STS", BIT(5), 1}, + {"CSMERTC_PGD0_PG_STS", BIT(6), 0}, + {"CSMEPSF_PGD0_PG_STS", BIT(7), 0}, + {"FIA_PG_PGD0_PG_STS", BIT(8), 0}, + {"SBR16B4_PGD0_PG_STS", BIT(9), 0}, + {"P2SB8B_PGD0_PG_STS", BIT(10), 1}, + {"DBG_SBR_PGD0_PG_STS", BIT(11), 0}, + {"SBR8B9_PGD0_PG_STS", BIT(12), 0}, + {"OSSE_SMT1_PGD0_PG_STS", BIT(13), 1}, + {"SBR8B10_PGD0_PG_STS", BIT(14), 0}, + {"SBR16B3_PGD0_PG_STS", BIT(15), 0}, + {"G5FPW1_PGD0_PG_STS", BIT(16), 0}, + {"SBRG_PGD0_PG_STS", BIT(17), 0}, + {"PSF4_PGD0_PG_STS", BIT(18), 0}, + {"CNVI_PGD0_PG_STS", BIT(19), 0}, + {"USFX2_PGD0_PG_STS", BIT(20), 1}, + {"ENDBG_PGD0_PG_STS", BIT(21), 0}, + {"FIACPCB_P5X4_PGD0_PG_STS", BIT(22), 0}, + {"SBR8B3_PGD0_PG_STS", BIT(23), 0}, + {"SBR8B0_PGD0_PG_STS", BIT(24), 0}, + {"NPK_PGD1_PG_STS", BIT(25), 0}, + {"OSSE_HOTHAM_PGD0_PG_STS", BIT(26), 1}, + {"D2D_NOC_PGD2_PG_STS", BIT(27), 1}, + {"SBR8B1_PGD0_PG_STS", BIT(28), 0}, + {"PSF6_PGD0_PG_STS", BIT(29), 0}, + {"PSF7_PGD0_PG_STS", BIT(30), 0}, + {"FIA_U_PGD0_PG_STS", BIT(31), 0}, + {} +}; + +static const struct pmc_bit_map lnl_power_gating_status_2_map[] = { + {"PSF8_PGD0_PG_STS", BIT(0), 0}, + {"SBR16B2_PGD0_PG_STS", BIT(1), 0}, + {"D2D_IPU_PGD0_PG_STS", BIT(2), 1}, + {"FIACPCB_U_PGD0_PG_STS", BIT(3), 0}, + {"TAM_PGD0_PG_STS", BIT(4), 1}, + {"D2D_NOC_PGD1_PG_STS", BIT(5), 1}, + {"TBTLSX_PGD0_PG_STS", BIT(6), 1}, + {"THC0_PGD0_PG_STS", BIT(7), 1}, + {"THC1_PGD0_PG_STS", BIT(8), 1}, + {"PMC_PGD0_PG_STS", BIT(9), 0}, + {"SBR8B5_PGD0_PG_STS", BIT(10), 0}, + {"UFSPW1_PGD0_PG_STS", BIT(11), 0}, + {"DBC_PGD0_PG_STS", BIT(12), 0}, + {"TCSS_PGD0_PG_STS", BIT(13), 0}, + 
{"FIA_P5X4_PGD0_PG_STS", BIT(14), 0}, + {"DISP_PGA_PGD0_PG_STS", BIT(15), 0}, + {"DISP_PSF_PGD0_PG_STS", BIT(16), 0}, + {"PSF0_PGD0_PG_STS", BIT(17), 0}, + {"P2SB16B_PGD0_PG_STS", BIT(18), 1}, + {"ACE_PGD0_PG_STS", BIT(19), 0}, + {"ACE_PGD1_PG_STS", BIT(20), 0}, + {"ACE_PGD2_PG_STS", BIT(21), 0}, + {"ACE_PGD3_PG_STS", BIT(22), 0}, + {"ACE_PGD4_PG_STS", BIT(23), 0}, + {"ACE_PGD5_PG_STS", BIT(24), 0}, + {"ACE_PGD6_PG_STS", BIT(25), 0}, + {"ACE_PGD7_PG_STS", BIT(26), 0}, + {"ACE_PGD8_PG_STS", BIT(27), 0}, + {"ACE_PGD9_PG_STS", BIT(28), 0}, + {"ACE_PGD10_PG_STS", BIT(29), 0}, + {"FIACPCB_PG_PGD0_PG_STS", BIT(30), 0}, + {"OSSE_PGD0_PG_STS", BIT(31), 1}, + {} +}; + +static const struct pmc_bit_map lnl_d3_status_0_map[] = { + {"LPSS_D3_STS", BIT(3), 1}, + {"XDCI_D3_STS", BIT(4), 1}, + {"XHCI_D3_STS", BIT(5), 1}, + {"SPA_D3_STS", BIT(12), 0}, + {"SPB_D3_STS", BIT(13), 0}, + {"OSSE_D3_STS", BIT(15), 0}, + {"ESPISPI_D3_STS", BIT(18), 0}, + {"PSTH_D3_STS", BIT(21), 0}, + {} +}; + +static const struct pmc_bit_map lnl_d3_status_1_map[] = { + {"OSSE_SMT1_D3_STS", BIT(7), 0}, + {"GBE_D3_STS", BIT(19), 0}, + {"ITSS_D3_STS", BIT(23), 0}, + {"CNVI_D3_STS", BIT(27), 0}, + {"UFSX2_D3_STS", BIT(28), 1}, + {"OSSE_HOTHAM_D3_STS", BIT(31), 0}, + {} +}; + +static const struct pmc_bit_map lnl_d3_status_2_map[] = { + {"ESE_D3_STS", BIT(0), 0}, + {"CSMERTC_D3_STS", BIT(1), 0}, + {"SUSRAM_D3_STS", BIT(2), 0}, + {"CSE_D3_STS", BIT(4), 0}, + {"KVMCC_D3_STS", BIT(5), 0}, + {"USBR0_D3_STS", BIT(6), 0}, + {"ISH_D3_STS", BIT(7), 0}, + {"SMT1_D3_STS", BIT(8), 0}, + {"SMT2_D3_STS", BIT(9), 0}, + {"SMT3_D3_STS", BIT(10), 0}, + {"OSSE_SMT2_D3_STS", BIT(13), 0}, + {"CLINK_D3_STS", BIT(14), 0}, + {"PTIO_D3_STS", BIT(16), 0}, + {"PMT_D3_STS", BIT(17), 0}, + {"SMS1_D3_STS", BIT(18), 0}, + {"SMS2_D3_STS", BIT(19), 0}, + {} +}; + +static const struct pmc_bit_map lnl_d3_status_3_map[] = { + {"THC0_D3_STS", BIT(14), 1}, + {"THC1_D3_STS", BIT(15), 1}, + {"OSSE_SMT3_D3_STS", BIT(21), 0}, + {"ACE_D3_STS", BIT(23), 0}, + {} +}; + +static const struct pmc_bit_map lnl_vnn_req_status_0_map[] = { + {"LPSS_VNN_REQ_STS", BIT(3), 1}, + {"OSSE_VNN_REQ_STS", BIT(15), 1}, + {"ESPISPI_VNN_REQ_STS", BIT(18), 1}, + {} +}; + +static const struct pmc_bit_map lnl_vnn_req_status_1_map[] = { + {"NPK_VNN_REQ_STS", BIT(4), 1}, + {"OSSE_SMT1_VNN_REQ_STS", BIT(7), 1}, + {"DFXAGG_VNN_REQ_STS", BIT(8), 0}, + {"EXI_VNN_REQ_STS", BIT(9), 1}, + {"P2D_VNN_REQ_STS", BIT(18), 1}, + {"GBE_VNN_REQ_STS", BIT(19), 1}, + {"SMB_VNN_REQ_STS", BIT(25), 1}, + {"LPC_VNN_REQ_STS", BIT(26), 0}, + {} +}; + +static const struct pmc_bit_map lnl_vnn_req_status_2_map[] = { + {"eSE_VNN_REQ_STS", BIT(0), 1}, + {"CSMERTC_VNN_REQ_STS", BIT(1), 1}, + {"CSE_VNN_REQ_STS", BIT(4), 1}, + {"ISH_VNN_REQ_STS", BIT(7), 1}, + {"SMT1_VNN_REQ_STS", BIT(8), 1}, + {"CLINK_VNN_REQ_STS", BIT(14), 1}, + {"SMS1_VNN_REQ_STS", BIT(18), 1}, + {"SMS2_VNN_REQ_STS", BIT(19), 1}, + {"GPIOCOM4_VNN_REQ_STS", BIT(20), 1}, + {"GPIOCOM3_VNN_REQ_STS", BIT(21), 1}, + {"GPIOCOM2_VNN_REQ_STS", BIT(22), 0}, + {"GPIOCOM1_VNN_REQ_STS", BIT(23), 1}, + {"GPIOCOM0_VNN_REQ_STS", BIT(24), 1}, + {} +}; + +static const struct pmc_bit_map lnl_vnn_req_status_3_map[] = { + {"DISP_SHIM_VNN_REQ_STS", BIT(2), 0}, + {"DTS0_VNN_REQ_STS", BIT(7), 0}, + {"GPIOCOM5_VNN_REQ_STS", BIT(11), 2}, + {} +}; + +static const struct pmc_bit_map lnl_vnn_misc_status_map[] = { + {"CPU_C10_REQ_STS", BIT(0), 0}, + {"TS_OFF_REQ_STS", BIT(1), 0}, + {"PNDE_MET_REQ_STS", BIT(2), 1}, + {"PCIE_DEEP_PM_REQ_STS", BIT(3), 0}, + {"PMC_CLK_THROTTLE_EN_REQ_STS", BIT(4), 
0}, + {"NPK_VNNAON_REQ_STS", BIT(5), 0}, + {"VNN_SOC_REQ_STS", BIT(6), 1}, + {"ISH_VNNAON_REQ_STS", BIT(7), 0}, + {"D2D_NOC_CFI_QACTIVE_REQ_STS", BIT(8), 1}, + {"D2D_NOC_GPSB_QACTIVE_REQ_STS", BIT(9), 1}, + {"D2D_NOC_IPU_QACTIVE_REQ_STS", BIT(10), 1}, + {"PLT_GREATER_REQ_STS", BIT(11), 1}, + {"PCIE_CLKREQ_REQ_STS", BIT(12), 0}, + {"PMC_IDLE_FB_OCP_REQ_STS", BIT(13), 0}, + {"PM_SYNC_STATES_REQ_STS", BIT(14), 0}, + {"EA_REQ_STS", BIT(15), 0}, + {"MPHY_CORE_OFF_REQ_STS", BIT(16), 0}, + {"BRK_EV_EN_REQ_STS", BIT(17), 0}, + {"AUTO_DEMO_EN_REQ_STS", BIT(18), 0}, + {"ITSS_CLK_SRC_REQ_STS", BIT(19), 1}, + {"LPC_CLK_SRC_REQ_STS", BIT(20), 0}, + {"ARC_IDLE_REQ_STS", BIT(21), 0}, + {"MPHY_SUS_REQ_STS", BIT(22), 0}, + {"FIA_DEEP_PM_REQ_STS", BIT(23), 0}, + {"UXD_CONNECTED_REQ_STS", BIT(24), 1}, + {"ARC_INTERRUPT_WAKE_REQ_STS", BIT(25), 0}, + {"D2D_NOC_DISP_DDI_QACTIVE_REQ_STS", BIT(26), 1}, + {"PRE_WAKE0_REQ_STS", BIT(27), 1}, + {"PRE_WAKE1_REQ_STS", BIT(28), 1}, + {"PRE_WAKE2_EN_REQ_STS", BIT(29), 1}, + {"WOV_REQ_STS", BIT(30), 0}, + {"D2D_NOC_DISP_EDP_QACTIVE_REQ_STS_31", BIT(31), 1}, + {} +}; + +static const struct pmc_bit_map lnl_clocksource_status_map[] = { + {"AON2_OFF_STS", BIT(0), 0}, + {"AON3_OFF_STS", BIT(1), 1}, + {"AON4_OFF_STS", BIT(2), 1}, + {"AON5_OFF_STS", BIT(3), 1}, + {"AON1_OFF_STS", BIT(4), 0}, + {"MPFPW1_0_PLL_OFF_STS", BIT(6), 1}, + {"USB3_PLL_OFF_STS", BIT(8), 1}, + {"AON3_SPL_OFF_STS", BIT(9), 1}, + {"G5FPW1_PLL_OFF_STS", BIT(15), 1}, + {"XTAL_AGGR_OFF_STS", BIT(17), 1}, + {"USB2_PLL_OFF_STS", BIT(18), 0}, + {"SAF_PLL_OFF_STS", BIT(19), 1}, + {"SE_TCSS_PLL_OFF_STS", BIT(20), 1}, + {"DDI_PLL_OFF_STS", BIT(21), 1}, + {"FILTER_PLL_OFF_STS", BIT(22), 1}, + {"ACE_PLL_OFF_STS", BIT(24), 0}, + {"FABRIC_PLL_OFF_STS", BIT(25), 1}, + {"SOC_PLL_OFF_STS", BIT(26), 1}, + {"REF_OFF_STS", BIT(28), 1}, + {"IMG_OFF_STS", BIT(29), 1}, + {"RTC_PLL_OFF_STS", BIT(31), 0}, + {} +}; + +static const struct pmc_bit_map lnl_signal_status_map[] = { + {"LSX_Wake0_STS", BIT(0), 0}, + {"LSX_Wake1_STS", BIT(1), 0}, + {"LSX_Wake2_STS", BIT(2), 0}, + {"LSX_Wake3_STS", BIT(3), 0}, + {"LSX_Wake4_STS", BIT(4), 0}, + {"LSX_Wake5_STS", BIT(5), 0}, + {"LSX_Wake6_STS", BIT(6), 0}, + {"LSX_Wake7_STS", BIT(7), 0}, + {"LPSS_Wake0_STS", BIT(8), 1}, + {"LPSS_Wake1_STS", BIT(9), 1}, + {"Int_Timer_SS_Wake0_STS", BIT(10), 1}, + {"Int_Timer_SS_Wake1_STS", BIT(11), 1}, + {"Int_Timer_SS_Wake2_STS", BIT(12), 1}, + {"Int_Timer_SS_Wake3_STS", BIT(13), 1}, + {"Int_Timer_SS_Wake4_STS", BIT(14), 1}, + {"Int_Timer_SS_Wake5_STS", BIT(15), 1}, + {} +}; + +static const struct pmc_bit_map lnl_rsc_status_map[] = { + {"Memory", 0, 1}, + {"PSF0", 0, 1}, + {"PSF4", 0, 1}, + {"PSF6", 0, 1}, + {"PSF7", 0, 1}, + {"PSF8", 0, 1}, + {"SAF_CFI_LINK", 0, 1}, + {"SBR", 0, 1}, + {} +}; + +static const struct pmc_bit_map *lnl_lpm_maps[] = { + lnl_clocksource_status_map, + lnl_power_gating_status_0_map, + lnl_power_gating_status_1_map, + lnl_power_gating_status_2_map, + lnl_d3_status_0_map, + lnl_d3_status_1_map, + lnl_d3_status_2_map, + lnl_d3_status_3_map, + lnl_vnn_req_status_0_map, + lnl_vnn_req_status_1_map, + lnl_vnn_req_status_2_map, + lnl_vnn_req_status_3_map, + lnl_vnn_misc_status_map, + lnl_signal_status_map, + NULL +}; + +static const struct pmc_bit_map *lnl_blk_maps[] = { + lnl_power_gating_status_0_map, + lnl_power_gating_status_1_map, + lnl_power_gating_status_2_map, + lnl_rsc_status_map, + lnl_vnn_req_status_0_map, + lnl_vnn_req_status_1_map, + lnl_vnn_req_status_2_map, + lnl_vnn_req_status_3_map, + lnl_d3_status_0_map, + 
lnl_d3_status_1_map, + lnl_d3_status_2_map, + lnl_d3_status_3_map, + lnl_clocksource_status_map, + lnl_vnn_misc_status_map, + lnl_signal_status_map, + NULL +}; + +static const struct pmc_bit_map lnl_pfear_map[] = { + {"PMC_0", BIT(0)}, + {"FUSE_OSSE", BIT(1)}, + {"ESPISPI", BIT(2)}, + {"XHCI", BIT(3)}, + {"SPA", BIT(4)}, + {"SPB", BIT(5)}, + {"SBR16B0", BIT(6)}, + {"GBE", BIT(7)}, + + {"SBR8B7", BIT(0)}, + {"SBR8B6", BIT(1)}, + {"SBR16B1", BIT(1)}, + {"SBR8B8", BIT(2)}, + {"ESE", BIT(3)}, + {"SBR8B10", BIT(4)}, + {"D2D_DISP_0", BIT(5)}, + {"LPSS", BIT(6)}, + {"LPC", BIT(7)}, + + {"SMB", BIT(0)}, + {"ISH", BIT(1)}, + {"SBR8B2", BIT(2)}, + {"NPK_0", BIT(3)}, + {"D2D_NOC_0", BIT(4)}, + {"SAFSS", BIT(5)}, + {"FUSE", BIT(6)}, + {"D2D_DISP_1", BIT(7)}, + + {"MPFPW1", BIT(0)}, + {"XDCI", BIT(1)}, + {"EXI", BIT(2)}, + {"CSE", BIT(3)}, + {"KVMCC", BIT(4)}, + {"PMT", BIT(5)}, + {"CLINK", BIT(6)}, + {"PTIO", BIT(7)}, + + {"USBR", BIT(0)}, + {"SUSRAM", BIT(1)}, + {"SMT1", BIT(2)}, + {"U3FPW1", BIT(3)}, + {"SMS2", BIT(4)}, + {"SMS1", BIT(5)}, + {"CSMERTC", BIT(6)}, + {"CSMEPSF", BIT(7)}, + + {"FIA_PG", BIT(0)}, + {"SBR16B4", BIT(1)}, + {"P2SB8B", BIT(2)}, + {"DBG_SBR", BIT(3)}, + {"SBR8B9", BIT(4)}, + {"OSSE_SMT1", BIT(5)}, + {"SBR8B10", BIT(6)}, + {"SBR16B3", BIT(7)}, + + {"G5FPW1", BIT(0)}, + {"SBRG", BIT(1)}, + {"PSF4", BIT(2)}, + {"CNVI", BIT(3)}, + {"UFSX2", BIT(4)}, + {"ENDBG", BIT(5)}, + {"FIACPCB_P5X4", BIT(6)}, + {"SBR8B3", BIT(7)}, + + {"SBR8B0", BIT(0)}, + {"NPK_1", BIT(1)}, + {"OSSE_HOTHAM", BIT(2)}, + {"D2D_NOC_2", BIT(3)}, + {"SBR8B1", BIT(4)}, + {"PSF6", BIT(5)}, + {"PSF7", BIT(6)}, + {"FIA_U", BIT(7)}, + + {"PSF8", BIT(0)}, + {"SBR16B2", BIT(1)}, + {"D2D_IPU", BIT(2)}, + {"FIACPCB_U", BIT(3)}, + {"TAM", BIT(4)}, + {"D2D_NOC_1", BIT(5)}, + {"TBTLSX", BIT(6)}, + {"THC0", BIT(7)}, + + {"THC1", BIT(0)}, + {"PMC_1", BIT(1)}, + {"SBR8B5", BIT(2)}, + {"UFSPW1", BIT(3)}, + {"DBC", BIT(4)}, + {"TCSS", BIT(5)}, + {"FIA_P5X4", BIT(6)}, + {"DISP_PGA", BIT(7)}, + + {"DBG_PSF", BIT(0)}, + {"PSF0", BIT(1)}, + {"P2SB16B", BIT(2)}, + {"ACE0", BIT(3)}, + {"ACE1", BIT(4)}, + {"ACE2", BIT(5)}, + {"ACE3", BIT(6)}, + {"ACE4", BIT(7)}, + + {"ACE5", BIT(0)}, + {"ACE6", BIT(1)}, + {"ACE7", BIT(2)}, + {"ACE8", BIT(3)}, + {"ACE9", BIT(4)}, + {"ACE10", BIT(5)}, + {"FIACPCB", BIT(6)}, + {"OSSE", BIT(7)}, + {} +}; + +static const struct pmc_bit_map *ext_lnl_pfear_map[] = { + lnl_pfear_map, + NULL +}; + +static const struct pmc_reg_map lnl_socm_reg_map = { + .pfear_sts = ext_lnl_pfear_map, + .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET, + .slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP, + .ltr_show_sts = lnl_ltr_show_map, + .msr_sts = msr_map, + .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET, + .regmap_length = LNL_PMC_MMIO_REG_LEN, + .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A, + .ppfear_buckets = LNL_PPFEAR_NUM_ENTRIES, + .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET, + .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT, + .ltr_ignore_max = LNL_NUM_IP_IGN_ALLOWED, + .lpm_num_maps = ADL_LPM_NUM_MAPS, + .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2, + .etr3_offset = ETR3_OFFSET, + .lpm_sts_latch_en_offset = MTL_LPM_STATUS_LATCH_EN_OFFSET, + .lpm_priority_offset = MTL_LPM_PRI_OFFSET, + .lpm_en_offset = MTL_LPM_EN_OFFSET, + .lpm_residency_offset = MTL_LPM_RESIDENCY_OFFSET, + .lpm_sts = lnl_lpm_maps, + .lpm_status_offset = MTL_LPM_STATUS_OFFSET, + .lpm_live_status_offset = MTL_LPM_LIVE_STATUS_OFFSET, + .s0ix_blocker_maps = lnl_blk_maps, + .s0ix_blocker_offset = LNL_S0IX_BLOCKER_OFFSET, + .lpm_reg_index = 
LNL_LPM_REG_INDEX, + .lpm_req_guid = SOCM_LPM_REQ_GUID, +}; + +static struct pmc_info lnl_pmc_info_list[] = { + { + .devid = PMC_DEVID_LNL_SOCM, + .map = &lnl_socm_reg_map, + }, + {} +}; + +#define LNL_NPU_PCI_DEV 0x643e +#define LNL_IPU_PCI_DEV 0x645d + +/* + * Set power state of select devices that do not have drivers to D3 + * so that they do not block Package C entry. + */ +static void lnl_d3_fixup(void) +{ + pmc_core_set_device_d3(LNL_IPU_PCI_DEV); + pmc_core_set_device_d3(LNL_NPU_PCI_DEV); +} + +static int lnl_resume(struct pmc_dev *pmcdev) +{ + lnl_d3_fixup(); + + return cnl_resume(pmcdev); +} + +static int lnl_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info) +{ + lnl_d3_fixup(); + return generic_core_init(pmcdev, pmc_dev_info); +} + +struct pmc_dev_info lnl_pmc_dev = { + .pci_func = 2, + .regmap_list = lnl_pmc_info_list, + .map = &lnl_socm_reg_map, + .sub_req_show = &pmc_core_substate_req_regs_fops, + .suspend = cnl_suspend, + .resume = lnl_resume, + .init = lnl_core_init, + .sub_req = pmc_core_pmt_get_lpm_req, +}; diff --git a/drivers/platform/x86/intel/pmc/mtl.c b/drivers/platform/x86/intel/pmc/mtl.c index eeb3bd8c2502..57508cbf9cd4 100644 --- a/drivers/platform/x86/intel/pmc/mtl.c +++ b/drivers/platform/x86/intel/pmc/mtl.c @@ -8,45 +8,999 @@ * */ +#include <linux/pci.h> #include "core.h" -const struct pmc_reg_map mtl_reg_map = { - .pfear_sts = ext_tgl_pfear_map, +/* PMC SSRAM PMT Telemetry GUIDS */ +#define SOCP_LPM_REQ_GUID 0x2625030 +#define IOEM_LPM_REQ_GUID 0x4357464 +#define IOEP_LPM_REQ_GUID 0x5077612 + +static const u8 MTL_LPM_REG_INDEX[] = {0, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20}; + +/* + * Die Mapping to Product. + * Product SOCDie IOEDie PCHDie + * MTL-M SOC-M IOE-M None + * MTL-P SOC-M IOE-P None + * MTL-S SOC-S IOE-P PCH-S + */ + +const struct pmc_bit_map mtl_socm_pfear_map[] = { + {"PMC", BIT(0)}, + {"OPI", BIT(1)}, + {"SPI", BIT(2)}, + {"XHCI", BIT(3)}, + {"SPA", BIT(4)}, + {"SPB", BIT(5)}, + {"SPC", BIT(6)}, + {"GBE", BIT(7)}, + + {"SATA", BIT(0)}, + {"DSP0", BIT(1)}, + {"DSP1", BIT(2)}, + {"DSP2", BIT(3)}, + {"DSP3", BIT(4)}, + {"SPD", BIT(5)}, + {"LPSS", BIT(6)}, + {"LPC", BIT(7)}, + + {"SMB", BIT(0)}, + {"ISH", BIT(1)}, + {"P2SB", BIT(2)}, + {"NPK_VNN", BIT(3)}, + {"SDX", BIT(4)}, + {"SPE", BIT(5)}, + {"FUSE", BIT(6)}, + {"SBR8", BIT(7)}, + + {"RSVD24", BIT(0)}, + {"OTG", BIT(1)}, + {"EXI", BIT(2)}, + {"CSE", BIT(3)}, + {"CSME_KVM", BIT(4)}, + {"CSME_PMT", BIT(5)}, + {"CSME_CLINK", BIT(6)}, + {"CSME_PTIO", BIT(7)}, + + {"CSME_USBR", BIT(0)}, + {"CSME_SUSRAM", BIT(1)}, + {"CSME_SMT1", BIT(2)}, + {"RSVD35", BIT(3)}, + {"CSME_SMS2", BIT(4)}, + {"CSME_SMS", BIT(5)}, + {"CSME_RTC", BIT(6)}, + {"CSME_PSF", BIT(7)}, + + {"SBR0", BIT(0)}, + {"SBR1", BIT(1)}, + {"SBR2", BIT(2)}, + {"SBR3", BIT(3)}, + {"SBR4", BIT(4)}, + {"SBR5", BIT(5)}, + {"RSVD46", BIT(6)}, + {"PSF1", BIT(7)}, + + {"PSF2", BIT(0)}, + {"PSF3", BIT(1)}, + {"PSF4", BIT(2)}, + {"CNVI", BIT(3)}, + {"UFSX2", BIT(4)}, + {"EMMC", BIT(5)}, + {"SPF", BIT(6)}, + {"SBR6", BIT(7)}, + + {"SBR7", BIT(0)}, + {"NPK_AON", BIT(1)}, + {"HDA4", BIT(2)}, + {"HDA5", BIT(3)}, + {"HDA6", BIT(4)}, + {"PSF6", BIT(5)}, + {"RSVD62", BIT(6)}, + {"RSVD63", BIT(7)}, + {} +}; + +static const struct pmc_bit_map *ext_mtl_socm_pfear_map[] = { + mtl_socm_pfear_map, + NULL +}; + +static const struct pmc_bit_map mtl_socm_ltr_show_map[] = { + {"SOUTHPORT_A", CNP_PMC_LTR_SPA}, + {"SOUTHPORT_B", CNP_PMC_LTR_SPB}, + {"SATA", CNP_PMC_LTR_SATA}, + {"GIGABIT_ETHERNET", CNP_PMC_LTR_GBE}, + {"XHCI", CNP_PMC_LTR_XHCI}, 
+ {"SOUTHPORT_F", ADL_PMC_LTR_SPF}, + {"ME", CNP_PMC_LTR_ME}, + {"SATA1", CNP_PMC_LTR_EVA}, + {"SOUTHPORT_C", CNP_PMC_LTR_SPC}, + {"HD_AUDIO", CNP_PMC_LTR_AZ}, + {"CNV", CNP_PMC_LTR_CNV}, + {"LPSS", CNP_PMC_LTR_LPSS}, + {"SOUTHPORT_D", CNP_PMC_LTR_SPD}, + {"SOUTHPORT_E", CNP_PMC_LTR_SPE}, + {"SATA2", CNP_PMC_LTR_CAM}, + {"ESPI", CNP_PMC_LTR_ESPI}, + {"SCC", CNP_PMC_LTR_SCC}, + {"ISH", CNP_PMC_LTR_ISH}, + {"UFSX2", CNP_PMC_LTR_UFSX2}, + {"EMMC", CNP_PMC_LTR_EMMC}, + {"WIGIG", ICL_PMC_LTR_WIGIG}, + {"THC0", TGL_PMC_LTR_THC0}, + {"THC1", TGL_PMC_LTR_THC1}, + {"SOUTHPORT_G", MTL_PMC_LTR_SPG}, + {"ESE", MTL_PMC_LTR_ESE}, + {"IOE_PMC", MTL_PMC_LTR_IOE_PMC}, + + /* Below two cannot be used for LTR_IGNORE */ + {"CURRENT_PLATFORM", CNP_PMC_LTR_CUR_PLT}, + {"AGGREGATED_SYSTEM", CNP_PMC_LTR_CUR_ASLT}, + {} +}; + +static const struct pmc_bit_map mtl_socm_clocksource_status_map[] = { + {"AON2_OFF_STS", BIT(0)}, + {"AON3_OFF_STS", BIT(1)}, + {"AON4_OFF_STS", BIT(2)}, + {"AON5_OFF_STS", BIT(3)}, + {"AON1_OFF_STS", BIT(4)}, + {"XTAL_LVM_OFF_STS", BIT(5)}, + {"MPFPW1_0_PLL_OFF_STS", BIT(6)}, + {"MPFPW1_1_PLL_OFF_STS", BIT(7)}, + {"USB3_PLL_OFF_STS", BIT(8)}, + {"AON3_SPL_OFF_STS", BIT(9)}, + {"MPFPW2_0_PLL_OFF_STS", BIT(12)}, + {"MPFPW3_0_PLL_OFF_STS", BIT(13)}, + {"XTAL_AGGR_OFF_STS", BIT(17)}, + {"USB2_PLL_OFF_STS", BIT(18)}, + {"FILTER_PLL_OFF_STS", BIT(22)}, + {"ACE_PLL_OFF_STS", BIT(24)}, + {"FABRIC_PLL_OFF_STS", BIT(25)}, + {"SOC_PLL_OFF_STS", BIT(26)}, + {"PCIFAB_PLL_OFF_STS", BIT(27)}, + {"REF_PLL_OFF_STS", BIT(28)}, + {"IMG_PLL_OFF_STS", BIT(29)}, + {"RTC_PLL_OFF_STS", BIT(31)}, + {} +}; + +static const struct pmc_bit_map mtl_socm_power_gating_status_0_map[] = { + {"PMC_PGD0_PG_STS", BIT(0)}, + {"DMI_PGD0_PG_STS", BIT(1)}, + {"ESPISPI_PGD0_PG_STS", BIT(2)}, + {"XHCI_PGD0_PG_STS", BIT(3)}, + {"SPA_PGD0_PG_STS", BIT(4)}, + {"SPB_PGD0_PG_STS", BIT(5)}, + {"SPC_PGD0_PG_STS", BIT(6)}, + {"GBE_PGD0_PG_STS", BIT(7)}, + {"SATA_PGD0_PG_STS", BIT(8)}, + {"PSF13_PGD0_PG_STS", BIT(9)}, + {"SOC_D2D_PGD3_PG_STS", BIT(10)}, + {"MPFPW3_PGD0_PG_STS", BIT(11)}, + {"ESE_PGD0_PG_STS", BIT(12)}, + {"SPD_PGD0_PG_STS", BIT(13)}, + {"LPSS_PGD0_PG_STS", BIT(14)}, + {"LPC_PGD0_PG_STS", BIT(15)}, + {"SMB_PGD0_PG_STS", BIT(16)}, + {"ISH_PGD0_PG_STS", BIT(17)}, + {"P2S_PGD0_PG_STS", BIT(18)}, + {"NPK_PGD0_PG_STS", BIT(19)}, + {"DBG_SBR_PGD0_PG_STS", BIT(20)}, + {"SBRG_PGD0_PG_STS", BIT(21)}, + {"FUSE_PGD0_PG_STS", BIT(22)}, + {"SBR8_PGD0_PG_STS", BIT(23)}, + {"SOC_D2D_PGD2_PG_STS", BIT(24)}, + {"XDCI_PGD0_PG_STS", BIT(25)}, + {"EXI_PGD0_PG_STS", BIT(26)}, + {"CSE_PGD0_PG_STS", BIT(27)}, + {"KVMCC_PGD0_PG_STS", BIT(28)}, + {"PMT_PGD0_PG_STS", BIT(29)}, + {"CLINK_PGD0_PG_STS", BIT(30)}, + {"PTIO_PGD0_PG_STS", BIT(31)}, + {} +}; + +static const struct pmc_bit_map mtl_socm_power_gating_status_1_map[] = { + {"USBR0_PGD0_PG_STS", BIT(0)}, + {"SUSRAM_PGD0_PG_STS", BIT(1)}, + {"SMT1_PGD0_PG_STS", BIT(2)}, + {"FIACPCB_U_PGD0_PG_STS", BIT(3)}, + {"SMS2_PGD0_PG_STS", BIT(4)}, + {"SMS1_PGD0_PG_STS", BIT(5)}, + {"CSMERTC_PGD0_PG_STS", BIT(6)}, + {"CSMEPSF_PGD0_PG_STS", BIT(7)}, + {"SBR0_PGD0_PG_STS", BIT(8)}, + {"SBR1_PGD0_PG_STS", BIT(9)}, + {"SBR2_PGD0_PG_STS", BIT(10)}, + {"SBR3_PGD0_PG_STS", BIT(11)}, + {"U3FPW1_PGD0_PG_STS", BIT(12)}, + {"SBR5_PGD0_PG_STS", BIT(13)}, + {"MPFPW1_PGD0_PG_STS", BIT(14)}, + {"UFSPW1_PGD0_PG_STS", BIT(15)}, + {"FIA_X_PGD0_PG_STS", BIT(16)}, + {"SOC_D2D_PGD0_PG_STS", BIT(17)}, + {"MPFPW2_PGD0_PG_STS", BIT(18)}, + {"CNVI_PGD0_PG_STS", BIT(19)}, + {"UFSX2_PGD0_PG_STS", BIT(20)}, + {"ENDBG_PGD0_PG_STS", 
BIT(21)}, + {"DBG_PSF_PGD0_PG_STS", BIT(22)}, + {"SBR6_PGD0_PG_STS", BIT(23)}, + {"SBR7_PGD0_PG_STS", BIT(24)}, + {"NPK_PGD1_PG_STS", BIT(25)}, + {"FIACPCB_X_PGD0_PG_STS", BIT(26)}, + {"DBC_PGD0_PG_STS", BIT(27)}, + {"FUSEGPSB_PGD0_PG_STS", BIT(28)}, + {"PSF6_PGD0_PG_STS", BIT(29)}, + {"PSF7_PGD0_PG_STS", BIT(30)}, + {"GBETSN1_PGD0_PG_STS", BIT(31)}, + {} +}; + +static const struct pmc_bit_map mtl_socm_power_gating_status_2_map[] = { + {"PSF8_PGD0_PG_STS", BIT(0)}, + {"FIA_PGD0_PG_STS", BIT(1)}, + {"SOC_D2D_PGD1_PG_STS", BIT(2)}, + {"FIA_U_PGD0_PG_STS", BIT(3)}, + {"TAM_PGD0_PG_STS", BIT(4)}, + {"GBETSN_PGD0_PG_STS", BIT(5)}, + {"TBTLSX_PGD0_PG_STS", BIT(6)}, + {"THC0_PGD0_PG_STS", BIT(7)}, + {"THC1_PGD0_PG_STS", BIT(8)}, + {"PMC_PGD1_PG_STS", BIT(9)}, + {"GNA_PGD0_PG_STS", BIT(10)}, + {"ACE_PGD0_PG_STS", BIT(11)}, + {"ACE_PGD1_PG_STS", BIT(12)}, + {"ACE_PGD2_PG_STS", BIT(13)}, + {"ACE_PGD3_PG_STS", BIT(14)}, + {"ACE_PGD4_PG_STS", BIT(15)}, + {"ACE_PGD5_PG_STS", BIT(16)}, + {"ACE_PGD6_PG_STS", BIT(17)}, + {"ACE_PGD7_PG_STS", BIT(18)}, + {"ACE_PGD8_PG_STS", BIT(19)}, + {"FIA_PGS_PGD0_PG_STS", BIT(20)}, + {"FIACPCB_PGS_PGD0_PG_STS", BIT(21)}, + {"FUSEPMSB_PGD0_PG_STS", BIT(22)}, + {} +}; + +const struct pmc_bit_map mtl_socm_d3_status_0_map[] = { + {"LPSS_D3_STS", BIT(3)}, + {"XDCI_D3_STS", BIT(4)}, + {"XHCI_D3_STS", BIT(5)}, + {"SPA_D3_STS", BIT(12)}, + {"SPB_D3_STS", BIT(13)}, + {"SPC_D3_STS", BIT(14)}, + {"SPD_D3_STS", BIT(15)}, + {"ESPISPI_D3_STS", BIT(18)}, + {"SATA_D3_STS", BIT(20)}, + {"PSTH_D3_STS", BIT(21)}, + {"DMI_D3_STS", BIT(22)}, + {} +}; + +const struct pmc_bit_map mtl_socm_d3_status_1_map[] = { + {"GBETSN1_D3_STS", BIT(14)}, + {"GBE_D3_STS", BIT(19)}, + {"ITSS_D3_STS", BIT(23)}, + {"P2S_D3_STS", BIT(24)}, + {"CNVI_D3_STS", BIT(27)}, + {"UFSX2_D3_STS", BIT(28)}, + {} +}; + +static const struct pmc_bit_map mtl_socm_d3_status_2_map[] = { + {"GNA_D3_STS", BIT(0)}, + {"CSMERTC_D3_STS", BIT(1)}, + {"SUSRAM_D3_STS", BIT(2)}, + {"CSE_D3_STS", BIT(4)}, + {"KVMCC_D3_STS", BIT(5)}, + {"USBR0_D3_STS", BIT(6)}, + {"ISH_D3_STS", BIT(7)}, + {"SMT1_D3_STS", BIT(8)}, + {"SMT2_D3_STS", BIT(9)}, + {"SMT3_D3_STS", BIT(10)}, + {"CLINK_D3_STS", BIT(14)}, + {"PTIO_D3_STS", BIT(16)}, + {"PMT_D3_STS", BIT(17)}, + {"SMS1_D3_STS", BIT(18)}, + {"SMS2_D3_STS", BIT(19)}, + {} +}; + +static const struct pmc_bit_map mtl_socm_d3_status_3_map[] = { + {"ESE_D3_STS", BIT(2)}, + {"GBETSN_D3_STS", BIT(13)}, + {"THC0_D3_STS", BIT(14)}, + {"THC1_D3_STS", BIT(15)}, + {"ACE_D3_STS", BIT(23)}, + {} +}; + +const struct pmc_bit_map mtl_socm_vnn_req_status_0_map[] = { + {"LPSS_VNN_REQ_STS", BIT(3)}, + {"FIA_VNN_REQ_STS", BIT(17)}, + {"ESPISPI_VNN_REQ_STS", BIT(18)}, + {} +}; + +const struct pmc_bit_map mtl_socm_vnn_req_status_1_map[] = { + {"NPK_VNN_REQ_STS", BIT(4)}, + {"DFXAGG_VNN_REQ_STS", BIT(8)}, + {"EXI_VNN_REQ_STS", BIT(9)}, + {"P2D_VNN_REQ_STS", BIT(18)}, + {"GBE_VNN_REQ_STS", BIT(19)}, + {"SMB_VNN_REQ_STS", BIT(25)}, + {"LPC_VNN_REQ_STS", BIT(26)}, + {} +}; + +const struct pmc_bit_map mtl_socm_vnn_req_status_2_map[] = { + {"CSMERTC_VNN_REQ_STS", BIT(1)}, + {"CSE_VNN_REQ_STS", BIT(4)}, + {"ISH_VNN_REQ_STS", BIT(7)}, + {"SMT1_VNN_REQ_STS", BIT(8)}, + {"CLINK_VNN_REQ_STS", BIT(14)}, + {"SMS1_VNN_REQ_STS", BIT(18)}, + {"SMS2_VNN_REQ_STS", BIT(19)}, + {"GPIOCOM4_VNN_REQ_STS", BIT(20)}, + {"GPIOCOM3_VNN_REQ_STS", BIT(21)}, + {"GPIOCOM2_VNN_REQ_STS", BIT(22)}, + {"GPIOCOM1_VNN_REQ_STS", BIT(23)}, + {"GPIOCOM0_VNN_REQ_STS", BIT(24)}, + {} +}; + +static const struct pmc_bit_map mtl_socm_vnn_req_status_3_map[] = { + 
{"ESE_VNN_REQ_STS", BIT(2)}, + {"DTS0_VNN_REQ_STS", BIT(7)}, + {"GPIOCOM5_VNN_REQ_STS", BIT(11)}, + {} +}; + +const struct pmc_bit_map mtl_socm_vnn_misc_status_map[] = { + {"CPU_C10_REQ_STS", BIT(0)}, + {"TS_OFF_REQ_STS", BIT(1)}, + {"PNDE_MET_REQ_STS", BIT(2)}, + {"PCIE_DEEP_PM_REQ_STS", BIT(3)}, + {"PMC_CLK_THROTTLE_EN_REQ_STS", BIT(4)}, + {"NPK_VNNAON_REQ_STS", BIT(5)}, + {"VNN_SOC_REQ_STS", BIT(6)}, + {"ISH_VNNAON_REQ_STS", BIT(7)}, + {"IOE_COND_MET_S02I2_0_REQ_STS", BIT(8)}, + {"IOE_COND_MET_S02I2_1_REQ_STS", BIT(9)}, + {"IOE_COND_MET_S02I2_2_REQ_STS", BIT(10)}, + {"PLT_GREATER_REQ_STS", BIT(11)}, + {"PCIE_CLKREQ_REQ_STS", BIT(12)}, + {"PMC_IDLE_FB_OCP_REQ_STS", BIT(13)}, + {"PM_SYNC_STATES_REQ_STS", BIT(14)}, + {"EA_REQ_STS", BIT(15)}, + {"MPHY_CORE_OFF_REQ_STS", BIT(16)}, + {"BRK_EV_EN_REQ_STS", BIT(17)}, + {"AUTO_DEMO_EN_REQ_STS", BIT(18)}, + {"ITSS_CLK_SRC_REQ_STS", BIT(19)}, + {"LPC_CLK_SRC_REQ_STS", BIT(20)}, + {"ARC_IDLE_REQ_STS", BIT(21)}, + {"MPHY_SUS_REQ_STS", BIT(22)}, + {"FIA_DEEP_PM_REQ_STS", BIT(23)}, + {"UXD_CONNECTED_REQ_STS", BIT(24)}, + {"ARC_INTERRUPT_WAKE_REQ_STS", BIT(25)}, + {"USB2_VNNAON_ACT_REQ_STS", BIT(26)}, + {"PRE_WAKE0_REQ_STS", BIT(27)}, + {"PRE_WAKE1_REQ_STS", BIT(28)}, + {"PRE_WAKE2_EN_REQ_STS", BIT(29)}, + {"WOV_REQ_STS", BIT(30)}, + {"CNVI_V1P05_REQ_STS", BIT(31)}, + {} +}; + +const struct pmc_bit_map mtl_socm_signal_status_map[] = { + {"LSX_Wake0_En_STS", BIT(0)}, + {"LSX_Wake0_Pol_STS", BIT(1)}, + {"LSX_Wake1_En_STS", BIT(2)}, + {"LSX_Wake1_Pol_STS", BIT(3)}, + {"LSX_Wake2_En_STS", BIT(4)}, + {"LSX_Wake2_Pol_STS", BIT(5)}, + {"LSX_Wake3_En_STS", BIT(6)}, + {"LSX_Wake3_Pol_STS", BIT(7)}, + {"LSX_Wake4_En_STS", BIT(8)}, + {"LSX_Wake4_Pol_STS", BIT(9)}, + {"LSX_Wake5_En_STS", BIT(10)}, + {"LSX_Wake5_Pol_STS", BIT(11)}, + {"LSX_Wake6_En_STS", BIT(12)}, + {"LSX_Wake6_Pol_STS", BIT(13)}, + {"LSX_Wake7_En_STS", BIT(14)}, + {"LSX_Wake7_Pol_STS", BIT(15)}, + {"LPSS_Wake0_En_STS", BIT(16)}, + {"LPSS_Wake0_Pol_STS", BIT(17)}, + {"LPSS_Wake1_En_STS", BIT(18)}, + {"LPSS_Wake1_Pol_STS", BIT(19)}, + {"Int_Timer_SS_Wake0_En_STS", BIT(20)}, + {"Int_Timer_SS_Wake0_Pol_STS", BIT(21)}, + {"Int_Timer_SS_Wake1_En_STS", BIT(22)}, + {"Int_Timer_SS_Wake1_Pol_STS", BIT(23)}, + {"Int_Timer_SS_Wake2_En_STS", BIT(24)}, + {"Int_Timer_SS_Wake2_Pol_STS", BIT(25)}, + {"Int_Timer_SS_Wake3_En_STS", BIT(26)}, + {"Int_Timer_SS_Wake3_Pol_STS", BIT(27)}, + {"Int_Timer_SS_Wake4_En_STS", BIT(28)}, + {"Int_Timer_SS_Wake4_Pol_STS", BIT(29)}, + {"Int_Timer_SS_Wake5_En_STS", BIT(30)}, + {"Int_Timer_SS_Wake5_Pol_STS", BIT(31)}, + {} +}; + +static const struct pmc_bit_map *mtl_socm_lpm_maps[] = { + mtl_socm_clocksource_status_map, + mtl_socm_power_gating_status_0_map, + mtl_socm_power_gating_status_1_map, + mtl_socm_power_gating_status_2_map, + mtl_socm_d3_status_0_map, + mtl_socm_d3_status_1_map, + mtl_socm_d3_status_2_map, + mtl_socm_d3_status_3_map, + mtl_socm_vnn_req_status_0_map, + mtl_socm_vnn_req_status_1_map, + mtl_socm_vnn_req_status_2_map, + mtl_socm_vnn_req_status_3_map, + mtl_socm_vnn_misc_status_map, + mtl_socm_signal_status_map, + NULL +}; + +const struct pmc_reg_map mtl_socm_reg_map = { + .pfear_sts = ext_mtl_socm_pfear_map, .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET, .slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP, - .ltr_show_sts = adl_ltr_show_map, + .ltr_show_sts = mtl_socm_ltr_show_map, .msr_sts = msr_map, .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET, - .regmap_length = CNP_PMC_MMIO_REG_LEN, + .regmap_length = MTL_SOC_PMC_MMIO_REG_LEN, .ppfear0_offset = 
CNP_PMC_HOST_PPFEAR0A, - .ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES, + .ppfear_buckets = MTL_SOCM_PPFEAR_NUM_ENTRIES, .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET, .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT, - .ltr_ignore_max = ADL_NUM_IP_IGN_ALLOWED, - .lpm_num_modes = ADL_LPM_NUM_MODES, .lpm_num_maps = ADL_LPM_NUM_MAPS, + .ltr_ignore_max = MTL_SOCM_NUM_IP_IGN_ALLOWED, .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2, .etr3_offset = ETR3_OFFSET, .lpm_sts_latch_en_offset = MTL_LPM_STATUS_LATCH_EN_OFFSET, .lpm_priority_offset = MTL_LPM_PRI_OFFSET, .lpm_en_offset = MTL_LPM_EN_OFFSET, .lpm_residency_offset = MTL_LPM_RESIDENCY_OFFSET, - .lpm_sts = adl_lpm_maps, + .lpm_sts = mtl_socm_lpm_maps, .lpm_status_offset = MTL_LPM_STATUS_OFFSET, .lpm_live_status_offset = MTL_LPM_LIVE_STATUS_OFFSET, + .lpm_reg_index = MTL_LPM_REG_INDEX, + .lpm_req_guid = SOCP_LPM_REQ_GUID, +}; + +static const struct pmc_bit_map mtl_ioep_pfear_map[] = { + {"PMC_0", BIT(0)}, + {"OPI", BIT(1)}, + {"TCSS", BIT(2)}, + {"RSVD3", BIT(3)}, + {"SPA", BIT(4)}, + {"SPB", BIT(5)}, + {"SPC", BIT(6)}, + {"IOE_D2D_3", BIT(7)}, + + {"RSVD8", BIT(0)}, + {"RSVD9", BIT(1)}, + {"SPE", BIT(2)}, + {"RSVD11", BIT(3)}, + {"RSVD12", BIT(4)}, + {"SPD", BIT(5)}, + {"ACE_7", BIT(6)}, + {"RSVD15", BIT(7)}, + + {"ACE_0", BIT(0)}, + {"FIACPCB_P", BIT(1)}, + {"P2S", BIT(2)}, + {"RSVD19", BIT(3)}, + {"ACE_8", BIT(4)}, + {"IOE_D2D_0", BIT(5)}, + {"FUSE", BIT(6)}, + {"RSVD23", BIT(7)}, + + {"FIACPCB_P5", BIT(0)}, + {"ACE_3", BIT(1)}, + {"RSF5", BIT(2)}, + {"ACE_2", BIT(3)}, + {"ACE_4", BIT(4)}, + {"RSVD29", BIT(5)}, + {"RSF10", BIT(6)}, + {"MPFPW5", BIT(7)}, + + {"PSF9", BIT(0)}, + {"MPFPW4", BIT(1)}, + {"RSVD34", BIT(2)}, + {"RSVD35", BIT(3)}, + {"RSVD36", BIT(4)}, + {"RSVD37", BIT(5)}, + {"RSVD38", BIT(6)}, + {"RSVD39", BIT(7)}, + + {"SBR0", BIT(0)}, + {"SBR1", BIT(1)}, + {"SBR2", BIT(2)}, + {"SBR3", BIT(3)}, + {"SBR4", BIT(4)}, + {"SBR5", BIT(5)}, + {"RSVD46", BIT(6)}, + {"RSVD47", BIT(7)}, + + {"RSVD48", BIT(0)}, + {"FIA_P5", BIT(1)}, + {"RSVD50", BIT(2)}, + {"RSVD51", BIT(3)}, + {"RSVD52", BIT(4)}, + {"RSVD53", BIT(5)}, + {"RSVD54", BIT(6)}, + {"ACE_1", BIT(7)}, + + {"RSVD56", BIT(0)}, + {"ACE_5", BIT(1)}, + {"RSVD58", BIT(2)}, + {"G5FPW1", BIT(3)}, + {"RSVD60", BIT(4)}, + {"ACE_6", BIT(5)}, + {"RSVD62", BIT(6)}, + {"GBETSN1", BIT(7)}, + + {"RSVD64", BIT(0)}, + {"FIA", BIT(1)}, + {"RSVD66", BIT(2)}, + {"FIA_P", BIT(3)}, + {"TAM", BIT(4)}, + {"GBETSN", BIT(5)}, + {"IOE_D2D_2", BIT(6)}, + {"IOE_D2D_1", BIT(7)}, + + {"SPF", BIT(0)}, + {"PMC_1", BIT(1)}, + {} +}; + +static const struct pmc_bit_map *ext_mtl_ioep_pfear_map[] = { + mtl_ioep_pfear_map, + NULL +}; + +static const struct pmc_bit_map mtl_ioep_ltr_show_map[] = { + {"SOUTHPORT_A", CNP_PMC_LTR_SPA}, + {"SOUTHPORT_B", CNP_PMC_LTR_SPB}, + {"SATA", CNP_PMC_LTR_SATA}, + {"GIGABIT_ETHERNET", CNP_PMC_LTR_GBE}, + {"XHCI", CNP_PMC_LTR_XHCI}, + {"SOUTHPORT_F", ADL_PMC_LTR_SPF}, + {"ME", CNP_PMC_LTR_ME}, + {"SATA1", CNP_PMC_LTR_EVA}, + {"SOUTHPORT_C", CNP_PMC_LTR_SPC}, + {"HD_AUDIO", CNP_PMC_LTR_AZ}, + {"CNV", CNP_PMC_LTR_CNV}, + {"LPSS", CNP_PMC_LTR_LPSS}, + {"SOUTHPORT_D", CNP_PMC_LTR_SPD}, + {"SOUTHPORT_E", CNP_PMC_LTR_SPE}, + {"SATA2", CNP_PMC_LTR_CAM}, + {"ESPI", CNP_PMC_LTR_ESPI}, + {"SCC", CNP_PMC_LTR_SCC}, + {"Reserved", MTL_PMC_LTR_RESERVED}, + {"UFSX2", CNP_PMC_LTR_UFSX2}, + {"EMMC", CNP_PMC_LTR_EMMC}, + {"WIGIG", ICL_PMC_LTR_WIGIG}, + {"THC0", TGL_PMC_LTR_THC0}, + {"THC1", TGL_PMC_LTR_THC1}, + {"SOUTHPORT_G", MTL_PMC_LTR_SPG}, + + /* Below two cannot be used for LTR_IGNORE */ + 
{"CURRENT_PLATFORM", CNP_PMC_LTR_CUR_PLT}, + {"AGGREGATED_SYSTEM", CNP_PMC_LTR_CUR_ASLT}, + {} +}; + +static const struct pmc_bit_map mtl_ioep_clocksource_status_map[] = { + {"AON2_OFF_STS", BIT(0)}, + {"AON3_OFF_STS", BIT(1)}, + {"AON4_OFF_STS", BIT(2)}, + {"AON5_OFF_STS", BIT(3)}, + {"AON1_OFF_STS", BIT(4)}, + {"TBT_PLL_OFF_STS", BIT(5)}, + {"TMU_PLL_OFF_STS", BIT(6)}, + {"BCLK_PLL_OFF_STS", BIT(7)}, + {"D2D_PLL_OFF_STS", BIT(8)}, + {"AON3_SPL_OFF_STS", BIT(9)}, + {"MPFPW4_0_PLL_OFF_STS", BIT(12)}, + {"MPFPW5_0_PLL_OFF_STS", BIT(13)}, + {"G5FPW_0_PLL_OFF_STS", BIT(14)}, + {"G5FPW_1_PLL_OFF_STS", BIT(15)}, + {"XTAL_AGGR_OFF_STS", BIT(17)}, + {"FABRIC_PLL_OFF_STS", BIT(25)}, + {"SOC_PLL_OFF_STS", BIT(26)}, + {"REF_PLL_OFF_STS", BIT(28)}, + {"RTC_PLL_OFF_STS", BIT(31)}, + {} +}; + +static const struct pmc_bit_map mtl_ioep_power_gating_status_0_map[] = { + {"PMC_PGD0_PG_STS", BIT(0)}, + {"DMI_PGD0_PG_STS", BIT(1)}, + {"TCSS_PGD0_PG_STS", BIT(2)}, + {"SPA_PGD0_PG_STS", BIT(4)}, + {"SPB_PGD0_PG_STS", BIT(5)}, + {"SPC_PGD0_PG_STS", BIT(6)}, + {"IOE_D2D_PGD3_PG_STS", BIT(7)}, + {"SPE_PGD0_PG_STS", BIT(10)}, + {"SPD_PGD0_PG_STS", BIT(13)}, + {"ACE_PGD7_PG_STS", BIT(14)}, + {"ACE_PGD0_PG_STS", BIT(16)}, + {"FIACPCB_P_PGD0_PG_STS", BIT(17)}, + {"P2S_PGD0_PG_STS", BIT(18)}, + {"ACE_PGD8_PG_STS", BIT(20)}, + {"IOE_D2D_PGD0_PG_STS", BIT(21)}, + {"FUSE_PGD0_PG_STS", BIT(22)}, + {"FIACPCB_P5_PGD0_PG_STS", BIT(24)}, + {"ACE_PGD3_PG_STS", BIT(25)}, + {"PSF5_PGD0_PG_STS", BIT(26)}, + {"ACE_PGD2_PG_STS", BIT(27)}, + {"ACE_PGD4_PG_STS", BIT(28)}, + {"PSF10_PGD0_PG_STS", BIT(30)}, + {"MPFPW5_PGD0_PG_STS", BIT(31)}, + {} +}; + +static const struct pmc_bit_map mtl_ioep_power_gating_status_1_map[] = { + {"PSF9_PGD0_PG_STS", BIT(0)}, + {"MPFPW4_PGD0_PG_STS", BIT(1)}, + {"SBR0_PGD0_PG_STS", BIT(8)}, + {"SBR1_PGD0_PG_STS", BIT(9)}, + {"SBR2_PGD0_PG_STS", BIT(10)}, + {"SBR3_PGD0_PG_STS", BIT(11)}, + {"SBR4_PGD0_PG_STS", BIT(12)}, + {"SBR5_PGD0_PG_STS", BIT(13)}, + {"FIA_P5_PGD0_PG_STS", BIT(17)}, + {"ACE_PGD1_PGD0_PG_STS", BIT(23)}, + {"ACE_PGD5_PGD1_PG_STS", BIT(25)}, + {"G5FPW1_PGD0_PG_STS", BIT(27)}, + {"ACE_PGD6_PG_STS", BIT(29)}, + {"GBETSN1_PGD0_PG_STS", BIT(31)}, + {} +}; + +static const struct pmc_bit_map mtl_ioep_power_gating_status_2_map[] = { + {"FIA_PGD0_PG_STS", BIT(1)}, + {"FIA_P_PGD0_PG_STS", BIT(3)}, + {"TAM_PGD0_PG_STS", BIT(4)}, + {"GBETSN_PGD0_PG_STS", BIT(5)}, + {"IOE_D2D_PGD2_PG_STS", BIT(6)}, + {"IOE_D2D_PGD1_PG_STS", BIT(7)}, + {"SPF_PGD0_PG_STS", BIT(8)}, + {"PMC_PGD1_PG_STS", BIT(9)}, + {} }; -void mtl_core_configure(struct pmc_dev *pmcdev) +static const struct pmc_bit_map mtl_ioep_d3_status_0_map[] = { + {"SPF_D3_STS", BIT(0)}, + {"SPA_D3_STS", BIT(12)}, + {"SPB_D3_STS", BIT(13)}, + {"SPC_D3_STS", BIT(14)}, + {"SPD_D3_STS", BIT(15)}, + {"SPE_D3_STS", BIT(16)}, + {"DMI_D3_STS", BIT(22)}, + {} +}; + +static const struct pmc_bit_map mtl_ioep_d3_status_1_map[] = { + {"GBETSN1_D3_STS", BIT(14)}, + {"P2S_D3_STS", BIT(24)}, + {} +}; + +static const struct pmc_bit_map mtl_ioep_d3_status_2_map[] = { + {} +}; + +static const struct pmc_bit_map mtl_ioep_d3_status_3_map[] = { + {"GBETSN_D3_STS", BIT(13)}, + {"ACE_D3_STS", BIT(23)}, + {} +}; + +static const struct pmc_bit_map mtl_ioep_vnn_req_status_0_map[] = { + {"FIA_VNN_REQ_STS", BIT(17)}, + {} +}; + +static const struct pmc_bit_map mtl_ioep_vnn_req_status_1_map[] = { + {"DFXAGG_VNN_REQ_STS", BIT(8)}, + {} +}; + +static const struct pmc_bit_map mtl_ioep_vnn_req_status_2_map[] = { + {} +}; + +static const struct pmc_bit_map 
mtl_ioep_vnn_req_status_3_map[] = { + {"DTS0_VNN_REQ_STS", BIT(7)}, + {"DISP_VNN_REQ_STS", BIT(19)}, + {} +}; + +static const struct pmc_bit_map mtl_ioep_vnn_misc_status_map[] = { + {"CPU_C10_REQ_STS", BIT(0)}, + {"TS_OFF_REQ_STS", BIT(1)}, + {"PNDE_MET_REQ_STS", BIT(2)}, + {"PCIE_DEEP_PM_REQ_STS", BIT(3)}, + {"PMC_CLK_THROTTLE_EN_REQ_STS", BIT(4)}, + {"NPK_VNNAON_REQ_STS", BIT(5)}, + {"VNN_SOC_REQ_STS", BIT(6)}, + {"USB_DEVICE_ATTACHED_REQ_STS", BIT(8)}, + {"FIA_EXIT_REQ_STS", BIT(9)}, + {"USB2_SUS_PG_REQ_STS", BIT(10)}, + {"PLT_GREATER_REQ_STS", BIT(11)}, + {"PCIE_CLKREQ_REQ_STS", BIT(12)}, + {"PMC_IDLE_FB_OCP_REQ_STS", BIT(13)}, + {"PM_SYNC_STATES_REQ_STS", BIT(14)}, + {"EA_REQ_STS", BIT(15)}, + {"MPHY_CORE_OFF_REQ_STS", BIT(16)}, + {"BRK_EV_EN_REQ_STS", BIT(17)}, + {"AUTO_DEMO_EN_REQ_STS", BIT(18)}, + {"ITSS_CLK_SRC_REQ_STS", BIT(19)}, + {"LPC_CLK_SRC_REQ_STS", BIT(20)}, + {"ARC_IDLE_REQ_STS", BIT(21)}, + {"MPHY_SUS_REQ_STS", BIT(22)}, + {"FIA_DEEP_PM_REQ_STS", BIT(23)}, + {"UXD_CONNECTED_REQ_STS", BIT(24)}, + {"ARC_INTERRUPT_WAKE_REQ_STS", BIT(25)}, + {"USB2_VNNAON_ACT_REQ_STS", BIT(26)}, + {"PRE_WAKE0_REQ_STS", BIT(27)}, + {"PRE_WAKE1_REQ_STS", BIT(28)}, + {"PRE_WAKE2_EN_REQ_STS", BIT(29)}, + {"WOV_REQ_STS", BIT(30)}, + {"CNVI_V1P05_REQ_STS", BIT(31)}, + {} +}; + +static const struct pmc_bit_map *mtl_ioep_lpm_maps[] = { + mtl_ioep_clocksource_status_map, + mtl_ioep_power_gating_status_0_map, + mtl_ioep_power_gating_status_1_map, + mtl_ioep_power_gating_status_2_map, + mtl_ioep_d3_status_0_map, + mtl_ioep_d3_status_1_map, + mtl_ioep_d3_status_2_map, + mtl_ioep_d3_status_3_map, + mtl_ioep_vnn_req_status_0_map, + mtl_ioep_vnn_req_status_1_map, + mtl_ioep_vnn_req_status_2_map, + mtl_ioep_vnn_req_status_3_map, + mtl_ioep_vnn_misc_status_map, + mtl_socm_signal_status_map, + NULL +}; + +const struct pmc_reg_map mtl_ioep_reg_map = { + .regmap_length = MTL_IOE_PMC_MMIO_REG_LEN, + .pfear_sts = ext_mtl_ioep_pfear_map, + .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A, + .ppfear_buckets = MTL_IOE_PPFEAR_NUM_ENTRIES, + .lpm_status_offset = MTL_LPM_STATUS_OFFSET, + .lpm_live_status_offset = MTL_LPM_LIVE_STATUS_OFFSET, + .lpm_sts = mtl_ioep_lpm_maps, + .ltr_show_sts = mtl_ioep_ltr_show_map, + .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET, + .ltr_ignore_max = ADL_NUM_IP_IGN_ALLOWED, + .lpm_num_maps = ADL_LPM_NUM_MAPS, + .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2, + .lpm_residency_offset = MTL_LPM_RESIDENCY_OFFSET, + .lpm_priority_offset = MTL_LPM_PRI_OFFSET, + .lpm_en_offset = MTL_LPM_EN_OFFSET, + .lpm_sts_latch_en_offset = MTL_LPM_STATUS_LATCH_EN_OFFSET, + .lpm_reg_index = MTL_LPM_REG_INDEX, + .lpm_req_guid = IOEP_LPM_REQ_GUID, +}; + +static const struct pmc_bit_map mtl_ioem_pfear_map[] = { + {"PMC_0", BIT(0)}, + {"OPI", BIT(1)}, + {"TCSS", BIT(2)}, + {"RSVD3", BIT(3)}, + {"SPA", BIT(4)}, + {"SPB", BIT(5)}, + {"SPC", BIT(6)}, + {"IOE_D2D_3", BIT(7)}, + + {"RSVD8", BIT(0)}, + {"RSVD9", BIT(1)}, + {"SPE", BIT(2)}, + {"RSVD11", BIT(3)}, + {"RSVD12", BIT(4)}, + {"SPD", BIT(5)}, + {"ACE_7", BIT(6)}, + {"RSVD15", BIT(7)}, + + {"ACE_0", BIT(0)}, + {"FIACPCB_P", BIT(1)}, + {"P2S", BIT(2)}, + {"RSVD19", BIT(3)}, + {"ACE_8", BIT(4)}, + {"IOE_D2D_0", BIT(5)}, + {"FUSE", BIT(6)}, + {"RSVD23", BIT(7)}, + + {"FIACPCB_P5", BIT(0)}, + {"ACE_3", BIT(1)}, + {"RSF5", BIT(2)}, + {"ACE_2", BIT(3)}, + {"ACE_4", BIT(4)}, + {"RSVD29", BIT(5)}, + {"RSF10", BIT(6)}, + {"MPFPW5", BIT(7)}, + + {"PSF9", BIT(0)}, + {"MPFPW4", BIT(1)}, + {"RSVD34", BIT(2)}, + {"RSVD35", BIT(3)}, + {"RSVD36", BIT(4)}, + {"RSVD37", 
BIT(5)}, + {"RSVD38", BIT(6)}, + {"RSVD39", BIT(7)}, + + {"SBR0", BIT(0)}, + {"SBR1", BIT(1)}, + {"SBR2", BIT(2)}, + {"SBR3", BIT(3)}, + {"SBR4", BIT(4)}, + {"RSVD45", BIT(5)}, + {"RSVD46", BIT(6)}, + {"RSVD47", BIT(7)}, + + {"RSVD48", BIT(0)}, + {"FIA_P5", BIT(1)}, + {"RSVD50", BIT(2)}, + {"RSVD51", BIT(3)}, + {"RSVD52", BIT(4)}, + {"RSVD53", BIT(5)}, + {"RSVD54", BIT(6)}, + {"ACE_1", BIT(7)}, + + {"RSVD56", BIT(0)}, + {"ACE_5", BIT(1)}, + {"RSVD58", BIT(2)}, + {"G5FPW1", BIT(3)}, + {"RSVD60", BIT(4)}, + {"ACE_6", BIT(5)}, + {"RSVD62", BIT(6)}, + {"GBETSN1", BIT(7)}, + + {"RSVD64", BIT(0)}, + {"FIA", BIT(1)}, + {"RSVD66", BIT(2)}, + {"FIA_P", BIT(3)}, + {"TAM", BIT(4)}, + {"GBETSN", BIT(5)}, + {"IOE_D2D_2", BIT(6)}, + {"IOE_D2D_1", BIT(7)}, + + {"SPF", BIT(0)}, + {"PMC_1", BIT(1)}, + {} +}; + +static const struct pmc_bit_map *ext_mtl_ioem_pfear_map[] = { + mtl_ioem_pfear_map, + NULL +}; + +static const struct pmc_bit_map mtl_ioem_power_gating_status_1_map[] = { + {"PSF9_PGD0_PG_STS", BIT(0)}, + {"MPFPW4_PGD0_PG_STS", BIT(1)}, + {"SBR0_PGD0_PG_STS", BIT(8)}, + {"SBR1_PGD0_PG_STS", BIT(9)}, + {"SBR2_PGD0_PG_STS", BIT(10)}, + {"SBR3_PGD0_PG_STS", BIT(11)}, + {"SBR4_PGD0_PG_STS", BIT(12)}, + {"FIA_P5_PGD0_PG_STS", BIT(17)}, + {"ACE_PGD1_PGD0_PG_STS", BIT(23)}, + {"ACE_PGD5_PGD1_PG_STS", BIT(25)}, + {"G5FPW1_PGD0_PG_STS", BIT(27)}, + {"ACE_PGD6_PG_STS", BIT(29)}, + {"GBETSN1_PGD0_PG_STS", BIT(31)}, + {} +}; + +static const struct pmc_bit_map *mtl_ioem_lpm_maps[] = { + mtl_ioep_clocksource_status_map, + mtl_ioep_power_gating_status_0_map, + mtl_ioem_power_gating_status_1_map, + mtl_ioep_power_gating_status_2_map, + mtl_ioep_d3_status_0_map, + mtl_ioep_d3_status_1_map, + mtl_ioep_d3_status_2_map, + mtl_ioep_d3_status_3_map, + mtl_ioep_vnn_req_status_0_map, + mtl_ioep_vnn_req_status_1_map, + mtl_ioep_vnn_req_status_2_map, + mtl_ioep_vnn_req_status_3_map, + mtl_ioep_vnn_misc_status_map, + mtl_socm_signal_status_map, + NULL +}; + +static const struct pmc_reg_map mtl_ioem_reg_map = { + .regmap_length = MTL_IOE_PMC_MMIO_REG_LEN, + .pfear_sts = ext_mtl_ioem_pfear_map, + .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A, + .ppfear_buckets = MTL_IOE_PPFEAR_NUM_ENTRIES, + .lpm_status_offset = MTL_LPM_STATUS_OFFSET, + .lpm_live_status_offset = MTL_LPM_LIVE_STATUS_OFFSET, + .lpm_sts = mtl_ioem_lpm_maps, + .ltr_show_sts = mtl_ioep_ltr_show_map, + .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET, + .ltr_ignore_max = ADL_NUM_IP_IGN_ALLOWED, + .lpm_sts_latch_en_offset = MTL_LPM_STATUS_LATCH_EN_OFFSET, + .lpm_num_maps = ADL_LPM_NUM_MAPS, + .lpm_priority_offset = MTL_LPM_PRI_OFFSET, + .lpm_en_offset = MTL_LPM_EN_OFFSET, + .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2, + .lpm_residency_offset = MTL_LPM_RESIDENCY_OFFSET, + .lpm_reg_index = MTL_LPM_REG_INDEX, + .lpm_req_guid = IOEM_LPM_REQ_GUID, +}; + +static struct pmc_info mtl_pmc_info_list[] = { + { + .devid = PMC_DEVID_MTL_SOCM, + .map = &mtl_socm_reg_map, + }, + { + .devid = PMC_DEVID_MTL_IOEP, + .map = &mtl_ioep_reg_map, + }, + { + .devid = PMC_DEVID_MTL_IOEM, + .map = &mtl_ioem_reg_map + }, + {} +}; + +#define MTL_GNA_PCI_DEV 0x7e4c +#define MTL_IPU_PCI_DEV 0x7d19 +#define MTL_VPU_PCI_DEV 0x7d1d +/* + * Set power state of select devices that do not have drivers to D3 + * so that they do not block Package C entry. + */ +static void mtl_d3_fixup(void) { - /* Due to a hardware limitation, the GBE LTR blocks PC10 - * when a cable is attached. Tell the PMC to ignore it. 
- */ - dev_dbg(&pmcdev->pdev->dev, "ignoring GBE LTR\n"); - pmc_core_send_ltr_ignore(pmcdev, 3); + pmc_core_set_device_d3(MTL_GNA_PCI_DEV); + pmc_core_set_device_d3(MTL_IPU_PCI_DEV); + pmc_core_set_device_d3(MTL_VPU_PCI_DEV); } -void mtl_core_init(struct pmc_dev *pmcdev) +static int mtl_resume(struct pmc_dev *pmcdev) { - pmcdev->map = &mtl_reg_map; - pmcdev->core_configure = mtl_core_configure; + mtl_d3_fixup(); + + return cnl_resume(pmcdev); } + +static int mtl_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info) +{ + mtl_d3_fixup(); + return generic_core_init(pmcdev, pmc_dev_info); +} + +static u32 MTL_PMT_DMU_GUIDS[] = {MTL_PMT_DMU_GUID, 0x0}; +struct pmc_dev_info mtl_pmc_dev = { + .pci_func = 2, + .dmu_guids = MTL_PMT_DMU_GUIDS, + .regmap_list = mtl_pmc_info_list, + .map = &mtl_socm_reg_map, + .sub_req_show = &pmc_core_substate_req_regs_fops, + .suspend = cnl_suspend, + .resume = mtl_resume, + .init = mtl_core_init, + .sub_req = pmc_core_pmt_get_lpm_req, +}; diff --git a/drivers/platform/x86/intel/pmc/pltdrv.c b/drivers/platform/x86/intel/pmc/pltdrv.c index ddfba38c2104..3141d6cbc41b 100644 --- a/drivers/platform/x86/intel/pmc/pltdrv.c +++ b/drivers/platform/x86/intel/pmc/pltdrv.c @@ -35,14 +35,14 @@ static struct platform_device *pmc_core_device; * other list may grow, but this list should not. */ static const struct x86_cpu_id intel_pmc_core_platform_ids[] = { - X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &pmc_core_device), - X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &pmc_core_device), - X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &pmc_core_device), - X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &pmc_core_device), - X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, &pmc_core_device), - X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &pmc_core_device), - X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &pmc_core_device), - X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &pmc_core_device), + X86_MATCH_VFM(INTEL_SKYLAKE_L, &pmc_core_device), + X86_MATCH_VFM(INTEL_SKYLAKE, &pmc_core_device), + X86_MATCH_VFM(INTEL_KABYLAKE_L, &pmc_core_device), + X86_MATCH_VFM(INTEL_KABYLAKE, &pmc_core_device), + X86_MATCH_VFM(INTEL_CANNONLAKE_L, &pmc_core_device), + X86_MATCH_VFM(INTEL_ICELAKE_L, &pmc_core_device), + X86_MATCH_VFM(INTEL_COMETLAKE, &pmc_core_device), + X86_MATCH_VFM(INTEL_COMETLAKE_L, &pmc_core_device), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_platform_ids); @@ -86,4 +86,5 @@ static void __exit pmc_core_platform_exit(void) module_init(pmc_core_platform_init); module_exit(pmc_core_platform_exit); +MODULE_DESCRIPTION("Intel PMC Core platform driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/x86/intel/pmc/ptl.c b/drivers/platform/x86/intel/pmc/ptl.c new file mode 100644 index 000000000000..1f48e2bbc699 --- /dev/null +++ b/drivers/platform/x86/intel/pmc/ptl.c @@ -0,0 +1,580 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This file contains platform specific structure definitions + * and init function used by Panther Lake PCH. + * + * Copyright (c) 2025, Intel Corporation. + */ + +#include <linux/pci.h> + +#include "core.h" + +/* PMC SSRAM PMT Telemetry GUIDS */ +#define PCDP_LPM_REQ_GUID 0x47179370 + +/* + * Die Mapping to Product. 
+ * Product PCDDie + * PTL-H PCD-H + * PTL-P PCD-P + * PTL-U PCD-P + */ + +static const struct pmc_bit_map ptl_pcdp_pfear_map[] = { + {"PMC_0", BIT(0)}, + {"FUSE_OSSE", BIT(1)}, + {"ESPISPI", BIT(2)}, + {"XHCI", BIT(3)}, + {"SPA", BIT(4)}, + {"SPB", BIT(5)}, + {"MPFPW2", BIT(6)}, + {"GBE", BIT(7)}, + + {"SBR16B20", BIT(0)}, + {"SBR8B20", BIT(1)}, + {"SBR16B21", BIT(2)}, + {"DBG_SBR16B", BIT(3)}, + {"OSSE_HOTHAM", BIT(4)}, + {"D2D_DISP_1", BIT(5)}, + {"LPSS", BIT(6)}, + {"LPC", BIT(7)}, + + {"SMB", BIT(0)}, + {"ISH", BIT(1)}, + {"SBR16B2", BIT(2)}, + {"NPK_0", BIT(3)}, + {"D2D_NOC_1", BIT(4)}, + {"SBR8B2", BIT(5)}, + {"FUSE", BIT(6)}, + {"SBR16B0", BIT(7)}, + + {"PSF0", BIT(0)}, + {"XDCI", BIT(1)}, + {"EXI", BIT(2)}, + {"CSE", BIT(3)}, + {"KVMCC", BIT(4)}, + {"PMT", BIT(5)}, + {"CLINK", BIT(6)}, + {"PTIO", BIT(7)}, + + {"USBR0", BIT(0)}, + {"SUSRAM", BIT(1)}, + {"SMT1", BIT(2)}, + {"MPFPW1", BIT(3)}, + {"SMS2", BIT(4)}, + {"SMS1", BIT(5)}, + {"CSMERTC", BIT(6)}, + {"CSMEPSF", BIT(7)}, + + {"D2D_NOC_0", BIT(0)}, + {"ESE", BIT(1)}, + {"P2SB8B", BIT(2)}, + {"SBR16B7", BIT(3)}, + {"SBR16B3", BIT(4)}, + {"OSSE_SMT1", BIT(5)}, + {"D2D_DISP", BIT(6)}, + {"DBG_SBR", BIT(7)}, + + {"U3FPW1", BIT(0)}, + {"FIA_X", BIT(1)}, + {"PSF4", BIT(2)}, + {"CNVI", BIT(3)}, + {"UFSX2", BIT(4)}, + {"ENDBG", BIT(5)}, + {"DBC", BIT(6)}, + {"FIA_PG", BIT(7)}, + + {"D2D_IPU", BIT(0)}, + {"NPK1", BIT(1)}, + {"FIACPCB_X", BIT(2)}, + {"SBR8B4", BIT(3)}, + {"DBG_PSF", BIT(4)}, + {"PSF6", BIT(5)}, + {"UFSPW1", BIT(6)}, + {"FIA_U", BIT(7)}, + + {"PSF8", BIT(0)}, + {"SBR16B4", BIT(1)}, + {"SBR16B5", BIT(2)}, + {"FIACPCB_U", BIT(3)}, + {"TAM", BIT(4)}, + {"D2D_NOC_2", BIT(5)}, + {"TBTLSX", BIT(6)}, + {"THC0", BIT(7)}, + + {"THC1", BIT(0)}, + {"PMC_1", BIT(1)}, + {"SBR8B1", BIT(2)}, + {"TCSS", BIT(3)}, + {"DISP_PGA", BIT(4)}, + {"SBR16B1", BIT(5)}, + {"SBRG", BIT(6)}, + {"PSF5", BIT(7)}, + + {"P2SB16B", BIT(0)}, + {"ACE_0", BIT(1)}, + {"ACE_1", BIT(2)}, + {"ACE_2", BIT(3)}, + {"ACE_3", BIT(4)}, + {"ACE_4", BIT(5)}, + {"ACE_5", BIT(6)}, + {"ACE_6", BIT(7)}, + + {"ACE_7", BIT(0)}, + {"ACE_8", BIT(1)}, + {"ACE_9", BIT(2)}, + {"ACE_10", BIT(3)}, + {"FIACPCB_PG", BIT(4)}, + {"SBR16B6", BIT(5)}, + {"OSSE", BIT(6)}, + {"SBR8B0", BIT(7)}, + {} +}; + +static const struct pmc_bit_map *ext_ptl_pcdp_pfear_map[] = { + ptl_pcdp_pfear_map, + NULL +}; + +static const struct pmc_bit_map ptl_pcdp_ltr_show_map[] = { + {"SOUTHPORT_A", CNP_PMC_LTR_SPA}, + {"SOUTHPORT_B", CNP_PMC_LTR_SPB}, + {"SATA", CNP_PMC_LTR_SATA}, + {"GIGABIT_ETHERNET", CNP_PMC_LTR_GBE}, + {"XHCI", CNP_PMC_LTR_XHCI}, + {"SOUTHPORT_F", ADL_PMC_LTR_SPF}, + {"ME", CNP_PMC_LTR_ME}, + {"SATA1", CNP_PMC_LTR_EVA}, + {"SOUTHPORT_C", CNP_PMC_LTR_SPC}, + {"HD_AUDIO", CNP_PMC_LTR_AZ}, + {"CNV", CNP_PMC_LTR_CNV}, + {"LPSS", CNP_PMC_LTR_LPSS}, + {"SOUTHPORT_D", CNP_PMC_LTR_SPD}, + {"SOUTHPORT_E", CNP_PMC_LTR_SPE}, + {"SATA2", PTL_PMC_LTR_SATA2}, + {"ESPI", CNP_PMC_LTR_ESPI}, + {"SCC", CNP_PMC_LTR_SCC}, + {"ISH", CNP_PMC_LTR_ISH}, + {"UFSX2", CNP_PMC_LTR_UFSX2}, + {"EMMC", CNP_PMC_LTR_EMMC}, + {"WIGIG", ICL_PMC_LTR_WIGIG}, + {"THC0", TGL_PMC_LTR_THC0}, + {"THC1", TGL_PMC_LTR_THC1}, + {"SOUTHPORT_G", MTL_PMC_LTR_SPG}, + {"ESE", MTL_PMC_LTR_ESE}, + {"IOE_PMC", MTL_PMC_LTR_IOE_PMC}, + {"DMI3", ARL_PMC_LTR_DMI3}, + {"OSSE", LNL_PMC_LTR_OSSE}, + + /* Below two cannot be used for LTR_IGNORE */ + {"CURRENT_PLATFORM", PTL_PMC_LTR_CUR_PLT}, + {"AGGREGATED_SYSTEM", PTL_PMC_LTR_CUR_ASLT}, + {} +}; + +const struct pmc_bit_map ptl_pcdp_clocksource_status_map[] = { + {"AON2_OFF_STS", BIT(0), 1}, + 
{"AON3_OFF_STS", BIT(1), 0}, + {"AON4_OFF_STS", BIT(2), 1}, + {"AON5_OFF_STS", BIT(3), 1}, + {"AON1_OFF_STS", BIT(4), 0}, + {"XTAL_LVM_OFF_STS", BIT(5), 0}, + {"MPFPW1_0_PLL_OFF_STS", BIT(6), 1}, + {"USB3_PLL_OFF_STS", BIT(8), 1}, + {"AON3_SPL_OFF_STS", BIT(9), 1}, + {"MPFPW2_0_PLL_OFF_STS", BIT(12), 1}, + {"XTAL_AGGR_OFF_STS", BIT(17), 1}, + {"USB2_PLL_OFF_STS", BIT(18), 0}, + {"SAF_PLL_OFF_STS", BIT(19), 1}, + {"SE_TCSS_PLL_OFF_STS", BIT(20), 1}, + {"DDI_PLL_OFF_STS", BIT(21), 1}, + {"FILTER_PLL_OFF_STS", BIT(22), 1}, + {"ACE_PLL_OFF_STS", BIT(24), 0}, + {"FABRIC_PLL_OFF_STS", BIT(25), 1}, + {"SOC_PLL_OFF_STS", BIT(26), 1}, + {"REF_PLL_OFF_STS", BIT(28), 1}, + {"IMG_PLL_OFF_STS", BIT(29), 1}, + {"RTC_PLL_OFF_STS", BIT(31), 0}, + {} +}; + +static const struct pmc_bit_map ptl_pcdp_power_gating_status_0_map[] = { + {"PMC_PGD0_PG_STS", BIT(0), 0}, + {"FUSE_OSSE_PGD0_PG_STS", BIT(1), 0}, + {"ESPISPI_PGD0_PG_STS", BIT(2), 0}, + {"XHCI_PGD0_PG_STS", BIT(3), 1}, + {"SPA_PGD0_PG_STS", BIT(4), 1}, + {"SPB_PGD0_PG_STS", BIT(5), 1}, + {"MPFPW2_PGD0_PG_STS", BIT(6), 0}, + {"GBE_PGD0_PG_STS", BIT(7), 1}, + {"SBR16B20_PGD0_PG_STS", BIT(8), 0}, + {"SBR8B20_PGD0_PG_STS", BIT(9), 0}, + {"SBR16B21_PGD0_PG_STS", BIT(10), 0}, + {"DBG_PGD0_PG_STS", BIT(11), 0}, + {"OSSE_HOTHAM_PGD0_PG_STS", BIT(12), 1}, + {"D2D_DISP_PGD1_PG_STS", BIT(13), 1}, + {"LPSS_PGD0_PG_STS", BIT(14), 1}, + {"LPC_PGD0_PG_STS", BIT(15), 0}, + {"SMB_PGD0_PG_STS", BIT(16), 0}, + {"ISH_PGD0_PG_STS", BIT(17), 0}, + {"SBR16B2_PGD0_PG_STS", BIT(18), 0}, + {"NPK_PGD0_PG_STS", BIT(19), 0}, + {"D2D_NOC_PGD1_PG_STS", BIT(20), 1}, + {"SBR8B2_PGD0_PG_STS", BIT(21), 0}, + {"FUSE_PGD0_PG_STS", BIT(22), 0}, + {"SBR16B0_PGD0_PG_STS", BIT(23), 0}, + {"PSF0_PGD0_PG_STS", BIT(24), 0}, + {"XDCI_PGD0_PG_STS", BIT(25), 1}, + {"EXI_PGD0_PG_STS", BIT(26), 0}, + {"CSE_PGD0_PG_STS", BIT(27), 1}, + {"KVMCC_PGD0_PG_STS", BIT(28), 1}, + {"PMT_PGD0_PG_STS", BIT(29), 1}, + {"CLINK_PGD0_PG_STS", BIT(30), 1}, + {"PTIO_PGD0_PG_STS", BIT(31), 1}, + {} +}; + +static const struct pmc_bit_map ptl_pcdp_power_gating_status_1_map[] = { + {"USBR0_PGD0_PG_STS", BIT(0), 1}, + {"SUSRAM_PGD0_PG_STS", BIT(1), 1}, + {"SMT1_PGD0_PG_STS", BIT(2), 1}, + {"MPFPW1_PGD0_PG_STS", BIT(3), 0}, + {"SMS2_PGD0_PG_STS", BIT(4), 1}, + {"SMS1_PGD0_PG_STS", BIT(5), 1}, + {"CSMERTC_PGD0_PG_STS", BIT(6), 0}, + {"CSMEPSF_PGD0_PG_STS", BIT(7), 0}, + {"D2D_NOC_PGD0_PG_STS", BIT(8), 0}, + {"ESE_PGD0_PG_STS", BIT(9), 1}, + {"P2SB8B_PGD0_PG_STS", BIT(10), 1}, + {"SBR16B7_PGD0_PG_STS", BIT(11), 0}, + {"SBR16B3_PGD0_PG_STS", BIT(12), 0}, + {"OSSE_SMT1_PGD0_PG_STS", BIT(13), 1}, + {"D2D_DISP_PGD0_PG_STS", BIT(14), 1}, + {"DBG_SBR_PGD0_PG_STS", BIT(15), 0}, + {"U3FPW1_PGD0_PG_STS", BIT(16), 0}, + {"FIA_X_PGD0_PG_STS", BIT(17), 0}, + {"PSF4_PGD0_PG_STS", BIT(18), 0}, + {"CNVI_PGD0_PG_STS", BIT(19), 0}, + {"UFSX2_PGD0_PG_STS", BIT(20), 1}, + {"ENDBG_PGD0_PG_STS", BIT(21), 0}, + {"DBC_PGD0_PG_STS", BIT(22), 0}, + {"FIA_PG_PGD0_PG_STS", BIT(23), 0}, + {"D2D_IPU_PGD0_PG_STS", BIT(24), 1}, + {"NPK_PGD1_PG_STS", BIT(25), 0}, + {"FIACPCB_X_PGD0_PG_STS", BIT(26), 0}, + {"SBR8B4_PGD0_PG_STS", BIT(27), 0}, + {"DBG_PSF_PGD0_PG_STS", BIT(28), 0}, + {"PSF6_PGD0_PG_STS", BIT(29), 0}, + {"UFSPW1_PGD0_PG_STS", BIT(30), 0}, + {"FIA_U_PGD0_PG_STS", BIT(31), 0}, + {} +}; + +static const struct pmc_bit_map ptl_pcdp_power_gating_status_2_map[] = { + {"PSF8_PGD0_PG_STS", BIT(0), 0}, + {"SBR16B4_PGD0_PG_STS", BIT(1), 0}, + {"SBR16B5_PGD0_PG_STS", BIT(2), 0}, + {"FIACPCB_U_PGD0_PG_STS", BIT(3), 0}, + {"TAM_PGD0_PG_STS", BIT(4), 1}, + 
{"D2D_NOC_PGD0_PG_STS", BIT(5), 1}, + {"TBTLSX_PGD0_PG_STS", BIT(6), 1}, + {"THC0_PGD0_PG_STS", BIT(7), 1}, + {"THC1_PGD0_PG_STS", BIT(8), 1}, + {"PMC_PGD1_PG_STS", BIT(9), 0}, + {"SBR8B1_PGD0_PG_STS", BIT(10), 0}, + {"TCSS_PGD0_PG_STS", BIT(11), 0}, + {"DISP_PGA_PGD0_PG_STS", BIT(12), 0}, + {"SBR16B1_PGD0_PG_STS", BIT(13), 0}, + {"SBRG_PGD0_PG_STS", BIT(14), 0}, + {"PSF5_PGD0_PG_STS", BIT(15), 0}, + {"P2SB16B_PGD0_PG_STS", BIT(16), 1}, + {"ACE_PGD0_PG_STS", BIT(17), 0}, + {"ACE_PGD1_PG_STS", BIT(18), 0}, + {"ACE_PGD2_PG_STS", BIT(19), 0}, + {"ACE_PGD3_PG_STS", BIT(20), 0}, + {"ACE_PGD4_PG_STS", BIT(21), 0}, + {"ACE_PGD5_PG_STS", BIT(22), 0}, + {"ACE_PGD6_PG_STS", BIT(23), 0}, + {"ACE_PGD7_PG_STS", BIT(24), 0}, + {"ACE_PGD8_PG_STS", BIT(25), 0}, + {"ACE_PGD9_PG_STS", BIT(26), 0}, + {"ACE_PGD10_PG_STS", BIT(27), 0}, + {"FIACPCB_PG_PGD0_PG_STS", BIT(28), 0}, + {"SBR16B6_PGD0_PG_STS", BIT(29), 0}, + {"OSSE_PGD0_PG_STS", BIT(30), 1}, + {"SBR8B0_PGD0_PG_STS", BIT(31), 0}, + {} +}; + +static const struct pmc_bit_map ptl_pcdp_d3_status_0_map[] = { + {"LPSS_D3_STS", BIT(3), 1}, + {"XDCI_D3_STS", BIT(4), 1}, + {"XHCI_D3_STS", BIT(5), 1}, + {"OSSE_D3_STS", BIT(6), 0}, + {"SPA_D3_STS", BIT(12), 0}, + {"SPB_D3_STS", BIT(13), 0}, + {"ESPISPI_D3_STS", BIT(18), 0}, + {"PSTH_D3_STS", BIT(21), 0}, + {"OSSE_SMT1_D3_STS", BIT(30), 0}, + {} +}; + +static const struct pmc_bit_map ptl_pcdp_d3_status_1_map[] = { + {"GBE_D3_STS", BIT(19), 0}, + {"ITSS_D3_STS", BIT(23), 0}, + {"CNVI_D3_STS", BIT(27), 0}, + {"UFSX2_D3_STS", BIT(28), 1}, + {"OSSE_HOTHAM_D3_STS", BIT(29), 0}, + {"ESE_D3_STS", BIT(30), 0}, + {} +}; + +static const struct pmc_bit_map ptl_pcdp_d3_status_2_map[] = { + {"CSMERTC_D3_STS", BIT(1), 0}, + {"SUSRAM_D3_STS", BIT(2), 0}, + {"CSE_D3_STS", BIT(4), 0}, + {"KVMCC_D3_STS", BIT(5), 0}, + {"USBR0_D3_STS", BIT(6), 0}, + {"ISH_D3_STS", BIT(7), 0}, + {"SMT1_D3_STS", BIT(8), 0}, + {"SMT2_D3_STS", BIT(9), 0}, + {"SMT3_D3_STS", BIT(10), 0}, + {"OSSE_SMT2_D3_STS", BIT(12), 0}, + {"CLINK_D3_STS", BIT(14), 0}, + {"PTIO_D3_STS", BIT(16), 0}, + {"PMT_D3_STS", BIT(17), 0}, + {"SMS1_D3_STS", BIT(18), 0}, + {"SMS2_D3_STS", BIT(19), 0}, + {} +}; + +static const struct pmc_bit_map ptl_pcdp_d3_status_3_map[] = { + {"THC0_D3_STS", BIT(14), 1}, + {"THC1_D3_STS", BIT(15), 1}, + {"OSSE_SMT3_D3_STS", BIT(18), 0}, + {"ACE_D3_STS", BIT(23), 0}, + {} +}; + +static const struct pmc_bit_map ptl_pcdp_vnn_req_status_0_map[] = { + {"LPSS_VNN_REQ_STS", BIT(3), 1}, + {"OSSE_VNN_REQ_STS", BIT(6), 1}, + {"ESPISPI_VNN_REQ_STS", BIT(18), 1}, + {"OSSE_SMT1_VNN_REQ_STS", BIT(30), 1}, + {} +}; + +static const struct pmc_bit_map ptl_pcdp_vnn_req_status_1_map[] = { + {"NPK_VNN_REQ_STS", BIT(4), 1}, + {"DFXAGG_VNN_REQ_STS", BIT(8), 0}, + {"EXI_VNN_REQ_STS", BIT(9), 1}, + {"P2D_VNN_REQ_STS", BIT(18), 1}, + {"GBE_VNN_REQ_STS", BIT(19), 1}, + {"SMB_VNN_REQ_STS", BIT(25), 1}, + {"LPC_VNN_REQ_STS", BIT(26), 0}, + {"ESE_VNN_REQ_STS", BIT(30), 1}, + {} +}; + +static const struct pmc_bit_map ptl_pcdp_vnn_req_status_2_map[] = { + {"CSMERTC_VNN_REQ_STS", BIT(1), 1}, + {"CSE_VNN_REQ_STS", BIT(4), 1}, + {"ISH_VNN_REQ_STS", BIT(7), 1}, + {"SMT1_VNN_REQ_STS", BIT(8), 1}, + {"CLINK_VNN_REQ_STS", BIT(14), 1}, + {"SMS1_VNN_REQ_STS", BIT(18), 1}, + {"SMS2_VNN_REQ_STS", BIT(19), 1}, + {"GPIOCOM4_VNN_REQ_STS", BIT(20), 1}, + {"GPIOCOM3_VNN_REQ_STS", BIT(21), 1}, + {"GPIOCOM1_VNN_REQ_STS", BIT(23), 1}, + {"GPIOCOM0_VNN_REQ_STS", BIT(24), 1}, + {"DISP_SHIM_VNN_REQ_STS", BIT(26), 1}, + {} +}; + +const struct pmc_bit_map ptl_pcdp_vnn_req_status_3_map[] = { + 
{"DTS0_VNN_REQ_STS", BIT(7), 0}, + {"GPIOCOM5_VNN_REQ_STS", BIT(11), 1}, + {} +}; + +static const struct pmc_bit_map ptl_pcdp_vnn_misc_status_map[] = { + {"CPU_C10_REQ_STS", BIT(0), 0}, + {"TS_OFF_REQ_STS", BIT(1), 0}, + {"PNDE_MET_REQ_STS", BIT(2), 1}, + {"PG5_PMA0_REQ_STS", BIT(3), 0}, + {"FW_THROTTLE_ALLOWED_REQ_STS", BIT(4), 0}, + {"VNN_SOC_REQ_STS", BIT(6), 1}, + {"ISH_VNNAON_REQ_STS", BIT(7), 0}, + {"D2D_NOC_CFI_QACTIVE_REQ_STS", BIT(8), 1}, + {"D2D_NOC_GPSB_QACTIVE_REQ_STS", BIT(9), 1}, + {"D2D_IPU_QACTIVE_REQ_STS", BIT(10), 1}, + {"PLT_GREATER_REQ_STS", BIT(11), 1}, + {"ALL_SBR_IDLE_REQ_STS", BIT(12), 0}, + {"PMC_IDLE_FB_OCP_REQ_STS", BIT(13), 0}, + {"PM_SYNC_STATES_REQ_STS", BIT(14), 0}, + {"EA_REQ_STS", BIT(15), 0}, + {"MPHY_CORE_OFF_REQ_STS", BIT(16), 0}, + {"BRK_EV_EN_REQ_STS", BIT(17), 0}, + {"AUTO_DEMO_EN_REQ_STS", BIT(18), 0}, + {"ITSS_CLK_SRC_REQ_STS", BIT(19), 1}, + {"ARC_IDLE_REQ_STS", BIT(21), 0}, + {"PG5_PMA1_REQ_STS", BIT(22), 0}, + {"FIA_DEEP_PM_REQ_STS", BIT(23), 0}, + {"XDCI_ATTACHED_REQ_STS", BIT(24), 1}, + {"ARC_INTERRUPT_WAKE_REQ_STS", BIT(25), 0}, + {"D2D_DISP_DDI_QACTIVE_REQ_STS", BIT(26), 1}, + {"PRE_WAKE0_REQ_STS", BIT(27), 1}, + {"PRE_WAKE1_REQ_STS", BIT(28), 1}, + {"PRE_WAKE2_REQ_STS", BIT(29), 1}, + {"D2D_DISP_EDP_QACTIVE_REQ_STS", BIT(31), 1}, + {} +}; + +const struct pmc_bit_map ptl_pcdp_signal_status_map[] = { + {"LSX_Wake0_STS", BIT(0), 0}, + {"LSX_Wake1_STS", BIT(1), 0}, + {"LSX_Wake2_STS", BIT(2), 0}, + {"LSX_Wake3_STS", BIT(3), 0}, + {"LSX_Wake4_STS", BIT(4), 0}, + {"LSX_Wake5_STS", BIT(5), 0}, + {"LSX_Wake6_STS", BIT(6), 0}, + {"LSX_Wake7_STS", BIT(7), 0}, + {"LPSS_Wake0_STS", BIT(8), 1}, + {"LPSS_Wake1_STS", BIT(9), 1}, + {"Int_Timer_SS_Wake0_STS", BIT(10), 1}, + {"Int_Timer_SS_Wake1_STS", BIT(11), 1}, + {"Int_Timer_SS_Wake2_STS", BIT(12), 1}, + {"Int_Timer_SS_Wake3_STS", BIT(13), 1}, + {"Int_Timer_SS_Wake4_STS", BIT(14), 1}, + {"Int_Timer_SS_Wake5_STS", BIT(15), 1}, + {} +}; + +static const struct pmc_bit_map ptl_pcdp_rsc_status_map[] = { + {"Memory", 0, 1}, + {"PSF0", 0, 1}, + {"PSF4", 0, 1}, + {"PSF5", 0, 1}, + {"PSF6", 0, 1}, + {"PSF8", 0, 1}, + {"SAF_CFI_LINK", 0, 1}, + {"SB", 0, 1}, + {} +}; + +static const struct pmc_bit_map *ptl_pcdp_lpm_maps[] = { + ptl_pcdp_clocksource_status_map, + ptl_pcdp_power_gating_status_0_map, + ptl_pcdp_power_gating_status_1_map, + ptl_pcdp_power_gating_status_2_map, + ptl_pcdp_d3_status_0_map, + ptl_pcdp_d3_status_1_map, + ptl_pcdp_d3_status_2_map, + ptl_pcdp_d3_status_3_map, + ptl_pcdp_vnn_req_status_0_map, + ptl_pcdp_vnn_req_status_1_map, + ptl_pcdp_vnn_req_status_2_map, + ptl_pcdp_vnn_req_status_3_map, + ptl_pcdp_vnn_misc_status_map, + ptl_pcdp_signal_status_map, + NULL +}; + +static const struct pmc_bit_map *ptl_pcdp_blk_maps[] = { + ptl_pcdp_power_gating_status_0_map, + ptl_pcdp_power_gating_status_1_map, + ptl_pcdp_power_gating_status_2_map, + ptl_pcdp_rsc_status_map, + ptl_pcdp_vnn_req_status_0_map, + ptl_pcdp_vnn_req_status_1_map, + ptl_pcdp_vnn_req_status_2_map, + ptl_pcdp_vnn_req_status_3_map, + ptl_pcdp_d3_status_0_map, + ptl_pcdp_d3_status_1_map, + ptl_pcdp_d3_status_2_map, + ptl_pcdp_d3_status_3_map, + ptl_pcdp_clocksource_status_map, + ptl_pcdp_vnn_misc_status_map, + ptl_pcdp_signal_status_map, + NULL +}; + +static const struct pmc_reg_map ptl_pcdp_reg_map = { + .pfear_sts = ext_ptl_pcdp_pfear_map, + .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET, + .slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP, + .ltr_show_sts = ptl_pcdp_ltr_show_map, + .msr_sts = msr_map, + .ltr_ignore_offset = 
CNP_PMC_LTR_IGNORE_OFFSET, + .regmap_length = PTL_PCD_PMC_MMIO_REG_LEN, + .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A, + .ppfear_buckets = LNL_PPFEAR_NUM_ENTRIES, + .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET, + .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT, + .lpm_num_maps = PTL_LPM_NUM_MAPS, + .ltr_ignore_max = LNL_NUM_IP_IGN_ALLOWED, + .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2, + .etr3_offset = ETR3_OFFSET, + .lpm_sts_latch_en_offset = MTL_LPM_STATUS_LATCH_EN_OFFSET, + .lpm_priority_offset = MTL_LPM_PRI_OFFSET, + .lpm_en_offset = MTL_LPM_EN_OFFSET, + .lpm_residency_offset = MTL_LPM_RESIDENCY_OFFSET, + .lpm_sts = ptl_pcdp_lpm_maps, + .lpm_status_offset = MTL_LPM_STATUS_OFFSET, + .lpm_live_status_offset = MTL_LPM_LIVE_STATUS_OFFSET, + .s0ix_blocker_maps = ptl_pcdp_blk_maps, + .s0ix_blocker_offset = LNL_S0IX_BLOCKER_OFFSET, + .num_s0ix_blocker = PTL_NUM_S0IX_BLOCKER, + .blocker_req_offset = PTL_BLK_REQ_OFFSET, + .lpm_req_guid = PCDP_LPM_REQ_GUID, +}; + +static struct pmc_info ptl_pmc_info_list[] = { + { + .devid = PMC_DEVID_PTL_PCDH, + .map = &ptl_pcdp_reg_map, + }, + { + .devid = PMC_DEVID_PTL_PCDP, + .map = &ptl_pcdp_reg_map, + }, + {} +}; + +#define PTL_NPU_PCI_DEV 0xb03e +#define PTL_IPU_PCI_DEV 0xb05d + +/* + * Set power state of select devices that do not have drivers to D3 + * so that they do not block Package C entry. + */ +static void ptl_d3_fixup(void) +{ + pmc_core_set_device_d3(PTL_IPU_PCI_DEV); + pmc_core_set_device_d3(PTL_NPU_PCI_DEV); +} + +static int ptl_resume(struct pmc_dev *pmcdev) +{ + ptl_d3_fixup(); + return cnl_resume(pmcdev); +} + +static int ptl_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info) +{ + ptl_d3_fixup(); + return generic_core_init(pmcdev, pmc_dev_info); +} + +struct pmc_dev_info ptl_pmc_dev = { + .pci_func = 2, + .regmap_list = ptl_pmc_info_list, + .map = &ptl_pcdp_reg_map, + .sub_req_show = &pmc_core_substate_blk_req_fops, + .suspend = cnl_suspend, + .resume = ptl_resume, + .init = ptl_core_init, + .sub_req = pmc_core_pmt_get_blk_sub_req, +}; diff --git a/drivers/platform/x86/intel/pmc/spt.c b/drivers/platform/x86/intel/pmc/spt.c index e16982236778..b50534aa2cba 100644 --- a/drivers/platform/x86/intel/pmc/spt.c +++ b/drivers/platform/x86/intel/pmc/spt.c @@ -8,9 +8,11 @@ * */ +#include <linux/pci.h> + #include "core.h" -const struct pmc_bit_map spt_pll_map[] = { +static const struct pmc_bit_map spt_pll_map[] = { {"MIPI PLL", SPT_PMC_BIT_MPHY_CMN_LANE0}, {"GEN2 USB2PCIE2 PLL", SPT_PMC_BIT_MPHY_CMN_LANE1}, {"DMIPCIE3 PLL", SPT_PMC_BIT_MPHY_CMN_LANE2}, @@ -18,7 +20,7 @@ const struct pmc_bit_map spt_pll_map[] = { {} }; -const struct pmc_bit_map spt_mphy_map[] = { +static const struct pmc_bit_map spt_mphy_map[] = { {"MPHY CORE LANE 0", SPT_PMC_BIT_MPHY_LANE0}, {"MPHY CORE LANE 1", SPT_PMC_BIT_MPHY_LANE1}, {"MPHY CORE LANE 2", SPT_PMC_BIT_MPHY_LANE2}, @@ -38,7 +40,7 @@ const struct pmc_bit_map spt_mphy_map[] = { {} }; -const struct pmc_bit_map spt_pfear_map[] = { +static const struct pmc_bit_map spt_pfear_map[] = { {"PMC", SPT_PMC_BIT_PMC}, {"OPI-DMI", SPT_PMC_BIT_OPI}, {"SPI / eSPI", SPT_PMC_BIT_SPI}, @@ -82,7 +84,7 @@ const struct pmc_bit_map spt_pfear_map[] = { {} }; -const struct pmc_bit_map *ext_spt_pfear_map[] = { +static const struct pmc_bit_map *ext_spt_pfear_map[] = { /* * Check intel_pmc_core_ids[] users of spt_reg_map for * a list of core SoCs using this. 
@@ -91,7 +93,7 @@ const struct pmc_bit_map *ext_spt_pfear_map[] = { NULL }; -const struct pmc_bit_map spt_ltr_show_map[] = { +static const struct pmc_bit_map spt_ltr_show_map[] = { {"SOUTHPORT_A", SPT_PMC_LTR_SPA}, {"SOUTHPORT_B", SPT_PMC_LTR_SPB}, {"SATA", SPT_PMC_LTR_SATA}, @@ -116,7 +118,7 @@ const struct pmc_bit_map spt_ltr_show_map[] = { {} }; -const struct pmc_reg_map spt_reg_map = { +static const struct pmc_reg_map spt_reg_map = { .pfear_sts = ext_spt_pfear_map, .mphy_sts = spt_mphy_map, .pll_sts = spt_pll_map, @@ -134,7 +136,25 @@ const struct pmc_reg_map spt_reg_map = { .pm_vric1_offset = SPT_PMC_VRIC1_OFFSET, }; -void spt_core_init(struct pmc_dev *pmcdev) +static const struct pci_device_id spt_pmc_pci_id[] = { + { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID) }, + { } +}; + +static int spt_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info) { - pmcdev->map = &spt_reg_map; + /* + * Coffee Lake has the CPU ID of Kaby Lake but a Cannon Lake PCH, so the + * Sunrisepoint PCH regmap can't be used here. Use the Cannon Lake PCH + * regmap in this case. + */ + if (!pci_dev_present(spt_pmc_pci_id)) + return generic_core_init(pmcdev, &cnp_pmc_dev); + + return generic_core_init(pmcdev, pmc_dev_info); } + +struct pmc_dev_info spt_pmc_dev = { + .map = &spt_reg_map, + .init = spt_core_init, +}; diff --git a/drivers/platform/x86/intel/pmc/ssram_telemetry.c b/drivers/platform/x86/intel/pmc/ssram_telemetry.c new file mode 100644 index 000000000000..03fad9331fc0 --- /dev/null +++ b/drivers/platform/x86/intel/pmc/ssram_telemetry.c @@ -0,0 +1,208 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel PMC SSRAM TELEMETRY PCI Driver + * + * Copyright (c) 2023, Intel Corporation. + */ + +#include <linux/cleanup.h> +#include <linux/intel_vsec.h> +#include <linux/pci.h> +#include <linux/types.h> +#include <linux/io-64-nonatomic-lo-hi.h> + +#include "core.h" +#include "ssram_telemetry.h" + +#define SSRAM_HDR_SIZE 0x100 +#define SSRAM_PWRM_OFFSET 0x14 +#define SSRAM_DVSEC_OFFSET 0x1C +#define SSRAM_DVSEC_SIZE 0x10 +#define SSRAM_PCH_OFFSET 0x60 +#define SSRAM_IOE_OFFSET 0x68 +#define SSRAM_DEVID_OFFSET 0x70 + +DEFINE_FREE(pmc_ssram_telemetry_iounmap, void __iomem *, if (_T) iounmap(_T)) + +static struct pmc_ssram_telemetry *pmc_ssram_telems; +static bool device_probed; + +static int +pmc_ssram_telemetry_add_pmt(struct pci_dev *pcidev, u64 ssram_base, void __iomem *ssram) +{ + struct intel_vsec_platform_info info = {}; + struct intel_vsec_header *headers[2] = {}; + struct intel_vsec_header header; + void __iomem *dvsec; + u32 dvsec_offset; + u32 table, hdr; + + dvsec_offset = readl(ssram + SSRAM_DVSEC_OFFSET); + dvsec = ioremap(ssram_base + dvsec_offset, SSRAM_DVSEC_SIZE); + if (!dvsec) + return -ENOMEM; + + hdr = readl(dvsec + PCI_DVSEC_HEADER1); + header.id = readw(dvsec + PCI_DVSEC_HEADER2); + header.rev = PCI_DVSEC_HEADER1_REV(hdr); + header.length = PCI_DVSEC_HEADER1_LEN(hdr); + header.num_entries = readb(dvsec + INTEL_DVSEC_ENTRIES); + header.entry_size = readb(dvsec + INTEL_DVSEC_SIZE); + + table = readl(dvsec + INTEL_DVSEC_TABLE); + header.tbir = INTEL_DVSEC_TABLE_BAR(table); + header.offset = INTEL_DVSEC_TABLE_OFFSET(table); + iounmap(dvsec); + + headers[0] = &header; + info.caps = VSEC_CAP_TELEMETRY; + info.headers = headers; + info.base_addr = ssram_base; + info.parent = &pcidev->dev; + + return intel_vsec_register(pcidev, &info); +} + +static inline u64 get_base(void __iomem *addr, u32 offset) +{ + return lo_hi_readq(addr + offset) & GENMASK_ULL(63, 3); +} + +static int 
+pmc_ssram_telemetry_get_pmc(struct pci_dev *pcidev, unsigned int pmc_idx, u32 offset) +{ + void __iomem __free(pmc_ssram_telemetry_iounmap) *tmp_ssram = NULL; + void __iomem __free(pmc_ssram_telemetry_iounmap) *ssram = NULL; + u64 ssram_base, pwrm_base; + u16 devid; + + ssram_base = pci_resource_start(pcidev, 0); + tmp_ssram = ioremap(ssram_base, SSRAM_HDR_SIZE); + if (!tmp_ssram) + return -ENOMEM; + + if (pmc_idx != PMC_IDX_MAIN) { + /* + * The secondary PMC BARS (which are behind hidden PCI devices) + * are read from fixed offsets in MMIO of the primary PMC BAR. + * If a device is not present, the value will be 0. + */ + ssram_base = get_base(tmp_ssram, offset); + if (!ssram_base) + return 0; + + ssram = ioremap(ssram_base, SSRAM_HDR_SIZE); + if (!ssram) + return -ENOMEM; + + } else { + ssram = no_free_ptr(tmp_ssram); + } + + pwrm_base = get_base(ssram, SSRAM_PWRM_OFFSET); + devid = readw(ssram + SSRAM_DEVID_OFFSET); + + pmc_ssram_telems[pmc_idx].devid = devid; + pmc_ssram_telems[pmc_idx].base_addr = pwrm_base; + + /* Find and register the PMC telemetry entries */ + return pmc_ssram_telemetry_add_pmt(pcidev, ssram_base, ssram); +} + +/** + * pmc_ssram_telemetry_get_pmc_info() - Get the devid and base_addr information for a PMC + * @pmc_idx: Index of the PMC + * @pmc_ssram_telemetry: pmc_ssram_telemetry structure to store the PMC information + * + * Return: + * * 0 - Success + * * -EAGAIN - Probe function has not finished yet. Try again. + * * -EINVAL - Invalid pmc_idx + * * -ENODEV - PMC device is not available + */ +int pmc_ssram_telemetry_get_pmc_info(unsigned int pmc_idx, + struct pmc_ssram_telemetry *pmc_ssram_telemetry) +{ + /* + * PMCs are discovered in the probe function. If this function is called + * before the probe function completes, the result would be invalid. Use the + * device_probed variable to avoid this case. Return -EAGAIN to inform the + * consumer to call again later. + */ + if (!device_probed) + return -EAGAIN; + + /* + * Memory barrier is used to ensure the correct read order between + * device_probed variable and PMC info. + */ + smp_rmb(); + if (pmc_idx >= MAX_NUM_PMC) + return -EINVAL; + + if (!pmc_ssram_telems || !pmc_ssram_telems[pmc_idx].devid) + return -ENODEV; + + pmc_ssram_telemetry->devid = pmc_ssram_telems[pmc_idx].devid; + pmc_ssram_telemetry->base_addr = pmc_ssram_telems[pmc_idx].base_addr; + return 0; +} +EXPORT_SYMBOL_GPL(pmc_ssram_telemetry_get_pmc_info); + +static int intel_pmc_ssram_telemetry_probe(struct pci_dev *pcidev, const struct pci_device_id *id) +{ + int ret; + + pmc_ssram_telems = devm_kzalloc(&pcidev->dev, sizeof(*pmc_ssram_telems) * MAX_NUM_PMC, + GFP_KERNEL); + if (!pmc_ssram_telems) { + ret = -ENOMEM; + goto probe_finish; + } + + ret = pcim_enable_device(pcidev); + if (ret) { + dev_dbg(&pcidev->dev, "failed to enable PMC SSRAM device\n"); + goto probe_finish; + } + + ret = pmc_ssram_telemetry_get_pmc(pcidev, PMC_IDX_MAIN, 0); + if (ret) + goto probe_finish; + + pmc_ssram_telemetry_get_pmc(pcidev, PMC_IDX_IOE, SSRAM_IOE_OFFSET); + pmc_ssram_telemetry_get_pmc(pcidev, PMC_IDX_PCH, SSRAM_PCH_OFFSET); + +probe_finish: + /* + * Memory barrier is used to ensure the correct write order between PMC info + * and device_probed variable. 
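+ * This smp_wmb() pairs with the smp_rmb() in + * pmc_ssram_telemetry_get_pmc_info(): the stores to pmc_ssram_telems[] + * above must be visible to other CPUs before the store to device_probed. 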
+ */ + smp_wmb(); + device_probed = true; + return ret; +} + +static const struct pci_device_id intel_pmc_ssram_telemetry_pci_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PMC_DEVID_MTL_SOCM) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PMC_DEVID_ARL_SOCS) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PMC_DEVID_ARL_SOCM) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PMC_DEVID_LNL_SOCM) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PMC_DEVID_PTL_PCDH) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PMC_DEVID_PTL_PCDP) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PMC_DEVID_WCL_PCDN) }, + { } +}; +MODULE_DEVICE_TABLE(pci, intel_pmc_ssram_telemetry_pci_ids); + +static struct pci_driver intel_pmc_ssram_telemetry_driver = { + .name = "intel_pmc_ssram_telemetry", + .id_table = intel_pmc_ssram_telemetry_pci_ids, + .probe = intel_pmc_ssram_telemetry_probe, +}; +module_pci_driver(intel_pmc_ssram_telemetry_driver); + +MODULE_IMPORT_NS("INTEL_VSEC"); +MODULE_AUTHOR("Xi Pardee <xi.pardee@intel.com>"); +MODULE_DESCRIPTION("Intel PMC SSRAM Telemetry driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/intel/pmc/ssram_telemetry.h b/drivers/platform/x86/intel/pmc/ssram_telemetry.h new file mode 100644 index 000000000000..daf8aeeb2275 --- /dev/null +++ b/drivers/platform/x86/intel/pmc/ssram_telemetry.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Intel PMC SSRAM Telemetry PCI Driver Header File + * + * Copyright (c) 2024, Intel Corporation. + */ + +#ifndef PMC_SSRAM_H +#define PMC_SSRAM_H + +/** + * struct pmc_ssram_telemetry - Structure to keep pmc info in ssram device + * @devid: device id of the pmc device + * @base_addr: contains PWRM base address + */ +struct pmc_ssram_telemetry { + u16 devid; + u64 base_addr; +}; + +int pmc_ssram_telemetry_get_pmc_info(unsigned int pmc_idx, + struct pmc_ssram_telemetry *pmc_ssram_telemetry); + +#endif /* PMC_SSRAM_H */ diff --git a/drivers/platform/x86/intel/pmc/tgl.c b/drivers/platform/x86/intel/pmc/tgl.c index e3e50538465d..fc5b4cacc1c6 100644 --- a/drivers/platform/x86/intel/pmc/tgl.c +++ b/drivers/platform/x86/intel/pmc/tgl.c @@ -13,7 +13,12 @@ #define ACPI_S0IX_DSM_UUID "57a6512e-3979-4e9d-9708-ff13b2508972" #define ACPI_GET_LOW_MODE_REGISTERS 1 -const struct pmc_bit_map tgl_pfear_map[] = { +enum pch_type { + PCH_H, + PCH_LP +}; + +static const struct pmc_bit_map tgl_pfear_map[] = { {"PSF9", BIT(0)}, {"RES_66", BIT(1)}, {"RES_67", BIT(2)}, @@ -24,7 +29,7 @@ const struct pmc_bit_map tgl_pfear_map[] = { {} }; -const struct pmc_bit_map *ext_tgl_pfear_map[] = { +static const struct pmc_bit_map *ext_tgl_pfear_map[] = { /* * Check intel_pmc_core_ids[] users of tgl_reg_map for * a list of core SoCs using this. 
@@ -34,7 +39,7 @@ const struct pmc_bit_map *ext_tgl_pfear_map[] = { NULL }; -const struct pmc_bit_map tgl_clocksource_status_map[] = { +static const struct pmc_bit_map tgl_clocksource_status_map[] = { {"USB2PLL_OFF_STS", BIT(18)}, {"PCIe/USB3.1_Gen2PLL_OFF_STS", BIT(19)}, {"PCIe_Gen3PLL_OFF_STS", BIT(20)}, @@ -50,7 +55,7 @@ const struct pmc_bit_map tgl_clocksource_status_map[] = { {} }; -const struct pmc_bit_map tgl_power_gating_status_map[] = { +static const struct pmc_bit_map tgl_power_gating_status_map[] = { {"CSME_PG_STS", BIT(0)}, {"SATA_PG_STS", BIT(1)}, {"xHCI_PG_STS", BIT(2)}, @@ -78,7 +83,7 @@ const struct pmc_bit_map tgl_power_gating_status_map[] = { {} }; -const struct pmc_bit_map tgl_d3_status_map[] = { +static const struct pmc_bit_map tgl_d3_status_map[] = { {"ADSP_D3_STS", BIT(0)}, {"SATA_D3_STS", BIT(1)}, {"xHCI0_D3_STS", BIT(2)}, @@ -93,7 +98,7 @@ const struct pmc_bit_map tgl_d3_status_map[] = { {} }; -const struct pmc_bit_map tgl_vnn_req_status_map[] = { +static const struct pmc_bit_map tgl_vnn_req_status_map[] = { {"GPIO_COM0_VNN_REQ_STS", BIT(1)}, {"GPIO_COM1_VNN_REQ_STS", BIT(2)}, {"GPIO_COM2_VNN_REQ_STS", BIT(3)}, @@ -118,7 +123,7 @@ const struct pmc_bit_map tgl_vnn_req_status_map[] = { {} }; -const struct pmc_bit_map tgl_vnn_misc_status_map[] = { +static const struct pmc_bit_map tgl_vnn_misc_status_map[] = { {"CPU_C10_REQ_STS_0", BIT(0)}, {"PCIe_LPM_En_REQ_STS_3", BIT(3)}, {"ITH_REQ_STS_5", BIT(5)}, @@ -170,7 +175,7 @@ const struct pmc_bit_map tgl_signal_status_map[] = { {} }; -const struct pmc_bit_map *tgl_lpm_maps[] = { +static const struct pmc_bit_map *tgl_lpm_maps[] = { tgl_clocksource_status_map, tgl_power_gating_status_map, tgl_d3_status_map, @@ -180,7 +185,32 @@ const struct pmc_bit_map *tgl_lpm_maps[] = { NULL }; -const struct pmc_reg_map tgl_reg_map = { +static const struct pmc_reg_map tgl_reg_map = { + .pfear_sts = ext_tgl_pfear_map, + .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET, + .slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP, + .ltr_show_sts = cnp_ltr_show_map, + .msr_sts = msr_map, + .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET, + .regmap_length = CNP_PMC_MMIO_REG_LEN, + .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A, + .ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES, + .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET, + .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT, + .ltr_ignore_max = TGL_NUM_IP_IGN_ALLOWED, + .lpm_num_maps = TGL_LPM_NUM_MAPS, + .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2, + .lpm_sts_latch_en_offset = TGL_LPM_STS_LATCH_EN_OFFSET, + .lpm_en_offset = TGL_LPM_EN_OFFSET, + .lpm_priority_offset = TGL_LPM_PRI_OFFSET, + .lpm_residency_offset = TGL_LPM_RESIDENCY_OFFSET, + .lpm_sts = tgl_lpm_maps, + .lpm_status_offset = TGL_LPM_STATUS_OFFSET, + .lpm_live_status_offset = TGL_LPM_LIVE_STATUS_OFFSET, + .etr3_offset = ETR3_OFFSET, +}; + +static const struct pmc_reg_map tgl_h_reg_map = { .pfear_sts = ext_tgl_pfear_map, .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET, .slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP, @@ -203,12 +233,15 @@ const struct pmc_reg_map tgl_reg_map = { .lpm_status_offset = TGL_LPM_STATUS_OFFSET, .lpm_live_status_offset = TGL_LPM_LIVE_STATUS_OFFSET, .etr3_offset = ETR3_OFFSET, + .pson_residency_offset = TGL_PSON_RESIDENCY_OFFSET, + .pson_residency_counter_step = TGL_PSON_RES_COUNTER_STEP, }; void pmc_core_get_tgl_lpm_reqs(struct platform_device *pdev) { struct pmc_dev *pmcdev = platform_get_drvdata(pdev); - const int num_maps = pmcdev->map->lpm_num_maps; + struct pmc *pmc = 
pmcdev->pmcs[PMC_IDX_MAIN]; + const int num_maps = pmc->map->lpm_num_maps; u32 lpm_size = LPM_MAX_NUM_MODES * num_maps * 4; union acpi_object *out_obj; struct acpi_device *adev; @@ -221,9 +254,9 @@ void pmc_core_get_tgl_lpm_reqs(struct platform_device *pdev) guid_parse(ACPI_S0IX_DSM_UUID, &s0ix_dsm_guid); - out_obj = acpi_evaluate_dsm(adev->handle, &s0ix_dsm_guid, 0, - ACPI_GET_LOW_MODE_REGISTERS, NULL); - if (out_obj && out_obj->type == ACPI_TYPE_BUFFER) { + out_obj = acpi_evaluate_dsm_typed(adev->handle, &s0ix_dsm_guid, 0, + ACPI_GET_LOW_MODE_REGISTERS, NULL, ACPI_TYPE_BUFFER); + if (out_obj) { u32 size = out_obj->buffer.length; if (size != lpm_size) { @@ -240,30 +273,40 @@ void pmc_core_get_tgl_lpm_reqs(struct platform_device *pdev) addr = (u32 *)out_obj->buffer.pointer; - lpm_req_regs = devm_kzalloc(&pdev->dev, lpm_size * sizeof(u32), - GFP_KERNEL); + lpm_req_regs = devm_kcalloc(&pdev->dev, lpm_size, sizeof(u32), + GFP_KERNEL); if (!lpm_req_regs) goto free_acpi_obj; memcpy(lpm_req_regs, addr, lpm_size); - pmcdev->lpm_req_regs = lpm_req_regs; + pmc->lpm_req_regs = lpm_req_regs; free_acpi_obj: ACPI_FREE(out_obj); } -void tgl_core_configure(struct pmc_dev *pmcdev) +static int tgl_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info) { + int ret; + + ret = generic_core_init(pmcdev, pmc_dev_info); + if (ret) + return ret; + pmc_core_get_tgl_lpm_reqs(pmcdev->pdev); - /* Due to a hardware limitation, the GBE LTR blocks PC10 - * when a cable is attached. Tell the PMC to ignore it. - */ - dev_dbg(&pmcdev->pdev->dev, "ignoring GBE LTR\n"); - pmc_core_send_ltr_ignore(pmcdev, 3); + return 0; } -void tgl_core_init(struct pmc_dev *pmcdev) -{ - pmcdev->map = &tgl_reg_map; - pmcdev->core_configure = tgl_core_configure; -} +struct pmc_dev_info tgl_l_pmc_dev = { + .map = &tgl_reg_map, + .suspend = cnl_suspend, + .resume = cnl_resume, + .init = tgl_core_init, +}; + +struct pmc_dev_info tgl_pmc_dev = { + .map = &tgl_h_reg_map, + .suspend = cnl_suspend, + .resume = cnl_resume, + .init = tgl_core_init, +}; diff --git a/drivers/platform/x86/intel/pmc/wcl.c b/drivers/platform/x86/intel/pmc/wcl.c new file mode 100644 index 000000000000..a45707e6364f --- /dev/null +++ b/drivers/platform/x86/intel/pmc/wcl.c @@ -0,0 +1,504 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This file contains platform specific structure definitions + * and init function used by Wildcat Lake PCH. + * + * Copyright (c) 2025, Intel Corporation. 
+ */ + +#include <linux/bits.h> +#include <linux/pci.h> + +#include "core.h" + +/* PMC SSRAM PMT Telemetry GUIDS */ +#define PCDN_LPM_REQ_GUID 0x33747648 + +static const struct pmc_bit_map wcl_pcdn_pfear_map[] = { + {"PMC_0", BIT(0)}, + {"FUSE_OSSE", BIT(1)}, + {"ESPISPI", BIT(2)}, + {"XHCI", BIT(3)}, + {"SPA", BIT(4)}, + {"RSVD", BIT(5)}, + {"MPFPW2", BIT(6)}, + {"GBE", BIT(7)}, + + {"SBR16B21", BIT(0)}, + {"SBR16B5", BIT(1)}, + {"SBR8B1", BIT(2)}, + {"SBR8B0", BIT(3)}, + {"P2SB0", BIT(4)}, + {"D2D_DISP_1", BIT(5)}, + {"LPSS", BIT(6)}, + {"LPC", BIT(7)}, + + {"SMB", BIT(0)}, + {"ISH", BIT(1)}, + {"DBG_SBR16B", BIT(2)}, + {"NPK_0", BIT(3)}, + {"D2D_NOC_1", BIT(4)}, + {"FIA_P", BIT(5)}, + {"FUSE", BIT(6)}, + {"DBG_PSF", BIT(7)}, + + {"DISP_PGA1", BIT(0)}, + {"XDCI", BIT(1)}, + {"EXI", BIT(2)}, + {"CSE", BIT(3)}, + {"KVMCC", BIT(4)}, + {"PMT", BIT(5)}, + {"CLINK", BIT(6)}, + {"PTIO", BIT(7)}, + + {"USBR0", BIT(0)}, + {"SBR16B22", BIT(1)}, + {"SMT1", BIT(2)}, + {"MPFPW1", BIT(3)}, + {"SMS2", BIT(4)}, + {"SMS1", BIT(5)}, + {"CSMERTC", BIT(6)}, + {"CSMEPSF", BIT(7)}, + + {"D2D_NOC_0", BIT(0)}, + {"ESE", BIT(1)}, + {"FIACPCB_P", BIT(2)}, + {"RSVD", BIT(3)}, + {"SBR8B2", BIT(4)}, + {"OSSE_SMT1", BIT(5)}, + {"D2D_DISP", BIT(6)}, + {"P2SB1", BIT(7)}, + + {"U3FPW1", BIT(0)}, + {"SBR16B3", BIT(1)}, + {"PSF4", BIT(2)}, + {"CNVI", BIT(3)}, + {"UFSX2", BIT(4)}, + {"ENDBG", BIT(5)}, + {"DBC", BIT(6)}, + {"SBRG", BIT(7)}, + + {"RSVD", BIT(0)}, + {"NPK1", BIT(1)}, + {"SBR16B7", BIT(2)}, + {"SBR16B4", BIT(3)}, + {"FIA_XG", BIT(4)}, + {"PSF6", BIT(5)}, + {"UFSPW1", BIT(6)}, + {"FIA_U", BIT(7)}, + + {"PSF8", BIT(0)}, + {"PSF0", BIT(1)}, + {"RSVD", BIT(2)}, + {"FIACPCB_U", BIT(3)}, + {"TAM", BIT(4)}, + {"SBR16B0", BIT(5)}, + {"TBTLSX", BIT(6)}, + {"THC0", BIT(7)}, + + {"THC1", BIT(0)}, + {"PMC_1", BIT(1)}, + {"FIACPCB_XG", BIT(2)}, + {"TCSS", BIT(3)}, + {"DISP_PGA", BIT(4)}, + {"SBR16B20", BIT(5)}, + {"SBR8B20", BIT(6)}, + {"DBG_SBR", BIT(7)}, + + {"SPC", BIT(0)}, + {"ACE_0", BIT(1)}, + {"ACE_1", BIT(2)}, + {"ACE_2", BIT(3)}, + {"ACE_3", BIT(4)}, + {"ACE_4", BIT(5)}, + {"ACE_5", BIT(6)}, + {"ACE_6", BIT(7)}, + + {"ACE_7", BIT(0)}, + {"ACE_8", BIT(1)}, + {"ACE_9", BIT(2)}, + {"ACE_10", BIT(3)}, + {"SBR16B2", BIT(4)}, + {"SBR8B4", BIT(5)}, + {"OSSE", BIT(6)}, + {"SBR16B1", BIT(7)}, + {} +}; + +static const struct pmc_bit_map *ext_wcl_pcdn_pfear_map[] = { + wcl_pcdn_pfear_map, + NULL +}; + +static const struct pmc_bit_map wcl_pcdn_ltr_show_map[] = { + {"SOUTHPORT_A", CNP_PMC_LTR_SPA}, + {"RSVD", WCL_PMC_LTR_RESERVED}, + {"SATA", CNP_PMC_LTR_SATA}, + {"GIGABIT_ETHERNET", CNP_PMC_LTR_GBE}, + {"XHCI", CNP_PMC_LTR_XHCI}, + {"SOUTHPORT_F", ADL_PMC_LTR_SPF}, + {"ME", CNP_PMC_LTR_ME}, + {"SATA1", CNP_PMC_LTR_EVA}, + {"SOUTHPORT_C", CNP_PMC_LTR_SPC}, + {"HD_AUDIO", CNP_PMC_LTR_AZ}, + {"CNV", CNP_PMC_LTR_CNV}, + {"LPSS", CNP_PMC_LTR_LPSS}, + {"SOUTHPORT_D", CNP_PMC_LTR_SPD}, + {"SOUTHPORT_E", CNP_PMC_LTR_SPE}, + {"SATA2", PTL_PMC_LTR_SATA2}, + {"ESPI", CNP_PMC_LTR_ESPI}, + {"SCC", CNP_PMC_LTR_SCC}, + {"ISH", CNP_PMC_LTR_ISH}, + {"UFSX2", CNP_PMC_LTR_UFSX2}, + {"EMMC", CNP_PMC_LTR_EMMC}, + {"WIGIG", ICL_PMC_LTR_WIGIG}, + {"THC0", TGL_PMC_LTR_THC0}, + {"THC1", TGL_PMC_LTR_THC1}, + {"SOUTHPORT_G", MTL_PMC_LTR_SPG}, + {"ESE", MTL_PMC_LTR_ESE}, + {"IOE_PMC", MTL_PMC_LTR_IOE_PMC}, + {"DMI3", ARL_PMC_LTR_DMI3}, + {"OSSE", LNL_PMC_LTR_OSSE}, + + /* Below two cannot be used for LTR_IGNORE */ + {"CURRENT_PLATFORM", PTL_PMC_LTR_CUR_PLT}, + {"AGGREGATED_SYSTEM", PTL_PMC_LTR_CUR_ASLT}, + {} +}; + +static const struct pmc_bit_map 
wcl_pcdn_power_gating_status_0_map[] = { + {"PMC_PGD0_PG_STS", BIT(0), 0}, + {"FUSE_OSSE_PGD0_PG_STS", BIT(1), 0}, + {"ESPISPI_PGD0_PG_STS", BIT(2), 0}, + {"XHCI_PGD0_PG_STS", BIT(3), 1}, + {"SPA_PGD0_PG_STS", BIT(4), 1}, + {"RSVD_5", BIT(5), 0}, + {"MPFPW2_PGD0_PG_STS", BIT(6), 0}, + {"GBE_PGD0_PG_STS", BIT(7), 1}, + {"SBR16B21_PGD0_PG_STS", BIT(8), 0}, + {"SBR16B5_PGD0_PG_STS", BIT(9), 0}, + {"SBR8B1_PGD0_PG_STS", BIT(10), 0}, + {"SBR8B0_PGD0_PG_STS", BIT(11), 0}, + {"P2SB0_PG_STS", BIT(12), 1}, + {"D2D_DISP_PGD1_PG_STS", BIT(13), 0}, + {"LPSS_PGD0_PG_STS", BIT(14), 1}, + {"LPC_PGD0_PG_STS", BIT(15), 0}, + {"SMB_PGD0_PG_STS", BIT(16), 0}, + {"ISH_PGD0_PG_STS", BIT(17), 0}, + {"DBG_SBR16B_PGD0_PG_STS", BIT(18), 0}, + {"NPK_PGD0_PG_STS", BIT(19), 0}, + {"D2D_NOC_PGD1_PG_STS", BIT(20), 0}, + {"FIA_P_PGD0_PG_STS", BIT(21), 0}, + {"FUSE_PGD0_PG_STS", BIT(22), 0}, + {"DBG_PSF_PGD0_PG_STS", BIT(23), 0}, + {"DISP_PGA1_PGD0_PG_STS", BIT(24), 0}, + {"XDCI_PGD0_PG_STS", BIT(25), 1}, + {"EXI_PGD0_PG_STS", BIT(26), 0}, + {"CSE_PGD0_PG_STS", BIT(27), 1}, + {"KVMCC_PGD0_PG_STS", BIT(28), 1}, + {"PMT_PGD0_PG_STS", BIT(29), 1}, + {"CLINK_PGD0_PG_STS", BIT(30), 1}, + {"PTIO_PGD0_PG_STS", BIT(31), 1}, + {} +}; + +static const struct pmc_bit_map wcl_pcdn_power_gating_status_1_map[] = { + {"USBR0_PGD0_PG_STS", BIT(0), 1}, + {"SBR16B22_PGD0_PG_STS", BIT(1), 0}, + {"SMT1_PGD0_PG_STS", BIT(2), 1}, + {"MPFPW1_PGD0_PG_STS", BIT(3), 0}, + {"SMS2_PGD0_PG_STS", BIT(4), 1}, + {"SMS1_PGD0_PG_STS", BIT(5), 1}, + {"CSMERTC_PGD0_PG_STS", BIT(6), 0}, + {"CSMEPSF_PGD0_PG_STS", BIT(7), 0}, + {"D2D_NOC_PGD0_PG_STS", BIT(8), 0}, + {"ESE_PGD0_PG_STS", BIT(9), 1}, + {"FIACPCB_P_PGD0_PG_STS", BIT(10), 0}, + {"SBR8B2_PGD0_PG_STS", BIT(12), 0}, + {"OSSE_SMT1_PGD0_PG_STS", BIT(13), 1}, + {"D2D_DISP_PGD0_PG_STS", BIT(14), 0}, + {"P2SB1_PGD0_PG_STS", BIT(15), 1}, + {"U3FPW1_PGD0_PG_STS", BIT(16), 0}, + {"SBR16B3_PGD0_PG_STS", BIT(17), 0}, + {"PSF4_PGD0_PG_STS", BIT(18), 0}, + {"CNVI_PGD0_PG_STS", BIT(19), 0}, + {"UFSX2_PGD0_PG_STS", BIT(20), 1}, + {"ENDBG_PGD0_PG_STS", BIT(21), 0}, + {"DBC_PGD0_PG_STS", BIT(22), 0}, + {"SBRG_PGD0_PG_STS", BIT(23), 0}, + {"NPK_PGD1_PG_STS", BIT(25), 0}, + {"SBR16B7_PGD0_PG_STS", BIT(26), 0}, + {"SBR16B4_PGD0_PG_STS", BIT(27), 0}, + {"FIA_XG_PSF_PGD0_PG_STS", BIT(28), 0}, + {"PSF6_PGD0_PG_STS", BIT(29), 0}, + {"UFSPW1_PGD0_PG_STS", BIT(30), 0}, + {"FIA_U_PGD0_PG_STS", BIT(31), 0}, + {} +}; + +static const struct pmc_bit_map wcl_pcdn_power_gating_status_2_map[] = { + {"PSF8_PGD0_PG_STS", BIT(0), 0}, + {"PSF0_PGD0_PG_STS", BIT(1), 0}, + {"FIACPCB_U_PGD0_PG_STS", BIT(3), 0}, + {"TAM_PGD0_PG_STS", BIT(4), 1}, + {"SBR16B0_PGD0_PG_STS", BIT(5), 0}, + {"TBTLSX_PGD0_PG_STS", BIT(6), 1}, + {"THC0_PGD0_PG_STS", BIT(7), 1}, + {"THC1_PGD0_PG_STS", BIT(8), 1}, + {"PMC_PGD1_PG_STS", BIT(9), 0}, + {"FIACPCB_XG_PGD0_PG_STS", BIT(10), 0}, + {"TCSS_PGD0_PG_STS", BIT(11), 0}, + {"DISP_PGA_PGD0_PG_STS", BIT(12), 0}, + {"SBR8B4_PGD0_PG_STS", BIT(13), 0}, + {"SBR8B20_PGD0_PG_STS", BIT(14), 0}, + {"DBG_PGD0_PG_STS", BIT(15), 0}, + {"SPC_PGD0_PG_STS", BIT(16), 1}, + {"ACE_PGD0_PG_STS", BIT(17), 0}, + {"ACE_PGD1_PG_STS", BIT(18), 0}, + {"ACE_PGD2_PG_STS", BIT(19), 0}, + {"ACE_PGD3_PG_STS", BIT(20), 0}, + {"ACE_PGD4_PG_STS", BIT(21), 0}, + {"ACE_PGD5_PG_STS", BIT(22), 0}, + {"ACE_PGD6_PG_STS", BIT(23), 0}, + {"ACE_PGD7_PG_STS", BIT(24), 0}, + {"ACE_PGD8_PG_STS", BIT(25), 0}, + {"ACE_PGD9_PG_STS", BIT(26), 0}, + {"ACE_PGD10_PG_STS", BIT(27), 0}, + {"SBR16B2_PG_PGD0_PG_STS", BIT(28), 0}, + {"SBR16B20_PGD0_PG_STS", BIT(29), 0}, + 
{"OSSE_PGD0_PG_STS", BIT(30), 1}, + {"SBR16B1_PGD0_PG_STS", BIT(31), 0}, + {} +}; + +static const struct pmc_bit_map wcl_pcdn_d3_status_0_map[] = { + {"LPSS_D3_STS", BIT(3), 1}, + {"XDCI_D3_STS", BIT(4), 1}, + {"XHCI_D3_STS", BIT(5), 1}, + {"SPA_D3_STS", BIT(12), 0}, + {"SPC_D3_STS", BIT(14), 0}, + {"OSSE_D3_STS", BIT(15), 0}, + {"ESPISPI_D3_STS", BIT(18), 0}, + {"PSTH_D3_STS", BIT(21), 0}, + {} +}; + +static const struct pmc_bit_map wcl_pcdn_d3_status_1_map[] = { + {"OSSE_SMT1_D3_STS", BIT(16), 0}, + {"GBE_D3_STS", BIT(19), 0}, + {"ITSS_D3_STS", BIT(23), 0}, + {"CNVI_D3_STS", BIT(27), 0}, + {"UFSX2_D3_STS", BIT(28), 0}, + {} +}; + +static const struct pmc_bit_map wcl_pcdn_d3_status_2_map[] = { + {"CSMERTC_D3_STS", BIT(1), 0}, + {"ESE_D3_STS", BIT(2), 0}, + {"CSE_D3_STS", BIT(4), 0}, + {"KVMCC_D3_STS", BIT(5), 0}, + {"USBR0_D3_STS", BIT(6), 0}, + {"ISH_D3_STS", BIT(7), 0}, + {"SMT1_D3_STS", BIT(8), 0}, + {"SMT2_D3_STS", BIT(9), 0}, + {"SMT3_D3_STS", BIT(10), 0}, + {"CLINK_D3_STS", BIT(14), 0}, + {"PTIO_D3_STS", BIT(16), 0}, + {"PMT_D3_STS", BIT(17), 0}, + {"SMS1_D3_STS", BIT(18), 0}, + {"SMS2_D3_STS", BIT(19), 0}, + {"OSSE_SMT2_D3_STS", BIT(22), 0}, + {} +}; + +static const struct pmc_bit_map wcl_pcdn_d3_status_3_map[] = { + {"THC0_D3_STS", BIT(14), 1}, + {"THC1_D3_STS", BIT(15), 1}, + {"OSSE_SMT3_D3_STS", BIT(16), 0}, + {"ACE_D3_STS", BIT(23), 0}, + {} +}; + +static const struct pmc_bit_map wcl_pcdn_vnn_req_status_0_map[] = { + {"LPSS_VNN_REQ_STS", BIT(3), 1}, + {"OSSE_VNN_REQ_STS", BIT(15), 1}, + {"ESPISPI_VNN_REQ_STS", BIT(18), 1}, + {} +}; + +static const struct pmc_bit_map wcl_pcdn_vnn_req_status_1_map[] = { + {"NPK_VNN_REQ_STS", BIT(4), 1}, + {"DFXAGG_VNN_REQ_STS", BIT(8), 0}, + {"EXI_VNN_REQ_STS", BIT(9), 1}, + {"OSSE_SMT1_VNN_REQ_STS", BIT(16), 1}, + {"P2D_VNN_REQ_STS", BIT(18), 1}, + {"GBE_VNN_REQ_STS", BIT(19), 1}, + {"SMB_VNN_REQ_STS", BIT(25), 1}, + {"LPC_VNN_REQ_STS", BIT(26), 0}, + {} +}; + +static const struct pmc_bit_map wcl_pcdn_vnn_req_status_2_map[] = { + {"CSMERTC_VNN_REQ_STS", BIT(1), 1}, + {"ESE_VNN_REQ_STS", BIT(2), 1}, + {"CSE_VNN_REQ_STS", BIT(4), 1}, + {"ISH_VNN_REQ_STS", BIT(7), 1}, + {"SMT1_VNN_REQ_STS", BIT(8), 1}, + {"CLINK_VNN_REQ_STS", BIT(14), 1}, + {"SMS1_VNN_REQ_STS", BIT(18), 1}, + {"SMS2_VNN_REQ_STS", BIT(19), 1}, + {"GPIOCOM4_VNN_REQ_STS", BIT(20), 1}, + {"GPIOCOM3_VNN_REQ_STS", BIT(21), 1}, + {"GPIOCOM1_VNN_REQ_STS", BIT(23), 1}, + {"GPIOCOM0_VNN_REQ_STS", BIT(24), 1}, + {"DISP_SHIM_VNN_REQ_STS", BIT(31), 1}, + {} +}; + +static const struct pmc_bit_map wcl_pcdn_vnn_misc_status_map[] = { + {"CPU_C10_REQ_STS", BIT(0), 0}, + {"TS_OFF_REQ_STS", BIT(1), 0}, + {"PNDE_MET_REQ_STS", BIT(2), 1}, + {"FW_THROTTLE_ALLOWED_REQ_STS", BIT(4), 0}, + {"VNN_SOC_REQ_STS", BIT(6), 1}, + {"ISH_VNNAON_REQ_STS", BIT(7), 0}, + {"D2D_NOC_CFI_QACTIVE_REQ_STS", BIT(8), 1}, + {"D2D_NOC_GPSB_QACTIVE_REQ_STS", BIT(9), 1}, + {"PLT_GREATER_REQ_STS", BIT(11), 1}, + {"ALL_SBR_IDLE_REQ_STS", BIT(12), 0}, + {"PMC_IDLE_FB_OCP_REQ_STS", BIT(13), 0}, + {"PM_SYNC_STATES_REQ_STS", BIT(14), 0}, + {"EA_REQ_STS", BIT(15), 0}, + {"MPHY_CORE_OFF_REQ_STS", BIT(16), 0}, + {"BRK_EV_EN_REQ_STS", BIT(17), 0}, + {"AUTO_DEMO_EN_REQ_STS", BIT(18), 0}, + {"ITSS_CLK_SRC_REQ_STS", BIT(19), 1}, + {"ARC_IDLE_REQ_STS", BIT(21), 0}, + {"FIA_DEEP_PM_REQ_STS", BIT(23), 0}, + {"XDCI_ATTACHED_REQ_STS", BIT(24), 1}, + {"ARC_INTERRUPT_WAKE_REQ_STS", BIT(25), 0}, + {"D2D_DISP_DDI_QACTIVE_REQ_STS", BIT(26), 1}, + {"PRE_WAKE0_REQ_STS", BIT(27), 1}, + {"PRE_WAKE1_REQ_STS", BIT(28), 1}, + {"PRE_WAKE2_REQ_STS", BIT(29), 
1}, + {} +}; + +static const struct pmc_bit_map wcl_pcdn_rsc_status_map[] = { + {"Memory", 0, 1}, + {"PSF0", 0, 1}, + {"PSF6", 0, 1}, + {"PSF8", 0, 1}, + {"SAF_CFI_LINK", 0, 1}, + {"SB", 0, 1}, + {} +}; + +static const struct pmc_bit_map *wcl_pcdn_lpm_maps[] = { + ptl_pcdp_clocksource_status_map, + wcl_pcdn_power_gating_status_0_map, + wcl_pcdn_power_gating_status_1_map, + wcl_pcdn_power_gating_status_2_map, + wcl_pcdn_d3_status_0_map, + wcl_pcdn_d3_status_1_map, + wcl_pcdn_d3_status_2_map, + wcl_pcdn_d3_status_3_map, + wcl_pcdn_vnn_req_status_0_map, + wcl_pcdn_vnn_req_status_1_map, + wcl_pcdn_vnn_req_status_2_map, + ptl_pcdp_vnn_req_status_3_map, + wcl_pcdn_vnn_misc_status_map, + ptl_pcdp_signal_status_map, + NULL +}; + +static const struct pmc_bit_map *wcl_pcdn_blk_maps[] = { + wcl_pcdn_power_gating_status_0_map, + wcl_pcdn_power_gating_status_1_map, + wcl_pcdn_power_gating_status_2_map, + wcl_pcdn_rsc_status_map, + wcl_pcdn_vnn_req_status_0_map, + wcl_pcdn_vnn_req_status_1_map, + wcl_pcdn_vnn_req_status_2_map, + ptl_pcdp_vnn_req_status_3_map, + wcl_pcdn_d3_status_0_map, + wcl_pcdn_d3_status_1_map, + wcl_pcdn_d3_status_2_map, + wcl_pcdn_d3_status_3_map, + ptl_pcdp_clocksource_status_map, + wcl_pcdn_vnn_misc_status_map, + ptl_pcdp_signal_status_map, + NULL +}; + +static const struct pmc_reg_map wcl_pcdn_reg_map = { + .pfear_sts = ext_wcl_pcdn_pfear_map, + .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET, + .slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP, + .ltr_show_sts = wcl_pcdn_ltr_show_map, + .msr_sts = msr_map, + .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET, + .regmap_length = WCL_PCD_PMC_MMIO_REG_LEN, + .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A, + .ppfear_buckets = LNL_PPFEAR_NUM_ENTRIES, + .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET, + .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT, + .lpm_num_maps = PTL_LPM_NUM_MAPS, + .ltr_ignore_max = LNL_NUM_IP_IGN_ALLOWED, + .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2, + .etr3_offset = ETR3_OFFSET, + .lpm_sts_latch_en_offset = MTL_LPM_STATUS_LATCH_EN_OFFSET, + .lpm_priority_offset = MTL_LPM_PRI_OFFSET, + .lpm_en_offset = MTL_LPM_EN_OFFSET, + .lpm_residency_offset = MTL_LPM_RESIDENCY_OFFSET, + .lpm_sts = wcl_pcdn_lpm_maps, + .lpm_status_offset = MTL_LPM_STATUS_OFFSET, + .lpm_live_status_offset = MTL_LPM_LIVE_STATUS_OFFSET, + .s0ix_blocker_maps = wcl_pcdn_blk_maps, + .s0ix_blocker_offset = LNL_S0IX_BLOCKER_OFFSET, + .num_s0ix_blocker = WCL_NUM_S0IX_BLOCKER, + .blocker_req_offset = WCL_BLK_REQ_OFFSET, + .lpm_req_guid = PCDN_LPM_REQ_GUID, +}; + +static struct pmc_info wcl_pmc_info_list[] = { + { + .devid = PMC_DEVID_WCL_PCDN, + .map = &wcl_pcdn_reg_map, + }, + {} +}; + +#define WCL_NPU_PCI_DEV 0xfd3e + +/* + * Set power state of select devices that do not have drivers to D3 + * so that they do not block Package C entry. 
+ */ +static void wcl_d3_fixup(void) +{ + pmc_core_set_device_d3(WCL_NPU_PCI_DEV); +} + +static int wcl_resume(struct pmc_dev *pmcdev) +{ + wcl_d3_fixup(); + return cnl_resume(pmcdev); +} + +static int wcl_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info) +{ + wcl_d3_fixup(); + return generic_core_init(pmcdev, pmc_dev_info); +} + +struct pmc_dev_info wcl_pmc_dev = { + .pci_func = 2, + .regmap_list = wcl_pmc_info_list, + .map = &wcl_pcdn_reg_map, + .sub_req_show = &pmc_core_substate_blk_req_fops, + .suspend = cnl_suspend, + .resume = wcl_resume, + .init = wcl_core_init, + .sub_req = pmc_core_pmt_get_blk_sub_req, +}; diff --git a/drivers/platform/x86/intel/pmt/Kconfig b/drivers/platform/x86/intel/pmt/Kconfig index e916fc966221..7363446b7773 100644 --- a/drivers/platform/x86/intel/pmt/Kconfig +++ b/drivers/platform/x86/intel/pmt/Kconfig @@ -18,6 +18,7 @@ config INTEL_PMT_CLASS config INTEL_PMT_TELEMETRY tristate "Intel Platform Monitoring Technology (PMT) Telemetry driver" depends on INTEL_VSEC + select INTEL_PMT_DISCOVERY select INTEL_PMT_CLASS help The Intel Platform Monitoring Technology (PMT) Telemetry driver provides @@ -38,3 +39,30 @@ config INTEL_PMT_CRASHLOG To compile this driver as a module, choose M here: the module will be called intel_pmt_crashlog. + +config INTEL_PMT_DISCOVERY + tristate "Intel Platform Monitoring Technology (PMT) Discovery driver" + depends on INTEL_VSEC + select INTEL_PMT_CLASS + help + The Intel Platform Monitoring Technology (PMT) discovery driver provides + access to details about the various PMT features and feature-specific + attributes. + + To compile this driver as a module, choose M here: the module + will be called pmt_discovery. + +config INTEL_PMT_KUNIT_TEST + tristate "KUnit tests for Intel PMT driver" + depends on INTEL_PMT_DISCOVERY + depends on INTEL_PMT_TELEMETRY || !INTEL_PMT_TELEMETRY + depends on KUNIT + help + Enable this option to compile and run a suite of KUnit tests for the Intel + Platform Monitoring Technology (PMT) driver. These tests are designed to + validate the driver's functionality, error handling, and overall stability, + helping developers catch regressions and ensure code quality during changes. + + This option is intended for development and testing environments. It is + recommended to disable it in production builds. To compile this driver as a + module, choose M here: the module will be called pmt-discovery-kunit. 
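For orientation, a KUnit module like the pmt-discovery-kunit one named above generally amounts to a kunit_suite plus a table of test cases registered with kunit_test_suite(). The sketch below is a minimal, hypothetical example of that shape; the suite name, test name, and assertion are illustrative assumptions, not the contents of discovery-kunit.c.

#include <kunit/test.h>
#include <linux/module.h>

/*
 * Hypothetical test case; a real suite would exercise the PMT discovery
 * driver's feature parsing and error handling.
 */
static void pmt_discovery_sanity_test(struct kunit *test)
{
	/* Trivial stand-in assertion. */
	KUNIT_EXPECT_EQ(test, 1 + 1, 2);
}

static struct kunit_case pmt_discovery_test_cases[] = {
	KUNIT_CASE(pmt_discovery_sanity_test),
	{}
};

static struct kunit_suite pmt_discovery_test_suite = {
	.name = "pmt_discovery",
	.test_cases = pmt_discovery_test_cases,
};
kunit_test_suite(pmt_discovery_test_suite);

MODULE_LICENSE("GPL");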
diff --git a/drivers/platform/x86/intel/pmt/Makefile b/drivers/platform/x86/intel/pmt/Makefile index 279e158c7c23..47f692c091c9 100644 --- a/drivers/platform/x86/intel/pmt/Makefile +++ b/drivers/platform/x86/intel/pmt/Makefile @@ -10,3 +10,7 @@ obj-$(CONFIG_INTEL_PMT_TELEMETRY) += pmt_telemetry.o pmt_telemetry-y := telemetry.o obj-$(CONFIG_INTEL_PMT_CRASHLOG) += pmt_crashlog.o pmt_crashlog-y := crashlog.o +obj-$(CONFIG_INTEL_PMT_DISCOVERY) += pmt_discovery.o +pmt_discovery-y := discovery.o features.o +obj-$(CONFIG_INTEL_PMT_KUNIT_TEST) += pmt-discovery-kunit.o +pmt-discovery-kunit-y := discovery-kunit.o diff --git a/drivers/platform/x86/intel/pmt/class.c b/drivers/platform/x86/intel/pmt/class.c index 46598dcb634a..7c3023d5d91d 100644 --- a/drivers/platform/x86/intel/pmt/class.c +++ b/drivers/platform/x86/intel/pmt/class.c @@ -9,15 +9,17 @@ */ #include <linux/kernel.h> +#include <linux/log2.h> +#include <linux/intel_vsec.h> #include <linux/io-64-nonatomic-lo-hi.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/pci.h> +#include <linux/sysfs.h> -#include "../vsec.h" #include "class.h" -#define PMT_XA_START 0 +#define PMT_XA_START 1 #define PMT_XA_MAX INT_MAX #define PMT_XA_LIMIT XA_LIMIT(PMT_XA_START, PMT_XA_MAX) #define GUID_SPR_PUNIT 0x9956f43f @@ -31,9 +33,9 @@ bool intel_pmt_is_early_client_hw(struct device *dev) * differences from the server platforms (which use the Out Of Band * Management Services Module OOBMSM). */ - return !!(ivdev->info->quirks & VSEC_QUIRK_EARLY_HW); + return !!(ivdev->quirks & VSEC_QUIRK_EARLY_HW); } -EXPORT_SYMBOL_GPL(intel_pmt_is_early_client_hw); +EXPORT_SYMBOL_NS_GPL(intel_pmt_is_early_client_hw, "INTEL_PMT"); static inline int pmt_memcpy64_fromio(void *to, const u64 __iomem *from, size_t count) @@ -58,12 +60,30 @@ pmt_memcpy64_fromio(void *to, const u64 __iomem *from, size_t count) return count; } +int pmt_telem_read_mmio(struct pci_dev *pdev, struct pmt_callbacks *cb, u32 guid, void *buf, + void __iomem *addr, loff_t off, u32 count) +{ + if (cb && cb->read_telem) + return cb->read_telem(pdev, guid, buf, off, count); + + addr += off; + + if (guid == GUID_SPR_PUNIT) + /* PUNIT on SPR only supports aligned 64-bit read */ + return pmt_memcpy64_fromio(buf, addr, count); + + memcpy_fromio(buf, addr, count); + + return count; +} +EXPORT_SYMBOL_NS_GPL(pmt_telem_read_mmio, "INTEL_PMT"); + /* * sysfs */ static ssize_t intel_pmt_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, char *buf, loff_t off, + const struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct intel_pmt_entry *entry = container_of(attr, @@ -79,18 +99,15 @@ intel_pmt_read(struct file *filp, struct kobject *kobj, if (count > entry->size - off) count = entry->size - off; - if (entry->guid == GUID_SPR_PUNIT) - /* PUNIT on SPR only supports aligned 64-bit read */ - count = pmt_memcpy64_fromio(buf, entry->base + off, count); - else - memcpy_fromio(buf, entry->base + off, count); + count = pmt_telem_read_mmio(entry->pcidev, entry->cb, entry->header.guid, buf, + entry->base, off, count); return count; } static int intel_pmt_mmap(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, struct vm_area_struct *vma) + const struct bin_attribute *attr, struct vm_area_struct *vma) { struct intel_pmt_entry *entry = container_of(attr, struct intel_pmt_entry, @@ -151,20 +168,49 @@ static struct attribute *intel_pmt_attrs[] = { &dev_attr_offset.attr, NULL }; -ATTRIBUTE_GROUPS(intel_pmt); -static struct class intel_pmt_class = { +static umode_t 
intel_pmt_attr_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct auxiliary_device *auxdev = to_auxiliary_dev(dev->parent); + struct intel_vsec_device *ivdev = auxdev_to_ivdev(auxdev); + + /* + * Place the discovery features folder in /sys/class/intel_pmt, but + * exclude the common attributes as they are not applicable. + */ + if (ivdev->cap_id == ilog2(VSEC_CAP_DISCOVERY)) + return 0; + + return attr->mode; +} + +static bool intel_pmt_group_visible(struct kobject *kobj) +{ + return true; +} +DEFINE_SYSFS_GROUP_VISIBLE(intel_pmt); + +static const struct attribute_group intel_pmt_group = { + .attrs = intel_pmt_attrs, + .is_visible = SYSFS_GROUP_VISIBLE(intel_pmt), +}; +__ATTRIBUTE_GROUPS(intel_pmt); + +struct class intel_pmt_class = { .name = "intel_pmt", - .owner = THIS_MODULE, .dev_groups = intel_pmt_groups, }; +EXPORT_SYMBOL_GPL(intel_pmt_class); static int intel_pmt_populate_entry(struct intel_pmt_entry *entry, - struct intel_pmt_header *header, - struct device *dev, + struct intel_vsec_device *ivdev, struct resource *disc_res) { - struct pci_dev *pci_dev = to_pci_dev(dev->parent); + struct pci_dev *pci_dev = ivdev->pcidev; + struct device *dev = &ivdev->auxdev.dev; + struct intel_pmt_header *header = &entry->header; u8 bir; /* @@ -194,7 +240,7 @@ static int intel_pmt_populate_entry(struct intel_pmt_entry *entry, /* * Some hardware uses a different calculation for the base address * when access_type == ACCESS_LOCAL. On these systems - * ACCCESS_LOCAL refers to an address in the same BAR as the + * ACCESS_LOCAL refers to an address in the same BAR as the * header but at a fixed offset. But as the header address was * supplied to the driver, we don't know which BAR it was in. * So search for the BAR whose range includes the header address. @@ -216,6 +262,13 @@ static int intel_pmt_populate_entry(struct intel_pmt_entry *entry, break; case ACCESS_BARID: + /* Use the provided base address if it exists */ + if (ivdev->base_addr) { + entry->base_addr = ivdev->base_addr + + GET_ADDRESS(header->base_offset); + break; + } + + /* + * If another BAR was specified then the base offset + * represents the offset within that BAR. 
So retrieve the @@ -230,8 +283,10 @@ static int intel_pmt_populate_entry(struct intel_pmt_entry *entry, return -EINVAL; } + entry->pcidev = pci_dev; entry->guid = header->guid; entry->size = header->size; + entry->cb = ivdev->priv_data; return 0; } @@ -240,6 +295,7 @@ static int intel_pmt_dev_register(struct intel_pmt_entry *entry, struct intel_pmt_namespace *ns, struct device *parent) { + struct intel_vsec_device *ivdev = dev_to_ivdev(parent); struct resource res = {0}; struct device *dev; int ret; @@ -260,10 +316,10 @@ static int intel_pmt_dev_register(struct intel_pmt_entry *entry, entry->kobj = &dev->kobj; - if (ns->attr_grp) { - ret = sysfs_create_group(entry->kobj, ns->attr_grp); + if (entry->attr_grp) { + ret = sysfs_create_group(entry->kobj, entry->attr_grp); if (ret) - goto fail_sysfs; + goto fail_sysfs_create_group; } /* if size is 0 assume no data buffer, so no file needed */ @@ -288,13 +344,23 @@ static int intel_pmt_dev_register(struct intel_pmt_entry *entry, entry->pmt_bin_attr.size = entry->size; ret = sysfs_create_bin_file(&dev->kobj, &entry->pmt_bin_attr); - if (!ret) - return 0; + if (ret) + goto fail_ioremap; + + if (ns->pmt_add_endpoint) { + ret = ns->pmt_add_endpoint(ivdev, entry); + if (ret) + goto fail_add_endpoint; + } + + return 0; +fail_add_endpoint: + sysfs_remove_bin_file(entry->kobj, &entry->pmt_bin_attr); fail_ioremap: - if (ns->attr_grp) - sysfs_remove_group(entry->kobj, ns->attr_grp); -fail_sysfs: + if (entry->attr_grp) + sysfs_remove_group(entry->kobj, entry->attr_grp); +fail_sysfs_create_group: device_unregister(dev); fail_dev_create: xa_erase(ns->xa, entry->devid); @@ -306,7 +372,6 @@ int intel_pmt_dev_create(struct intel_pmt_entry *entry, struct intel_pmt_namespa struct intel_vsec_device *intel_vsec_dev, int idx) { struct device *dev = &intel_vsec_dev->auxdev.dev; - struct intel_pmt_header header; struct resource *disc_res; int ret; @@ -316,18 +381,17 @@ int intel_pmt_dev_create(struct intel_pmt_entry *entry, struct intel_pmt_namespa if (IS_ERR(entry->disc_table)) return PTR_ERR(entry->disc_table); - ret = ns->pmt_header_decode(entry, &header, dev); + ret = ns->pmt_header_decode(entry, dev); if (ret) return ret; - ret = intel_pmt_populate_entry(entry, &header, dev, disc_res); + ret = intel_pmt_populate_entry(entry, intel_vsec_dev, disc_res); if (ret) return ret; return intel_pmt_dev_register(entry, ns, dev); - } -EXPORT_SYMBOL_GPL(intel_pmt_dev_create); +EXPORT_SYMBOL_NS_GPL(intel_pmt_dev_create, "INTEL_PMT"); void intel_pmt_dev_destroy(struct intel_pmt_entry *entry, struct intel_pmt_namespace *ns) @@ -337,13 +401,13 @@ void intel_pmt_dev_destroy(struct intel_pmt_entry *entry, if (entry->size) sysfs_remove_bin_file(entry->kobj, &entry->pmt_bin_attr); - if (ns->attr_grp) - sysfs_remove_group(entry->kobj, ns->attr_grp); + if (entry->attr_grp) + sysfs_remove_group(entry->kobj, entry->attr_grp); device_unregister(dev); xa_erase(ns->xa, entry->devid); } -EXPORT_SYMBOL_GPL(intel_pmt_dev_destroy); +EXPORT_SYMBOL_NS_GPL(intel_pmt_dev_destroy, "INTEL_PMT"); static int __init pmt_class_init(void) { diff --git a/drivers/platform/x86/intel/pmt/class.h b/drivers/platform/x86/intel/pmt/class.h index db11d58867ce..3c5ad5f52bca 100644 --- a/drivers/platform/x86/intel/pmt/class.h +++ b/drivers/platform/x86/intel/pmt/class.h @@ -2,13 +2,14 @@ #ifndef _INTEL_PMT_CLASS_H #define _INTEL_PMT_CLASS_H +#include <linux/intel_vsec.h> #include <linux/xarray.h> #include <linux/types.h> #include <linux/bits.h> #include <linux/err.h> #include <linux/io.h> -#include "../vsec.h"
+#include "telemetry.h" /* PMT access types */ #define ACCESS_BARID 2 @@ -18,37 +19,64 @@ #define GET_BIR(v) ((v) & GENMASK(2, 0)) #define GET_ADDRESS(v) ((v) & GENMASK(31, 3)) +struct pci_dev; +extern struct class intel_pmt_class; + +struct telem_endpoint { + struct pci_dev *pcidev; + struct telem_header header; + struct pmt_callbacks *cb; + void __iomem *base; + bool present; + struct kref kref; +}; + +struct intel_pmt_header { + u32 base_offset; + u32 size; + u32 guid; + u8 access_type; +}; + struct intel_pmt_entry { + struct telem_endpoint *ep; + struct pci_dev *pcidev; + struct intel_pmt_header header; struct bin_attribute pmt_bin_attr; + const struct attribute_group *attr_grp; struct kobject *kobj; void __iomem *disc_table; void __iomem *base; + struct pmt_callbacks *cb; unsigned long base_addr; size_t size; + u64 feature_flags; u32 guid; + u32 num_rmids; /* Number of Resource Monitoring IDs */ int devid; }; -struct intel_pmt_header { - u32 base_offset; - u32 size; - u32 guid; - u8 access_type; -}; - struct intel_pmt_namespace { const char *name; struct xarray *xa; - const struct attribute_group *attr_grp; int (*pmt_header_decode)(struct intel_pmt_entry *entry, - struct intel_pmt_header *header, struct device *dev); + int (*pmt_add_endpoint)(struct intel_vsec_device *ivdev, + struct intel_pmt_entry *entry); }; +int pmt_telem_read_mmio(struct pci_dev *pdev, struct pmt_callbacks *cb, u32 guid, void *buf, + void __iomem *addr, loff_t off, u32 count); bool intel_pmt_is_early_client_hw(struct device *dev); int intel_pmt_dev_create(struct intel_pmt_entry *entry, struct intel_pmt_namespace *ns, struct intel_vsec_device *dev, int idx); void intel_pmt_dev_destroy(struct intel_pmt_entry *entry, struct intel_pmt_namespace *ns); +#if IS_ENABLED(CONFIG_INTEL_PMT_DISCOVERY) +void intel_pmt_get_features(struct intel_pmt_entry *entry); +#else +static inline void intel_pmt_get_features(struct intel_pmt_entry *entry) {} +#endif + #endif diff --git a/drivers/platform/x86/intel/pmt/crashlog.c b/drivers/platform/x86/intel/pmt/crashlog.c index ace1239bc0a0..b0393c9c5b4b 100644 --- a/drivers/platform/x86/intel/pmt/crashlog.c +++ b/drivers/platform/x86/intel/pmt/crashlog.c @@ -9,34 +9,21 @@ */ #include <linux/auxiliary_bus.h> +#include <linux/cleanup.h> +#include <linux/intel_vsec.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/mutex.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/overflow.h> -#include "../vsec.h" #include "class.h" /* Crashlog discovery header types */ #define CRASH_TYPE_OOBMSM 1 -/* Control Flags */ -#define CRASHLOG_FLAG_DISABLE BIT(28) - -/* - * Bits 29 and 30 control the state of bit 31. - * - * Bit 29 will clear bit 31, if set, allowing a new crashlog to be captured. - * Bit 30 will immediately trigger a crashlog to be generated, setting bit 31. - * Bit 31 is the read-only status with a 1 indicating log is complete. - */ -#define CRASHLOG_FLAG_TRIGGER_CLEAR BIT(29) -#define CRASHLOG_FLAG_TRIGGER_EXECUTE BIT(30) -#define CRASHLOG_FLAG_TRIGGER_COMPLETE BIT(31) -#define CRASHLOG_FLAG_TRIGGER_MASK GENMASK(31, 28) - /* Crashlog Discovery Header */ #define CONTROL_OFFSET 0x0 #define GUID_OFFSET 0x4 @@ -48,10 +35,84 @@ /* size is in bytes */ #define GET_SIZE(v) ((v) * sizeof(u32)) +/* + * Type 1 Version 0 + * status and control registers are combined. + * + * Bits 29 and 30 control the state of bit 31. + * Bit 29 will clear bit 31, if set, allowing a new crashlog to be captured. 
+ * Bit 30 will immediately trigger a crashlog to be generated, setting bit 31. + * Bit 31 is the read-only status with a 1 indicating log is complete. + */ +#define TYPE1_VER0_STATUS_OFFSET 0x00 +#define TYPE1_VER0_CONTROL_OFFSET 0x00 + +#define TYPE1_VER0_DISABLE BIT(28) +#define TYPE1_VER0_CLEAR BIT(29) +#define TYPE1_VER0_EXECUTE BIT(30) +#define TYPE1_VER0_COMPLETE BIT(31) +#define TYPE1_VER0_TRIGGER_MASK GENMASK(31, 28) + +/* + * Type 1 Version 2 + * status and control are different registers + */ +#define TYPE1_VER2_STATUS_OFFSET 0x00 +#define TYPE1_VER2_CONTROL_OFFSET 0x14 + +/* status register */ +#define TYPE1_VER2_CLEAR_SUPPORT BIT(20) +#define TYPE1_VER2_REARMED BIT(25) +#define TYPE1_VER2_ERROR BIT(26) +#define TYPE1_VER2_CONSUMED BIT(27) +#define TYPE1_VER2_DISABLED BIT(28) +#define TYPE1_VER2_CLEARED BIT(29) +#define TYPE1_VER2_IN_PROGRESS BIT(30) +#define TYPE1_VER2_COMPLETE BIT(31) + +/* control register */ +#define TYPE1_VER2_CONSUME BIT(25) +#define TYPE1_VER2_REARM BIT(28) +#define TYPE1_VER2_EXECUTE BIT(29) +#define TYPE1_VER2_CLEAR BIT(30) +#define TYPE1_VER2_DISABLE BIT(31) +#define TYPE1_VER2_TRIGGER_MASK \ + (TYPE1_VER2_EXECUTE | TYPE1_VER2_CLEAR | TYPE1_VER2_DISABLE) + +/* After offset, order alphabetically, not bit ordered */ +struct crashlog_status { + u32 offset; + u32 clear_supported; + u32 cleared; + u32 complete; + u32 consumed; + u32 disabled; + u32 error; + u32 in_progress; + u32 rearmed; +}; + +struct crashlog_control { + u32 offset; + u32 trigger_mask; + u32 clear; + u32 consume; + u32 disable; + u32 manual; + u32 rearm; +}; + +struct crashlog_info { + const struct crashlog_status status; + const struct crashlog_control control; + const struct attribute_group *attr_grp; +}; + struct crashlog_entry { /* entry must be first member of struct */ struct intel_pmt_entry entry; struct mutex control_mutex; + const struct crashlog_info *info; }; struct pmt_crashlog_priv { @@ -62,180 +123,397 @@ struct pmt_crashlog_priv { /* * I/O */ -static bool pmt_crashlog_complete(struct intel_pmt_entry *entry) + +/* Read, modify, write the control register, setting or clearing @bit based on @set */ +static void pmt_crashlog_rmw(struct crashlog_entry *crashlog, u32 bit, bool set) { - u32 control = readl(entry->disc_table + CONTROL_OFFSET); + const struct crashlog_control *control = &crashlog->info->control; + struct intel_pmt_entry *entry = &crashlog->entry; + u32 reg = readl(entry->disc_table + control->offset); - /* return current value of the crashlog complete flag */ - return !!(control & CRASHLOG_FLAG_TRIGGER_COMPLETE); + reg &= ~control->trigger_mask; + + if (set) + reg |= bit; + else + reg &= ~bit; + + writel(reg, entry->disc_table + control->offset); } -static bool pmt_crashlog_disabled(struct intel_pmt_entry *entry) +/* Read the status register and see if the specified @bit is set */ +static bool pmt_crashlog_rc(struct crashlog_entry *crashlog, u32 bit) { - u32 control = readl(entry->disc_table + CONTROL_OFFSET); + const struct crashlog_status *status = &crashlog->info->status; + u32 reg = readl(crashlog->entry.disc_table + status->offset); + return !!(reg & bit); +} + +static bool pmt_crashlog_complete(struct crashlog_entry *crashlog) +{ + /* return current value of the crashlog complete flag */ + return pmt_crashlog_rc(crashlog, crashlog->info->status.complete); +} + +static bool pmt_crashlog_disabled(struct crashlog_entry *crashlog) +{ /* return current value of the crashlog disabled flag */ - return !!(control & CRASHLOG_FLAG_DISABLE); + return 
pmt_crashlog_rc(crashlog, crashlog->info->status.disabled); } -static bool pmt_crashlog_supported(struct intel_pmt_entry *entry) +static bool pmt_crashlog_supported(struct intel_pmt_entry *entry, u32 *crash_type, u32 *version) { u32 discovery_header = readl(entry->disc_table + CONTROL_OFFSET); - u32 crash_type, version; - crash_type = GET_TYPE(discovery_header); - version = GET_VERSION(discovery_header); + *crash_type = GET_TYPE(discovery_header); + *version = GET_VERSION(discovery_header); /* - * Currently we only recognize OOBMSM version 0 devices. - * We can ignore all other crashlog devices in the system. + * Currently we only recognize OOBMSM (type 1) and version 0 or 2 + * devices. + * + * Ignore all other crashlog devices in the system. */ - return crash_type == CRASH_TYPE_OOBMSM && version == 0; + if (*crash_type == CRASH_TYPE_OOBMSM && (*version == 0 || *version == 2)) + return true; + + return false; } -static void pmt_crashlog_set_disable(struct intel_pmt_entry *entry, +static void pmt_crashlog_set_disable(struct crashlog_entry *crashlog, bool disable) { - u32 control = readl(entry->disc_table + CONTROL_OFFSET); - - /* clear trigger bits so we are only modifying disable flag */ - control &= ~CRASHLOG_FLAG_TRIGGER_MASK; + pmt_crashlog_rmw(crashlog, crashlog->info->control.disable, disable); +} - if (disable) - control |= CRASHLOG_FLAG_DISABLE; - else - control &= ~CRASHLOG_FLAG_DISABLE; +static void pmt_crashlog_set_clear(struct crashlog_entry *crashlog) +{ + pmt_crashlog_rmw(crashlog, crashlog->info->control.clear, true); +} - writel(control, entry->disc_table + CONTROL_OFFSET); +static void pmt_crashlog_set_execute(struct crashlog_entry *crashlog) +{ + pmt_crashlog_rmw(crashlog, crashlog->info->control.manual, true); } -static void pmt_crashlog_set_clear(struct intel_pmt_entry *entry) +static bool pmt_crashlog_cleared(struct crashlog_entry *crashlog) { - u32 control = readl(entry->disc_table + CONTROL_OFFSET); + return pmt_crashlog_rc(crashlog, crashlog->info->status.cleared); +} - control &= ~CRASHLOG_FLAG_TRIGGER_MASK; - control |= CRASHLOG_FLAG_TRIGGER_CLEAR; +static bool pmt_crashlog_consumed(struct crashlog_entry *crashlog) +{ + return pmt_crashlog_rc(crashlog, crashlog->info->status.consumed); +} - writel(control, entry->disc_table + CONTROL_OFFSET); +static void pmt_crashlog_set_consumed(struct crashlog_entry *crashlog) +{ + pmt_crashlog_rmw(crashlog, crashlog->info->control.consume, true); } -static void pmt_crashlog_set_execute(struct intel_pmt_entry *entry) +static bool pmt_crashlog_error(struct crashlog_entry *crashlog) { - u32 control = readl(entry->disc_table + CONTROL_OFFSET); + return pmt_crashlog_rc(crashlog, crashlog->info->status.error); +} - control &= ~CRASHLOG_FLAG_TRIGGER_MASK; - control |= CRASHLOG_FLAG_TRIGGER_EXECUTE; +static bool pmt_crashlog_rearm(struct crashlog_entry *crashlog) +{ + return pmt_crashlog_rc(crashlog, crashlog->info->status.rearmed); +} - writel(control, entry->disc_table + CONTROL_OFFSET); +static void pmt_crashlog_set_rearm(struct crashlog_entry *crashlog) +{ + pmt_crashlog_rmw(crashlog, crashlog->info->control.rearm, true); } /* * sysfs */ static ssize_t +clear_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct crashlog_entry *crashlog = dev_get_drvdata(dev); + bool cleared = pmt_crashlog_cleared(crashlog); + + return sysfs_emit(buf, "%d\n", cleared); +} + +static ssize_t +clear_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct crashlog_entry *crashlog; 
+ bool clear; + int result; + + crashlog = dev_get_drvdata(dev); + + result = kstrtobool(buf, &clear); + if (result) + return result; + + /* set bit only */ + if (!clear) + return -EINVAL; + + guard(mutex)(&crashlog->control_mutex); + + pmt_crashlog_set_clear(crashlog); + + return count; +} +static DEVICE_ATTR_RW(clear); + +static ssize_t +consumed_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct crashlog_entry *crashlog = dev_get_drvdata(dev); + bool consumed = pmt_crashlog_consumed(crashlog); + + return sysfs_emit(buf, "%d\n", consumed); +} + +static ssize_t +consumed_store(struct device *dev, struct device_attribute *attr, const char *buf, + size_t count) +{ + struct crashlog_entry *crashlog; + bool consumed; + int result; + + crashlog = dev_get_drvdata(dev); + + result = kstrtobool(buf, &consumed); + if (result) + return result; + + /* set bit only */ + if (!consumed) + return -EINVAL; + + guard(mutex)(&crashlog->control_mutex); + + if (pmt_crashlog_disabled(crashlog)) + return -EBUSY; + + if (!pmt_crashlog_complete(crashlog)) + return -EEXIST; + + pmt_crashlog_set_consumed(crashlog); + + return count; +} +static DEVICE_ATTR_RW(consumed); + +static ssize_t enable_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct intel_pmt_entry *entry = dev_get_drvdata(dev); - int enabled = !pmt_crashlog_disabled(entry); + struct crashlog_entry *crashlog = dev_get_drvdata(dev); + bool enabled = !pmt_crashlog_disabled(crashlog); return sprintf(buf, "%d\n", enabled); } static ssize_t enable_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) + const char *buf, size_t count) { - struct crashlog_entry *entry; + struct crashlog_entry *crashlog; bool enabled; int result; - entry = dev_get_drvdata(dev); + crashlog = dev_get_drvdata(dev); result = kstrtobool(buf, &enabled); if (result) return result; - mutex_lock(&entry->control_mutex); - pmt_crashlog_set_disable(&entry->entry, !enabled); - mutex_unlock(&entry->control_mutex); + guard(mutex)(&crashlog->control_mutex); + + pmt_crashlog_set_disable(crashlog, !enabled); return count; } static DEVICE_ATTR_RW(enable); static ssize_t +error_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct crashlog_entry *crashlog = dev_get_drvdata(dev); + bool error = pmt_crashlog_error(crashlog); + + return sysfs_emit(buf, "%d\n", error); +} +static DEVICE_ATTR_RO(error); + +static ssize_t +rearm_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct crashlog_entry *crashlog = dev_get_drvdata(dev); + int rearmed = pmt_crashlog_rearm(crashlog); + + return sysfs_emit(buf, "%d\n", rearmed); +} + +static ssize_t +rearm_store(struct device *dev, struct device_attribute *attr, const char *buf, + size_t count) +{ + struct crashlog_entry *crashlog; + bool rearm; + int result; + + crashlog = dev_get_drvdata(dev); + + result = kstrtobool(buf, &rearm); + if (result) + return result; + + /* set only */ + if (!rearm) + return -EINVAL; + + guard(mutex)(&crashlog->control_mutex); + + pmt_crashlog_set_rearm(crashlog); + + return count; +} +static DEVICE_ATTR_RW(rearm); + +static ssize_t trigger_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct intel_pmt_entry *entry; - int trigger; + struct crashlog_entry *crashlog; + bool trigger; - entry = dev_get_drvdata(dev); - trigger = pmt_crashlog_complete(entry); + crashlog = dev_get_drvdata(dev); + trigger = pmt_crashlog_complete(crashlog); return sprintf(buf, "%d\n", trigger); } 
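Taken together, the new clear/consumed/rearm/trigger files form a capture hand-shake for the version 2 interface (consumed and rearm exist only there). A minimal userspace sketch of the intended flow, assuming a hypothetical instance named crashlog1 under /sys/class/intel_pmt (the actual node name depends on enumeration):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define CRASHLOG_DIR "/sys/class/intel_pmt/crashlog1/"

static int write_attr(const char *attr, const char *val)
{
	char path[128];
	int fd;

	snprintf(path, sizeof(path), CRASHLOG_DIR "%s", attr);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	char status = '0';
	int fd;

	/* Request a manual capture; the write fails with EBUSY if disabled */
	if (write_attr("trigger", "1"))
		return 1;

	/* trigger reads back 1 once the log is complete */
	while (status != '1') {
		fd = open(CRASHLOG_DIR "trigger", O_RDONLY);
		if (fd < 0 || read(fd, &status, 1) != 1)
			return 1;
		close(fd);
		usleep(10000);
	}

	/* Extract the log via the binary sysfs file (elided), then hand-shake */
	write_attr("consumed", "1");	/* mark the completed log as read */
	write_attr("rearm", "1");	/* allow the next capture */
	return 0;
}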
static ssize_t trigger_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) + const char *buf, size_t count) { - struct crashlog_entry *entry; + struct crashlog_entry *crashlog; bool trigger; int result; - entry = dev_get_drvdata(dev); + crashlog = dev_get_drvdata(dev); result = kstrtobool(buf, &trigger); if (result) return result; - mutex_lock(&entry->control_mutex); + guard(mutex)(&crashlog->control_mutex); + + /* if device is currently disabled, return busy */ + if (pmt_crashlog_disabled(crashlog)) + return -EBUSY; if (!trigger) { - pmt_crashlog_set_clear(&entry->entry); - } else if (pmt_crashlog_complete(&entry->entry)) { - /* we cannot trigger a new crash if one is still pending */ - result = -EEXIST; - goto err; - } else if (pmt_crashlog_disabled(&entry->entry)) { - /* if device is currently disabled, return busy */ - result = -EBUSY; - goto err; - } else { - pmt_crashlog_set_execute(&entry->entry); + pmt_crashlog_set_clear(crashlog); + return count; } - result = count; -err: - mutex_unlock(&entry->control_mutex); - return result; + /* we cannot trigger a new crash if one is still pending */ + if (pmt_crashlog_complete(crashlog)) + return -EEXIST; + + pmt_crashlog_set_execute(crashlog); + + return count; } static DEVICE_ATTR_RW(trigger); -static struct attribute *pmt_crashlog_attrs[] = { +static struct attribute *pmt_crashlog_type1_ver0_attrs[] = { &dev_attr_enable.attr, &dev_attr_trigger.attr, NULL }; -static const struct attribute_group pmt_crashlog_group = { - .attrs = pmt_crashlog_attrs, +static struct attribute *pmt_crashlog_type1_ver2_attrs[] = { + &dev_attr_clear.attr, + &dev_attr_consumed.attr, + &dev_attr_enable.attr, + &dev_attr_error.attr, + &dev_attr_rearm.attr, + &dev_attr_trigger.attr, + NULL +}; + +static const struct attribute_group pmt_crashlog_type1_ver0_group = { + .attrs = pmt_crashlog_type1_ver0_attrs, +}; + +static const struct attribute_group pmt_crashlog_type1_ver2_group = { + .attrs = pmt_crashlog_type1_ver2_attrs, +}; + +static const struct crashlog_info crashlog_type1_ver0 = { + .status.offset = TYPE1_VER0_STATUS_OFFSET, + .status.cleared = TYPE1_VER0_CLEAR, + .status.complete = TYPE1_VER0_COMPLETE, + .status.disabled = TYPE1_VER0_DISABLE, + + .control.offset = TYPE1_VER0_CONTROL_OFFSET, + .control.trigger_mask = TYPE1_VER0_TRIGGER_MASK, + .control.clear = TYPE1_VER0_CLEAR, + .control.disable = TYPE1_VER0_DISABLE, + .control.manual = TYPE1_VER0_EXECUTE, + .attr_grp = &pmt_crashlog_type1_ver0_group, +}; + +static const struct crashlog_info crashlog_type1_ver2 = { + .status.offset = TYPE1_VER2_STATUS_OFFSET, + .status.clear_supported = TYPE1_VER2_CLEAR_SUPPORT, + .status.cleared = TYPE1_VER2_CLEARED, + .status.complete = TYPE1_VER2_COMPLETE, + .status.consumed = TYPE1_VER2_CONSUMED, + .status.disabled = TYPE1_VER2_DISABLED, + .status.error = TYPE1_VER2_ERROR, + .status.in_progress = TYPE1_VER2_IN_PROGRESS, + .status.rearmed = TYPE1_VER2_REARMED, + + .control.offset = TYPE1_VER2_CONTROL_OFFSET, + .control.trigger_mask = TYPE1_VER2_TRIGGER_MASK, + .control.clear = TYPE1_VER2_CLEAR, + .control.consume = TYPE1_VER2_CONSUME, + .control.disable = TYPE1_VER2_DISABLE, + .control.manual = TYPE1_VER2_EXECUTE, + .control.rearm = TYPE1_VER2_REARM, + .attr_grp = &pmt_crashlog_type1_ver2_group, }; +static const struct crashlog_info *select_crashlog_info(u32 type, u32 version) +{ + if (version == 0) + return &crashlog_type1_ver0; + + return &crashlog_type1_ver2; +} + static int pmt_crashlog_header_decode(struct intel_pmt_entry *entry, - 
struct intel_pmt_header *header, struct device *dev) { void __iomem *disc_table = entry->disc_table; + struct intel_pmt_header *header = &entry->header; struct crashlog_entry *crashlog; + u32 version; + u32 type; - if (!pmt_crashlog_supported(entry)) + if (!pmt_crashlog_supported(entry, &type, &version)) return 1; - /* initialize control mutex */ + /* initialize the crashlog struct */ crashlog = container_of(entry, struct crashlog_entry, entry); mutex_init(&crashlog->control_mutex); + crashlog->info = select_crashlog_info(type, version); + header->access_type = GET_ACCESS(readl(disc_table)); header->guid = readl(disc_table + GUID_OFFSET); header->base_offset = readl(disc_table + BASE_OFFSET); @@ -243,6 +521,8 @@ static int pmt_crashlog_header_decode(struct intel_pmt_entry *entry, /* Size is measured in DWORDS, but accessor returns bytes */ header->size = GET_SIZE(readl(disc_table + SIZE_OFFSET)); + entry->attr_grp = crashlog->info->attr_grp; + return 0; } @@ -250,7 +530,6 @@ static DEFINE_XARRAY_ALLOC(crashlog_array); static struct intel_pmt_namespace pmt_crashlog_ns = { .name = "crashlog", .xa = &crashlog_array, - .attr_grp = &pmt_crashlog_group, .pmt_header_decode = pmt_crashlog_header_decode, }; @@ -262,8 +541,12 @@ static void pmt_crashlog_remove(struct auxiliary_device *auxdev) struct pmt_crashlog_priv *priv = auxiliary_get_drvdata(auxdev); int i; - for (i = 0; i < priv->num_entries; i++) - intel_pmt_dev_destroy(&priv->entry[i].entry, &pmt_crashlog_ns); + for (i = 0; i < priv->num_entries; i++) { + struct crashlog_entry *crashlog = &priv->entry[i]; + + intel_pmt_dev_destroy(&crashlog->entry, &pmt_crashlog_ns); + mutex_destroy(&crashlog->control_mutex); + } } static int pmt_crashlog_probe(struct auxiliary_device *auxdev, @@ -328,3 +611,4 @@ module_exit(pmt_crashlog_exit); MODULE_AUTHOR("Alexander Duyck <alexander.h.duyck@linux.intel.com>"); MODULE_DESCRIPTION("Intel PMT Crashlog driver"); MODULE_LICENSE("GPL v2"); +MODULE_IMPORT_NS("INTEL_PMT"); diff --git a/drivers/platform/x86/intel/pmt/discovery-kunit.c b/drivers/platform/x86/intel/pmt/discovery-kunit.c new file mode 100644 index 000000000000..f44eb41d58f6 --- /dev/null +++ b/drivers/platform/x86/intel/pmt/discovery-kunit.c @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel Platform Monitoring Technology Discovery KUNIT tests + * + * Copyright (c) 2025, Intel Corporation. + * All Rights Reserved.
+ */ + +#include <kunit/test.h> +#include <linux/err.h> +#include <linux/intel_pmt_features.h> +#include <linux/intel_vsec.h> +#include <linux/module.h> +#include <linux/slab.h> + +#define PMT_FEATURE_COUNT (FEATURE_MAX + 1) + +static void +validate_pmt_regions(struct kunit *test, struct pmt_feature_group *feature_group, int feature_id) +{ + int i; + + kunit_info(test, "Feature ID %d [%s] has %d regions.\n", feature_id, + pmt_feature_names[feature_id], feature_group->count); + + for (i = 0; i < feature_group->count; i++) { + struct telemetry_region *region = &feature_group->regions[i]; + + kunit_info(test, " - Region %d: cdie_mask=%u, package_id=%u, partition=%u, segment=%u,", + i, region->plat_info.cdie_mask, region->plat_info.package_id, + region->plat_info.partition, region->plat_info.segment); + kunit_info(test, "\t\tbus=%u, device=%u, function=%u, guid=0x%x,", + region->plat_info.bus_number, region->plat_info.device_number, + region->plat_info.function_number, region->guid); + kunit_info(test, "\t\taddr=%p, size=%zu, num_rmids=%u", region->addr, region->size, + region->num_rmids); + + KUNIT_ASSERT_GE(test, region->plat_info.cdie_mask, 0); + KUNIT_ASSERT_GE(test, region->plat_info.package_id, 0); + KUNIT_ASSERT_GE(test, region->plat_info.partition, 0); + KUNIT_ASSERT_GE(test, region->plat_info.segment, 0); + KUNIT_ASSERT_GE(test, region->plat_info.bus_number, 0); + KUNIT_ASSERT_GE(test, region->plat_info.device_number, 0); + KUNIT_ASSERT_GE(test, region->plat_info.function_number, 0); + + KUNIT_ASSERT_NE(test, region->guid, 0); + + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, (__force const void *)region->addr); + } +} + +static void linebreak(struct kunit *test) +{ + kunit_info(test, "*****************************************************************************\n"); +} + +static void test_intel_pmt_get_regions_by_feature(struct kunit *test) +{ + struct pmt_feature_group *feature_group; + int num_available = 0; + int feature_id; + + /* Iterate through all possible feature IDs */ + for (feature_id = 1; feature_id < PMT_FEATURE_COUNT; feature_id++, linebreak(test)) { + const char *name; + + if (!pmt_feature_id_is_valid(feature_id)) + continue; + + name = pmt_feature_names[feature_id]; + + feature_group = intel_pmt_get_regions_by_feature(feature_id); + if (IS_ERR(feature_group)) { + if (PTR_ERR(feature_group) == -ENOENT) + kunit_warn(test, "intel_pmt_get_regions_by_feature() reports that feature %d [%s] is not present.\n", + feature_id, name); + else + kunit_warn(test, "intel_pmt_get_regions_by_feature() returned error %ld while attempting to look up %d [%s].\n", + PTR_ERR(feature_group), feature_id, name); + + continue; + } + + if (!feature_group) { + kunit_warn(test, "Feature ID %d: %s is not available.\n", feature_id, name); + continue; + } + + num_available++; + + validate_pmt_regions(test, feature_group, feature_id); + + intel_pmt_put_feature_group(feature_group); + } + + if (num_available == 0) + kunit_warn(test, "No PMT region groups were available for any feature ID (0-10).\n"); +} + +static struct kunit_case intel_pmt_discovery_test_cases[] = { + KUNIT_CASE(test_intel_pmt_get_regions_by_feature), + {} +}; + +static struct kunit_suite intel_pmt_discovery_test_suite = { + .name = "pmt_discovery_test", + .test_cases = intel_pmt_discovery_test_cases, +}; + +kunit_test_suite(intel_pmt_discovery_test_suite); + +MODULE_IMPORT_NS("INTEL_PMT_DISCOVERY"); +MODULE_AUTHOR("David E.
Box <david.e.box@linux.intel.com>"); +MODULE_DESCRIPTION("Intel PMT Discovery KUNIT test driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/intel/pmt/discovery.c b/drivers/platform/x86/intel/pmt/discovery.c new file mode 100644 index 000000000000..32713a194a55 --- /dev/null +++ b/drivers/platform/x86/intel/pmt/discovery.c @@ -0,0 +1,635 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel Platform Monitoring Technology Discovery driver + * + * Copyright (c) 2025, Intel Corporation. + * All Rights Reserved. + */ + +#include <linux/auxiliary_bus.h> +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/bug.h> +#include <linux/cleanup.h> +#include <linux/container_of.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/ioport.h> +#include <linux/kdev_t.h> +#include <linux/kobject.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/overflow.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/string_choices.h> +#include <linux/sysfs.h> +#include <linux/types.h> +#include <linux/uaccess.h> + +#include <linux/intel_pmt_features.h> +#include <linux/intel_vsec.h> + +#include "class.h" + +#define MAX_FEATURE_VERSION 0 +#define DT_TBIR GENMASK(2, 0) +#define FEAT_ATTR_SIZE(x) ((x) * sizeof(u32)) +#define PMT_GUID_SIZE(x) ((x) * sizeof(u32)) +#define PMT_ACCESS_TYPE_RSVD 0xF +#define SKIP_FEATURE 1 + +struct feature_discovery_table { + u32 access_type:4; + u32 version:8; + u32 size:16; + u32 reserved:4; + u32 id; + u32 offset; + u32 reserved2; +}; + +/* Common feature table header */ +struct feature_header { + u32 attr_size:8; + u32 num_guids:8; + u32 reserved:16; +}; + +/* Feature attribute fields */ +struct caps { + u32 caps; +}; + +struct command { + u32 max_stream_size:16; + u32 max_command_size:16; +}; + +struct watcher { + u32 reserved:21; + u32 period:11; + struct command command; +}; + +struct rmid { + u32 num_rmids:16; /* Number of Resource Monitoring IDs */ + u32 reserved:16; + struct watcher watcher; +}; + +struct feature_table { + struct feature_header header; + struct caps caps; + union { + struct command command; + struct watcher watcher; + struct rmid rmid; + }; + u32 *guids; +}; + +/* For backreference in struct feature */ +struct pmt_features_priv; + +struct feature { + struct feature_table table; + struct kobject kobj; + struct pmt_features_priv *priv; + struct list_head list; + const struct attribute_group *attr_group; + enum pmt_feature_id id; +}; + +struct pmt_features_priv { + struct device *parent; + struct device *dev; + int count; + u32 mask; + struct feature feature[]; +}; + +static LIST_HEAD(pmt_feature_list); +static DEFINE_MUTEX(feature_list_lock); + +#define to_pmt_feature(x) container_of(x, struct feature, kobj) +static void pmt_feature_release(struct kobject *kobj) +{ +} + +static ssize_t caps_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) +{ + struct feature *feature = to_pmt_feature(kobj); + struct pmt_cap **pmt_caps; + u32 caps = feature->table.caps.caps; + ssize_t ret = 0; + + switch (feature->id) { + case FEATURE_PER_CORE_PERF_TELEM: + pmt_caps = pmt_caps_pcpt; + break; + case FEATURE_PER_CORE_ENV_TELEM: + pmt_caps = pmt_caps_pcet; + break; + case FEATURE_PER_RMID_PERF_TELEM: + pmt_caps = pmt_caps_rmid_perf; + break; + case FEATURE_ACCEL_TELEM: + pmt_caps = pmt_caps_accel; + break; + case FEATURE_UNCORE_TELEM: + pmt_caps = pmt_caps_uncore; + break; + case FEATURE_CRASH_LOG: + pmt_caps = pmt_caps_crashlog;
break; + case FEATURE_PETE_LOG: + pmt_caps = pmt_caps_pete; + break; + case FEATURE_TPMI_CTRL: + pmt_caps = pmt_caps_tpmi; + break; + case FEATURE_TRACING: + pmt_caps = pmt_caps_tracing; + break; + case FEATURE_PER_RMID_ENERGY_TELEM: + pmt_caps = pmt_caps_rmid_energy; + break; + default: + return -EINVAL; + } + + while (*pmt_caps) { + struct pmt_cap *pmt_cap = *pmt_caps; + + while (pmt_cap->name) { + ret += sysfs_emit_at(buf, ret, "%-40s Available: %s\n", pmt_cap->name, + str_yes_no(pmt_cap->mask & caps)); + pmt_cap++; + } + pmt_caps++; + } + + return ret; +} +static struct kobj_attribute caps_attribute = __ATTR_RO(caps); + +static struct watcher *get_watcher(struct feature *feature) +{ + switch (feature_layout[feature->id]) { + case LAYOUT_RMID: + return &feature->table.rmid.watcher; + case LAYOUT_WATCHER: + return &feature->table.watcher; + default: + return ERR_PTR(-EINVAL); + } +} + +static struct command *get_command(struct feature *feature) +{ + switch (feature_layout[feature->id]) { + case LAYOUT_RMID: + return &feature->table.rmid.watcher.command; + case LAYOUT_WATCHER: + return &feature->table.watcher.command; + case LAYOUT_COMMAND: + return &feature->table.command; + default: + return ERR_PTR(-EINVAL); + } +} + +static ssize_t num_rmids_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct feature *feature = to_pmt_feature(kobj); + + return sysfs_emit(buf, "%u\n", feature->table.rmid.num_rmids); +} +static struct kobj_attribute num_rmids_attribute = __ATTR_RO(num_rmids); + +static ssize_t min_watcher_period_ms_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct feature *feature = to_pmt_feature(kobj); + struct watcher *watcher = get_watcher(feature); + + if (IS_ERR(watcher)) + return PTR_ERR(watcher); + + return sysfs_emit(buf, "%u\n", watcher->period); +} +static struct kobj_attribute min_watcher_period_ms_attribute = + __ATTR_RO(min_watcher_period_ms); + +static ssize_t max_stream_size_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct feature *feature = to_pmt_feature(kobj); + struct command *command = get_command(feature); + + if (IS_ERR(command)) + return PTR_ERR(command); + + return sysfs_emit(buf, "%u\n", command->max_stream_size); +} +static struct kobj_attribute max_stream_size_attribute = + __ATTR_RO(max_stream_size); + +static ssize_t max_command_size_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct feature *feature = to_pmt_feature(kobj); + struct command *command = get_command(feature); + + if (IS_ERR(command)) + return PTR_ERR(command); + + return sysfs_emit(buf, "%u\n", command->max_command_size); +} +static struct kobj_attribute max_command_size_attribute = + __ATTR_RO(max_command_size); + +static ssize_t guids_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) +{ + struct feature *feature = to_pmt_feature(kobj); + int i, count = 0; + + for (i = 0; i < feature->table.header.num_guids; i++) + count += sysfs_emit_at(buf, count, "0x%x\n", + feature->table.guids[i]); + + return count; +} +static struct kobj_attribute guids_attribute = __ATTR_RO(guids); + +static struct attribute *pmt_feature_rmid_attrs[] = { + &caps_attribute.attr, + &num_rmids_attribute.attr, + &min_watcher_period_ms_attribute.attr, + &max_stream_size_attribute.attr, + &max_command_size_attribute.attr, + &guids_attribute.attr, + NULL +}; +ATTRIBUTE_GROUPS(pmt_feature_rmid); + +static const struct kobj_type pmt_feature_rmid_ktype = { + .sysfs_ops = &kobj_sysfs_ops, + 
.release = pmt_feature_release, + .default_groups = pmt_feature_rmid_groups, +}; + +static struct attribute *pmt_feature_watcher_attrs[] = { + &caps_attribute.attr, + &min_watcher_period_ms_attribute.attr, + &max_stream_size_attribute.attr, + &max_command_size_attribute.attr, + &guids_attribute.attr, + NULL +}; +ATTRIBUTE_GROUPS(pmt_feature_watcher); + +static const struct kobj_type pmt_feature_watcher_ktype = { + .sysfs_ops = &kobj_sysfs_ops, + .release = pmt_feature_release, + .default_groups = pmt_feature_watcher_groups, +}; + +static struct attribute *pmt_feature_command_attrs[] = { + &caps_attribute.attr, + &max_stream_size_attribute.attr, + &max_command_size_attribute.attr, + &guids_attribute.attr, + NULL +}; +ATTRIBUTE_GROUPS(pmt_feature_command); + +static const struct kobj_type pmt_feature_command_ktype = { + .sysfs_ops = &kobj_sysfs_ops, + .release = pmt_feature_release, + .default_groups = pmt_feature_command_groups, +}; + +static struct attribute *pmt_feature_guids_attrs[] = { + &caps_attribute.attr, + &guids_attribute.attr, + NULL +}; +ATTRIBUTE_GROUPS(pmt_feature_guids); + +static const struct kobj_type pmt_feature_guids_ktype = { + .sysfs_ops = &kobj_sysfs_ops, + .release = pmt_feature_release, + .default_groups = pmt_feature_guids_groups, +}; + +static int +pmt_feature_get_disc_table(struct pmt_features_priv *priv, + struct resource *disc_res, + struct feature_discovery_table *disc_tbl) +{ + void __iomem *disc_base; + + disc_base = devm_ioremap_resource(priv->dev, disc_res); + if (IS_ERR(disc_base)) + return PTR_ERR(disc_base); + + memcpy_fromio(disc_tbl, disc_base, sizeof(*disc_tbl)); + + devm_iounmap(priv->dev, disc_base); + + if (priv->mask & BIT(disc_tbl->id)) + return dev_err_probe(priv->dev, -EINVAL, "Duplicate feature: %s\n", + pmt_feature_names[disc_tbl->id]); + + /* + * Some devices may expose non-functioning entries that are + * reserved for future use. They have zero size. Do not fail + * probe for these. Just ignore them. 
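+ * This mirrors the zero-size handling in the telemetry driver's + * pmt_telem_header_decode().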
+ */ + if (disc_tbl->size == 0 || disc_tbl->access_type == PMT_ACCESS_TYPE_RSVD) + return SKIP_FEATURE; + + if (disc_tbl->version > MAX_FEATURE_VERSION) + return SKIP_FEATURE; + + if (!pmt_feature_id_is_valid(disc_tbl->id)) + return SKIP_FEATURE; + + priv->mask |= BIT(disc_tbl->id); + + return 0; +} + +static int +pmt_feature_get_feature_table(struct pmt_features_priv *priv, + struct feature *feature, + struct feature_discovery_table *disc_tbl, + struct resource *disc_res) +{ + struct feature_table *feat_tbl = &feature->table; + struct feature_header *header; + struct resource res = {}; + resource_size_t res_size; + void __iomem *feat_base, *feat_offset; + void *tbl_offset; + size_t size; + u32 *guids; + u8 tbir; + + tbir = FIELD_GET(DT_TBIR, disc_tbl->offset); + + switch (disc_tbl->access_type) { + case ACCESS_LOCAL: + if (tbir) + return dev_err_probe(priv->dev, -EINVAL, + "Unsupported BAR index %u for access type %u\n", + tbir, disc_tbl->access_type); + + + /* + * For access_type LOCAL, the base address is as follows: + * base address = end of discovery region + base offset + 1 + */ + res = DEFINE_RES_MEM(disc_res->end + disc_tbl->offset + 1, + disc_tbl->size * sizeof(u32)); + break; + + default: + return dev_err_probe(priv->dev, -EINVAL, "Unrecognized access_type %u\n", + disc_tbl->access_type); + } + + feature->id = disc_tbl->id; + + /* Get the feature table */ + feat_base = devm_ioremap_resource(priv->dev, &res); + if (IS_ERR(feat_base)) + return PTR_ERR(feat_base); + + feat_offset = feat_base; + tbl_offset = feat_tbl; + + /* Get the header */ + header = &feat_tbl->header; + memcpy_fromio(header, feat_offset, sizeof(*header)); + + /* Validate fields fit within mapped resource */ + size = sizeof(*header) + FEAT_ATTR_SIZE(header->attr_size) + + PMT_GUID_SIZE(header->num_guids); + res_size = resource_size(&res); + if (WARN(size > res_size, "Bad table size %zu > %pa", size, &res_size)) + return -EINVAL; + + /* Get the feature attributes, including capability fields */ + tbl_offset += sizeof(*header); + feat_offset += sizeof(*header); + + memcpy_fromio(tbl_offset, feat_offset, FEAT_ATTR_SIZE(header->attr_size)); + + /* Finally, get the guids */ + guids = devm_kmalloc(priv->dev, PMT_GUID_SIZE(header->num_guids), GFP_KERNEL); + if (!guids) + return -ENOMEM; + + feat_offset += FEAT_ATTR_SIZE(header->attr_size); + + memcpy_fromio(guids, feat_offset, PMT_GUID_SIZE(header->num_guids)); + + feat_tbl->guids = guids; + + devm_iounmap(priv->dev, feat_base); + + return 0; +} + +static void pmt_features_add_feat(struct feature *feature) +{ + guard(mutex)(&feature_list_lock); + list_add(&feature->list, &pmt_feature_list); +} + +static void pmt_features_remove_feat(struct feature *feature) +{ + guard(mutex)(&feature_list_lock); + list_del(&feature->list); +} + +/* Get the discovery table and use it to get the feature table */ +static int pmt_features_discovery(struct pmt_features_priv *priv, + struct feature *feature, + struct intel_vsec_device *ivdev, + int idx) +{ + struct feature_discovery_table disc_tbl = {}; /* Avoid false warning */ + struct resource *disc_res = &ivdev->resource[idx]; + const struct kobj_type *ktype; + int ret; + + ret = pmt_feature_get_disc_table(priv, disc_res, &disc_tbl); + if (ret) + return ret; + + ret = pmt_feature_get_feature_table(priv, feature, &disc_tbl, disc_res); + if (ret) + return ret; + + switch (feature_layout[feature->id]) { + case LAYOUT_RMID: + ktype = &pmt_feature_rmid_ktype; + feature->attr_group = &pmt_feature_rmid_group; + break; + case LAYOUT_WATCHER: + 
ktype = &pmt_feature_watcher_ktype; + feature->attr_group = &pmt_feature_watcher_group; + break; + case LAYOUT_COMMAND: + ktype = &pmt_feature_command_ktype; + feature->attr_group = &pmt_feature_command_group; + break; + case LAYOUT_CAPS_ONLY: + ktype = &pmt_feature_guids_ktype; + feature->attr_group = &pmt_feature_guids_group; + break; + default: + return -EINVAL; + } + + ret = kobject_init_and_add(&feature->kobj, ktype, &priv->dev->kobj, + "%s", pmt_feature_names[feature->id]); + if (ret) + return ret; + + kobject_uevent(&feature->kobj, KOBJ_ADD); + pmt_features_add_feat(feature); + + return 0; +} + +static void pmt_features_remove(struct auxiliary_device *auxdev) +{ + struct pmt_features_priv *priv = auxiliary_get_drvdata(auxdev); + int i; + + for (i = 0; i < priv->count; i++) { + struct feature *feature = &priv->feature[i]; + + pmt_features_remove_feat(feature); + sysfs_remove_group(&feature->kobj, feature->attr_group); + kobject_put(&feature->kobj); + } + + device_unregister(priv->dev); +} + +static int pmt_features_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id) +{ + struct intel_vsec_device *ivdev = auxdev_to_ivdev(auxdev); + struct pmt_features_priv *priv; + size_t size; + int ret, i; + + size = struct_size(priv, feature, ivdev->num_resources); + priv = devm_kzalloc(&auxdev->dev, size, GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->parent = &ivdev->pcidev->dev; + auxiliary_set_drvdata(auxdev, priv); + + priv->dev = device_create(&intel_pmt_class, &auxdev->dev, MKDEV(0, 0), priv, + "%s-%s", "features", dev_name(priv->parent)); + if (IS_ERR(priv->dev)) + return dev_err_probe(&auxdev->dev, PTR_ERR(priv->dev), + "Could not create %s-%s device node\n", + "features", dev_name(priv->parent)); + + /* Initialize each feature */ + for (i = 0; i < ivdev->num_resources; i++) { + struct feature *feature = &priv->feature[priv->count]; + + ret = pmt_features_discovery(priv, feature, ivdev, i); + if (ret == SKIP_FEATURE) + continue; + if (ret != 0) + goto abort_probe; + + feature->priv = priv; + priv->count++; + } + + return 0; + +abort_probe: + /* + * Only fully initialized features are tracked in priv->count, which is + * incremented only after a feature is completely set up (i.e., after + * discovery and sysfs registration). If feature initialization fails, + * the failing feature's state is local and does not require rollback. + * + * Therefore, on error, we can safely call the driver's remove() routine + * pmt_features_remove() to clean up only those features that were + * fully initialized and counted. All other resources are device-managed + * and will be cleaned up automatically during device_unregister().
+ */ + pmt_features_remove(auxdev); + + return ret; +} + +static void pmt_get_features(struct intel_pmt_entry *entry, struct feature *f) +{ + int num_guids = f->table.header.num_guids; + int i; + + for (i = 0; i < num_guids; i++) { + if (f->table.guids[i] != entry->guid) + continue; + + entry->feature_flags |= BIT(f->id); + + if (feature_layout[f->id] == LAYOUT_RMID) + entry->num_rmids = f->table.rmid.num_rmids; + else + entry->num_rmids = 0; /* entry is kzalloc but set anyway */ + } +} + +void intel_pmt_get_features(struct intel_pmt_entry *entry) +{ + struct feature *feature; + + mutex_lock(&feature_list_lock); + list_for_each_entry(feature, &pmt_feature_list, list) { + if (feature->priv->parent != &entry->ep->pcidev->dev) + continue; + + pmt_get_features(entry, feature); + } + mutex_unlock(&feature_list_lock); +} +EXPORT_SYMBOL_NS_GPL(intel_pmt_get_features, "INTEL_PMT"); + +static const struct auxiliary_device_id pmt_features_id_table[] = { + { .name = "intel_vsec.discovery" }, + {} +}; +MODULE_DEVICE_TABLE(auxiliary, pmt_features_id_table); + +static struct auxiliary_driver pmt_features_aux_driver = { + .id_table = pmt_features_id_table, + .remove = pmt_features_remove, + .probe = pmt_features_probe, +}; +module_auxiliary_driver(pmt_features_aux_driver); + +MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>"); +MODULE_DESCRIPTION("Intel PMT Discovery driver"); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS("INTEL_PMT"); diff --git a/drivers/platform/x86/intel/pmt/features.c b/drivers/platform/x86/intel/pmt/features.c new file mode 100644 index 000000000000..8a39cddc75c8 --- /dev/null +++ b/drivers/platform/x86/intel/pmt/features.c @@ -0,0 +1,205 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2025, Intel Corporation. + * All Rights Reserved. + * + * Author: "David E. 
Box" <david.e.box@linux.intel.com> + */ + +#include <linux/export.h> +#include <linux/types.h> + +#include <linux/intel_pmt_features.h> + +const char * const pmt_feature_names[] = { + [FEATURE_PER_CORE_PERF_TELEM] = "per_core_performance_telemetry", + [FEATURE_PER_CORE_ENV_TELEM] = "per_core_environment_telemetry", + [FEATURE_PER_RMID_PERF_TELEM] = "per_rmid_perf_telemetry", + [FEATURE_ACCEL_TELEM] = "accelerator_telemetry", + [FEATURE_UNCORE_TELEM] = "uncore_telemetry", + [FEATURE_CRASH_LOG] = "crash_log", + [FEATURE_PETE_LOG] = "pete_log", + [FEATURE_TPMI_CTRL] = "tpmi_control", + [FEATURE_TRACING] = "tracing", + [FEATURE_PER_RMID_ENERGY_TELEM] = "per_rmid_energy_telemetry", +}; +EXPORT_SYMBOL_NS_GPL(pmt_feature_names, "INTEL_PMT_DISCOVERY"); + +enum feature_layout feature_layout[] = { + [FEATURE_PER_CORE_PERF_TELEM] = LAYOUT_WATCHER, + [FEATURE_PER_CORE_ENV_TELEM] = LAYOUT_WATCHER, + [FEATURE_PER_RMID_PERF_TELEM] = LAYOUT_RMID, + [FEATURE_ACCEL_TELEM] = LAYOUT_WATCHER, + [FEATURE_UNCORE_TELEM] = LAYOUT_WATCHER, + [FEATURE_CRASH_LOG] = LAYOUT_COMMAND, + [FEATURE_PETE_LOG] = LAYOUT_COMMAND, + [FEATURE_TPMI_CTRL] = LAYOUT_CAPS_ONLY, + [FEATURE_TRACING] = LAYOUT_CAPS_ONLY, + [FEATURE_PER_RMID_ENERGY_TELEM] = LAYOUT_RMID, +}; + +struct pmt_cap pmt_cap_common[] = { + {PMT_CAP_TELEM, "telemetry"}, + {PMT_CAP_WATCHER, "watcher"}, + {PMT_CAP_CRASHLOG, "crashlog"}, + {PMT_CAP_STREAMING, "streaming"}, + {PMT_CAP_THRESHOLD, "threshold"}, + {PMT_CAP_WINDOW, "window"}, + {PMT_CAP_CONFIG, "config"}, + {PMT_CAP_TRACING, "tracing"}, + {PMT_CAP_INBAND, "inband"}, + {PMT_CAP_OOB, "oob"}, + {PMT_CAP_SECURED_CHAN, "secure_chan"}, + {PMT_CAP_PMT_SP, "pmt_sp"}, + {PMT_CAP_PMT_SP_POLICY, "pmt_sp_policy"}, + {} +}; + +struct pmt_cap pmt_cap_pcpt[] = { + {PMT_CAP_PCPT_CORE_PERF, "core_performance"}, + {PMT_CAP_PCPT_CORE_C0_RES, "core_c0_residency"}, + {PMT_CAP_PCPT_CORE_ACTIVITY, "core_activity"}, + {PMT_CAP_PCPT_CACHE_PERF, "cache_performance"}, + {PMT_CAP_PCPT_QUALITY_TELEM, "quality_telemetry"}, + {} +}; + +struct pmt_cap *pmt_caps_pcpt[] = { + pmt_cap_common, + pmt_cap_pcpt, + NULL +}; + +struct pmt_cap pmt_cap_pcet[] = { + {PMT_CAP_PCET_WORKPOINT_HIST, "workpoint_histogram"}, + {PMT_CAP_PCET_CORE_CURR_TEMP, "core_current_temp"}, + {PMT_CAP_PCET_CORE_INST_RES, "core_inst_residency"}, + {PMT_CAP_PCET_QUALITY_TELEM, "quality_telemetry"}, + {PMT_CAP_PCET_CORE_CDYN_LVL, "core_cdyn_level"}, + {PMT_CAP_PCET_CORE_STRESS_LVL, "core_stress_level"}, + {PMT_CAP_PCET_CORE_DAS, "core_digital_aging_sensor"}, + {PMT_CAP_PCET_FIVR_HEALTH, "fivr_health"}, + {PMT_CAP_PCET_ENERGY, "energy"}, + {PMT_CAP_PCET_PEM_STATUS, "pem_status"}, + {PMT_CAP_PCET_CORE_C_STATE, "core_c_state"}, + {} +}; + +struct pmt_cap *pmt_caps_pcet[] = { + pmt_cap_common, + pmt_cap_pcet, + NULL +}; + +struct pmt_cap pmt_cap_rmid_perf[] = { + {PMT_CAP_RMID_CORES_PERF, "core_performance"}, + {PMT_CAP_RMID_CACHE_PERF, "cache_performance"}, + {PMT_CAP_RMID_PERF_QUAL, "performance_quality"}, + {} +}; + +struct pmt_cap *pmt_caps_rmid_perf[] = { + pmt_cap_common, + pmt_cap_rmid_perf, + NULL +}; + +struct pmt_cap pmt_cap_accel[] = { + {PMT_CAP_ACCEL_CPM_TELEM, "content_processing_module"}, + {PMT_CAP_ACCEL_TIP_TELEM, "content_turbo_ip"}, + {} +}; + +struct pmt_cap *pmt_caps_accel[] = { + pmt_cap_common, + pmt_cap_accel, + NULL +}; + +struct pmt_cap pmt_cap_uncore[] = { + {PMT_CAP_UNCORE_IO_CA_TELEM, "io_ca"}, + {PMT_CAP_UNCORE_RMID_TELEM, "rmid"}, + {PMT_CAP_UNCORE_D2D_ULA_TELEM, "d2d_ula"}, + {PMT_CAP_UNCORE_PKGC_TELEM, "package_c"}, + {} +}; + +struct pmt_cap 
*pmt_caps_uncore[] = { + pmt_cap_common, + pmt_cap_uncore, + NULL +}; + +struct pmt_cap pmt_cap_crashlog[] = { + {PMT_CAP_CRASHLOG_MAN_TRIG, "manual_trigger"}, + {PMT_CAP_CRASHLOG_CORE, "core"}, + {PMT_CAP_CRASHLOG_UNCORE, "uncore"}, + {PMT_CAP_CRASHLOG_TOR, "tor"}, + {PMT_CAP_CRASHLOG_S3M, "s3m"}, + {PMT_CAP_CRASHLOG_PERSISTENCY, "persistency"}, + {PMT_CAP_CRASHLOG_CLIP_GPIO, "crashlog_in_progress"}, + {PMT_CAP_CRASHLOG_PRE_RESET, "pre_reset_extraction"}, + {PMT_CAP_CRASHLOG_POST_RESET, "post_reset_extraction"}, + {} +}; + +struct pmt_cap *pmt_caps_crashlog[] = { + pmt_cap_common, + pmt_cap_crashlog, + NULL +}; + +struct pmt_cap pmt_cap_pete[] = { + {PMT_CAP_PETE_MAN_TRIG, "manual_trigger"}, + {PMT_CAP_PETE_ENCRYPTION, "encryption"}, + {PMT_CAP_PETE_PERSISTENCY, "persistency"}, + {PMT_CAP_PETE_REQ_TOKENS, "required_tokens"}, + {PMT_CAP_PETE_PROD_ENABLED, "production_enabled"}, + {PMT_CAP_PETE_DEBUG_ENABLED, "debug_enabled"}, + {} +}; + +struct pmt_cap *pmt_caps_pete[] = { + pmt_cap_common, + pmt_cap_pete, + NULL +}; + +struct pmt_cap pmt_cap_tpmi[] = { + {PMT_CAP_TPMI_MAILBOX, "mailbox"}, + {PMT_CAP_TPMI_LOCK, "bios_lock"}, + {} +}; + +struct pmt_cap *pmt_caps_tpmi[] = { + pmt_cap_common, + pmt_cap_tpmi, + NULL +}; + +struct pmt_cap pmt_cap_tracing[] = { + {PMT_CAP_TRACE_SRAR, "srar_errors"}, + {PMT_CAP_TRACE_CORRECTABLE, "correctable_errors"}, + {PMT_CAP_TRACE_MCTP, "mctp"}, + {PMT_CAP_TRACE_MRT, "memory_resiliency"}, + {} +}; + +struct pmt_cap *pmt_caps_tracing[] = { + pmt_cap_common, + pmt_cap_tracing, + NULL +}; + +struct pmt_cap pmt_cap_rmid_energy[] = { + {PMT_CAP_RMID_ENERGY, "energy"}, + {PMT_CAP_RMID_ACTIVITY, "activity"}, + {PMT_CAP_RMID_ENERGY_QUAL, "energy_quality"}, + {} +}; + +struct pmt_cap *pmt_caps_rmid_energy[] = { + pmt_cap_common, + pmt_cap_rmid_energy, + NULL +}; diff --git a/drivers/platform/x86/intel/pmt/telemetry.c b/drivers/platform/x86/intel/pmt/telemetry.c index 5e4009c05ecf..a4dfca6cac19 100644 --- a/drivers/platform/x86/intel/pmt/telemetry.c +++ b/drivers/platform/x86/intel/pmt/telemetry.c @@ -9,14 +9,22 @@ */ #include <linux/auxiliary_bus.h> +#include <linux/bitops.h> +#include <linux/cleanup.h> +#include <linux/err.h> +#include <linux/intel_pmt_features.h> +#include <linux/intel_vsec.h> #include <linux/kernel.h> +#include <linux/kref.h> #include <linux/module.h> +#include <linux/mutex.h> +#include <linux/overflow.h> #include <linux/pci.h> #include <linux/slab.h> +#include <linux/types.h> #include <linux/uaccess.h> -#include <linux/overflow.h> +#include <linux/xarray.h> -#include "../vsec.h" #include "class.h" #define TELEM_SIZE_OFFSET 0x0 @@ -30,6 +38,15 @@ /* Used by client hardware to identify a fixed telemetry entry*/ #define TELEM_CLIENT_FIXED_BLOCK_GUID 0x10000000 +#define NUM_BYTES_QWORD(v) ((v) << 3) +#define SAMPLE_ID_OFFSET(v) ((v) << 3) + +#define NUM_BYTES_DWORD(v) ((v) << 2) +#define SAMPLE_ID_OFFSET32(v) ((v) << 2) + +/* Protects access to the xarray of telemetry endpoint handles */ +static DEFINE_MUTEX(ep_lock); + enum telem_type { TELEM_TYPE_PUNIT = 0, TELEM_TYPE_CRASHLOG, @@ -58,10 +75,10 @@ static bool pmt_telem_region_overlaps(struct intel_pmt_entry *entry, } static int pmt_telem_header_decode(struct intel_pmt_entry *entry, - struct intel_pmt_header *header, struct device *dev) { void __iomem *disc_table = entry->disc_table; + struct intel_pmt_header *header = &entry->header; if (pmt_telem_region_overlaps(entry, dev)) return 1; @@ -78,27 +95,284 @@ static int pmt_telem_header_decode(struct intel_pmt_entry *entry, * reserved for future use. 
They have zero size. Do not fail * probe for these. Just ignore them. */ - if (header->size == 0) + if (header->size == 0 || header->access_type == 0xF) return 1; return 0; } +static int pmt_telem_add_endpoint(struct intel_vsec_device *ivdev, + struct intel_pmt_entry *entry) +{ + struct telem_endpoint *ep; + + /* Endpoint lifetimes are managed by kref, not devres */ + entry->ep = kzalloc(sizeof(*(entry->ep)), GFP_KERNEL); + if (!entry->ep) + return -ENOMEM; + + ep = entry->ep; + ep->pcidev = ivdev->pcidev; + ep->header.access_type = entry->header.access_type; + ep->header.guid = entry->header.guid; + ep->header.base_offset = entry->header.base_offset; + ep->header.size = entry->header.size; + ep->base = entry->base; + ep->present = true; + ep->cb = ivdev->priv_data; + + kref_init(&ep->kref); + + return 0; +} + static DEFINE_XARRAY_ALLOC(telem_array); static struct intel_pmt_namespace pmt_telem_ns = { .name = "telem", .xa = &telem_array, .pmt_header_decode = pmt_telem_header_decode, + .pmt_add_endpoint = pmt_telem_add_endpoint, }; +/* Called when all users unregister and the device is removed */ +static void pmt_telem_ep_release(struct kref *kref) +{ + struct telem_endpoint *ep; + + ep = container_of(kref, struct telem_endpoint, kref); + kfree(ep); +} + +unsigned long pmt_telem_get_next_endpoint(unsigned long start) +{ + struct intel_pmt_entry *entry; + unsigned long found_idx; + + mutex_lock(&ep_lock); + xa_for_each_start(&telem_array, found_idx, entry, start) { + /* + * Return first found index after start. + * 0 is not valid id. + */ + if (found_idx > start) + break; + } + mutex_unlock(&ep_lock); + + return found_idx == start ? 0 : found_idx; +} +EXPORT_SYMBOL_NS_GPL(pmt_telem_get_next_endpoint, "INTEL_PMT_TELEMETRY"); + +struct telem_endpoint *pmt_telem_register_endpoint(int devid) +{ + struct intel_pmt_entry *entry; + unsigned long index = devid; + + mutex_lock(&ep_lock); + entry = xa_find(&telem_array, &index, index, XA_PRESENT); + if (!entry) { + mutex_unlock(&ep_lock); + return ERR_PTR(-ENXIO); + } + + kref_get(&entry->ep->kref); + mutex_unlock(&ep_lock); + + return entry->ep; +} +EXPORT_SYMBOL_NS_GPL(pmt_telem_register_endpoint, "INTEL_PMT_TELEMETRY"); + +void pmt_telem_unregister_endpoint(struct telem_endpoint *ep) +{ + kref_put(&ep->kref, pmt_telem_ep_release); +} +EXPORT_SYMBOL_NS_GPL(pmt_telem_unregister_endpoint, "INTEL_PMT_TELEMETRY"); + +int pmt_telem_get_endpoint_info(int devid, struct telem_endpoint_info *info) +{ + struct intel_pmt_entry *entry; + unsigned long index = devid; + int err = 0; + + if (!info) + return -EINVAL; + + mutex_lock(&ep_lock); + entry = xa_find(&telem_array, &index, index, XA_PRESENT); + if (!entry) { + err = -ENXIO; + goto unlock; + } + + info->pdev = entry->ep->pcidev; + info->header = entry->ep->header; + +unlock: + mutex_unlock(&ep_lock); + return err; + +} +EXPORT_SYMBOL_NS_GPL(pmt_telem_get_endpoint_info, "INTEL_PMT_TELEMETRY"); + +static int pmt_copy_region(struct telemetry_region *region, + struct intel_pmt_entry *entry) +{ + + struct oobmsm_plat_info *plat_info; + + plat_info = intel_vsec_get_mapping(entry->ep->pcidev); + if (IS_ERR(plat_info)) + return PTR_ERR(plat_info); + + region->plat_info = *plat_info; + region->guid = entry->guid; + region->addr = entry->ep->base; + region->size = entry->size; + region->num_rmids = entry->num_rmids; + + return 0; +} + +static void pmt_feature_group_release(struct kref *kref) +{ + struct pmt_feature_group *feature_group; + + feature_group = container_of(kref, struct pmt_feature_group, kref); + 
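+ /* regions[] was allocated in one block with the struct (struct_size() in intel_pmt_get_regions_by_feature()), so a single kfree() releases the group and its regions together. */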
kfree(feature_group); +} + +struct pmt_feature_group *intel_pmt_get_regions_by_feature(enum pmt_feature_id id) +{ + struct pmt_feature_group *feature_group __free(kfree) = NULL; + struct telemetry_region *region; + struct intel_pmt_entry *entry; + unsigned long idx; + int count = 0; + size_t size; + + if (!pmt_feature_id_is_valid(id)) + return ERR_PTR(-EINVAL); + + guard(mutex)(&ep_lock); + xa_for_each(&telem_array, idx, entry) { + if (entry->feature_flags & BIT(id)) + count++; + } + + if (!count) + return ERR_PTR(-ENOENT); + + size = struct_size(feature_group, regions, count); + feature_group = kzalloc(size, GFP_KERNEL); + if (!feature_group) + return ERR_PTR(-ENOMEM); + + feature_group->count = count; + + region = feature_group->regions; + xa_for_each(&telem_array, idx, entry) { + int ret; + + if (!(entry->feature_flags & BIT(id))) + continue; + + ret = pmt_copy_region(region, entry); + if (ret) + return ERR_PTR(ret); + + region++; + } + + kref_init(&feature_group->kref); + + return no_free_ptr(feature_group); +} +EXPORT_SYMBOL(intel_pmt_get_regions_by_feature); + +void intel_pmt_put_feature_group(struct pmt_feature_group *feature_group) +{ + kref_put(&feature_group->kref, pmt_feature_group_release); +} +EXPORT_SYMBOL(intel_pmt_put_feature_group); + +int pmt_telem_read(struct telem_endpoint *ep, u32 id, u64 *data, u32 count) +{ + u32 offset, size; + + if (!ep->present) + return -ENODEV; + + offset = SAMPLE_ID_OFFSET(id); + size = ep->header.size; + + if (offset + NUM_BYTES_QWORD(count) > size) + return -EINVAL; + + pmt_telem_read_mmio(ep->pcidev, ep->cb, ep->header.guid, data, ep->base, offset, + NUM_BYTES_QWORD(count)); + + return ep->present ? 0 : -EPIPE; +} +EXPORT_SYMBOL_NS_GPL(pmt_telem_read, "INTEL_PMT_TELEMETRY"); + +int pmt_telem_read32(struct telem_endpoint *ep, u32 id, u32 *data, u32 count) +{ + u32 offset, size; + + if (!ep->present) + return -ENODEV; + + offset = SAMPLE_ID_OFFSET32(id); + size = ep->header.size; + + if (offset + NUM_BYTES_DWORD(count) > size) + return -EINVAL; + + memcpy_fromio(data, ep->base + offset, NUM_BYTES_DWORD(count)); + + return ep->present ? 
0 : -EPIPE; +} +EXPORT_SYMBOL_NS_GPL(pmt_telem_read32, "INTEL_PMT_TELEMETRY"); + +struct telem_endpoint * +pmt_telem_find_and_register_endpoint(struct pci_dev *pcidev, u32 guid, u16 pos) +{ + int devid = 0; + int inst = 0; + int err = 0; + + while ((devid = pmt_telem_get_next_endpoint(devid))) { + struct telem_endpoint_info ep_info; + + err = pmt_telem_get_endpoint_info(devid, &ep_info); + if (err) + return ERR_PTR(err); + + if (ep_info.header.guid == guid && ep_info.pdev == pcidev) { + if (inst == pos) + return pmt_telem_register_endpoint(devid); + ++inst; + } + } + + return ERR_PTR(-ENXIO); +} +EXPORT_SYMBOL_NS_GPL(pmt_telem_find_and_register_endpoint, "INTEL_PMT_TELEMETRY"); + static void pmt_telem_remove(struct auxiliary_device *auxdev) { struct pmt_telem_priv *priv = auxiliary_get_drvdata(auxdev); int i; - for (i = 0; i < priv->num_entries; i++) - intel_pmt_dev_destroy(&priv->entry[i], &pmt_telem_ns); -} + mutex_lock(&ep_lock); + for (i = 0; i < priv->num_entries; i++) { + struct intel_pmt_entry *entry = &priv->entry[i]; + + kref_put(&entry->ep->kref, pmt_telem_ep_release); + intel_pmt_dev_destroy(entry, &pmt_telem_ns); + } + mutex_unlock(&ep_lock); +} static int pmt_telem_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id) { @@ -117,13 +391,17 @@ static int pmt_telem_probe(struct auxiliary_device *auxdev, const struct auxilia for (i = 0; i < intel_vsec_dev->num_resources; i++) { struct intel_pmt_entry *entry = &priv->entry[priv->num_entries]; + mutex_lock(&ep_lock); ret = intel_pmt_dev_create(entry, &pmt_telem_ns, intel_vsec_dev, i); + mutex_unlock(&ep_lock); if (ret < 0) goto abort_probe; if (ret) continue; priv->num_entries++; + + intel_pmt_get_features(entry); } return 0; @@ -160,3 +438,5 @@ module_exit(pmt_telem_exit); MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>"); MODULE_DESCRIPTION("Intel PMT Telemetry driver"); MODULE_LICENSE("GPL v2"); +MODULE_IMPORT_NS("INTEL_PMT"); +MODULE_IMPORT_NS("INTEL_VSEC"); diff --git a/drivers/platform/x86/intel/pmt/telemetry.h b/drivers/platform/x86/intel/pmt/telemetry.h new file mode 100644 index 000000000000..d45af5512b4e --- /dev/null +++ b/drivers/platform/x86/intel/pmt/telemetry.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _TELEMETRY_H +#define _TELEMETRY_H + +/* Telemetry types */ +#define PMT_TELEM_TELEMETRY 0 +#define PMT_TELEM_CRASHLOG 1 + +struct telem_endpoint; +struct pci_dev; + +struct telem_header { + u8 access_type; + u16 size; + u32 guid; + u32 base_offset; +}; + +struct telem_endpoint_info { + struct pci_dev *pdev; + struct telem_header header; +}; + +/** + * pmt_telem_get_next_endpoint() - Get next device id for a telemetry endpoint + * @start: starting devid to look from + * + * This function can be used in a while loop predicate to retrieve the devid + * of all available telemetry endpoints. Functions pmt_telem_get_endpoint_info() + * and pmt_telem_register_endpoint() can be used inside the loop to examine + * endpoint info and register to receive a pointer to the endpoint. The pointer + * is then usable in the telemetry read calls to access the telemetry data. + * + * Return: + * * devid - devid of the next present endpoint from start + * * 0 - when no more endpoints are present after start + */ +unsigned long pmt_telem_get_next_endpoint(unsigned long start); + +/** + * pmt_telem_register_endpoint() - Register a telemetry endpoint + * @devid: device id/handle of the telemetry endpoint + * + * Increments the kref usage counter for the endpoint.
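+ *
+ * A minimal usage sketch (illustrative only; MY_GUID is a placeholder and
+ * error handling is trimmed):
+ *
+ *	unsigned long devid = 0;
+ *	struct telem_endpoint *ep = NULL;
+ *
+ *	while ((devid = pmt_telem_get_next_endpoint(devid))) {
+ *		struct telem_endpoint_info info;
+ *
+ *		if (pmt_telem_get_endpoint_info(devid, &info))
+ *			continue;
+ *		if (info.header.guid == MY_GUID) {
+ *			ep = pmt_telem_register_endpoint(devid);
+ *			break;
+ *		}
+ *	}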
+ * + * Return: + * * endpoint - On success returns pointer to the telemetry endpoint + * * -ENXIO - telemetry endpoint not found + */ +struct telem_endpoint *pmt_telem_register_endpoint(int devid); + +/** + * pmt_telem_unregister_endpoint() - Unregister a telemetry endpoint + * @ep: Telemetry endpoint to unregister. + * + * Decrements the kref usage counter for the endpoint. + */ +void pmt_telem_unregister_endpoint(struct telem_endpoint *ep); + +/** + * pmt_telem_get_endpoint_info() - Get info for an endpoint from its devid + * @devid: device id/handle of the telemetry endpoint + * @info: Endpoint info structure to be populated + * + * Return: + * * 0 - Success + * * -ENXIO - telemetry endpoint not found for the devid + * * -EINVAL - @info is NULL + */ +int pmt_telem_get_endpoint_info(int devid, struct telem_endpoint_info *info); + +/** + * pmt_telem_find_and_register_endpoint() - Get a telemetry endpoint from + * pci_dev device, guid and pos + * @pcidev: PCI device inside the Intel vsec + * @guid: GUID of the telemetry space + * @pos: Instance of the guid + * + * Return: + * * endpoint - On success returns pointer to the telemetry endpoint + * * -ENXIO - telemetry endpoint not found + */ +struct telem_endpoint *pmt_telem_find_and_register_endpoint(struct pci_dev *pcidev, + u32 guid, u16 pos); + +/** + * pmt_telem_read() - Read qwords from counter SRAM using sample id + * @ep: Telemetry endpoint to be read + * @id: The beginning sample id of the metric(s) to be read + * @data: Allocated qword buffer + * @count: Number of qwords requested + * + * Callers must ensure reads are aligned. When the call returns -ENODEV, + * the device has been removed and callers should unregister the telemetry + * endpoint. + * + * Return: + * * 0 - Success + * * -ENODEV - The device is not present. + * * -EINVAL - The offset is out of bounds + * * -EPIPE - The device was removed during the read. Data was written + * but should be considered invalid. + */ +int pmt_telem_read(struct telem_endpoint *ep, u32 id, u64 *data, u32 count); + +/** + * pmt_telem_read32() - Read dwords from counter SRAM using sample id + * @ep: Telemetry endpoint to be read + * @id: The beginning sample id of the metric(s) to be read + * @data: Allocated dword buffer + * @count: Number of dwords requested + * + * Callers must ensure reads are aligned. When the call returns -ENODEV, + * the device has been removed and callers should unregister the telemetry + * endpoint. + * + * Return: + * * 0 - Success + * * -ENODEV - The device is not present. + * * -EINVAL - The offset is out of bounds + * * -EPIPE - The device was removed during the read. Data was written + * but should be considered invalid. + */ +int pmt_telem_read32(struct telem_endpoint *ep, u32 id, u32 *data, u32 count); + +#endif diff --git a/drivers/platform/x86/intel/punit_ipc.c b/drivers/platform/x86/intel/punit_ipc.c index 66bb39fd0ef9..14513010daad 100644 --- a/drivers/platform/x86/intel/punit_ipc.c +++ b/drivers/platform/x86/intel/punit_ipc.c @@ -131,39 +131,6 @@ static int intel_punit_ipc_check_status(IPC_DEV *ipcdev, IPC_TYPE type) } /** - * intel_punit_ipc_simple_command() - Simple IPC command - * @cmd: IPC command code. - * @para1: First 8bit parameter, set 0 if not used. - * @para2: Second 8bit parameter, set 0 if not used. - * - * Send a IPC command to P-Unit when there is no data transaction - * - * Return: IPC error code or 0 on success.
- */ -int intel_punit_ipc_simple_command(int cmd, int para1, int para2) -{ - IPC_DEV *ipcdev = punit_ipcdev; - IPC_TYPE type; - u32 val; - int ret; - - mutex_lock(&ipcdev->lock); - - reinit_completion(&ipcdev->cmd_complete); - type = (cmd & IPC_PUNIT_CMD_TYPE_MASK) >> IPC_TYPE_OFFSET; - - val = cmd & ~IPC_PUNIT_CMD_TYPE_MASK; - val |= CMD_RUN | para2 << CMD_PARA2_SHIFT | para1 << CMD_PARA1_SHIFT; - ipc_write_cmd(ipcdev, type, val); - ret = intel_punit_ipc_check_status(ipcdev, type); - - mutex_unlock(&ipcdev->lock); - - return ret; -} -EXPORT_SYMBOL(intel_punit_ipc_simple_command); - -/** * intel_punit_ipc_command() - IPC command with data and pointers * @cmd: IPC command code. * @para1: First 8bit parameter, set 0 if not used. @@ -283,7 +250,7 @@ static int intel_punit_ipc_probe(struct platform_device *pdev) } else { ret = devm_request_irq(&pdev->dev, irq, intel_punit_ioc, IRQF_NO_SUSPEND, "intel_punit_ipc", - &punit_ipcdev); + punit_ipcdev); if (ret) { dev_err(&pdev->dev, "Failed to request irq: %d\n", irq); return ret; @@ -302,11 +269,6 @@ static int intel_punit_ipc_probe(struct platform_device *pdev) return 0; } -static int intel_punit_ipc_remove(struct platform_device *pdev) -{ - return 0; -} - static const struct acpi_device_id punit_ipc_acpi_ids[] = { { "INT34D4", 0 }, { } @@ -315,7 +277,6 @@ MODULE_DEVICE_TABLE(acpi, punit_ipc_acpi_ids); static struct platform_driver intel_punit_ipc_driver = { .probe = intel_punit_ipc_probe, - .remove = intel_punit_ipc_remove, .driver = { .name = "intel_punit_ipc", .acpi_match_table = punit_ipc_acpi_ids, diff --git a/drivers/platform/x86/intel/rst.c b/drivers/platform/x86/intel/rst.c index 35814a7707af..f3a60e14d4c1 100644 --- a/drivers/platform/x86/intel/rst.c +++ b/drivers/platform/x86/intel/rst.c @@ -7,6 +7,7 @@ #include <linux/module.h> #include <linux/slab.h> +MODULE_DESCRIPTION("Intel Rapid Start Technology Driver"); MODULE_LICENSE("GPL"); static ssize_t irst_show_wakeup_events(struct device *dev, @@ -125,7 +126,6 @@ static const struct acpi_device_id irst_ids[] = { }; static struct acpi_driver irst_driver = { - .owner = THIS_MODULE, .name = "intel_rapid_start", .class = "intel_rapid_start", .ids = irst_ids, diff --git a/drivers/platform/x86/intel/sdsi.c b/drivers/platform/x86/intel/sdsi.c index 9e0ea2cdd704..da75f53d0bcc 100644 --- a/drivers/platform/x86/intel/sdsi.c +++ b/drivers/platform/x86/intel/sdsi.c @@ -12,17 +12,17 @@ #include <linux/bits.h> #include <linux/bitfield.h> #include <linux/device.h> +#include <linux/intel_vsec.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/overflow.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/types.h> #include <linux/uaccess.h> -#include "vsec.h" - #define ACCESS_TYPE_BARID 2 #define ACCESS_TYPE_LOCAL 3 @@ -49,7 +49,7 @@ #define SDSI_MBOX_CMD_SUCCESS 0x40 #define SDSI_MBOX_CMD_TIMEOUT 0x80 -#define MBOX_TIMEOUT_US 2000 +#define MBOX_TIMEOUT_US 500000 #define MBOX_TIMEOUT_ACQUIRE_US 1000 #define MBOX_POLLING_PERIOD_US 100 #define MBOX_ACQUIRE_NUM_RETRIES 5 @@ -66,6 +66,8 @@ #define CTRL_OWNER GENMASK(5, 4) #define CTRL_COMPLETE BIT(6) #define CTRL_READY BIT(7) +#define CTRL_INBAND_LOCK BIT(32) +#define CTRL_METER_ENABLE_DRAM BIT(33) #define CTRL_STATUS GENMASK(15, 8) #define CTRL_PACKET_SIZE GENMASK(31, 16) #define CTRL_MSG_SIZE GENMASK(63, 48) @@ -93,6 +95,7 @@ enum sdsi_command { struct sdsi_mbox_info { u64 *payload; void *buffer; + u64 control_flags; int size; }; @@ -156,8 +159,8 @@ static int 
sdsi_status_to_errno(u32 status) } } -static int sdsi_mbox_cmd_read(struct sdsi_priv *priv, struct sdsi_mbox_info *info, - size_t *data_size) +static int sdsi_mbox_poll(struct sdsi_priv *priv, struct sdsi_mbox_info *info, + size_t *data_size) { struct device *dev = priv->dev; u32 total, loop, eom, status, message_size; @@ -166,18 +169,10 @@ static int sdsi_mbox_cmd_read(struct sdsi_priv *priv, struct sdsi_mbox_info *inf lockdep_assert_held(&priv->mb_lock); - /* Format and send the read command */ - control = FIELD_PREP(CTRL_EOM, 1) | - FIELD_PREP(CTRL_SOM, 1) | - FIELD_PREP(CTRL_RUN_BUSY, 1) | - FIELD_PREP(CTRL_PACKET_SIZE, info->size); - writeq(control, priv->control_addr); - /* For reads, data sizes that are larger than the mailbox size are read in packets. */ total = 0; loop = 0; do { - void *buf = info->buffer + (SDSI_SIZE_MAILBOX * loop); u32 packet_size; /* Poll on ready bit */ @@ -195,6 +190,11 @@ static int sdsi_mbox_cmd_read(struct sdsi_priv *priv, struct sdsi_mbox_info *inf if (ret) break; + if (!packet_size) { + sdsi_complete_transaction(priv); + break; + } + /* Only the last packet can be less than the mailbox size. */ if (!eom && packet_size != SDSI_SIZE_MAILBOX) { dev_err(dev, "Invalid packet size\n"); @@ -208,9 +208,13 @@ static int sdsi_mbox_cmd_read(struct sdsi_priv *priv, struct sdsi_mbox_info *inf break; } - sdsi_memcpy64_fromio(buf, priv->mbox_addr, round_up(packet_size, SDSI_SIZE_CMD)); + if (info->buffer) { + void *buf = info->buffer + array_size(SDSI_SIZE_MAILBOX, loop); - total += packet_size; + sdsi_memcpy64_fromio(buf, priv->mbox_addr, + round_up(packet_size, SDSI_SIZE_CMD)); + total += packet_size; + } sdsi_complete_transaction(priv); } while (!eom && ++loop < MBOX_MAX_PACKETS); @@ -230,16 +234,34 @@ static int sdsi_mbox_cmd_read(struct sdsi_priv *priv, struct sdsi_mbox_info *inf dev_warn(dev, "Read count %u differs from expected count %u\n", total, message_size); - *data_size = total; + if (data_size) + *data_size = total; return 0; } -static int sdsi_mbox_cmd_write(struct sdsi_priv *priv, struct sdsi_mbox_info *info) +static int sdsi_mbox_cmd_read(struct sdsi_priv *priv, struct sdsi_mbox_info *info, + size_t *data_size) +{ + u64 control; + + lockdep_assert_held(&priv->mb_lock); + + /* Format and send the read command */ + control = FIELD_PREP(CTRL_EOM, 1) | + FIELD_PREP(CTRL_SOM, 1) | + FIELD_PREP(CTRL_RUN_BUSY, 1) | + FIELD_PREP(CTRL_PACKET_SIZE, info->size) | + info->control_flags; + writeq(control, priv->control_addr); + + return sdsi_mbox_poll(priv, info, data_size); +} + +static int sdsi_mbox_cmd_write(struct sdsi_priv *priv, struct sdsi_mbox_info *info, + size_t *data_size) { u64 control; - u32 status; - int ret; lockdep_assert_held(&priv->mb_lock); @@ -252,23 +274,11 @@ static int sdsi_mbox_cmd_write(struct sdsi_priv *priv, struct sdsi_mbox_info *in FIELD_PREP(CTRL_SOM, 1) | FIELD_PREP(CTRL_RUN_BUSY, 1) | FIELD_PREP(CTRL_READ_WRITE, 1) | + FIELD_PREP(CTRL_MSG_SIZE, info->size) | FIELD_PREP(CTRL_PACKET_SIZE, info->size); writeq(control, priv->control_addr); - /* Poll on ready bit */ - ret = readq_poll_timeout(priv->control_addr, control, control & CTRL_READY, - MBOX_POLLING_PERIOD_US, MBOX_TIMEOUT_US); - - if (ret) - goto release_mbox; - - status = FIELD_GET(CTRL_STATUS, control); - ret = sdsi_status_to_errno(status); - -release_mbox: - sdsi_complete_transaction(priv); - - return ret; + return sdsi_mbox_poll(priv, info, data_size); } static int sdsi_mbox_acquire(struct sdsi_priv *priv, struct sdsi_mbox_info *info) @@ -312,7 +322,8 @@ static int 
sdsi_mbox_acquire(struct sdsi_priv *priv, struct sdsi_mbox_info *info return ret; } -static int sdsi_mbox_write(struct sdsi_priv *priv, struct sdsi_mbox_info *info) +static int sdsi_mbox_write(struct sdsi_priv *priv, struct sdsi_mbox_info *info, + size_t *data_size) { int ret; @@ -322,7 +333,7 @@ static int sdsi_mbox_write(struct sdsi_priv *priv, struct sdsi_mbox_info *info) if (ret) return ret; - return sdsi_mbox_cmd_write(priv, info); + return sdsi_mbox_cmd_write(priv, info, data_size); } static int sdsi_mbox_read(struct sdsi_priv *priv, struct sdsi_mbox_info *info, size_t *data_size) @@ -338,15 +349,24 @@ static int sdsi_mbox_read(struct sdsi_priv *priv, struct sdsi_mbox_info *info, s return sdsi_mbox_cmd_read(priv, info, data_size); } +static bool sdsi_ib_locked(struct sdsi_priv *priv) +{ + return !!FIELD_GET(CTRL_INBAND_LOCK, readq(priv->control_addr)); +} + static ssize_t sdsi_provision(struct sdsi_priv *priv, char *buf, size_t count, enum sdsi_command command) { - struct sdsi_mbox_info info; + struct sdsi_mbox_info info = {}; int ret; if (count > (SDSI_SIZE_WRITE_MSG - SDSI_SIZE_CMD)) return -EOVERFLOW; + /* Make sure In-band lock is not set */ + if (sdsi_ib_locked(priv)) + return -EPERM; + /* Qword aligned message + command qword */ info.size = round_up(count, SDSI_SIZE_CMD) + SDSI_SIZE_CMD; @@ -363,7 +383,9 @@ static ssize_t sdsi_provision(struct sdsi_priv *priv, char *buf, size_t count, ret = mutex_lock_interruptible(&priv->mb_lock); if (ret) goto free_payload; - ret = sdsi_mbox_write(priv, &info); + + ret = sdsi_mbox_write(priv, &info, NULL); + mutex_unlock(&priv->mb_lock); free_payload: @@ -376,8 +398,8 @@ free_payload: } static ssize_t provision_akc_write(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, char *buf, loff_t off, - size_t count) + const struct bin_attribute *attr, char *buf, + loff_t off, size_t count) { struct device *dev = kobj_to_dev(kobj); struct sdsi_priv *priv = dev_get_drvdata(dev); @@ -387,11 +409,11 @@ static ssize_t provision_akc_write(struct file *filp, struct kobject *kobj, return sdsi_provision(priv, buf, count, SDSI_CMD_PROVISION_AKC); } -static BIN_ATTR_WO(provision_akc, SDSI_SIZE_WRITE_MSG); +static const BIN_ATTR_WO(provision_akc, SDSI_SIZE_WRITE_MSG); static ssize_t provision_cap_write(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, char *buf, loff_t off, - size_t count) + const struct bin_attribute *attr, char *buf, + loff_t off, size_t count) { struct device *dev = kobj_to_dev(kobj); struct sdsi_priv *priv = dev_get_drvdata(dev); @@ -401,13 +423,13 @@ static ssize_t provision_cap_write(struct file *filp, struct kobject *kobj, return sdsi_provision(priv, buf, count, SDSI_CMD_PROVISION_CAP); } -static BIN_ATTR_WO(provision_cap, SDSI_SIZE_WRITE_MSG); +static const BIN_ATTR_WO(provision_cap, SDSI_SIZE_WRITE_MSG); static ssize_t -certificate_read(u64 command, struct sdsi_priv *priv, char *buf, loff_t off, - size_t count) +certificate_read(u64 command, u64 control_flags, struct sdsi_priv *priv, + char *buf, loff_t off, size_t count) { - struct sdsi_mbox_info info; + struct sdsi_mbox_info info = {}; size_t size; int ret; @@ -421,6 +443,7 @@ certificate_read(u64 command, struct sdsi_priv *priv, char *buf, loff_t off, info.payload = &command; info.size = sizeof(command); + info.control_flags = control_flags; ret = mutex_lock_interruptible(&priv->mb_lock); if (ret) @@ -446,31 +469,44 @@ free_buffer: static ssize_t state_certificate_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, 
char *buf, loff_t off, + const struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct device *dev = kobj_to_dev(kobj); struct sdsi_priv *priv = dev_get_drvdata(dev); - return certificate_read(SDSI_CMD_READ_STATE, priv, buf, off, count); + return certificate_read(SDSI_CMD_READ_STATE, 0, priv, buf, off, count); } -static BIN_ATTR_ADMIN_RO(state_certificate, SDSI_SIZE_READ_MSG); +static const BIN_ATTR_ADMIN_RO(state_certificate, SDSI_SIZE_READ_MSG); static ssize_t meter_certificate_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, char *buf, loff_t off, + const struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct device *dev = kobj_to_dev(kobj); struct sdsi_priv *priv = dev_get_drvdata(dev); - return certificate_read(SDSI_CMD_READ_METER, priv, buf, off, count); + return certificate_read(SDSI_CMD_READ_METER, 0, priv, buf, off, count); +} +static const BIN_ATTR_ADMIN_RO(meter_certificate, SDSI_SIZE_READ_MSG); + +static ssize_t +meter_current_read(struct file *filp, struct kobject *kobj, + const struct bin_attribute *attr, char *buf, loff_t off, + size_t count) +{ + struct device *dev = kobj_to_dev(kobj); + struct sdsi_priv *priv = dev_get_drvdata(dev); + + return certificate_read(SDSI_CMD_READ_METER, CTRL_METER_ENABLE_DRAM, + priv, buf, off, count); } -static BIN_ATTR_ADMIN_RO(meter_certificate, SDSI_SIZE_READ_MSG); +static const BIN_ATTR_ADMIN_RO(meter_current, SDSI_SIZE_READ_MSG); static ssize_t registers_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, char *buf, loff_t off, - size_t count) + const struct bin_attribute *attr, char *buf, + loff_t off, size_t count) { struct device *dev = kobj_to_dev(kobj); struct sdsi_priv *priv = dev_get_drvdata(dev); @@ -492,19 +528,20 @@ static ssize_t registers_read(struct file *filp, struct kobject *kobj, return count; } -static BIN_ATTR_ADMIN_RO(registers, SDSI_SIZE_REGS); +static const BIN_ATTR_ADMIN_RO(registers, SDSI_SIZE_REGS); -static struct bin_attribute *sdsi_bin_attrs[] = { +static const struct bin_attribute *const sdsi_bin_attrs[] = { &bin_attr_registers, &bin_attr_state_certificate, &bin_attr_meter_certificate, + &bin_attr_meter_current, &bin_attr_provision_akc, &bin_attr_provision_cap, NULL }; static umode_t -sdsi_battr_is_visible(struct kobject *kobj, struct bin_attribute *attr, int n) +sdsi_battr_is_visible(struct kobject *kobj, const struct bin_attribute *attr, int n) { struct device *dev = kobj_to_dev(kobj); struct sdsi_priv *priv = dev_get_drvdata(dev); @@ -517,7 +554,7 @@ sdsi_battr_is_visible(struct kobject *kobj, struct bin_attribute *attr, int n) if (!(priv->features & SDSI_FEATURE_SDSI)) return 0; - if (attr == &bin_attr_meter_certificate) + if (attr == &bin_attr_meter_certificate || attr == &bin_attr_meter_current) return (priv->features & SDSI_FEATURE_METERING) ? 
attr->attr.mode : 0; diff --git a/drivers/platform/x86/intel/smartconnect.c b/drivers/platform/x86/intel/smartconnect.c index 64c2dc93472f..31019a1a6d5e 100644 --- a/drivers/platform/x86/intel/smartconnect.c +++ b/drivers/platform/x86/intel/smartconnect.c @@ -6,6 +6,7 @@ #include <linux/acpi.h> #include <linux/module.h> +MODULE_DESCRIPTION("Intel Smart Connect disabling driver"); MODULE_LICENSE("GPL"); static int smartconnect_acpi_init(struct acpi_device *acpi) @@ -32,7 +33,6 @@ static const struct acpi_device_id smartconnect_ids[] = { MODULE_DEVICE_TABLE(acpi, smartconnect_ids); static struct acpi_driver smartconnect_driver = { - .owner = THIS_MODULE, .name = "intel_smart_connect", .class = "intel_smart_connect", .ids = smartconnect_ids, diff --git a/drivers/platform/x86/intel/speed_select_if/Kconfig b/drivers/platform/x86/intel/speed_select_if/Kconfig index ce3e3dc076d2..4eb3ad299db0 100644 --- a/drivers/platform/x86/intel/speed_select_if/Kconfig +++ b/drivers/platform/x86/intel/speed_select_if/Kconfig @@ -2,8 +2,12 @@ menu "Intel Speed Select Technology interface support" depends on PCI depends on X86_64 || COMPILE_TEST +config INTEL_SPEED_SELECT_TPMI + tristate + config INTEL_SPEED_SELECT_INTERFACE tristate "Intel(R) Speed Select Technology interface drivers" + select INTEL_SPEED_SELECT_TPMI if INTEL_TPMI help This config enables the Intel(R) Speed Select Technology interface drivers. The Intel(R) speed select technology features are non diff --git a/drivers/platform/x86/intel/speed_select_if/Makefile b/drivers/platform/x86/intel/speed_select_if/Makefile index 856076206f35..1d878a36d0ab 100644 --- a/drivers/platform/x86/intel/speed_select_if/Makefile +++ b/drivers/platform/x86/intel/speed_select_if/Makefile @@ -8,3 +8,5 @@ obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += isst_if_common.o obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += isst_if_mmio.o obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += isst_if_mbox_pci.o obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += isst_if_mbox_msr.o +obj-$(CONFIG_INTEL_SPEED_SELECT_TPMI) += isst_tpmi_core.o +obj-$(CONFIG_INTEL_SPEED_SELECT_TPMI) += isst_tpmi.o diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c index a7e02b24a87a..7449873c3d40 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c @@ -19,9 +19,14 @@ #include <linux/uaccess.h> #include <uapi/linux/isst_if.h> +#include <asm/cpu_device_id.h> +#include <asm/intel-family.h> +#include <asm/msr.h> + #include "isst_if_common.h" #define MSR_THREAD_ID_INFO 0x53 +#define MSR_PM_LOGICAL_ID 0x54 #define MSR_CPU_BUS_NUMBER 0x128 static struct isst_if_cmd_cb punit_callbacks[ISST_IF_DEV_MAX]; @@ -31,6 +36,7 @@ static int punit_msr_white_list[] = { MSR_CONFIG_TDP_CONTROL, MSR_TURBO_RATIO_LIMIT1, MSR_TURBO_RATIO_LIMIT2, + MSR_PM_LOGICAL_ID, }; struct isst_valid_cmd_ranges { @@ -47,7 +53,7 @@ struct isst_cmd_set_req_type { static const struct isst_valid_cmd_ranges isst_valid_cmds[] = { {0xD0, 0x00, 0x03}, - {0x7F, 0x00, 0x0B}, + {0x7F, 0x00, 0x0C}, {0x7F, 0x10, 0x12}, {0x7F, 0x20, 0x23}, {0x94, 0x03, 0x03}, @@ -73,11 +79,13 @@ struct isst_cmd { u32 param; }; +static bool isst_hpm_support; + static DECLARE_HASHTABLE(isst_hash, 8); static DEFINE_MUTEX(isst_hash_lock); static int isst_store_new_cmd(int cmd, u32 cpu, int mbox_cmd_type, u32 param, - u32 data) + u64 data) { struct isst_cmd *sst_cmd; @@ -112,6 +120,7 @@ static void isst_delete_hash(void) * 
isst_store_cmd() - Store command to a hash table * @cmd: Mailbox command. * @sub_cmd: Mailbox sub-command or MSR id. + * @cpu: Target CPU for the command * @mbox_cmd_type: Mailbox or MSR command. * @param: Mailbox parameter. * @data: Mailbox request data or MSR data. @@ -183,32 +192,13 @@ void isst_resume_common(void) if (cb->registered) isst_mbox_resume_command(cb, sst_cmd); } else { - wrmsrl_safe_on_cpu(sst_cmd->cpu, sst_cmd->cmd, + wrmsrq_safe_on_cpu(sst_cmd->cpu, sst_cmd->cmd, sst_cmd->data); } } } EXPORT_SYMBOL_GPL(isst_resume_common); -static void isst_restore_msr_local(int cpu) -{ - struct isst_cmd *sst_cmd; - int i; - - mutex_lock(&isst_hash_lock); - for (i = 0; i < ARRAY_SIZE(punit_msr_white_list); ++i) { - if (!punit_msr_white_list[i]) - break; - - hash_for_each_possible(isst_hash, sst_cmd, hnode, - punit_msr_white_list[i]) { - if (!sst_cmd->mbox_cmd_type && sst_cmd->cpu == cpu) - wrmsrl_safe(sst_cmd->cmd, sst_cmd->data); - } - } - mutex_unlock(&isst_hash_lock); -} - /** * isst_if_mbox_cmd_invalid() - Check invalid mailbox commands * @cmd: Pointer to the command structure to verify. @@ -261,11 +251,13 @@ bool isst_if_mbox_cmd_set_req(struct isst_if_mbox_cmd *cmd) } EXPORT_SYMBOL_GPL(isst_if_mbox_cmd_set_req); +static int isst_if_api_version; + static int isst_if_get_platform_info(void __user *argp) { struct isst_if_platform_info info; - info.api_version = ISST_IF_API_VERSION; + info.api_version = isst_if_api_version; info.driver_version = ISST_IF_DRIVER_VERSION; info.max_cmds_per_ioctl = ISST_IF_CMD_LIMIT; info.mbox_supported = punit_callbacks[ISST_IF_DEV_MBOX].registered; @@ -294,31 +286,31 @@ struct isst_if_pkg_info { static struct isst_if_cpu_info *isst_cpu_info; static struct isst_if_pkg_info *isst_pkg_info; -#define ISST_MAX_PCI_DOMAINS 8 - static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn) { struct pci_dev *matched_pci_dev = NULL; struct pci_dev *pci_dev = NULL; + struct pci_dev *_pci_dev = NULL; int no_matches = 0, pkg_id; - int i, bus_number; + int bus_number; if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 || cpu >= nr_cpu_ids || cpu >= num_possible_cpus()) return NULL; - pkg_id = topology_physical_package_id(cpu); + pkg_id = topology_logical_package_id(cpu); + if (pkg_id >= topology_max_packages()) + return NULL; bus_number = isst_cpu_info[cpu].bus_info[bus_no]; if (bus_number < 0) return NULL; - for (i = 0; i < ISST_MAX_PCI_DOMAINS; ++i) { - struct pci_dev *_pci_dev; + for_each_pci_dev(_pci_dev) { int node; - _pci_dev = pci_get_domain_bus_and_slot(i, bus_number, PCI_DEVFN(dev, fn)); - if (!_pci_dev) + if (_pci_dev->bus->number != bus_number || + _pci_dev->devfn != PCI_DEVFN(dev, fn)) continue; ++no_matches; @@ -327,8 +319,8 @@ static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn node = dev_to_node(&_pci_dev->dev); if (node == NUMA_NO_NODE) { - pr_info("Fail to get numa node for CPU:%d bus:%d dev:%d fn:%d\n", - cpu, bus_no, dev, fn); + pr_info_once("Fail to get numa node for CPU:%d bus:%d dev:%d fn:%d\n", + cpu, bus_no, dev, fn); continue; } @@ -363,7 +355,7 @@ static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn /** * isst_if_get_pci_dev() - Get the PCI device instance for a CPU * @cpu: Logical CPU number. - * @bus_number: The bus number assigned by the hardware. + * @bus_no: The bus number assigned by the hardware. * @dev: The device number assigned by the hardware. * @fn: The function number assigned by the hardware. 
* @@ -396,7 +388,7 @@ static int isst_if_cpu_online(unsigned int cpu) isst_cpu_info[cpu].numa_node = cpu_to_node(cpu); - ret = rdmsrl_safe(MSR_CPU_BUS_NUMBER, &data); + ret = rdmsrq_safe(MSR_CPU_BUS_NUMBER, &data); if (ret) { /* This is not a fatal error on MSR mailbox only I/F */ isst_cpu_info[cpu].bus_info[0] = -1; @@ -408,14 +400,21 @@ static int isst_if_cpu_online(unsigned int cpu) isst_cpu_info[cpu].pci_dev[1] = _isst_if_get_pci_dev(cpu, 1, 30, 1); } - ret = rdmsrl_safe(MSR_THREAD_ID_INFO, &data); + if (isst_hpm_support) { + + ret = rdmsrq_safe(MSR_PM_LOGICAL_ID, &data); + if (!ret) + goto set_punit_id; + } + + ret = rdmsrq_safe(MSR_THREAD_ID_INFO, &data); if (ret) { isst_cpu_info[cpu].punit_cpu_id = -1; return ret; } - isst_cpu_info[cpu].punit_cpu_id = data; - isst_restore_msr_local(cpu); +set_punit_id: + isst_cpu_info[cpu].punit_cpu_id = data; return 0; } @@ -505,7 +504,7 @@ static long isst_if_msr_cmd_req(u8 *cmd_ptr, int *write_only, int resume) if (!capable(CAP_SYS_ADMIN)) return -EPERM; - ret = wrmsrl_safe_on_cpu(msr_cmd->logical_cpu, + ret = wrmsrq_safe_on_cpu(msr_cmd->logical_cpu, msr_cmd->msr, msr_cmd->data); *write_only = 1; @@ -516,7 +515,7 @@ static long isst_if_msr_cmd_req(u8 *cmd_ptr, int *write_only, int resume) } else { u64 data; - ret = rdmsrl_safe_on_cpu(msr_cmd->logical_cpu, + ret = rdmsrq_safe_on_cpu(msr_cmd->logical_cpu, msr_cmd->msr, &data); if (!ret) { msr_cmd->data = data; @@ -587,6 +586,7 @@ static long isst_if_def_ioctl(struct file *file, unsigned int cmd, struct isst_if_cmd_cb cmd_cb; struct isst_if_cmd_cb *cb; long ret = -ENOTTY; + int i; switch (cmd) { case ISST_IF_GET_PLATFORM_INFO: @@ -615,6 +615,16 @@ static long isst_if_def_ioctl(struct file *file, unsigned int cmd, ret = isst_if_exec_multi_cmd(argp, &cmd_cb); break; default: + for (i = 0; i < ISST_IF_DEV_MAX; ++i) { + struct isst_if_cmd_cb *cb = &punit_callbacks[i]; + int ret; + + if (cb->def_ioctl) { + ret = cb->def_ioctl(file, cmd, arg); + if (!ret) + return ret; + } + } break; } @@ -623,10 +633,6 @@ static long isst_if_def_ioctl(struct file *file, unsigned int cmd, /* Lock to prevent module registration when already opened by user space */ static DEFINE_MUTEX(punit_misc_dev_open_lock); -/* Lock to allow one shared misc device for all ISST interfaces */ -static DEFINE_MUTEX(punit_misc_dev_reg_lock); -static int misc_usage_count; -static int misc_device_ret; static int misc_device_open; static int isst_if_open(struct inode *inode, struct file *file) @@ -692,39 +698,23 @@ static struct miscdevice isst_if_char_driver = { static int isst_misc_reg(void) { - mutex_lock(&punit_misc_dev_reg_lock); - if (misc_device_ret) - goto unlock_exit; - - if (!misc_usage_count) { - misc_device_ret = isst_if_cpu_info_init(); - if (misc_device_ret) - goto unlock_exit; - - misc_device_ret = misc_register(&isst_if_char_driver); - if (misc_device_ret) { - isst_if_cpu_info_exit(); - goto unlock_exit; - } - } - misc_usage_count++; + int ret; -unlock_exit: - mutex_unlock(&punit_misc_dev_reg_lock); + ret = isst_if_cpu_info_init(); + if (ret) + return ret; + + ret = misc_register(&isst_if_char_driver); + if (ret) + isst_if_cpu_info_exit(); - return misc_device_ret; + return ret; } static void isst_misc_unreg(void) { - mutex_lock(&punit_misc_dev_reg_lock); - if (misc_usage_count) - misc_usage_count--; - if (!misc_usage_count && !misc_device_ret) { - misc_deregister(&isst_if_char_driver); - isst_if_cpu_info_exit(); - } - mutex_unlock(&punit_misc_dev_reg_lock); + misc_deregister(&isst_if_char_driver); + isst_if_cpu_info_exit(); } 
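Filling in a callback descriptor and handing it to this common module is all an interface driver has to do; a minimal sketch, modeled on the isst_if_mmio.c hunk below (my_mmio_rd_wr is a hypothetical callback name):

	static long my_mmio_rd_wr(u8 *cmd_ptr, int *write_only, int resume);

	struct isst_if_cmd_cb cb;
	int ret;

	memset(&cb, 0, sizeof(cb));
	cb.cmd_size = sizeof(struct isst_if_io_reg);
	cb.offset = offsetof(struct isst_if_io_regs, io_reg);
	cb.cmd_callback = my_mmio_rd_wr;
	ret = isst_if_cdev_register(ISST_IF_DEV_MMIO, &cb);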
/** @@ -744,30 +734,26 @@ static void isst_misc_unreg(void) */ int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb) { - int ret; - if (device_type >= ISST_IF_DEV_MAX) return -EINVAL; + if (device_type < ISST_IF_DEV_TPMI && isst_hpm_support) + return -ENODEV; + mutex_lock(&punit_misc_dev_open_lock); /* Device is already open, we don't want to add new callbacks */ if (misc_device_open) { mutex_unlock(&punit_misc_dev_open_lock); return -EAGAIN; } + if (!cb->api_version) + cb->api_version = ISST_IF_API_VERSION; + if (cb->api_version > isst_if_api_version) + isst_if_api_version = cb->api_version; memcpy(&punit_callbacks[device_type], cb, sizeof(*cb)); punit_callbacks[device_type].registered = 1; mutex_unlock(&punit_misc_dev_open_lock); - ret = isst_misc_reg(); - if (ret) { - /* - * No need of mutex as the misc device register failed - * as no one can open device yet. Hence no contention. - */ - punit_callbacks[device_type].registered = 0; - return ret; - } return 0; } EXPORT_SYMBOL_GPL(isst_if_cdev_register); @@ -783,8 +769,8 @@ EXPORT_SYMBOL_GPL(isst_if_cdev_register); */ void isst_if_cdev_unregister(int device_type) { - isst_misc_unreg(); mutex_lock(&punit_misc_dev_open_lock); + punit_callbacks[device_type].def_ioctl = NULL; punit_callbacks[device_type].registered = 0; if (device_type == ISST_IF_DEV_MBOX) isst_delete_hash(); @@ -792,4 +778,53 @@ void isst_if_cdev_unregister(int device_type) } EXPORT_SYMBOL_GPL(isst_if_cdev_unregister); +#define SST_HPM_SUPPORTED 0x01 +#define SST_MBOX_SUPPORTED 0x02 + +static const struct x86_cpu_id isst_cpu_ids[] = { + X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, SST_HPM_SUPPORTED), + X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, SST_HPM_SUPPORTED), + X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X, SST_HPM_SUPPORTED), + X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, 0), + X86_MATCH_VFM(INTEL_GRANITERAPIDS_D, SST_HPM_SUPPORTED), + X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, SST_HPM_SUPPORTED), + X86_MATCH_VFM(INTEL_ICELAKE_D, 0), + X86_MATCH_VFM(INTEL_ICELAKE_X, 0), + X86_MATCH_VFM(INTEL_DIAMONDRAPIDS_X, SST_HPM_SUPPORTED), + X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, 0), + X86_MATCH_VFM(INTEL_SKYLAKE_X, SST_MBOX_SUPPORTED), + {} +}; +MODULE_DEVICE_TABLE(x86cpu, isst_cpu_ids); + +static int __init isst_if_common_init(void) +{ + const struct x86_cpu_id *id; + + id = x86_match_cpu(isst_cpu_ids); + if (!id) + return -ENODEV; + + if (id->driver_data == SST_HPM_SUPPORTED) { + isst_hpm_support = true; + } else if (id->driver_data == SST_MBOX_SUPPORTED) { + u64 data; + + /* Can fail only on some Skylake-X generations */ + if (rdmsrq_safe(MSR_OS_MAILBOX_INTERFACE, &data) || + rdmsrq_safe(MSR_OS_MAILBOX_DATA, &data)) + return -ENODEV; + } + + return isst_misc_reg(); +} +module_init(isst_if_common_init) + +static void __exit isst_if_common_exit(void) +{ + isst_misc_unreg(); +} +module_exit(isst_if_common_exit) + +MODULE_DESCRIPTION("ISST common interface module"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.h b/drivers/platform/x86/intel/speed_select_if/isst_if_common.h index fdecdae248d7..378055fe1d16 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.h +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.h @@ -16,6 +16,9 @@ #define PCI_DEVICE_ID_INTEL_RAPL_PRIO_DEVID_1 0x3251 #define PCI_DEVICE_ID_INTEL_CFG_MBOX_DEVID_1 0x3259 +#define MSR_OS_MAILBOX_INTERFACE 0xB0 +#define MSR_OS_MAILBOX_DATA 0xB1 + /* * Validate maximum commands in a single request. 
* This is enough to handle command to every core in one ioctl, or all @@ -30,7 +33,8 @@ #define ISST_IF_DEV_MBOX 0 #define ISST_IF_DEV_MMIO 1 -#define ISST_IF_DEV_MAX 2 +#define ISST_IF_DEV_TPMI 2 +#define ISST_IF_DEV_MAX 3 /** * struct isst_if_cmd_cb - Used to register a IOCTL handler @@ -40,12 +44,16 @@ * @offset: Offset to the first valid member in command structure. * This will be the offset of the start of the command * after command count field + * @api_version: API version supported for this target. 0, if none. + * @owner: Registered module owner * @cmd_callback: Callback function to handle IOCTL. The callback has the * command pointer with data for command. There is a pointer * called write_only, which when set, will not copy the * response to user ioctl buffer. The "resume" argument * can be used to avoid storing the command for replay * during system resume + * @def_ioctl: Default IOCTL handler callback, if there is no match in + * the existing list of IOCTL handled by the common handler. * * This structure is used to register an handler for IOCTL. To avoid * code duplication common code handles all the IOCTL command read/write @@ -56,8 +64,10 @@ struct isst_if_cmd_cb { int registered; int cmd_size; int offset; + int api_version; struct module *owner; long (*cmd_callback)(u8 *ptr, int *write_only, int resume); + long (*def_ioctl)(struct file *file, unsigned int cmd, unsigned long arg); }; /* Internal interface functions */ diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c b/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c index 1b6eab071068..22745b217c6f 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c @@ -18,11 +18,10 @@ #include <uapi/linux/isst_if.h> #include <asm/cpu_device_id.h> #include <asm/intel-family.h> +#include <asm/msr.h> #include "isst_if_common.h" -#define MSR_OS_MAILBOX_INTERFACE 0xB0 -#define MSR_OS_MAILBOX_DATA 0xB1 #define MSR_OS_MAILBOX_BUSY_BIT 31 /* @@ -41,7 +40,7 @@ static int isst_if_send_mbox_cmd(u8 command, u8 sub_command, u32 parameter, /* Poll for rb bit == 0 */ retries = OS_MAILBOX_RETRY_COUNT; do { - rdmsrl(MSR_OS_MAILBOX_INTERFACE, data); + rdmsrq(MSR_OS_MAILBOX_INTERFACE, data); if (data & BIT_ULL(MSR_OS_MAILBOX_BUSY_BIT)) { ret = -EBUSY; continue; @@ -54,19 +53,19 @@ static int isst_if_send_mbox_cmd(u8 command, u8 sub_command, u32 parameter, return ret; /* Write DATA register */ - wrmsrl(MSR_OS_MAILBOX_DATA, command_data); + wrmsrq(MSR_OS_MAILBOX_DATA, command_data); /* Write command register */ data = BIT_ULL(MSR_OS_MAILBOX_BUSY_BIT) | (parameter & GENMASK_ULL(13, 0)) << 16 | (sub_command << 8) | command; - wrmsrl(MSR_OS_MAILBOX_INTERFACE, data); + wrmsrq(MSR_OS_MAILBOX_INTERFACE, data); /* Poll for rb bit == 0 */ retries = OS_MAILBOX_RETRY_COUNT; do { - rdmsrl(MSR_OS_MAILBOX_INTERFACE, data); + rdmsrq(MSR_OS_MAILBOX_INTERFACE, data); if (data & BIT_ULL(MSR_OS_MAILBOX_BUSY_BIT)) { ret = -EBUSY; continue; @@ -76,7 +75,7 @@ static int isst_if_send_mbox_cmd(u8 command, u8 sub_command, u32 parameter, return -ENXIO; if (response_data) { - rdmsrl(MSR_OS_MAILBOX_DATA, data); + rdmsrq(MSR_OS_MAILBOX_DATA, data); *response_data = data; } ret = 0; @@ -161,7 +160,7 @@ static struct notifier_block isst_pm_nb = { }; static const struct x86_cpu_id isst_if_cpu_ids[] = { - X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL), + X86_MATCH_VFM(INTEL_SKYLAKE_X, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, isst_if_cpu_ids); @@ -178,11 +177,11 @@ 
static int __init isst_if_mbox_init(void) return -ENODEV; /* Check presence of mailbox MSRs */ - ret = rdmsrl_safe(MSR_OS_MAILBOX_INTERFACE, &data); + ret = rdmsrq_safe(MSR_OS_MAILBOX_INTERFACE, &data); if (ret) return ret; - ret = rdmsrl_safe(MSR_OS_MAILBOX_DATA, &data); + ret = rdmsrq_safe(MSR_OS_MAILBOX_DATA, &data); if (ret) return ret; diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c b/drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c index ff49025ec085..950ede5eab76 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c @@ -18,16 +18,17 @@ struct isst_mmio_range { int beg; int end; + int size; }; static struct isst_mmio_range mmio_range_devid_0[] = { - {0x04, 0x14}, - {0x20, 0xD0}, + {0x04, 0x14, 0x18}, + {0x20, 0xD0, 0xD4}, }; static struct isst_mmio_range mmio_range_devid_1[] = { - {0x04, 0x14}, - {0x20, 0x11C}, + {0x04, 0x14, 0x18}, + {0x20, 0x11C, 0x120}, }; struct isst_if_device { @@ -93,6 +94,7 @@ static int isst_if_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct isst_if_device *punit_dev; struct isst_if_cmd_cb cb; u32 mmio_base, pcu_base; + struct resource r; u64 base_addr; int ret; @@ -106,21 +108,24 @@ static int isst_if_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ret = pci_read_config_dword(pdev, 0xD0, &mmio_base); if (ret) - return ret; + return pcibios_err_to_errno(ret); ret = pci_read_config_dword(pdev, 0xFC, &pcu_base); if (ret) - return ret; + return pcibios_err_to_errno(ret); pcu_base &= GENMASK(10, 0); base_addr = (u64)mmio_base << 23 | (u64) pcu_base << 12; - punit_dev->punit_mmio = devm_ioremap(&pdev->dev, base_addr, 256); - if (!punit_dev->punit_mmio) - return -ENOMEM; + + punit_dev->mmio_range = (struct isst_mmio_range *) ent->driver_data; + + r = DEFINE_RES_MEM(base_addr, punit_dev->mmio_range[1].size); + punit_dev->punit_mmio = devm_ioremap_resource(&pdev->dev, &r); + if (IS_ERR(punit_dev->punit_mmio)) + return PTR_ERR(punit_dev->punit_mmio); mutex_init(&punit_dev->mutex); pci_set_drvdata(pdev, punit_dev); - punit_dev->mmio_range = (struct isst_mmio_range *) ent->driver_data; memset(&cb, 0, sizeof(cb)); cb.cmd_size = sizeof(struct isst_if_io_reg); diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi.c b/drivers/platform/x86/intel/speed_select_if/isst_tpmi.c new file mode 100644 index 000000000000..bcf0a5cbc68d --- /dev/null +++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi.c @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * isst_tpmi.c: SST TPMI interface + * + * Copyright (c) 2023, Intel Corporation. + * All Rights Reserved. 
+ * + */ + +#include <linux/auxiliary_bus.h> +#include <linux/module.h> +#include <linux/intel_tpmi.h> + +#include "isst_tpmi_core.h" + +static int intel_sst_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id) +{ + int ret; + + ret = tpmi_sst_init(); + if (ret) + return ret; + + ret = tpmi_sst_dev_add(auxdev); + if (ret) + tpmi_sst_exit(); + + return ret; +} + +static void intel_sst_remove(struct auxiliary_device *auxdev) +{ + tpmi_sst_dev_remove(auxdev); + tpmi_sst_exit(); +} + +static int intel_sst_suspend(struct device *dev) +{ + tpmi_sst_dev_suspend(to_auxiliary_dev(dev)); + + return 0; +} + +static int intel_sst_resume(struct device *dev) +{ + tpmi_sst_dev_resume(to_auxiliary_dev(dev)); + + return 0; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(intel_sst_pm, intel_sst_suspend, intel_sst_resume); + +static const struct auxiliary_device_id intel_sst_id_table[] = { + { .name = "intel_vsec.tpmi-sst" }, + {} +}; +MODULE_DEVICE_TABLE(auxiliary, intel_sst_id_table); + +static struct auxiliary_driver intel_sst_aux_driver = { + .id_table = intel_sst_id_table, + .remove = intel_sst_remove, + .probe = intel_sst_probe, + .driver = { + .pm = pm_sleep_ptr(&intel_sst_pm), + }, +}; + +module_auxiliary_driver(intel_sst_aux_driver); + +MODULE_IMPORT_NS("INTEL_TPMI_SST"); +MODULE_DESCRIPTION("Intel TPMI SST Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c new file mode 100644 index 000000000000..34bff2f65a83 --- /dev/null +++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c @@ -0,0 +1,1833 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * isst_tpmi_core.c: SST TPMI interface core + * + * Copyright (c) 2023, Intel Corporation. + * All Rights Reserved. + * + * This information is useful to understand the flows: + * In the current generation of platforms, TPMI is supported via OOB + * PCI device. This PCI device has one instance per CPU package. + * There is a unique TPMI ID for SST. Each TPMI ID also has multiple + * entries, representing per power domain information. + * + * There is one dev file for complete SST information and control, same as the + * prior generation of hardware. User space doesn't need to know how the + * information is presented by the hardware. The TPMI core module implements + * the hardware mapping. + */ + +#define dev_fmt(fmt) "tpmi_sst: " fmt + +#include <linux/auxiliary_bus.h> +#include <linux/delay.h> +#include <linux/intel_tpmi.h> +#include <linux/intel_vsec.h> +#include <linux/fs.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/minmax.h> +#include <linux/module.h> +#include <asm/msr.h> +#include <uapi/linux/isst_if.h> + +#include "isst_tpmi_core.h" +#include "isst_if_common.h" + +/* Supported SST hardware version by this driver */ +#define ISST_MAJOR_VERSION 0 +#define ISST_MINOR_VERSION 2 + +/* + * Used to indicate if value read from MMIO needs to get multiplied + * to get to a standard unit or not. + */ +#define SST_MUL_FACTOR_NONE 1 + +/* Define 100 as a scaling factor for frequency ratio to frequency conversion */ +#define SST_MUL_FACTOR_FREQ 100 + +/* All SST regs are 64 bits in size */ +#define SST_REG_SIZE 8 + +/** + * struct sst_header - SST main header + * @interface_version: Version number for this interface + * @cap_mask: Bitmask of the supported sub features. 1=the sub feature is enabled. + * 0=disabled.
+ * Bit[8]= SST_CP enable (1), disable (0) + * Bit[9]= SST_PP enable (1), disable (0) + * other bits are reserved for future use + * @cp_offset: Qword (8 bytes) offset to the SST_CP register bank + * @pp_offset: Qword (8 bytes) offset to the SST_PP register bank + * @reserved: Reserved for future use + * + * This register allows SW to discover SST capability and the offsets to SST-CP + * and SST-PP register banks. + */ +struct sst_header { + u8 interface_version; + u8 cap_mask; + u8 cp_offset; + u8 pp_offset; + u32 reserved; +} __packed; + +/** + * struct cp_header - SST-CP (core-power) header + * @feature_id: 0=SST-CP, 1=SST-PP, 2=SST-BF, 3=SST-TF + * @feature_rev: Interface Version number for this SST feature + * @ratio_unit: Frequency ratio unit. 00: 100MHz. All others are reserved + * @reserved: Reserved for future use + * + * This structure is used to store the SST-CP header. This is packed to the same + * format as defined in the specifications. + */ +struct cp_header { + u64 feature_id :4; + u64 feature_rev :8; + u64 ratio_unit :2; + u64 reserved :50; +} __packed; + +/** + * struct pp_header - SST-PP (Perf profile) header + * @feature_id: 0=SST-CP, 1=SST-PP, 2=SST-BF, 3=SST-TF + * @feature_rev: Interface Version number for this SST feature + * @level_en_mask: SST-PP level enable/disable fuse mask + * @allowed_level_mask: Allowed level mask used for dynamic config level switching + * @reserved0: Reserved for future use + * @ratio_unit: Frequency ratio unit. 00: 100MHz. All others are reserved + * @block_size: Size of PP block in Qword unit (8 bytes) + * @dynamic_switch: If set (1), dynamic switching of SST PP is supported + * @memory_ratio_unit: Memory Controller frequency ratio unit. 00: 100MHz, others reserved + * @reserved1: Reserved for future use + * + * This structure is used to store the SST-PP header. This is packed to the same + * format as defined in the specifications. + */ +struct pp_header { + u64 feature_id :4; + u64 feature_rev :8; + u64 level_en_mask :8; + u64 allowed_level_mask :8; + u64 reserved0 :4; + u64 ratio_unit :2; + u64 block_size :8; + u64 dynamic_switch :1; + u64 memory_ratio_unit :2; + u64 reserved1 :19; +} __packed; + +/** + * struct feature_offset - Offsets to SST-PP features + * @pp_offset: Qword offset within PP level for the SST_PP register bank + * @bf_offset: Qword offset within PP level for the SST_BF register bank + * @tf_offset: Qword offset within PP level for the SST_TF register bank + * @reserved: Reserved for future use + * + * This structure is used to store offsets for SST features in the register bank. + * This is packed to the same format as defined in the specifications. + */ +struct feature_offset { + u64 pp_offset :8; + u64 bf_offset :8; + u64 tf_offset :8; + u64 reserved :40; +} __packed; + +/** + * struct levels_offset - Offsets to each SST PP level + * @sst_pp_level0_offset: Qword offset to the register block of PP level 0 + * @sst_pp_level1_offset: Qword offset to the register block of PP level 1 + * @sst_pp_level2_offset: Qword offset to the register block of PP level 2 + * @sst_pp_level3_offset: Qword offset to the register block of PP level 3 + * @sst_pp_level4_offset: Qword offset to the register block of PP level 4 + * @reserved: Reserved for future use + * + * This structure is used to store offsets of SST PP levels in the register bank. + * This is packed to the same format as defined in the specifications.
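+ *
+ * As elsewhere in this interface the offsets are in qword units; for
+ * example, an sst_pp_level1_offset value of 0x0A would place the PP level 1
+ * register block at byte offset 0x0A * 8 = 0x50 from the SST base.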
+ */ +struct levels_offset { + u64 sst_pp_level0_offset :8; + u64 sst_pp_level1_offset :8; + u64 sst_pp_level2_offset :8; + u64 sst_pp_level3_offset :8; + u64 sst_pp_level4_offset :8; + u64 reserved :24; +} __packed; + +/** + * struct pp_control_offset - Offsets for SST PP controls + * @perf_level: An SST-PP level that SW intends to switch to + * @perf_level_lock: SST-PP level select lock. 0 - unlocked. 1 - locked till next reset + * @resvd0: Reserved for future use + * @current_state: Bit mask to control the enable(1)/disable(0) state of each feature + * of the current PP level, bit 0 = BF, bit 1 = TF, bit 2-7 = reserved + * @reserved: Reserved for future use + * + * This structure is used to store offsets of SST PP controls in the register bank. + * This is packed to the same format as defined in the specifications. + */ +struct pp_control_offset { + u64 perf_level :3; + u64 perf_level_lock :1; + u64 resvd0 :4; + u64 current_state :8; + u64 reserved :48; +} __packed; + +/** + * struct pp_status_offset - Offsets for SST PP status fields + * @sst_pp_level: Returns the current SST-PP level + * @sst_pp_lock: Returns the lock bit setting of perf_level_lock in pp_control_offset + * @error_type: Returns last error of SST-PP level change request. 0: no error, + * 1: level change not allowed, others: reserved + * @feature_state: Bit mask to indicate the enable(1)/disable(0) state of each feature of the + * current PP level. bit 0 = BF, bit 1 = TF, bit 2-7 reserved + * @reserved0: Reserved for future use + * @feature_error_type: Returns last error of the specific feature. Three error_type bits per + * feature. i.e. ERROR_TYPE[2:0] for BF, ERROR_TYPE[5:3] for TF, etc. + * 0x0: no error, 0x1: The specific feature is not supported by the hardware. + * 0x2-0x6: Reserved. 0x7: feature state change is not allowed. + * @reserved1: Reserved for future use + * + * This structure is used to store offsets of SST PP status in the register bank. + * This is packed to the same format as defined in the specifications. + */ +struct pp_status_offset { + u64 sst_pp_level :3; + u64 sst_pp_lock :1; + u64 error_type :4; + u64 feature_state :8; + u64 reserved0 :16; + u64 feature_error_type :24; + u64 reserved1 :8; +} __packed; + +/** + * struct perf_level - Used to store perf level and mmio offset + * @mmio_offset: mmio offset for a perf level + * @level: perf level for this offset + * + * This structure is used to store the final mmio offset of each perf level from the + * SST base mmio offset. + */ +struct perf_level { + int mmio_offset; + int level; +}; + +/** + * struct tpmi_per_power_domain_info - Store per power_domain SST info + * @package_id: Package id for this power_domain + * @power_domain_id: Power domain id. Each entry from the SST-TPMI instance is a power_domain.
+ * @max_level: Max PP level possible for this power_domain + * @ratio_unit: Ratio unit for converting to MHz + * @avx_levels: Number of AVX levels + * @pp_block_size: Block size from PP header + * @sst_header: Store SST header for this power_domain + * @cp_header: Store SST-CP header for this power_domain + * @pp_header: Store SST-PP header for this power_domain + * @perf_levels: Pointer to each perf level to map level to mmio offset + * @feature_offsets: Store feature offsets for each PP-level + * @control_offset: Store the control offset for each PP-level + * @status_offset: Store the status offset for each PP-level + * @sst_base: Mapped SST base IO memory + * @auxdev: Auxiliary device instance that enumerated this instance + * @saved_sst_cp_control: Save SST-CP control configuration to restore during suspend/resume + * @saved_clos_configs: Save SST-CP CLOS configuration to restore during suspend/resume + * @saved_clos_assocs: Save SST-CP CLOS association to restore during suspend/resume + * @saved_pp_control: Save SST-PP control information to restore during suspend/resume + * @write_blocked: Write operation is blocked, so can't change SST state + * + * This structure is used to store complete SST information for a power_domain. This information + * is used to serve read/write requests for any SST IOCTL. Each physical CPU package can have multiple + * power_domains. Each power domain describes its own SST information and has its own controls. + */ +struct tpmi_per_power_domain_info { + int package_id; + int power_domain_id; + int max_level; + int ratio_unit; + int avx_levels; + int pp_block_size; + struct sst_header sst_header; + struct cp_header cp_header; + struct pp_header pp_header; + struct perf_level *perf_levels; + struct feature_offset feature_offsets; + struct pp_control_offset control_offset; + struct pp_status_offset status_offset; + void __iomem *sst_base; + struct auxiliary_device *auxdev; + u64 saved_sst_cp_control; + u64 saved_clos_configs[4]; + u64 saved_clos_assocs[4]; + u64 saved_pp_control; + bool write_blocked; +}; + +/* Supported maximum partitions */ +#define SST_MAX_PARTITIONS 2 + +/** + * struct tpmi_sst_struct - Store sst info for a package + * @package_id: Package id for this aux device instance + * @number_of_power_domains: Number of power_domains pointed to by the power_domain_info pointer + * @power_domain_info: Pointer to power domains information + * @cdie_mask: Mask of compute dies present in a partition from hardware. + * This mask is not present in the version 1 information header. + * @io_dies: Number of IO dies in a partition. This will be 0 for TPMI + * version 1 information header. + * @partition_mask: Mask of all partitions. + * @partition_mask_current: Current partition mask as some may have been unbound. + * + * This structure is used to store full SST information for a package. + * Each package has one or multiple OOB PCI devices. Each package can contain multiple + * power domains. + */ +struct tpmi_sst_struct { + int package_id; + struct tpmi_per_power_domain_info *power_domain_info[SST_MAX_PARTITIONS]; + u16 cdie_mask[SST_MAX_PARTITIONS]; + u8 number_of_power_domains[SST_MAX_PARTITIONS]; + u8 io_dies[SST_MAX_PARTITIONS]; + u8 partition_mask; + u8 partition_mask_current; +}; + +/** + * struct tpmi_sst_common_struct - Store all SST instances + * @max_index: Maximum instances currently present + * @sst_inst: Pointer to per package instance + * + * Stores every SST package instance.
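+ *
+ * For instance, on a two-package system sst_inst[0] and sst_inst[1] hold
+ * the per-package instances and max_index is 1.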
+ */ +struct tpmi_sst_common_struct { + int max_index; + struct tpmi_sst_struct **sst_inst; +}; + +/* + * Each IOCTL request is processed under this lock. Also used to protect + * registration functions and common data structures. + */ +static DEFINE_MUTEX(isst_tpmi_dev_lock); + +/* Usage count to track the number of TPMI SST instances registered to this core. */ +static int isst_core_usage_count; + +/* Stores complete SST information for every package and power_domain */ +static struct tpmi_sst_common_struct isst_common; + +#define SST_MAX_AVX_LEVELS 3 + +#define SST_PP_OFFSET_0 8 +#define SST_PP_OFFSET_1 16 +#define SST_PP_OFFSET_SIZE 8 + +static int sst_add_perf_profiles(struct auxiliary_device *auxdev, + struct tpmi_per_power_domain_info *pd_info, + int levels) +{ + struct device *dev = &auxdev->dev; + u64 perf_level_offsets; + int i; + + pd_info->perf_levels = devm_kcalloc(dev, levels, sizeof(struct perf_level), GFP_KERNEL); + if (!pd_info->perf_levels) + return 0; + + pd_info->ratio_unit = pd_info->pp_header.ratio_unit; + pd_info->avx_levels = SST_MAX_AVX_LEVELS; + pd_info->pp_block_size = pd_info->pp_header.block_size; + + /* Read PP Offset 0: Get feature offsets within a PP level */ + *((u64 *)&pd_info->feature_offsets) = readq(pd_info->sst_base + + pd_info->sst_header.pp_offset + + SST_PP_OFFSET_0); + + perf_level_offsets = readq(pd_info->sst_base + pd_info->sst_header.pp_offset + + SST_PP_OFFSET_1); + + for (i = 0; i < levels; ++i) { + u64 offset; + + offset = perf_level_offsets & (0xffULL << (i * SST_PP_OFFSET_SIZE)); + offset >>= (i * 8); + offset &= 0xff; + offset *= 8; /* Convert QWORD offset to byte offset */ + pd_info->perf_levels[i].mmio_offset = pd_info->sst_header.pp_offset + offset; + } + + return 0; +} + +static int sst_main(struct auxiliary_device *auxdev, struct tpmi_per_power_domain_info *pd_info) +{ + struct device *dev = &auxdev->dev; + int i, mask, levels; + + *((u64 *)&pd_info->sst_header) = readq(pd_info->sst_base); + pd_info->sst_header.cp_offset *= 8; + pd_info->sst_header.pp_offset *= 8; + + if (pd_info->sst_header.interface_version == TPMI_VERSION_INVALID) + return -ENODEV; + + if (TPMI_MAJOR_VERSION(pd_info->sst_header.interface_version) != ISST_MAJOR_VERSION) { + dev_err(dev, "SST: Unsupported major version:%lx\n", + TPMI_MAJOR_VERSION(pd_info->sst_header.interface_version)); + return -ENODEV; + } + + if (TPMI_MINOR_VERSION(pd_info->sst_header.interface_version) > ISST_MINOR_VERSION) + dev_info(dev, "SST: Ignore: Unsupported minor version:%lx\n", + TPMI_MINOR_VERSION(pd_info->sst_header.interface_version)); + + /* Read SST CP Header */ + *((u64 *)&pd_info->cp_header) = readq(pd_info->sst_base + pd_info->sst_header.cp_offset); + + /* Read PP header */ + *((u64 *)&pd_info->pp_header) = readq(pd_info->sst_base + pd_info->sst_header.pp_offset); + + mask = 0x01; + levels = 0; + for (i = 0; i < 8; ++i) { + if (pd_info->pp_header.level_en_mask & mask) + levels = i; + mask <<= 1; + } + pd_info->max_level = levels; + sst_add_perf_profiles(auxdev, pd_info, levels + 1); + + return 0; +} + +static u8 isst_instance_count(struct tpmi_sst_struct *sst_inst) +{ + u8 i, max_part, count = 0; + + /* Partition mask starts from bit 0 and contains 1s only */ + max_part = hweight8(sst_inst->partition_mask); + for (i = 0; i < max_part; i++) + count += sst_inst->number_of_power_domains[i]; + + return count; +} + +/** + * map_cdies() - Map user domain ID to compute domain ID + * @sst_inst: TPMI Instance + * @id: User domain ID + * @partition: Resolved partition + * + * Helper function for 
map_partition_power_domain_id() to resolve compute + * domain ID and partition. Use hardware provided cdie_mask for a partition + * as is to resolve a compute domain ID. + * + * Return: %-EINVAL on error, otherwise mapped domain ID >= 0. + */ +static int map_cdies(struct tpmi_sst_struct *sst_inst, u8 id, u8 *partition) +{ + u8 i, max_part; + + max_part = hweight8(sst_inst->partition_mask); + for (i = 0; i < max_part; i++) { + if (!(sst_inst->cdie_mask[i] & BIT(id))) + continue; + + *partition = i; + return id - ffs(sst_inst->cdie_mask[i]) + 1; + } + + return -EINVAL; +} + +/** + * map_partition_power_domain_id() - Map user domain ID to partition domain ID + * @sst_inst: TPMI Instance + * @id: User domain ID + * @partition: Resolved partition + * + * In a partitioned system a CPU package has two separate MMIO ranges (Under + * two PCI devices). But the CPU package compute die/power domain IDs are + * unique in a package. User space can get compute die/power domain ID from + * CPUID and MSR 0x54 for a CPU. So, those IDs need to be preserved even if + * they are present in two different partitions with its own order. + * + * For example for command ISST_IF_COUNT_TPMI_INSTANCES, the valid_mask + * is 111111b for a 4 compute and 2 IO dies system. This is presented as + * provided by the hardware in a non-partitioned system with the following + * order: + * I1-I0-C3-C2-C1-C0 + * Here: "C": for compute and "I" for IO die. + * Compute dies are always present first in TPMI instances, as they have + * to map to the real power domain/die ID of a system. In a non-partitioned + * system there is no way to identify compute and IO die boundaries from + * this driver without reading each CPU's mapping. + * + * The same order needs to be preserved, even if those compute dies are + * distributed among multiple partitions. For example: + * Partition 1 can contain: I1-C1-C0 + * Partition 2 can contain: I2-C3-C2 + * + * This will require a conversion of user space IDs to the actual index into + * array of stored power domains for each partition. For the above example + * this function will return partition and index as follows: + * + * ============= ========= ===== ======== + * User space ID Partition Index Die type + * ============= ========= ===== ======== + * 0 0 0 Compute + * 1 0 1 Compute + * 2 1 0 Compute + * 3 1 1 Compute + * 4 0 2 IO + * 5 1 2 IO + * ============= ========= ===== ======== + * + * Return: %-EINVAL on error, otherwise mapped domain ID >= 0. + */ +static int map_partition_power_domain_id(struct tpmi_sst_struct *sst_inst, u8 id, u8 *partition) +{ + u8 i, io_start_id, max_part; + + *partition = 0; + + /* If any PCI device for partition is unbound, treat this as failure */ + if (sst_inst->partition_mask != sst_inst->partition_mask_current) + return -EINVAL; + + max_part = hweight8(sst_inst->partition_mask); + + /* IO Index begin here */ + io_start_id = fls(sst_inst->cdie_mask[max_part - 1]); + + if (id < io_start_id) + return map_cdies(sst_inst, id, partition); + + for (i = 0; i < max_part; i++) { + u8 io_id; + + io_id = id - io_start_id; + if (io_id < sst_inst->io_dies[i]) { + u8 cdie_range; + + cdie_range = fls(sst_inst->cdie_mask[i]) - ffs(sst_inst->cdie_mask[i]) + 1; + *partition = i; + return cdie_range + io_id; + } + io_start_id += sst_inst->io_dies[i]; + } + + return -EINVAL; +} + +/* + * Map a package and power_domain id to SST information structure unique for a power_domain. + * The caller should call under isst_tpmi_dev_lock. 
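+ * Returns NULL when the package or power_domain id is out of range, or
+ * when the resolved power domain has no valid SST MMIO mapping.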
+ */ +static struct tpmi_per_power_domain_info *get_instance(int pkg_id, int power_domain_id) +{ + struct tpmi_per_power_domain_info *power_domain_info; + struct tpmi_sst_struct *sst_inst; + u8 part; + + if (!in_range(pkg_id, 0, topology_max_packages()) || pkg_id > isst_common.max_index) + return NULL; + + sst_inst = isst_common.sst_inst[pkg_id]; + if (!sst_inst) + return NULL; + + power_domain_id = map_partition_power_domain_id(sst_inst, power_domain_id, &part); + if (power_domain_id < 0) + return NULL; + + power_domain_info = &sst_inst->power_domain_info[part][power_domain_id]; + + if (power_domain_info && !power_domain_info->sst_base) + return NULL; + + return power_domain_info; +} + +static bool disable_dynamic_sst_features(void) +{ + u64 value; + + rdmsrq(MSR_PM_ENABLE, value); + return !(value & 0x1); +} + +#define _read_cp_info(name_str, name, offset, start, width, mult_factor)\ +{\ + u64 val, mask;\ + \ + val = readq(power_domain_info->sst_base + power_domain_info->sst_header.cp_offset +\ + (offset));\ + mask = GENMASK_ULL((start + width - 1), start);\ + val &= mask; \ + val >>= start;\ + name = (val * mult_factor);\ +} + +#define _write_cp_info(name_str, name, offset, start, width, div_factor)\ +{\ + u64 val, mask;\ + \ + val = readq(power_domain_info->sst_base +\ + power_domain_info->sst_header.cp_offset + (offset));\ + mask = GENMASK_ULL((start + width - 1), start);\ + val &= ~mask;\ + val |= (name / div_factor) << start;\ + writeq(val, power_domain_info->sst_base + power_domain_info->sst_header.cp_offset +\ + (offset));\ +} + +#define SST_CP_CONTROL_OFFSET 8 +#define SST_CP_STATUS_OFFSET 16 + +#define SST_CP_ENABLE_START 0 +#define SST_CP_ENABLE_WIDTH 1 + +#define SST_CP_PRIORITY_TYPE_START 1 +#define SST_CP_PRIORITY_TYPE_WIDTH 1 + +static long isst_if_core_power_state(void __user *argp) +{ + struct tpmi_per_power_domain_info *power_domain_info; + struct isst_core_power core_power; + + if (copy_from_user(&core_power, argp, sizeof(core_power))) + return -EFAULT; + + if (core_power.get_set && disable_dynamic_sst_features()) + return -EFAULT; + + power_domain_info = get_instance(core_power.socket_id, core_power.power_domain_id); + if (!power_domain_info) + return -EINVAL; + + if (core_power.get_set) { + _write_cp_info("cp_enable", core_power.enable, SST_CP_CONTROL_OFFSET, + SST_CP_ENABLE_START, SST_CP_ENABLE_WIDTH, SST_MUL_FACTOR_NONE) + _write_cp_info("cp_prio_type", core_power.priority_type, SST_CP_CONTROL_OFFSET, + SST_CP_PRIORITY_TYPE_START, SST_CP_PRIORITY_TYPE_WIDTH, + SST_MUL_FACTOR_NONE) + } else { + /* get */ + _read_cp_info("cp_enable", core_power.enable, SST_CP_STATUS_OFFSET, + SST_CP_ENABLE_START, SST_CP_ENABLE_WIDTH, SST_MUL_FACTOR_NONE) + _read_cp_info("cp_prio_type", core_power.priority_type, SST_CP_STATUS_OFFSET, + SST_CP_PRIORITY_TYPE_START, SST_CP_PRIORITY_TYPE_WIDTH, + SST_MUL_FACTOR_NONE) + core_power.supported = !!(power_domain_info->sst_header.cap_mask & BIT(0)); + if (copy_to_user(argp, &core_power, sizeof(core_power))) + return -EFAULT; + } + + return 0; +} + +#define SST_CLOS_CONFIG_0_OFFSET 24 + +#define SST_CLOS_CONFIG_PRIO_START 4 +#define SST_CLOS_CONFIG_PRIO_WIDTH 4 + +#define SST_CLOS_CONFIG_MIN_START 8 +#define SST_CLOS_CONFIG_MIN_WIDTH 8 + +#define SST_CLOS_CONFIG_MAX_START 16 +#define SST_CLOS_CONFIG_MAX_WIDTH 8 + +static long isst_if_clos_param(void __user *argp) +{ + struct tpmi_per_power_domain_info *power_domain_info; + struct isst_clos_param clos_param; + + if (copy_from_user(&clos_param, argp, sizeof(clos_param))) + return -EFAULT; + + 
power_domain_info = get_instance(clos_param.socket_id, clos_param.power_domain_id); + if (!power_domain_info) + return -EINVAL; + + if (clos_param.get_set) { + if (power_domain_info->write_blocked) + return -EPERM; + + _write_cp_info("clos.min_freq", clos_param.min_freq_mhz, + (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE), + SST_CLOS_CONFIG_MIN_START, SST_CLOS_CONFIG_MIN_WIDTH, + SST_MUL_FACTOR_FREQ); + _write_cp_info("clos.max_freq", clos_param.max_freq_mhz, + (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE), + SST_CLOS_CONFIG_MAX_START, SST_CLOS_CONFIG_MAX_WIDTH, + SST_MUL_FACTOR_FREQ); + _write_cp_info("clos.prio", clos_param.prop_prio, + (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE), + SST_CLOS_CONFIG_PRIO_START, SST_CLOS_CONFIG_PRIO_WIDTH, + SST_MUL_FACTOR_NONE); + } else { + /* get */ + _read_cp_info("clos.min_freq", clos_param.min_freq_mhz, + (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE), + SST_CLOS_CONFIG_MIN_START, SST_CLOS_CONFIG_MIN_WIDTH, + SST_MUL_FACTOR_FREQ) + _read_cp_info("clos.max_freq", clos_param.max_freq_mhz, + (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE), + SST_CLOS_CONFIG_MAX_START, SST_CLOS_CONFIG_MAX_WIDTH, + SST_MUL_FACTOR_FREQ) + _read_cp_info("clos.prio", clos_param.prop_prio, + (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE), + SST_CLOS_CONFIG_PRIO_START, SST_CLOS_CONFIG_PRIO_WIDTH, + SST_MUL_FACTOR_NONE) + + if (copy_to_user(argp, &clos_param, sizeof(clos_param))) + return -EFAULT; + } + + return 0; +} + +#define SST_CLOS_ASSOC_0_OFFSET 56 +#define SST_CLOS_ASSOC_CPUS_PER_REG 16 +#define SST_CLOS_ASSOC_BITS_PER_CPU 4 + +static long isst_if_clos_assoc(void __user *argp) +{ + struct isst_if_clos_assoc_cmds assoc_cmds; + unsigned char __user *ptr; + int i; + + /* Each multi command has u16 command count as the first field */ + if (copy_from_user(&assoc_cmds, argp, sizeof(assoc_cmds))) + return -EFAULT; + + if (!assoc_cmds.cmd_count || assoc_cmds.cmd_count > ISST_IF_CMD_LIMIT) + return -EINVAL; + + ptr = argp + offsetof(struct isst_if_clos_assoc_cmds, assoc_info); + for (i = 0; i < assoc_cmds.cmd_count; ++i) { + struct tpmi_per_power_domain_info *power_domain_info; + struct isst_if_clos_assoc clos_assoc; + int punit_id, punit_cpu_no, pkg_id; + struct tpmi_sst_struct *sst_inst; + int offset, shift, cpu; + u64 val, mask, clos; + u8 part; + + if (copy_from_user(&clos_assoc, ptr, sizeof(clos_assoc))) + return -EFAULT; + + if (clos_assoc.socket_id > topology_max_packages()) + return -EINVAL; + + cpu = clos_assoc.logical_cpu; + clos = clos_assoc.clos; + + if (assoc_cmds.punit_cpu_map) + punit_cpu_no = cpu; + else + return -EOPNOTSUPP; + + if (punit_cpu_no < 0) + return -EINVAL; + + punit_id = clos_assoc.power_domain_id; + pkg_id = clos_assoc.socket_id; + + sst_inst = isst_common.sst_inst[pkg_id]; + + punit_id = map_partition_power_domain_id(sst_inst, punit_id, &part); + if (punit_id < 0) + return -EINVAL; + + power_domain_info = &sst_inst->power_domain_info[part][punit_id]; + + if (assoc_cmds.get_set && power_domain_info->write_blocked) + return -EPERM; + + offset = SST_CLOS_ASSOC_0_OFFSET + + (punit_cpu_no / SST_CLOS_ASSOC_CPUS_PER_REG) * SST_REG_SIZE; + shift = punit_cpu_no % SST_CLOS_ASSOC_CPUS_PER_REG; + shift *= SST_CLOS_ASSOC_BITS_PER_CPU; + + val = readq(power_domain_info->sst_base + + power_domain_info->sst_header.cp_offset + offset); + if (assoc_cmds.get_set) { + mask = GENMASK_ULL((shift + SST_CLOS_ASSOC_BITS_PER_CPU - 1), shift); + val &= ~mask; + val |= (clos << shift); + 
writeq(val, power_domain_info->sst_base + + power_domain_info->sst_header.cp_offset + offset); + } else { + val >>= shift; + clos_assoc.clos = val & GENMASK(SST_CLOS_ASSOC_BITS_PER_CPU - 1, 0); + if (copy_to_user(ptr, &clos_assoc, sizeof(clos_assoc))) + return -EFAULT; + } + + ptr += sizeof(clos_assoc); + } + + return 0; +} + +#define _read_pp_info(name_str, name, offset, start, width, mult_factor)\ +{\ + u64 val, _mask;\ + \ + val = readq(power_domain_info->sst_base + power_domain_info->sst_header.pp_offset +\ + (offset));\ + _mask = GENMASK_ULL((start + width - 1), start);\ + val &= _mask;\ + val >>= start;\ + name = (val * mult_factor);\ +} + +#define _write_pp_info(name_str, name, offset, start, width, div_factor)\ +{\ + u64 val, _mask;\ + \ + val = readq(power_domain_info->sst_base + power_domain_info->sst_header.pp_offset +\ + (offset));\ + _mask = GENMASK((start + width - 1), start);\ + val &= ~_mask;\ + val |= (name / div_factor) << start;\ + writeq(val, power_domain_info->sst_base + power_domain_info->sst_header.pp_offset +\ + (offset));\ +} + +#define _read_bf_level_info(name_str, name, level, offset, start, width, mult_factor)\ +{\ + u64 val, _mask;\ + \ + val = readq(power_domain_info->sst_base +\ + power_domain_info->perf_levels[level].mmio_offset +\ + (power_domain_info->feature_offsets.bf_offset * 8) + (offset));\ + _mask = GENMASK_ULL((start + width - 1), start);\ + val &= _mask; \ + val >>= start;\ + name = (val * mult_factor);\ +} + +#define _read_tf_level_info(name_str, name, level, offset, start, width, mult_factor)\ +{\ + u64 val, _mask;\ + \ + val = readq(power_domain_info->sst_base +\ + power_domain_info->perf_levels[level].mmio_offset +\ + (power_domain_info->feature_offsets.tf_offset * 8) + (offset));\ + _mask = GENMASK_ULL((start + width - 1), start);\ + val &= _mask; \ + val >>= start;\ + name = (val * mult_factor);\ +} + +#define SST_PP_STATUS_OFFSET 32 + +#define SST_PP_LEVEL_START 0 +#define SST_PP_LEVEL_WIDTH 3 + +#define SST_PP_LOCK_START 3 +#define SST_PP_LOCK_WIDTH 1 + +#define SST_PP_FEATURE_STATE_START 8 +#define SST_PP_FEATURE_STATE_WIDTH 8 + +#define SST_BF_FEATURE_SUPPORTED_START 12 +#define SST_BF_FEATURE_SUPPORTED_WIDTH 1 + +#define SST_TF_FEATURE_SUPPORTED_START 12 +#define SST_TF_FEATURE_SUPPORTED_WIDTH 1 + +static int isst_if_get_perf_level(void __user *argp) +{ + struct isst_perf_level_info perf_level; + struct tpmi_per_power_domain_info *power_domain_info; + unsigned long level_mask; + u8 level, support; + + if (copy_from_user(&perf_level, argp, sizeof(perf_level))) + return -EFAULT; + + power_domain_info = get_instance(perf_level.socket_id, perf_level.power_domain_id); + if (!power_domain_info) + return -EINVAL; + + perf_level.max_level = power_domain_info->max_level; + perf_level.level_mask = power_domain_info->pp_header.level_en_mask; + perf_level.feature_rev = power_domain_info->pp_header.feature_rev; + _read_pp_info("current_level", perf_level.current_level, SST_PP_STATUS_OFFSET, + SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE) + _read_pp_info("locked", perf_level.locked, SST_PP_STATUS_OFFSET, + SST_PP_LOCK_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE) + _read_pp_info("feature_state", perf_level.feature_state, SST_PP_STATUS_OFFSET, + SST_PP_FEATURE_STATE_START, SST_PP_FEATURE_STATE_WIDTH, SST_MUL_FACTOR_NONE) + perf_level.enabled = !!(power_domain_info->sst_header.cap_mask & BIT(1)); + + level_mask = perf_level.level_mask; + perf_level.sst_bf_support = 0; + for_each_set_bit(level, &level_mask, BITS_PER_BYTE) { + /* + * Read 
BF support for a level. Read output is updated + * to "support" variable by the below macro. + */ + _read_bf_level_info("bf_support", support, level, 0, SST_BF_FEATURE_SUPPORTED_START, + SST_BF_FEATURE_SUPPORTED_WIDTH, SST_MUL_FACTOR_NONE); + + /* If supported set the bit for the level */ + if (support) + perf_level.sst_bf_support |= BIT(level); + } + + perf_level.sst_tf_support = 0; + for_each_set_bit(level, &level_mask, BITS_PER_BYTE) { + /* + * Read TF support for a level. Read output is updated + * to "support" variable by the below macro. + */ + _read_tf_level_info("tf_support", support, level, 0, SST_TF_FEATURE_SUPPORTED_START, + SST_TF_FEATURE_SUPPORTED_WIDTH, SST_MUL_FACTOR_NONE); + + /* If supported set the bit for the level */ + if (support) + perf_level.sst_tf_support |= BIT(level); + } + + if (copy_to_user(argp, &perf_level, sizeof(perf_level))) + return -EFAULT; + + return 0; +} + +#define SST_PP_CONTROL_OFFSET 24 +#define SST_PP_LEVEL_CHANGE_TIME_MS 5 +#define SST_PP_LEVEL_CHANGE_RETRY_COUNT 3 + +static int isst_if_set_perf_level(void __user *argp) +{ + struct isst_perf_level_control perf_level; + struct tpmi_per_power_domain_info *power_domain_info; + int level, retry = 0; + + if (disable_dynamic_sst_features()) + return -EFAULT; + + if (copy_from_user(&perf_level, argp, sizeof(perf_level))) + return -EFAULT; + + power_domain_info = get_instance(perf_level.socket_id, perf_level.power_domain_id); + if (!power_domain_info) + return -EINVAL; + + if (power_domain_info->write_blocked) + return -EPERM; + + if (!(power_domain_info->pp_header.allowed_level_mask & BIT(perf_level.level))) + return -EINVAL; + + _read_pp_info("current_level", level, SST_PP_STATUS_OFFSET, + SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE) + + /* If the requested new level is same as the current level, reject */ + if (perf_level.level == level) + return -EINVAL; + + _write_pp_info("perf_level", perf_level.level, SST_PP_CONTROL_OFFSET, + SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE) + + /* It is possible that firmware is busy (although unlikely), so retry */ + do { + /* Give time to FW to process */ + msleep(SST_PP_LEVEL_CHANGE_TIME_MS); + + _read_pp_info("current_level", level, SST_PP_STATUS_OFFSET, + SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE) + + /* Check if the new level is active */ + if (perf_level.level == level) + break; + + } while (retry++ < SST_PP_LEVEL_CHANGE_RETRY_COUNT); + + /* If the level change didn't happen, return fault */ + if (perf_level.level != level) + return -EFAULT; + + /* Reset the feature state on level change */ + _write_pp_info("perf_feature", 0, SST_PP_CONTROL_OFFSET, + SST_PP_FEATURE_STATE_START, SST_PP_FEATURE_STATE_WIDTH, + SST_MUL_FACTOR_NONE) + + /* Give time to FW to process */ + msleep(SST_PP_LEVEL_CHANGE_TIME_MS); + + return 0; +} + +static int isst_if_set_perf_feature(void __user *argp) +{ + struct isst_perf_feature_control perf_feature; + struct tpmi_per_power_domain_info *power_domain_info; + + if (disable_dynamic_sst_features()) + return -EFAULT; + + if (copy_from_user(&perf_feature, argp, sizeof(perf_feature))) + return -EFAULT; + + power_domain_info = get_instance(perf_feature.socket_id, perf_feature.power_domain_id); + if (!power_domain_info) + return -EINVAL; + + if (power_domain_info->write_blocked) + return -EPERM; + + _write_pp_info("perf_feature", perf_feature.feature, SST_PP_CONTROL_OFFSET, + SST_PP_FEATURE_STATE_START, SST_PP_FEATURE_STATE_WIDTH, + SST_MUL_FACTOR_NONE) + + return 0; +} + +#define 
_read_pp_level_info(name_str, name, level, offset, start, width, mult_factor)\ +{\ + u64 val, _mask;\ + \ + val = readq(power_domain_info->sst_base +\ + power_domain_info->perf_levels[level].mmio_offset +\ + (power_domain_info->feature_offsets.pp_offset * 8) + (offset));\ + _mask = GENMASK_ULL((start + width - 1), start);\ + val &= _mask; \ + val >>= start;\ + name = (val * mult_factor);\ +} + +#define SST_PP_INFO_0_OFFSET 0 +#define SST_PP_INFO_1_OFFSET 8 +#define SST_PP_INFO_2_OFFSET 16 +#define SST_PP_INFO_3_OFFSET 24 + +/* SST_PP_INFO_4_OFFSET to SST_PP_INFO_9_OFFSET are trl levels */ +#define SST_PP_INFO_4_OFFSET 32 + +#define SST_PP_INFO_10_OFFSET 80 +#define SST_PP_INFO_11_OFFSET 88 +#define SST_PP_INFO_12_OFFSET 96 + +#define SST_PP_P1_SSE_START 0 +#define SST_PP_P1_SSE_WIDTH 8 + +#define SST_PP_P1_AVX2_START 8 +#define SST_PP_P1_AVX2_WIDTH 8 + +#define SST_PP_P1_AVX512_START 16 +#define SST_PP_P1_AVX512_WIDTH 8 + +#define SST_PP_P1_AMX_START 24 +#define SST_PP_P1_AMX_WIDTH 8 + +#define SST_PP_TDP_START 32 +#define SST_PP_TDP_WIDTH 15 + +#define SST_PP_T_PROCHOT_START 47 +#define SST_PP_T_PROCHOT_WIDTH 8 + +#define SST_PP_MAX_MEMORY_FREQ_START 55 +#define SST_PP_MAX_MEMORY_FREQ_WIDTH 7 + +#define SST_PP_COOLING_TYPE_START 62 +#define SST_PP_COOLING_TYPE_WIDTH 2 + +#define SST_PP_TRL_0_RATIO_0_START 0 +#define SST_PP_TRL_0_RATIO_0_WIDTH 8 + +#define SST_PP_TRL_CORES_BUCKET_0_START 0 +#define SST_PP_TRL_CORES_BUCKET_0_WIDTH 8 + +#define SST_PP_CORE_RATIO_P0_START 0 +#define SST_PP_CORE_RATIO_P0_WIDTH 8 + +#define SST_PP_CORE_RATIO_P1_START 8 +#define SST_PP_CORE_RATIO_P1_WIDTH 8 + +#define SST_PP_CORE_RATIO_PN_START 16 +#define SST_PP_CORE_RATIO_PN_WIDTH 8 + +#define SST_PP_CORE_RATIO_PM_START 24 +#define SST_PP_CORE_RATIO_PM_WIDTH 8 + +#define SST_PP_CORE_RATIO_P0_FABRIC_START 32 +#define SST_PP_CORE_RATIO_P0_FABRIC_WIDTH 8 + +#define SST_PP_CORE_RATIO_P1_FABRIC_START 40 +#define SST_PP_CORE_RATIO_P1_FABRIC_WIDTH 8 + +#define SST_PP_CORE_RATIO_PM_FABRIC_START 48 +#define SST_PP_CORE_RATIO_PM_FABRIC_WIDTH 8 + +#define SST_PP_CORE_RATIO_P0_FABRIC_1_START 0 +#define SST_PP_CORE_RATIO_P0_FABRIC_1_WIDTH 8 + +#define SST_PP_CORE_RATIO_P1_FABRIC_1_START 8 +#define SST_PP_CORE_RATIO_P1_FABRIC_1_WIDTH 8 + +#define SST_PP_CORE_RATIO_PM_FABRIC_1_START 16 +#define SST_PP_CORE_RATIO_PM_FABRIC_1_WIDTH 8 + +static int isst_if_get_perf_level_info(void __user *argp) +{ + struct isst_perf_level_data_info perf_level; + struct tpmi_per_power_domain_info *power_domain_info; + int i, j; + + if (copy_from_user(&perf_level, argp, sizeof(perf_level))) + return -EFAULT; + + power_domain_info = get_instance(perf_level.socket_id, perf_level.power_domain_id); + if (!power_domain_info) + return -EINVAL; + + if (perf_level.level > power_domain_info->max_level) + return -EINVAL; + + if (!(power_domain_info->pp_header.level_en_mask & BIT(perf_level.level))) + return -EINVAL; + + _read_pp_level_info("tdp_ratio", perf_level.tdp_ratio, perf_level.level, + SST_PP_INFO_0_OFFSET, SST_PP_P1_SSE_START, SST_PP_P1_SSE_WIDTH, + SST_MUL_FACTOR_NONE) + _read_pp_level_info("base_freq_mhz", perf_level.base_freq_mhz, perf_level.level, + SST_PP_INFO_0_OFFSET, SST_PP_P1_SSE_START, SST_PP_P1_SSE_WIDTH, + SST_MUL_FACTOR_FREQ) + _read_pp_level_info("base_freq_avx2_mhz", perf_level.base_freq_avx2_mhz, perf_level.level, + SST_PP_INFO_0_OFFSET, SST_PP_P1_AVX2_START, SST_PP_P1_AVX2_WIDTH, + SST_MUL_FACTOR_FREQ) + _read_pp_level_info("base_freq_avx512_mhz", perf_level.base_freq_avx512_mhz, + perf_level.level, SST_PP_INFO_0_OFFSET, 
SST_PP_P1_AVX512_START, + SST_PP_P1_AVX512_WIDTH, SST_MUL_FACTOR_FREQ) + _read_pp_level_info("base_freq_amx_mhz", perf_level.base_freq_amx_mhz, perf_level.level, + SST_PP_INFO_0_OFFSET, SST_PP_P1_AMX_START, SST_PP_P1_AMX_WIDTH, + SST_MUL_FACTOR_FREQ) + + _read_pp_level_info("thermal_design_power_w", perf_level.thermal_design_power_w, + perf_level.level, SST_PP_INFO_1_OFFSET, SST_PP_TDP_START, + SST_PP_TDP_WIDTH, SST_MUL_FACTOR_NONE) + perf_level.thermal_design_power_w /= 8; /* units are in 1/8th watt */ + _read_pp_level_info("tjunction_max_c", perf_level.tjunction_max_c, perf_level.level, + SST_PP_INFO_1_OFFSET, SST_PP_T_PROCHOT_START, SST_PP_T_PROCHOT_WIDTH, + SST_MUL_FACTOR_NONE) + _read_pp_level_info("max_memory_freq_mhz", perf_level.max_memory_freq_mhz, + perf_level.level, SST_PP_INFO_1_OFFSET, SST_PP_MAX_MEMORY_FREQ_START, + SST_PP_MAX_MEMORY_FREQ_WIDTH, SST_MUL_FACTOR_FREQ) + _read_pp_level_info("cooling_type", perf_level.cooling_type, perf_level.level, + SST_PP_INFO_1_OFFSET, SST_PP_COOLING_TYPE_START, + SST_PP_COOLING_TYPE_WIDTH, SST_MUL_FACTOR_NONE) + + for (i = 0; i < TRL_MAX_LEVELS; ++i) { + for (j = 0; j < TRL_MAX_BUCKETS; ++j) + _read_pp_level_info("trl*_bucket*_freq_mhz", + perf_level.trl_freq_mhz[i][j], perf_level.level, + SST_PP_INFO_4_OFFSET + (i * SST_PP_TRL_0_RATIO_0_WIDTH), + j * SST_PP_TRL_0_RATIO_0_WIDTH, + SST_PP_TRL_0_RATIO_0_WIDTH, + SST_MUL_FACTOR_FREQ); + } + + for (i = 0; i < TRL_MAX_BUCKETS; ++i) + _read_pp_level_info("bucket*_core_count", perf_level.bucket_core_counts[i], + perf_level.level, SST_PP_INFO_10_OFFSET, + SST_PP_TRL_CORES_BUCKET_0_WIDTH * i, + SST_PP_TRL_CORES_BUCKET_0_WIDTH, SST_MUL_FACTOR_NONE) + + perf_level.max_buckets = TRL_MAX_BUCKETS; + perf_level.max_trl_levels = TRL_MAX_LEVELS; + + _read_pp_level_info("p0_freq_mhz", perf_level.p0_freq_mhz, perf_level.level, + SST_PP_INFO_11_OFFSET, SST_PP_CORE_RATIO_P0_START, + SST_PP_CORE_RATIO_P0_WIDTH, SST_MUL_FACTOR_FREQ) + _read_pp_level_info("p1_freq_mhz", perf_level.p1_freq_mhz, perf_level.level, + SST_PP_INFO_11_OFFSET, SST_PP_CORE_RATIO_P1_START, + SST_PP_CORE_RATIO_P1_WIDTH, SST_MUL_FACTOR_FREQ) + _read_pp_level_info("pn_freq_mhz", perf_level.pn_freq_mhz, perf_level.level, + SST_PP_INFO_11_OFFSET, SST_PP_CORE_RATIO_PN_START, + SST_PP_CORE_RATIO_PN_WIDTH, SST_MUL_FACTOR_FREQ) + _read_pp_level_info("pm_freq_mhz", perf_level.pm_freq_mhz, perf_level.level, + SST_PP_INFO_11_OFFSET, SST_PP_CORE_RATIO_PM_START, + SST_PP_CORE_RATIO_PM_WIDTH, SST_MUL_FACTOR_FREQ) + _read_pp_level_info("p0_fabric_freq_mhz", perf_level.p0_fabric_freq_mhz, + perf_level.level, SST_PP_INFO_11_OFFSET, + SST_PP_CORE_RATIO_P0_FABRIC_START, + SST_PP_CORE_RATIO_P0_FABRIC_WIDTH, SST_MUL_FACTOR_FREQ) + _read_pp_level_info("p1_fabric_freq_mhz", perf_level.p1_fabric_freq_mhz, + perf_level.level, SST_PP_INFO_11_OFFSET, + SST_PP_CORE_RATIO_P1_FABRIC_START, + SST_PP_CORE_RATIO_P1_FABRIC_WIDTH, SST_MUL_FACTOR_FREQ) + _read_pp_level_info("pm_fabric_freq_mhz", perf_level.pm_fabric_freq_mhz, + perf_level.level, SST_PP_INFO_11_OFFSET, + SST_PP_CORE_RATIO_PM_FABRIC_START, + SST_PP_CORE_RATIO_PM_FABRIC_WIDTH, SST_MUL_FACTOR_FREQ) + + if (copy_to_user(argp, &perf_level, sizeof(perf_level))) + return -EFAULT; + + return 0; +} + +static int isst_if_get_perf_level_fabric_info(void __user *argp) +{ + struct isst_perf_level_fabric_info perf_level_fabric; + struct tpmi_per_power_domain_info *power_domain_info; + int start = SST_PP_CORE_RATIO_P0_FABRIC_START; + int width = SST_PP_CORE_RATIO_P0_FABRIC_WIDTH; + int offset = SST_PP_INFO_11_OFFSET; + int i; 
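+
+	/*
+	 * Ratios for the first fabric live in PP_INFO_11; the loop below
+	 * switches to PP_INFO_12 for the second fabric (feature_rev >= 2).
+	 */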
+ + if (copy_from_user(&perf_level_fabric, argp, sizeof(perf_level_fabric))) + return -EFAULT; + + power_domain_info = get_instance(perf_level_fabric.socket_id, + perf_level_fabric.power_domain_id); + if (!power_domain_info) + return -EINVAL; + + if (perf_level_fabric.level > power_domain_info->max_level) + return -EINVAL; + + if (power_domain_info->pp_header.feature_rev < 2) + return -EINVAL; + + if (!(power_domain_info->pp_header.level_en_mask & BIT(perf_level_fabric.level))) + return -EINVAL; + + /* For revision 2, maximum number of fabrics is 2 */ + perf_level_fabric.max_fabrics = 2; + + for (i = 0; i < perf_level_fabric.max_fabrics; i++) { + _read_pp_level_info("p0_fabric_freq_mhz", perf_level_fabric.p0_fabric_freq_mhz[i], + perf_level_fabric.level, offset, start, width, + SST_MUL_FACTOR_FREQ) + start += width; + + _read_pp_level_info("p1_fabric_freq_mhz", perf_level_fabric.p1_fabric_freq_mhz[i], + perf_level_fabric.level, offset, start, width, + SST_MUL_FACTOR_FREQ) + start += width; + + _read_pp_level_info("pm_fabric_freq_mhz", perf_level_fabric.pm_fabric_freq_mhz[i], + perf_level_fabric.level, offset, start, width, + SST_MUL_FACTOR_FREQ) + offset = SST_PP_INFO_12_OFFSET; + start = SST_PP_CORE_RATIO_P0_FABRIC_1_START; + } + + if (copy_to_user(argp, &perf_level_fabric, sizeof(perf_level_fabric))) + return -EFAULT; + + return 0; +} + +#define SST_PP_FUSED_CORE_COUNT_START 0 +#define SST_PP_FUSED_CORE_COUNT_WIDTH 8 + +#define SST_PP_RSLVD_CORE_COUNT_START 8 +#define SST_PP_RSLVD_CORE_COUNT_WIDTH 8 + +#define SST_PP_RSLVD_CORE_MASK_START 0 +#define SST_PP_RSLVD_CORE_MASK_WIDTH 64 + +static int isst_if_get_perf_level_mask(void __user *argp) +{ + static struct isst_perf_level_cpu_mask cpumask; + struct tpmi_per_power_domain_info *power_domain_info; + u64 mask; + + if (copy_from_user(&cpumask, argp, sizeof(cpumask))) + return -EFAULT; + + power_domain_info = get_instance(cpumask.socket_id, cpumask.power_domain_id); + if (!power_domain_info) + return -EINVAL; + + _read_pp_level_info("mask", mask, cpumask.level, SST_PP_INFO_2_OFFSET, + SST_PP_RSLVD_CORE_MASK_START, SST_PP_RSLVD_CORE_MASK_WIDTH, + SST_MUL_FACTOR_NONE) + + cpumask.mask = mask; + + if (!cpumask.punit_cpu_map) + return -EOPNOTSUPP; + + if (copy_to_user(argp, &cpumask, sizeof(cpumask))) + return -EFAULT; + + return 0; +} + +#define SST_BF_INFO_0_OFFSET 0 +#define SST_BF_INFO_1_OFFSET 8 + +#define SST_BF_P1_HIGH_START 13 +#define SST_BF_P1_HIGH_WIDTH 8 + +#define SST_BF_P1_LOW_START 21 +#define SST_BF_P1_LOW_WIDTH 8 + +#define SST_BF_T_PROHOT_START 38 +#define SST_BF_T_PROHOT_WIDTH 8 + +#define SST_BF_TDP_START 46 +#define SST_BF_TDP_WIDTH 15 + +static int isst_if_get_base_freq_info(void __user *argp) +{ + static struct isst_base_freq_info base_freq; + struct tpmi_per_power_domain_info *power_domain_info; + + if (copy_from_user(&base_freq, argp, sizeof(base_freq))) + return -EFAULT; + + power_domain_info = get_instance(base_freq.socket_id, base_freq.power_domain_id); + if (!power_domain_info) + return -EINVAL; + + if (base_freq.level > power_domain_info->max_level) + return -EINVAL; + + _read_bf_level_info("p1_high", base_freq.high_base_freq_mhz, base_freq.level, + SST_BF_INFO_0_OFFSET, SST_BF_P1_HIGH_START, SST_BF_P1_HIGH_WIDTH, + SST_MUL_FACTOR_FREQ) + _read_bf_level_info("p1_low", base_freq.low_base_freq_mhz, base_freq.level, + SST_BF_INFO_0_OFFSET, SST_BF_P1_LOW_START, SST_BF_P1_LOW_WIDTH, + SST_MUL_FACTOR_FREQ) + _read_bf_level_info("BF-TJ", base_freq.tjunction_max_c, base_freq.level, + SST_BF_INFO_0_OFFSET, 
SST_BF_T_PROHOT_START, SST_BF_T_PROHOT_WIDTH, + SST_MUL_FACTOR_NONE) + _read_bf_level_info("BF-tdp", base_freq.thermal_design_power_w, base_freq.level, + SST_BF_INFO_0_OFFSET, SST_BF_TDP_START, SST_BF_TDP_WIDTH, + SST_MUL_FACTOR_NONE) + base_freq.thermal_design_power_w /= 8; /*unit = 1/8th watt*/ + + if (copy_to_user(argp, &base_freq, sizeof(base_freq))) + return -EFAULT; + + return 0; +} + +#define P1_HI_CORE_MASK_START 0 +#define P1_HI_CORE_MASK_WIDTH 64 + +static int isst_if_get_base_freq_mask(void __user *argp) +{ + static struct isst_perf_level_cpu_mask cpumask; + struct tpmi_per_power_domain_info *power_domain_info; + u64 mask; + + if (copy_from_user(&cpumask, argp, sizeof(cpumask))) + return -EFAULT; + + power_domain_info = get_instance(cpumask.socket_id, cpumask.power_domain_id); + if (!power_domain_info) + return -EINVAL; + + _read_bf_level_info("BF-cpumask", mask, cpumask.level, SST_BF_INFO_1_OFFSET, + P1_HI_CORE_MASK_START, P1_HI_CORE_MASK_WIDTH, + SST_MUL_FACTOR_NONE) + + cpumask.mask = mask; + + if (!cpumask.punit_cpu_map) + return -EOPNOTSUPP; + + if (copy_to_user(argp, &cpumask, sizeof(cpumask))) + return -EFAULT; + + return 0; +} + +static int isst_if_get_tpmi_instance_count(void __user *argp) +{ + struct isst_tpmi_instance_count tpmi_inst; + struct tpmi_sst_struct *sst_inst; + int i; + + if (copy_from_user(&tpmi_inst, argp, sizeof(tpmi_inst))) + return -EFAULT; + + if (tpmi_inst.socket_id >= topology_max_packages()) + return -EINVAL; + + sst_inst = isst_common.sst_inst[tpmi_inst.socket_id]; + + tpmi_inst.count = isst_instance_count(sst_inst); + + tpmi_inst.valid_mask = 0; + for (i = 0; i < tpmi_inst.count; i++) { + struct tpmi_per_power_domain_info *pd_info; + u8 part; + int pd; + + pd = map_partition_power_domain_id(sst_inst, i, &part); + if (pd < 0) + continue; + + pd_info = &sst_inst->power_domain_info[part][pd]; + if (pd_info->sst_base) + tpmi_inst.valid_mask |= BIT(i); + } + + if (!tpmi_inst.valid_mask) + tpmi_inst.count = 0; + + if (copy_to_user(argp, &tpmi_inst, sizeof(tpmi_inst))) + return -EFAULT; + + return 0; +} + +#define SST_TF_INFO_0_OFFSET 0 +#define SST_TF_INFO_1_OFFSET 8 +#define SST_TF_INFO_2_OFFSET 16 +#define SST_TF_INFO_8_OFFSET 64 +#define SST_TF_INFO_8_BUCKETS 3 + +#define SST_TF_MAX_LP_CLIP_RATIOS TRL_MAX_LEVELS + +#define SST_TF_FEATURE_REV_START 4 +#define SST_TF_FEATURE_REV_WIDTH 8 + +#define SST_TF_LP_CLIP_RATIO_0_START 16 +#define SST_TF_LP_CLIP_RATIO_0_WIDTH 8 + +#define SST_TF_RATIO_0_START 0 +#define SST_TF_RATIO_0_WIDTH 8 + +#define SST_TF_NUM_CORE_0_START 0 +#define SST_TF_NUM_CORE_0_WIDTH 8 + +#define SST_TF_NUM_MOD_0_START 0 +#define SST_TF_NUM_MOD_0_WIDTH 16 + +static int isst_if_get_turbo_freq_info(void __user *argp) +{ + static struct isst_turbo_freq_info turbo_freq; + struct tpmi_per_power_domain_info *power_domain_info; + u8 feature_rev; + int i, j; + + if (copy_from_user(&turbo_freq, argp, sizeof(turbo_freq))) + return -EFAULT; + + power_domain_info = get_instance(turbo_freq.socket_id, turbo_freq.power_domain_id); + if (!power_domain_info) + return -EINVAL; + + if (turbo_freq.level > power_domain_info->max_level) + return -EINVAL; + + turbo_freq.max_buckets = TRL_MAX_BUCKETS; + turbo_freq.max_trl_levels = TRL_MAX_LEVELS; + turbo_freq.max_clip_freqs = SST_TF_MAX_LP_CLIP_RATIOS; + + _read_tf_level_info("feature_rev", feature_rev, turbo_freq.level, + SST_TF_INFO_0_OFFSET, SST_TF_FEATURE_REV_START, + SST_TF_FEATURE_REV_WIDTH, SST_MUL_FACTOR_NONE); + + for (i = 0; i < turbo_freq.max_clip_freqs; ++i) + _read_tf_level_info("lp_clip*", 
turbo_freq.lp_clip_freq_mhz[i], + turbo_freq.level, SST_TF_INFO_0_OFFSET, + SST_TF_LP_CLIP_RATIO_0_START + + (i * SST_TF_LP_CLIP_RATIO_0_WIDTH), + SST_TF_LP_CLIP_RATIO_0_WIDTH, SST_MUL_FACTOR_FREQ) + + for (i = 0; i < TRL_MAX_LEVELS; ++i) { + for (j = 0; j < TRL_MAX_BUCKETS; ++j) + _read_tf_level_info("cydn*_bucket_*_trl", + turbo_freq.trl_freq_mhz[i][j], turbo_freq.level, + SST_TF_INFO_2_OFFSET + (i * SST_TF_RATIO_0_WIDTH), + j * SST_TF_RATIO_0_WIDTH, SST_TF_RATIO_0_WIDTH, + SST_MUL_FACTOR_FREQ) + } + + if (feature_rev >= 2) { + bool has_tf_info_8 = false; + + for (i = 0; i < SST_TF_INFO_8_BUCKETS; ++i) { + _read_tf_level_info("bucket_*_mod_count", turbo_freq.bucket_core_counts[i], + turbo_freq.level, SST_TF_INFO_8_OFFSET, + SST_TF_NUM_MOD_0_WIDTH * i, SST_TF_NUM_MOD_0_WIDTH, + SST_MUL_FACTOR_NONE) + + if (turbo_freq.bucket_core_counts[i]) + has_tf_info_8 = true; + } + + if (has_tf_info_8) + goto done_core_count; + } + + for (i = 0; i < TRL_MAX_BUCKETS; ++i) + _read_tf_level_info("bucket_*_core_count", turbo_freq.bucket_core_counts[i], + turbo_freq.level, SST_TF_INFO_1_OFFSET, + SST_TF_NUM_CORE_0_WIDTH * i, SST_TF_NUM_CORE_0_WIDTH, + SST_MUL_FACTOR_NONE) + + +done_core_count: + + if (copy_to_user(argp, &turbo_freq, sizeof(turbo_freq))) + return -EFAULT; + + return 0; +} + +static long isst_if_def_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + void __user *argp = (void __user *)arg; + long ret = -ENOTTY; + + mutex_lock(&isst_tpmi_dev_lock); + switch (cmd) { + case ISST_IF_COUNT_TPMI_INSTANCES: + ret = isst_if_get_tpmi_instance_count(argp); + break; + case ISST_IF_CORE_POWER_STATE: + ret = isst_if_core_power_state(argp); + break; + case ISST_IF_CLOS_PARAM: + ret = isst_if_clos_param(argp); + break; + case ISST_IF_CLOS_ASSOC: + ret = isst_if_clos_assoc(argp); + break; + case ISST_IF_PERF_LEVELS: + ret = isst_if_get_perf_level(argp); + break; + case ISST_IF_PERF_SET_LEVEL: + ret = isst_if_set_perf_level(argp); + break; + case ISST_IF_PERF_SET_FEATURE: + ret = isst_if_set_perf_feature(argp); + break; + case ISST_IF_GET_PERF_LEVEL_INFO: + ret = isst_if_get_perf_level_info(argp); + break; + case ISST_IF_GET_PERF_LEVEL_FABRIC_INFO: + ret = isst_if_get_perf_level_fabric_info(argp); + break; + case ISST_IF_GET_PERF_LEVEL_CPU_MASK: + ret = isst_if_get_perf_level_mask(argp); + break; + case ISST_IF_GET_BASE_FREQ_INFO: + ret = isst_if_get_base_freq_info(argp); + break; + case ISST_IF_GET_BASE_FREQ_CPU_MASK: + ret = isst_if_get_base_freq_mask(argp); + break; + case ISST_IF_GET_TURBO_FREQ_INFO: + ret = isst_if_get_turbo_freq_info(argp); + break; + default: + break; + } + mutex_unlock(&isst_tpmi_dev_lock); + + return ret; +} + +#define TPMI_SST_AUTO_SUSPEND_DELAY_MS 2000 + +int tpmi_sst_dev_add(struct auxiliary_device *auxdev) +{ + struct tpmi_per_power_domain_info *pd_info; + bool read_blocked = 0, write_blocked = 0; + struct oobmsm_plat_info *plat_info; + struct device *dev = &auxdev->dev; + struct tpmi_sst_struct *tpmi_sst; + u8 i, num_resources, io_die_cnt; + int ret, pkg = 0, inst = 0; + bool first_enum = false; + u16 cdie_mask; + u8 partition; + + ret = tpmi_get_feature_status(auxdev, TPMI_ID_SST, &read_blocked, &write_blocked); + if (ret) + dev_info(dev, "Can't read feature status: ignoring read/write blocked status\n"); + + if (read_blocked) { + dev_info(dev, "Firmware has blocked reads, exiting\n"); + return -ENODEV; + } + + plat_info = tpmi_get_platform_data(auxdev); + if (!plat_info) { + dev_err(dev, "No platform info\n"); + return -EINVAL; + } + + pkg = 
plat_info->package_id;
+	if (pkg >= topology_max_packages()) {
+		dev_err(dev, "Invalid package id: %x\n", pkg);
+		return -EINVAL;
+	}
+
+	partition = plat_info->partition;
+	if (partition >= SST_MAX_PARTITIONS) {
+		dev_err(&auxdev->dev, "Invalid partition: %x\n", partition);
+		return -EINVAL;
+	}
+
+	num_resources = tpmi_get_resource_count(auxdev);
+
+	if (!num_resources)
+		return -EINVAL;
+
+	mutex_lock(&isst_tpmi_dev_lock);
+
+	if (isst_common.sst_inst[pkg]) {
+		tpmi_sst = isst_common.sst_inst[pkg];
+	} else {
+		/*
+		 * A tpmi_sst instance covers a whole package, so it needs
+		 * to be allocated only once for both partitions. We can't
+		 * use devm_* allocation here as each partition is a
+		 * different device, which can be unbound.
+		 */
+		tpmi_sst = kzalloc(sizeof(*tpmi_sst), GFP_KERNEL);
+		if (!tpmi_sst) {
+			ret = -ENOMEM;
+			goto unlock_exit;
+		}
+		first_enum = true;
+	}
+
+	ret = 0;
+
+	pd_info = devm_kcalloc(dev, num_resources, sizeof(*pd_info), GFP_KERNEL);
+	if (!pd_info) {
+		ret = -ENOMEM;
+		goto unlock_free;
+	}
+
+	/* Get the IO die count, if cdie_mask is present */
+	if (plat_info->cdie_mask) {
+		u8 cdie_range;
+
+		cdie_mask = plat_info->cdie_mask;
+		cdie_range = fls(cdie_mask) - ffs(cdie_mask) + 1;
+		io_die_cnt = num_resources - cdie_range;
+	} else {
+		/*
+		 * This is a synthetic mask; be careful when assuming that
+		 * the dies it covers are compute dies only.
+		 */
+		cdie_mask = (1 << num_resources) - 1;
+		io_die_cnt = 0;
+	}
+
+	for (i = 0; i < num_resources; ++i) {
+		struct resource *res;
+
+		res = tpmi_get_resource_at_index(auxdev, i);
+		if (!res) {
+			pd_info[i].sst_base = NULL;
+			continue;
+		}
+
+		pd_info[i].package_id = pkg;
+		pd_info[i].power_domain_id = i;
+		pd_info[i].auxdev = auxdev;
+		pd_info[i].write_blocked = write_blocked;
+		pd_info[i].sst_base = devm_ioremap_resource(dev, res);
+		if (IS_ERR(pd_info[i].sst_base)) {
+			ret = PTR_ERR(pd_info[i].sst_base);
+			goto unlock_free;
+		}
+
+		if (sst_main(auxdev, &pd_info[i])) {
+			/*
+			 * This entry is not valid; hardware can partially
+			 * populate dies, in which case MMIO reads return
+			 * 0xFFs. Some pre-production hardware may also have
+			 * invalid data. Don't fail; continue to use the
+			 * other dies with valid data.
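+			 * An all-1s SST header fails the interface version
+			 * check in sst_main(), which is how such dies are
+			 * detected.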
+			 */
+			devm_iounmap(dev, pd_info[i].sst_base);
+			pd_info[i].sst_base = NULL;
+			continue;
+		}
+
+		++inst;
+	}
+
+	if (!inst) {
+		ret = -ENODEV;
+		goto unlock_free;
+	}
+
+	tpmi_sst->package_id = pkg;
+
+	tpmi_sst->power_domain_info[partition] = pd_info;
+	tpmi_sst->number_of_power_domains[partition] = num_resources;
+	tpmi_sst->cdie_mask[partition] = cdie_mask;
+	tpmi_sst->io_dies[partition] = io_die_cnt;
+	tpmi_sst->partition_mask |= BIT(partition);
+	tpmi_sst->partition_mask_current |= BIT(partition);
+
+	auxiliary_set_drvdata(auxdev, tpmi_sst);
+
+	if (isst_common.max_index < pkg)
+		isst_common.max_index = pkg;
+	isst_common.sst_inst[pkg] = tpmi_sst;
+
+unlock_free:
+	if (ret && first_enum)
+		kfree(tpmi_sst);
+unlock_exit:
+	mutex_unlock(&isst_tpmi_dev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_add, "INTEL_TPMI_SST");
+
+void tpmi_sst_dev_remove(struct auxiliary_device *auxdev)
+{
+	struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev);
+	struct oobmsm_plat_info *plat_info;
+
+	plat_info = tpmi_get_platform_data(auxdev);
+	if (!plat_info)
+		return;
+
+	mutex_lock(&isst_tpmi_dev_lock);
+	tpmi_sst->power_domain_info[plat_info->partition] = NULL;
+	tpmi_sst->partition_mask_current &= ~BIT(plat_info->partition);
+	/* Free the package instance when all partitions are removed */
+	if (!tpmi_sst->partition_mask_current) {
+		isst_common.sst_inst[tpmi_sst->package_id] = NULL;
+		kfree(tpmi_sst);
+	}
+	mutex_unlock(&isst_tpmi_dev_lock);
+}
+EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_remove, "INTEL_TPMI_SST");
+
+void tpmi_sst_dev_suspend(struct auxiliary_device *auxdev)
+{
+	struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev);
+	struct tpmi_per_power_domain_info *power_domain_info;
+	struct oobmsm_plat_info *plat_info;
+	void __iomem *cp_base;
+
+	plat_info = tpmi_get_platform_data(auxdev);
+	if (!plat_info)
+		return;
+
+	power_domain_info = tpmi_sst->power_domain_info[plat_info->partition];
+
+	cp_base = power_domain_info->sst_base + power_domain_info->sst_header.cp_offset;
+	power_domain_info->saved_sst_cp_control = readq(cp_base + SST_CP_CONTROL_OFFSET);
+
+	memcpy_fromio(power_domain_info->saved_clos_configs, cp_base + SST_CLOS_CONFIG_0_OFFSET,
+		      sizeof(power_domain_info->saved_clos_configs));
+
+	memcpy_fromio(power_domain_info->saved_clos_assocs, cp_base + SST_CLOS_ASSOC_0_OFFSET,
+		      sizeof(power_domain_info->saved_clos_assocs));
+
+	power_domain_info->saved_pp_control = readq(power_domain_info->sst_base +
+						    power_domain_info->sst_header.pp_offset +
+						    SST_PP_CONTROL_OFFSET);
+}
+EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_suspend, "INTEL_TPMI_SST");
+
+void tpmi_sst_dev_resume(struct auxiliary_device *auxdev)
+{
+	struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev);
+	struct tpmi_per_power_domain_info *power_domain_info;
+	struct oobmsm_plat_info *plat_info;
+	void __iomem *cp_base;
+
+	plat_info = tpmi_get_platform_data(auxdev);
+	if (!plat_info)
+		return;
+
+	power_domain_info = tpmi_sst->power_domain_info[plat_info->partition];
+
+	cp_base = power_domain_info->sst_base + power_domain_info->sst_header.cp_offset;
+	writeq(power_domain_info->saved_sst_cp_control, cp_base + SST_CP_CONTROL_OFFSET);
+
+	memcpy_toio(cp_base + SST_CLOS_CONFIG_0_OFFSET, power_domain_info->saved_clos_configs,
+		    sizeof(power_domain_info->saved_clos_configs));
+
+	memcpy_toio(cp_base + SST_CLOS_ASSOC_0_OFFSET, power_domain_info->saved_clos_assocs,
+		    sizeof(power_domain_info->saved_clos_assocs));
+
+	writeq(power_domain_info->saved_pp_control, power_domain_info->sst_base +
+
power_domain_info->sst_header.pp_offset + SST_PP_CONTROL_OFFSET); +} +EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_resume, "INTEL_TPMI_SST"); + +#define ISST_TPMI_API_VERSION 0x03 + +int tpmi_sst_init(void) +{ + struct isst_if_cmd_cb cb; + int ret = 0; + + mutex_lock(&isst_tpmi_dev_lock); + + if (isst_core_usage_count) { + ++isst_core_usage_count; + goto init_done; + } + + isst_common.sst_inst = kcalloc(topology_max_packages(), + sizeof(*isst_common.sst_inst), + GFP_KERNEL); + if (!isst_common.sst_inst) { + ret = -ENOMEM; + goto init_done; + } + + memset(&cb, 0, sizeof(cb)); + cb.cmd_size = sizeof(struct isst_if_io_reg); + cb.offset = offsetof(struct isst_if_io_regs, io_reg); + cb.cmd_callback = NULL; + cb.api_version = ISST_TPMI_API_VERSION; + cb.def_ioctl = isst_if_def_ioctl; + cb.owner = THIS_MODULE; + ret = isst_if_cdev_register(ISST_IF_DEV_TPMI, &cb); + if (ret) + kfree(isst_common.sst_inst); + else + ++isst_core_usage_count; +init_done: + mutex_unlock(&isst_tpmi_dev_lock); + return ret; +} +EXPORT_SYMBOL_NS_GPL(tpmi_sst_init, "INTEL_TPMI_SST"); + +void tpmi_sst_exit(void) +{ + mutex_lock(&isst_tpmi_dev_lock); + if (isst_core_usage_count) + --isst_core_usage_count; + + if (!isst_core_usage_count) { + isst_if_cdev_unregister(ISST_IF_DEV_TPMI); + kfree(isst_common.sst_inst); + } + mutex_unlock(&isst_tpmi_dev_lock); +} +EXPORT_SYMBOL_NS_GPL(tpmi_sst_exit, "INTEL_TPMI_SST"); + +MODULE_IMPORT_NS("INTEL_TPMI"); +MODULE_IMPORT_NS("INTEL_TPMI_POWER_DOMAIN"); + +MODULE_DESCRIPTION("ISST TPMI interface module"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.h b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.h new file mode 100644 index 000000000000..900b483703f9 --- /dev/null +++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Intel Speed Select Interface: Drivers Internal defines + * Copyright (c) 2023, Intel Corporation. + * All rights reserved. 
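+ *
+ * These entry points are expected to be wired into the SST TPMI
+ * auxiliary driver: tpmi_sst_init() and tpmi_sst_dev_add() from probe,
+ * tpmi_sst_dev_remove() and tpmi_sst_exit() from remove, and the
+ * suspend/resume helpers from its PM callbacks.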
+ * + */ + +#ifndef _ISST_TPMI_CORE_H +#define _ISST_TPMI_CORE_H + +int tpmi_sst_init(void); +void tpmi_sst_exit(void); +int tpmi_sst_dev_add(struct auxiliary_device *auxdev); +void tpmi_sst_dev_remove(struct auxiliary_device *auxdev); +void tpmi_sst_dev_suspend(struct auxiliary_device *auxdev); +void tpmi_sst_dev_resume(struct auxiliary_device *auxdev); +#endif diff --git a/drivers/platform/x86/intel/telemetry/core.c b/drivers/platform/x86/intel/telemetry/core.c index fdf55b5d6948..f312864b8d07 100644 --- a/drivers/platform/x86/intel/telemetry/core.c +++ b/drivers/platform/x86/intel/telemetry/core.c @@ -21,33 +21,6 @@ struct telemetry_core_config { static struct telemetry_core_config telm_core_conf; -static int telemetry_def_update_events(struct telemetry_evtconfig pss_evtconfig, - struct telemetry_evtconfig ioss_evtconfig) -{ - return 0; -} - -static int telemetry_def_set_sampling_period(u8 pss_period, u8 ioss_period) -{ - return 0; -} - -static int telemetry_def_get_sampling_period(u8 *pss_min_period, - u8 *pss_max_period, - u8 *ioss_min_period, - u8 *ioss_max_period) -{ - return 0; -} - -static int telemetry_def_get_eventconfig( - struct telemetry_evtconfig *pss_evtconfig, - struct telemetry_evtconfig *ioss_evtconfig, - int pss_len, int ioss_len) -{ - return 0; -} - static int telemetry_def_get_trace_verbosity(enum telemetry_unit telem_unit, u32 *verbosity) { @@ -75,145 +48,14 @@ static int telemetry_def_read_eventlog(enum telemetry_unit telem_unit, return 0; } -static int telemetry_def_add_events(u8 num_pss_evts, u8 num_ioss_evts, - u32 *pss_evtmap, u32 *ioss_evtmap) -{ - return 0; -} - -static int telemetry_def_reset_events(void) -{ - return 0; -} - static const struct telemetry_core_ops telm_defpltops = { - .set_sampling_period = telemetry_def_set_sampling_period, - .get_sampling_period = telemetry_def_get_sampling_period, .get_trace_verbosity = telemetry_def_get_trace_verbosity, .set_trace_verbosity = telemetry_def_set_trace_verbosity, .raw_read_eventlog = telemetry_def_raw_read_eventlog, - .get_eventconfig = telemetry_def_get_eventconfig, .read_eventlog = telemetry_def_read_eventlog, - .update_events = telemetry_def_update_events, - .reset_events = telemetry_def_reset_events, - .add_events = telemetry_def_add_events, }; /** - * telemetry_update_events() - Update telemetry Configuration - * @pss_evtconfig: PSS related config. No change if num_evts = 0. - * @pss_evtconfig: IOSS related config. No change if num_evts = 0. - * - * This API updates the IOSS & PSS Telemetry configuration. Old config - * is overwritten. Call telemetry_reset_events when logging is over - * All sample period values should be in the form of: - * bits[6:3] -> value; bits [0:2]-> Exponent; Period = (Value *16^Exponent) - * - * Return: 0 success, < 0 for failure - */ -int telemetry_update_events(struct telemetry_evtconfig pss_evtconfig, - struct telemetry_evtconfig ioss_evtconfig) -{ - return telm_core_conf.telem_ops->update_events(pss_evtconfig, - ioss_evtconfig); -} -EXPORT_SYMBOL_GPL(telemetry_update_events); - - -/** - * telemetry_set_sampling_period() - Sets the IOSS & PSS sampling period - * @pss_period: placeholder for PSS Period to be set. 
- * Set to 0 if not required to be updated - * @ioss_period: placeholder for IOSS Period to be set - * Set to 0 if not required to be updated - * - * All values should be in the form of: - * bits[6:3] -> value; bits [0:2]-> Exponent; Period = (Value *16^Exponent) - * - * Return: 0 success, < 0 for failure - */ -int telemetry_set_sampling_period(u8 pss_period, u8 ioss_period) -{ - return telm_core_conf.telem_ops->set_sampling_period(pss_period, - ioss_period); -} -EXPORT_SYMBOL_GPL(telemetry_set_sampling_period); - -/** - * telemetry_get_sampling_period() - Get IOSS & PSS min & max sampling period - * @pss_min_period: placeholder for PSS Min Period supported - * @pss_max_period: placeholder for PSS Max Period supported - * @ioss_min_period: placeholder for IOSS Min Period supported - * @ioss_max_period: placeholder for IOSS Max Period supported - * - * All values should be in the form of: - * bits[6:3] -> value; bits [0:2]-> Exponent; Period = (Value *16^Exponent) - * - * Return: 0 success, < 0 for failure - */ -int telemetry_get_sampling_period(u8 *pss_min_period, u8 *pss_max_period, - u8 *ioss_min_period, u8 *ioss_max_period) -{ - return telm_core_conf.telem_ops->get_sampling_period(pss_min_period, - pss_max_period, - ioss_min_period, - ioss_max_period); -} -EXPORT_SYMBOL_GPL(telemetry_get_sampling_period); - - -/** - * telemetry_reset_events() - Restore the IOSS & PSS configuration to default - * - * Return: 0 success, < 0 for failure - */ -int telemetry_reset_events(void) -{ - return telm_core_conf.telem_ops->reset_events(); -} -EXPORT_SYMBOL_GPL(telemetry_reset_events); - -/** - * telemetry_get_eventconfig() - Returns the pss and ioss events enabled - * @pss_evtconfig: Pointer to PSS related configuration. - * @pss_evtconfig: Pointer to IOSS related configuration. - * @pss_len: Number of u32 elements allocated for pss_evtconfig array - * @ioss_len: Number of u32 elements allocated for ioss_evtconfig array - * - * Return: 0 success, < 0 for failure - */ -int telemetry_get_eventconfig(struct telemetry_evtconfig *pss_evtconfig, - struct telemetry_evtconfig *ioss_evtconfig, - int pss_len, int ioss_len) -{ - return telm_core_conf.telem_ops->get_eventconfig(pss_evtconfig, - ioss_evtconfig, - pss_len, ioss_len); -} -EXPORT_SYMBOL_GPL(telemetry_get_eventconfig); - -/** - * telemetry_add_events() - Add IOSS & PSS configuration to existing settings. - * @num_pss_evts: Number of PSS Events (<29) in pss_evtmap. Can be 0. - * @num_ioss_evts: Number of IOSS Events (<29) in ioss_evtmap. Can be 0. - * @pss_evtmap: Array of PSS Event-IDs to Enable - * @ioss_evtmap: Array of PSS Event-IDs to Enable - * - * Events are appended to Old Configuration. In case of total events > 28, it - * returns error. 
Call telemetry_reset_events to reset after eventlog done - * - * Return: 0 success, < 0 for failure - */ -int telemetry_add_events(u8 num_pss_evts, u8 num_ioss_evts, - u32 *pss_evtmap, u32 *ioss_evtmap) -{ - return telm_core_conf.telem_ops->add_events(num_pss_evts, - num_ioss_evts, pss_evtmap, - ioss_evtmap); -} -EXPORT_SYMBOL_GPL(telemetry_add_events); - -/** * telemetry_read_events() - Fetches samples as specified by evtlog.telem_evt_id * @telem_unit: Specify whether IOSS or PSS Read * @evtlog: Array of telemetry_evtlog structs to fill data @@ -231,25 +73,6 @@ int telemetry_read_events(enum telemetry_unit telem_unit, EXPORT_SYMBOL_GPL(telemetry_read_events); /** - * telemetry_raw_read_events() - Fetch samples specified by evtlog.telem_evt_id - * @telem_unit: Specify whether IOSS or PSS Read - * @evtlog: Array of telemetry_evtlog structs to fill data - * evtlog.telem_evt_id specifies the ids to read - * @len: Length of array of evtlog - * - * The caller must take care of locking in this case. - * - * Return: number of eventlogs read for success, < 0 for failure - */ -int telemetry_raw_read_events(enum telemetry_unit telem_unit, - struct telemetry_evtlog *evtlog, int len) -{ - return telm_core_conf.telem_ops->raw_read_eventlog(telem_unit, evtlog, - len, 0); -} -EXPORT_SYMBOL_GPL(telemetry_raw_read_events); - -/** * telemetry_read_eventlog() - Fetch the Telemetry log from PSS or IOSS * @telem_unit: Specify whether IOSS or PSS Read * @evtlog: Array of telemetry_evtlog structs to fill data diff --git a/drivers/platform/x86/intel/telemetry/debugfs.c b/drivers/platform/x86/intel/telemetry/debugfs.c index 1d4d0fbfd63c..70e5736c44c7 100644 --- a/drivers/platform/x86/intel/telemetry/debugfs.c +++ b/drivers/platform/x86/intel/telemetry/debugfs.c @@ -308,8 +308,8 @@ static struct telemetry_debugfs_conf telem_apl_debugfs_conf = { }; static const struct x86_cpu_id telemetry_debugfs_cpu_ids[] = { - X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &telem_apl_debugfs_conf), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &telem_apl_debugfs_conf), + X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &telem_apl_debugfs_conf), + X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_PLUS, &telem_apl_debugfs_conf), {} }; MODULE_DEVICE_TABLE(x86cpu, telemetry_debugfs_cpu_ids); diff --git a/drivers/platform/x86/intel/telemetry/pltdrv.c b/drivers/platform/x86/intel/telemetry/pltdrv.c index 405dea87de6b..f23c170a55dc 100644 --- a/drivers/platform/x86/intel/telemetry/pltdrv.c +++ b/drivers/platform/x86/intel/telemetry/pltdrv.c @@ -177,8 +177,8 @@ static struct telemetry_plt_config telem_glk_config = { }; static const struct x86_cpu_id telemetry_cpu_ids[] = { - X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &telem_apl_config), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &telem_glk_config), + X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &telem_apl_config), + X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_PLUS, &telem_glk_config), {} }; @@ -639,231 +639,6 @@ static int telemetry_setup(struct platform_device *pdev) return 0; } -static int telemetry_plt_update_events(struct telemetry_evtconfig pss_evtconfig, - struct telemetry_evtconfig ioss_evtconfig) -{ - int ret; - - if ((pss_evtconfig.num_evts > 0) && - (TELEM_SAMPLE_PERIOD_INVALID(pss_evtconfig.period))) { - pr_err("PSS Sampling Period Out of Range\n"); - return -EINVAL; - } - - if ((ioss_evtconfig.num_evts > 0) && - (TELEM_SAMPLE_PERIOD_INVALID(ioss_evtconfig.period))) { - pr_err("IOSS Sampling Period Out of Range\n"); - return -EINVAL; - } - - ret = telemetry_setup_evtconfig(pss_evtconfig, ioss_evtconfig, - TELEM_UPDATE); - 
if (ret) - pr_err("TELEMETRY Config Failed\n"); - - return ret; -} - - -static int telemetry_plt_set_sampling_period(u8 pss_period, u8 ioss_period) -{ - u32 telem_ctrl = 0; - int ret = 0; - - mutex_lock(&(telm_conf->telem_lock)); - if (ioss_period) { - struct intel_scu_ipc_dev *scu = telm_conf->scu; - - if (TELEM_SAMPLE_PERIOD_INVALID(ioss_period)) { - pr_err("IOSS Sampling Period Out of Range\n"); - ret = -EINVAL; - goto out; - } - - /* Get telemetry EVENT CTL */ - ret = intel_scu_ipc_dev_command(scu, IOSS_TELEM, - IOSS_TELEM_EVENT_CTL_READ, NULL, 0, - &telem_ctrl, sizeof(telem_ctrl)); - if (ret) { - pr_err("IOSS TELEM_CTRL Read Failed\n"); - goto out; - } - - /* Disable Telemetry */ - TELEM_DISABLE(telem_ctrl); - - ret = intel_scu_ipc_dev_command(scu, IOSS_TELEM, - IOSS_TELEM_EVENT_CTL_WRITE, - &telem_ctrl, sizeof(telem_ctrl), - NULL, 0); - if (ret) { - pr_err("IOSS TELEM_CTRL Event Disable Write Failed\n"); - goto out; - } - - /* Enable Periodic Telemetry Events and enable SRAM trace */ - TELEM_CLEAR_SAMPLE_PERIOD(telem_ctrl); - TELEM_ENABLE_SRAM_EVT_TRACE(telem_ctrl); - TELEM_ENABLE_PERIODIC(telem_ctrl); - telem_ctrl |= ioss_period; - - ret = intel_scu_ipc_dev_command(scu, IOSS_TELEM, - IOSS_TELEM_EVENT_CTL_WRITE, - &telem_ctrl, sizeof(telem_ctrl), - NULL, 0); - if (ret) { - pr_err("IOSS TELEM_CTRL Event Enable Write Failed\n"); - goto out; - } - telm_conf->ioss_config.curr_period = ioss_period; - } - - if (pss_period) { - if (TELEM_SAMPLE_PERIOD_INVALID(pss_period)) { - pr_err("PSS Sampling Period Out of Range\n"); - ret = -EINVAL; - goto out; - } - - /* Get telemetry EVENT CTL */ - ret = intel_punit_ipc_command( - IPC_PUNIT_BIOS_READ_TELE_EVENT_CTRL, - 0, 0, NULL, &telem_ctrl); - if (ret) { - pr_err("PSS TELEM_CTRL Read Failed\n"); - goto out; - } - - /* Disable Telemetry */ - TELEM_DISABLE(telem_ctrl); - ret = intel_punit_ipc_command( - IPC_PUNIT_BIOS_WRITE_TELE_EVENT_CTRL, - 0, 0, &telem_ctrl, NULL); - if (ret) { - pr_err("PSS TELEM_CTRL Event Disable Write Failed\n"); - goto out; - } - - /* Enable Periodic Telemetry Events and enable SRAM trace */ - TELEM_CLEAR_SAMPLE_PERIOD(telem_ctrl); - TELEM_ENABLE_SRAM_EVT_TRACE(telem_ctrl); - TELEM_ENABLE_PERIODIC(telem_ctrl); - telem_ctrl |= pss_period; - - ret = intel_punit_ipc_command( - IPC_PUNIT_BIOS_WRITE_TELE_EVENT_CTRL, - 0, 0, &telem_ctrl, NULL); - if (ret) { - pr_err("PSS TELEM_CTRL Event Enable Write Failed\n"); - goto out; - } - telm_conf->pss_config.curr_period = pss_period; - } - -out: - mutex_unlock(&(telm_conf->telem_lock)); - return ret; -} - - -static int telemetry_plt_get_sampling_period(u8 *pss_min_period, - u8 *pss_max_period, - u8 *ioss_min_period, - u8 *ioss_max_period) -{ - *pss_min_period = telm_conf->pss_config.min_period; - *pss_max_period = telm_conf->pss_config.max_period; - *ioss_min_period = telm_conf->ioss_config.min_period; - *ioss_max_period = telm_conf->ioss_config.max_period; - - return 0; -} - - -static int telemetry_plt_reset_events(void) -{ - struct telemetry_evtconfig pss_evtconfig, ioss_evtconfig; - int ret; - - pss_evtconfig.evtmap = NULL; - pss_evtconfig.num_evts = TELEM_MAX_OS_ALLOCATED_EVENTS; - pss_evtconfig.period = TELEM_SAMPLING_DEFAULT_PERIOD; - - ioss_evtconfig.evtmap = NULL; - ioss_evtconfig.num_evts = TELEM_MAX_OS_ALLOCATED_EVENTS; - ioss_evtconfig.period = TELEM_SAMPLING_DEFAULT_PERIOD; - - ret = telemetry_setup_evtconfig(pss_evtconfig, ioss_evtconfig, - TELEM_RESET); - if (ret) - pr_err("TELEMETRY Reset Failed\n"); - - return ret; -} - - -static int telemetry_plt_get_eventconfig(struct 
telemetry_evtconfig *pss_config, - struct telemetry_evtconfig *ioss_config, - int pss_len, int ioss_len) -{ - u32 *pss_evtmap, *ioss_evtmap; - u32 index; - - pss_evtmap = pss_config->evtmap; - ioss_evtmap = ioss_config->evtmap; - - mutex_lock(&(telm_conf->telem_lock)); - pss_config->num_evts = telm_conf->pss_config.ssram_evts_used; - ioss_config->num_evts = telm_conf->ioss_config.ssram_evts_used; - - pss_config->period = telm_conf->pss_config.curr_period; - ioss_config->period = telm_conf->ioss_config.curr_period; - - if ((pss_len < telm_conf->pss_config.ssram_evts_used) || - (ioss_len < telm_conf->ioss_config.ssram_evts_used)) { - mutex_unlock(&(telm_conf->telem_lock)); - return -EINVAL; - } - - for (index = 0; index < telm_conf->pss_config.ssram_evts_used; - index++) { - pss_evtmap[index] = - telm_conf->pss_config.telem_evts[index].evt_id; - } - - for (index = 0; index < telm_conf->ioss_config.ssram_evts_used; - index++) { - ioss_evtmap[index] = - telm_conf->ioss_config.telem_evts[index].evt_id; - } - - mutex_unlock(&(telm_conf->telem_lock)); - return 0; -} - - -static int telemetry_plt_add_events(u8 num_pss_evts, u8 num_ioss_evts, - u32 *pss_evtmap, u32 *ioss_evtmap) -{ - struct telemetry_evtconfig pss_evtconfig, ioss_evtconfig; - int ret; - - pss_evtconfig.evtmap = pss_evtmap; - pss_evtconfig.num_evts = num_pss_evts; - pss_evtconfig.period = telm_conf->pss_config.curr_period; - - ioss_evtconfig.evtmap = ioss_evtmap; - ioss_evtconfig.num_evts = num_ioss_evts; - ioss_evtconfig.period = telm_conf->ioss_config.curr_period; - - ret = telemetry_setup_evtconfig(pss_evtconfig, ioss_evtconfig, - TELEM_ADD); - if (ret) - pr_err("TELEMETRY ADD Failed\n"); - - return ret; -} - static int telem_evtlog_read(enum telemetry_unit telem_unit, struct telem_ssram_region *ssram_region, u8 len) { @@ -1093,14 +868,8 @@ out: static const struct telemetry_core_ops telm_pltops = { .get_trace_verbosity = telemetry_plt_get_trace_verbosity, .set_trace_verbosity = telemetry_plt_set_trace_verbosity, - .set_sampling_period = telemetry_plt_set_sampling_period, - .get_sampling_period = telemetry_plt_get_sampling_period, .raw_read_eventlog = telemetry_plt_raw_read_eventlog, - .get_eventconfig = telemetry_plt_get_eventconfig, - .update_events = telemetry_plt_update_events, .read_eventlog = telemetry_plt_read_eventlog, - .reset_events = telemetry_plt_reset_events, - .add_events = telemetry_plt_add_events, }; static int telemetry_pltdrv_probe(struct platform_device *pdev) @@ -1156,10 +925,9 @@ out: return ret; } -static int telemetry_pltdrv_remove(struct platform_device *pdev) +static void telemetry_pltdrv_remove(struct platform_device *pdev) { telemetry_clear_pltdata(); - return 0; } static struct platform_driver telemetry_soc_driver = { diff --git a/drivers/platform/x86/intel/tpmi_power_domains.c b/drivers/platform/x86/intel/tpmi_power_domains.c new file mode 100644 index 000000000000..7d93119a4c30 --- /dev/null +++ b/drivers/platform/x86/intel/tpmi_power_domains.c @@ -0,0 +1,265 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Mapping of TPMI power domains CPU mapping + * + * Copyright (c) 2024, Intel Corporation. 
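+ *
+ * Builds a per-CPU table from MSR 0x54 (PM_LOGICAL_ID) so that other
+ * TPMI drivers can translate between Linux CPU numbers and Punit
+ * package, power-domain, core and thread IDs.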
+ */ + +#include <linux/bitfield.h> +#include <linux/cleanup.h> +#include <linux/cpuhotplug.h> +#include <linux/cpumask.h> +#include <linux/errno.h> +#include <linux/export.h> +#include <linux/hashtable.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/overflow.h> +#include <linux/slab.h> +#include <linux/topology.h> +#include <linux/types.h> + +#include <asm/cpu_device_id.h> +#include <asm/intel-family.h> +#include <asm/msr.h> + +#include "tpmi_power_domains.h" + +#define MSR_PM_LOGICAL_ID 0x54 + +/* + * Struct of MSR 0x54 + * [15:11] PM_DOMAIN_ID + * [10:3] MODULE_ID (aka IDI_AGENT_ID) + * [2:0] LP_ID + * For Atom: + * [2] Always 0 + * [1:0] core ID within module + * For Core + * [2:1] Always 0 + * [0] thread ID + */ + +#define LP_ID_MASK GENMASK_ULL(2, 0) +#define MODULE_ID_MASK GENMASK_ULL(10, 3) +#define PM_DOMAIN_ID_MASK GENMASK_ULL(15, 11) + +/** + * struct tpmi_cpu_info - Mapping information for a CPU + * @hnode: Used to add mapping information to hash list + * @linux_cpu: Linux CPU number + * @pkg_id: Package ID of this CPU + * @punit_thread_id: Punit thread id of this CPU + * @punit_core_id: Punit core id + * @punit_domain_id: Power domain id from Punit + * + * Structure to store mapping information for a Linux CPU + * to a Punit core, thread and power domain. + */ +struct tpmi_cpu_info { + struct hlist_node hnode; + int linux_cpu; + u8 pkg_id; + u8 punit_thread_id; + u8 punit_core_id; + u8 punit_domain_id; +}; + +static DEFINE_PER_CPU(struct tpmi_cpu_info, tpmi_cpu_info); + +/* The dynamically assigned cpu hotplug state to free later */ +static enum cpuhp_state tpmi_hp_state __read_mostly; + +#define MAX_POWER_DOMAINS 8 + +static cpumask_t *tpmi_power_domain_mask; + +static u16 *domain_die_map; + +/* Lock to protect tpmi_power_domain_mask and tpmi_cpu_hash */ +static DEFINE_MUTEX(tpmi_lock); + +static const struct x86_cpu_id tpmi_cpu_ids[] = { + X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, NULL), + X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, NULL), + X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, NULL), + X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X, NULL), + X86_MATCH_VFM(INTEL_GRANITERAPIDS_D, NULL), + X86_MATCH_VFM(INTEL_DIAMONDRAPIDS_X, NULL), + {} +}; +MODULE_DEVICE_TABLE(x86cpu, tpmi_cpu_ids); + +static DECLARE_HASHTABLE(tpmi_cpu_hash, 8); + +static bool tpmi_domain_is_valid(struct tpmi_cpu_info *info) +{ + return info->pkg_id < topology_max_packages() && + info->punit_domain_id < MAX_POWER_DOMAINS; +} + +int tpmi_get_linux_cpu_number(int package_id, int domain_id, int punit_core_id) +{ + struct tpmi_cpu_info *info; + int ret = -EINVAL; + + guard(mutex)(&tpmi_lock); + hash_for_each_possible(tpmi_cpu_hash, info, hnode, punit_core_id) { + if (info->punit_domain_id == domain_id && info->pkg_id == package_id) { + ret = info->linux_cpu; + break; + } + } + + return ret; +} +EXPORT_SYMBOL_NS_GPL(tpmi_get_linux_cpu_number, "INTEL_TPMI_POWER_DOMAIN"); + +int tpmi_get_punit_core_number(int cpu_no) +{ + if (cpu_no >= num_possible_cpus()) + return -EINVAL; + + return per_cpu(tpmi_cpu_info, cpu_no).punit_core_id; +} +EXPORT_SYMBOL_NS_GPL(tpmi_get_punit_core_number, "INTEL_TPMI_POWER_DOMAIN"); + +int tpmi_get_power_domain_id(int cpu_no) +{ + if (cpu_no >= num_possible_cpus()) + return -EINVAL; + + return per_cpu(tpmi_cpu_info, cpu_no).punit_domain_id; +} +EXPORT_SYMBOL_NS_GPL(tpmi_get_power_domain_id, "INTEL_TPMI_POWER_DOMAIN"); + +cpumask_t *tpmi_get_power_domain_mask(int cpu_no) +{ + struct tpmi_cpu_info *info; + cpumask_t *mask; + int index; + + if (cpu_no >= num_possible_cpus()) + return NULL; 
+ + info = &per_cpu(tpmi_cpu_info, cpu_no); + if (!tpmi_domain_is_valid(info)) + return NULL; + + index = info->pkg_id * MAX_POWER_DOMAINS + info->punit_domain_id; + guard(mutex)(&tpmi_lock); + mask = &tpmi_power_domain_mask[index]; + + return mask; +} +EXPORT_SYMBOL_NS_GPL(tpmi_get_power_domain_mask, "INTEL_TPMI_POWER_DOMAIN"); + +int tpmi_get_linux_die_id(int pkg_id, int domain_id) +{ + if (pkg_id >= topology_max_packages() || domain_id >= MAX_POWER_DOMAINS) + return -EINVAL; + + return domain_die_map[pkg_id * MAX_POWER_DOMAINS + domain_id]; +} +EXPORT_SYMBOL_NS_GPL(tpmi_get_linux_die_id, "INTEL_TPMI_POWER_DOMAIN"); + +static int tpmi_get_logical_id(unsigned int cpu, struct tpmi_cpu_info *info) +{ + u64 data; + int ret; + + ret = rdmsrq_safe(MSR_PM_LOGICAL_ID, &data); + if (ret) + return ret; + + info->punit_domain_id = FIELD_GET(PM_DOMAIN_ID_MASK, data); + if (info->punit_domain_id >= MAX_POWER_DOMAINS) + return -EINVAL; + + info->punit_thread_id = FIELD_GET(LP_ID_MASK, data); + info->punit_core_id = FIELD_GET(MODULE_ID_MASK, data); + info->pkg_id = topology_logical_package_id(cpu); + info->linux_cpu = cpu; + + return 0; +} + +static int tpmi_cpu_online(unsigned int cpu) +{ + struct tpmi_cpu_info *info = &per_cpu(tpmi_cpu_info, cpu); + int ret, index; + + /* Don't fail CPU online for some bad mapping of CPUs */ + ret = tpmi_get_logical_id(cpu, info); + if (ret) + return 0; + + index = info->pkg_id * MAX_POWER_DOMAINS + info->punit_domain_id; + + guard(mutex)(&tpmi_lock); + cpumask_set_cpu(cpu, &tpmi_power_domain_mask[index]); + hash_add(tpmi_cpu_hash, &info->hnode, info->punit_core_id); + + domain_die_map[info->pkg_id * MAX_POWER_DOMAINS + info->punit_domain_id] = + topology_die_id(cpu); + + return 0; +} + +static int __init tpmi_init(void) +{ + const struct x86_cpu_id *id; + u64 data; + int ret; + + id = x86_match_cpu(tpmi_cpu_ids); + if (!id) + return -ENODEV; + + /* Check for MSR 0x54 presence */ + ret = rdmsrq_safe(MSR_PM_LOGICAL_ID, &data); + if (ret) + return ret; + + tpmi_power_domain_mask = kcalloc(size_mul(topology_max_packages(), MAX_POWER_DOMAINS), + sizeof(*tpmi_power_domain_mask), GFP_KERNEL); + if (!tpmi_power_domain_mask) + return -ENOMEM; + + domain_die_map = kcalloc(size_mul(topology_max_packages(), MAX_POWER_DOMAINS), + sizeof(*domain_die_map), GFP_KERNEL); + if (!domain_die_map) { + ret = -ENOMEM; + goto free_domain_mask; + } + + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "platform/x86/tpmi_power_domains:online", + tpmi_cpu_online, NULL); + if (ret < 0) + goto free_domain_map; + + tpmi_hp_state = ret; + + return 0; + +free_domain_map: + kfree(domain_die_map); + +free_domain_mask: + kfree(tpmi_power_domain_mask); + + return ret; +} +module_init(tpmi_init) + +static void __exit tpmi_exit(void) +{ + cpuhp_remove_state(tpmi_hp_state); + kfree(tpmi_power_domain_mask); + kfree(domain_die_map); +} +module_exit(tpmi_exit) + +MODULE_DESCRIPTION("TPMI Power Domains Mapping"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/intel/tpmi_power_domains.h b/drivers/platform/x86/intel/tpmi_power_domains.h new file mode 100644 index 000000000000..2fd0dd7afbd2 --- /dev/null +++ b/drivers/platform/x86/intel/tpmi_power_domains.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Mapping of TPMI power domain and CPUs + * + * Copyright (c) 2024, Intel Corporation. 
+ */ + +#ifndef _TPMI_POWER_DOMAINS_H_ +#define _TPMI_POWER_DOMAINS_H_ + +#include <linux/cpumask.h> + +int tpmi_get_linux_cpu_number(int package_id, int domain_id, int punit_core_id); +int tpmi_get_punit_core_number(int cpu_no); +int tpmi_get_power_domain_id(int cpu_no); +cpumask_t *tpmi_get_power_domain_mask(int cpu_no); +int tpmi_get_linux_die_id(int pkg_id, int domain_id); + +#endif diff --git a/drivers/platform/x86/intel/turbo_max_3.c b/drivers/platform/x86/intel/turbo_max_3.c index 892140b62898..b5af3e91ba04 100644 --- a/drivers/platform/x86/intel/turbo_max_3.c +++ b/drivers/platform/x86/intel/turbo_max_3.c @@ -17,6 +17,7 @@ #include <asm/cpu_device_id.h> #include <asm/intel-family.h> +#include <asm/msr.h> #define MSR_OC_MAILBOX 0x150 #define MSR_OC_MAILBOX_CMD_OFFSET 32 @@ -41,14 +42,14 @@ static int get_oc_core_priority(unsigned int cpu) value = cmd << MSR_OC_MAILBOX_CMD_OFFSET; /* Set the busy bit to indicate OS is trying to issue command */ value |= BIT_ULL(MSR_OC_MAILBOX_BUSY_BIT); - ret = wrmsrl_safe(MSR_OC_MAILBOX, value); + ret = wrmsrq_safe(MSR_OC_MAILBOX, value); if (ret) { pr_debug("cpu %d OC mailbox write failed\n", cpu); return ret; } for (i = 0; i < OC_MAILBOX_RETRY_COUNT; ++i) { - ret = rdmsrl_safe(MSR_OC_MAILBOX, &value); + ret = rdmsrq_safe(MSR_OC_MAILBOX, &value); if (ret) { pr_debug("cpu %d OC mailbox read failed\n", cpu); break; @@ -114,8 +115,8 @@ static int itmt_legacy_cpu_online(unsigned int cpu) } static const struct x86_cpu_id itmt_legacy_cpu_ids[] = { - X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, NULL), - X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL), + X86_MATCH_VFM(INTEL_BROADWELL_X, NULL), + X86_MATCH_VFM(INTEL_SKYLAKE_X, NULL), {} }; diff --git a/drivers/platform/x86/intel/uncore-frequency/Kconfig b/drivers/platform/x86/intel/uncore-frequency/Kconfig index 21b209124916..a56d55056927 100644 --- a/drivers/platform/x86/intel/uncore-frequency/Kconfig +++ b/drivers/platform/x86/intel/uncore-frequency/Kconfig @@ -6,9 +6,13 @@ menu "Intel Uncore Frequency Control" depends on X86_64 || COMPILE_TEST +config INTEL_UNCORE_FREQ_CONTROL_TPMI + tristate + config INTEL_UNCORE_FREQ_CONTROL tristate "Intel Uncore frequency control driver" depends on X86_64 + select INTEL_UNCORE_FREQ_CONTROL_TPMI if INTEL_TPMI help This driver allows control of Uncore frequency limits on supported server platforms.
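Both the MSR-based driver and the TPMI driver added by this series expose uncore limits in kHz while the hardware encodes them as 7-bit ratios in 100 MHz steps. Below is a minimal sketch of that conversion, using the MSR 0x620 field layout that the series formalizes with FIELD_GET()/FIELD_PREP(); the helper names are illustrative, not part of the patch:

#include <linux/bits.h>
#include <linux/bitfield.h>

#define MSR_UNCORE_RATIO_LIMIT		0x620
#define UNCORE_MAX_RATIO_MASK		GENMASK_ULL(6, 0)	/* max ratio, bits 6:0 */
#define UNCORE_MIN_RATIO_MASK		GENMASK_ULL(14, 8)	/* min ratio, bits 14:8 */
#define UNCORE_FREQ_KHZ_MULTIPLIER	100000			/* one ratio step = 100 MHz */

/* Decode an MSR 0x620 value into the kHz limits shown in sysfs (illustrative) */
static void uncore_ratio_to_khz(u64 cap, unsigned int *min_khz, unsigned int *max_khz)
{
	*max_khz = FIELD_GET(UNCORE_MAX_RATIO_MASK, cap) * UNCORE_FREQ_KHZ_MULTIPLIER;
	*min_khz = FIELD_GET(UNCORE_MIN_RATIO_MASK, cap) * UNCORE_FREQ_KHZ_MULTIPLIER;
}

/* Encode a new max limit in kHz, leaving all other bits of the MSR untouched */
static u64 uncore_set_max_khz(u64 cap, unsigned int khz)
{
	cap &= ~UNCORE_MAX_RATIO_MASK;
	return cap | FIELD_PREP(UNCORE_MAX_RATIO_MASK, khz / UNCORE_FREQ_KHZ_MULTIPLIER);
}

The TPMI path performs the same scaling against each cluster's MMIO CONTROL register instead of the package-scoped MSR, which is what lets one package expose several independent uncore domains.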
diff --git a/drivers/platform/x86/intel/uncore-frequency/Makefile b/drivers/platform/x86/intel/uncore-frequency/Makefile index e0f7968e8285..08ff57492b28 100644 --- a/drivers/platform/x86/intel/uncore-frequency/Makefile +++ b/drivers/platform/x86/intel/uncore-frequency/Makefile @@ -7,3 +7,5 @@ obj-$(CONFIG_INTEL_UNCORE_FREQ_CONTROL) += intel-uncore-frequency.o intel-uncore-frequency-y := uncore-frequency.o obj-$(CONFIG_INTEL_UNCORE_FREQ_CONTROL) += intel-uncore-frequency-common.o intel-uncore-frequency-common-y := uncore-frequency-common.o +obj-$(CONFIG_INTEL_UNCORE_FREQ_CONTROL_TPMI) += intel-uncore-frequency-tpmi.o +intel-uncore-frequency-tpmi-y := uncore-frequency-tpmi.o diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c index cb24de9e97dc..65897fae17df 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c @@ -16,102 +16,141 @@ static struct kobject *uncore_root_kobj; /* uncore instance count */ static int uncore_instance_count; +static DEFINE_IDA(intel_uncore_ida); + /* callbacks for actual HW read/write */ -static int (*uncore_read)(struct uncore_data *data, unsigned int *min, unsigned int *max); -static int (*uncore_write)(struct uncore_data *data, unsigned int input, unsigned int min_max); -static int (*uncore_read_freq)(struct uncore_data *data, unsigned int *freq); +static int (*uncore_read)(struct uncore_data *data, unsigned int *value, enum uncore_index index); +static int (*uncore_write)(struct uncore_data *data, unsigned int input, enum uncore_index index); -static ssize_t show_min_max_freq_khz(struct uncore_data *data, - char *buf, int min_max) +static ssize_t show_domain_id(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - unsigned int min, max; - int ret; + struct uncore_data *data = container_of(attr, struct uncore_data, domain_id_kobj_attr); - mutex_lock(&uncore_lock); - ret = uncore_read(data, &min, &max); - mutex_unlock(&uncore_lock); - if (ret) - return ret; + return sprintf(buf, "%u\n", data->domain_id); +} + +static ssize_t show_fabric_cluster_id(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + struct uncore_data *data = container_of(attr, struct uncore_data, fabric_cluster_id_kobj_attr); - if (min_max) - return sprintf(buf, "%u\n", max); + return sprintf(buf, "%u\n", data->cluster_id); +} + +static ssize_t show_package_id(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + struct uncore_data *data = container_of(attr, struct uncore_data, package_id_kobj_attr); - return sprintf(buf, "%u\n", min); + return sprintf(buf, "%u\n", data->package_id); } -static ssize_t store_min_max_freq_khz(struct uncore_data *data, - const char *buf, ssize_t count, - int min_max) +#define MAX_UNCORE_AGENT_TYPES 4 + +/* The order follows AGENT_TYPE_* defines */ +static const char *agent_name[MAX_UNCORE_AGENT_TYPES] = {"core", "cache", "memory", "io"}; + +static ssize_t show_agent_types(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - unsigned int input; + struct uncore_data *data = container_of(attr, struct uncore_data, agent_types_kobj_attr); + unsigned long agent_mask = data->agent_type_mask; + int agent, length = 0; + + for_each_set_bit(agent, &agent_mask, MAX_UNCORE_AGENT_TYPES) { + if (length) + length += sysfs_emit_at(buf, length, " "); + + length += sysfs_emit_at(buf, length, "%s", agent_name[agent]); + } + + 
length += sysfs_emit_at(buf, length, "\n"); - if (kstrtouint(buf, 10, &input)) - return -EINVAL; + return length; +} + +static ssize_t show_attr(struct uncore_data *data, char *buf, enum uncore_index index) +{ + unsigned int value; + int ret; mutex_lock(&uncore_lock); - uncore_write(data, input, min_max); + ret = uncore_read(data, &value, index); mutex_unlock(&uncore_lock); + if (ret) + return ret; - return count; + return sprintf(buf, "%u\n", value); } -static ssize_t show_perf_status_freq_khz(struct uncore_data *data, char *buf) +static ssize_t store_attr(struct uncore_data *data, const char *buf, ssize_t count, + enum uncore_index index) { - unsigned int freq; + unsigned int input = 0; int ret; + if (index == UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE) { + if (kstrtobool(buf, (bool *)&input)) + return -EINVAL; + } else { + if (kstrtouint(buf, 10, &input)) + return -EINVAL; + } + mutex_lock(&uncore_lock); - ret = uncore_read_freq(data, &freq); + ret = uncore_write(data, input, index); mutex_unlock(&uncore_lock); + if (ret) return ret; - return sprintf(buf, "%u\n", freq); + return count; } -#define store_uncore_min_max(name, min_max) \ - static ssize_t store_##name(struct device *dev, \ - struct device_attribute *attr, \ +#define store_uncore_attr(name, index) \ + static ssize_t store_##name(struct kobject *kobj, \ + struct kobj_attribute *attr, \ const char *buf, size_t count) \ { \ - struct uncore_data *data = container_of(attr, struct uncore_data, name##_dev_attr);\ + struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\ \ - return store_min_max_freq_khz(data, buf, count, \ - min_max); \ + return store_attr(data, buf, count, index); \ } -#define show_uncore_min_max(name, min_max) \ - static ssize_t show_##name(struct device *dev, \ - struct device_attribute *attr, char *buf)\ +#define show_uncore_attr(name, index) \ + static ssize_t show_##name(struct kobject *kobj, \ + struct kobj_attribute *attr, char *buf)\ { \ - struct uncore_data *data = container_of(attr, struct uncore_data, name##_dev_attr);\ + struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\ \ - return show_min_max_freq_khz(data, buf, min_max); \ + return show_attr(data, buf, index); \ } -#define show_uncore_perf_status(name) \ - static ssize_t show_##name(struct device *dev, \ - struct device_attribute *attr, char *buf)\ - { \ - struct uncore_data *data = container_of(attr, struct uncore_data, name##_dev_attr);\ - \ - return show_perf_status_freq_khz(data, buf); \ - } +store_uncore_attr(min_freq_khz, UNCORE_INDEX_MIN_FREQ); +store_uncore_attr(max_freq_khz, UNCORE_INDEX_MAX_FREQ); + +show_uncore_attr(min_freq_khz, UNCORE_INDEX_MIN_FREQ); +show_uncore_attr(max_freq_khz, UNCORE_INDEX_MAX_FREQ); -store_uncore_min_max(min_freq_khz, 0); -store_uncore_min_max(max_freq_khz, 1); +show_uncore_attr(current_freq_khz, UNCORE_INDEX_CURRENT_FREQ); -show_uncore_min_max(min_freq_khz, 0); -show_uncore_min_max(max_freq_khz, 1); +store_uncore_attr(elc_low_threshold_percent, UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD); +store_uncore_attr(elc_high_threshold_percent, UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD); +store_uncore_attr(elc_high_threshold_enable, + UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE); +store_uncore_attr(elc_floor_freq_khz, UNCORE_INDEX_EFF_LAT_CTRL_FREQ); -show_uncore_perf_status(current_freq_khz); +show_uncore_attr(elc_low_threshold_percent, UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD); +show_uncore_attr(elc_high_threshold_percent, 
UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD); +show_uncore_attr(elc_high_threshold_enable, + UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE); +show_uncore_attr(elc_floor_freq_khz, UNCORE_INDEX_EFF_LAT_CTRL_FREQ); + +show_uncore_attr(die_id, UNCORE_INDEX_DIE_ID); #define show_uncore_data(member_name) \ - static ssize_t show_##member_name(struct device *dev, \ - struct device_attribute *attr, char *buf)\ + static ssize_t show_##member_name(struct kobject *kobj, \ + struct kobj_attribute *attr, char *buf)\ { \ struct uncore_data *data = container_of(attr, struct uncore_data,\ - member_name##_dev_attr);\ + member_name##_kobj_attr);\ \ return sysfs_emit(buf, "%u\n", \ data->member_name); \ @@ -122,34 +161,35 @@ show_uncore_data(initial_max_freq_khz); #define init_attribute_rw(_name) \ do { \ - sysfs_attr_init(&data->_name##_dev_attr.attr); \ - data->_name##_dev_attr.show = show_##_name; \ - data->_name##_dev_attr.store = store_##_name; \ - data->_name##_dev_attr.attr.name = #_name; \ - data->_name##_dev_attr.attr.mode = 0644; \ + sysfs_attr_init(&data->_name##_kobj_attr.attr); \ + data->_name##_kobj_attr.show = show_##_name; \ + data->_name##_kobj_attr.store = store_##_name; \ + data->_name##_kobj_attr.attr.name = #_name; \ + data->_name##_kobj_attr.attr.mode = 0644; \ } while (0) #define init_attribute_ro(_name) \ do { \ - sysfs_attr_init(&data->_name##_dev_attr.attr); \ - data->_name##_dev_attr.show = show_##_name; \ - data->_name##_dev_attr.store = NULL; \ - data->_name##_dev_attr.attr.name = #_name; \ - data->_name##_dev_attr.attr.mode = 0444; \ + sysfs_attr_init(&data->_name##_kobj_attr.attr); \ + data->_name##_kobj_attr.show = show_##_name; \ + data->_name##_kobj_attr.store = NULL; \ + data->_name##_kobj_attr.attr.name = #_name; \ + data->_name##_kobj_attr.attr.mode = 0444; \ } while (0) #define init_attribute_root_ro(_name) \ do { \ - sysfs_attr_init(&data->_name##_dev_attr.attr); \ - data->_name##_dev_attr.show = show_##_name; \ - data->_name##_dev_attr.store = NULL; \ - data->_name##_dev_attr.attr.name = #_name; \ - data->_name##_dev_attr.attr.mode = 0400; \ + sysfs_attr_init(&data->_name##_kobj_attr.attr); \ + data->_name##_kobj_attr.show = show_##_name; \ + data->_name##_kobj_attr.store = NULL; \ + data->_name##_kobj_attr.attr.name = #_name; \ + data->_name##_kobj_attr.attr.mode = 0400; \ } while (0) static int create_attr_group(struct uncore_data *data, char *name) { int ret, index = 0; + unsigned int val; init_attribute_rw(max_freq_khz); init_attribute_rw(min_freq_khz); @@ -157,11 +197,47 @@ static int create_attr_group(struct uncore_data *data, char *name) init_attribute_ro(initial_max_freq_khz); init_attribute_root_ro(current_freq_khz); - data->uncore_attrs[index++] = &data->max_freq_khz_dev_attr.attr; - data->uncore_attrs[index++] = &data->min_freq_khz_dev_attr.attr; - data->uncore_attrs[index++] = &data->initial_min_freq_khz_dev_attr.attr; - data->uncore_attrs[index++] = &data->initial_max_freq_khz_dev_attr.attr; - data->uncore_attrs[index++] = &data->current_freq_khz_dev_attr.attr; + if (data->domain_id != UNCORE_DOMAIN_ID_INVALID) { + init_attribute_root_ro(domain_id); + data->uncore_attrs[index++] = &data->domain_id_kobj_attr.attr; + init_attribute_root_ro(fabric_cluster_id); + data->uncore_attrs[index++] = &data->fabric_cluster_id_kobj_attr.attr; + init_attribute_root_ro(package_id); + data->uncore_attrs[index++] = &data->package_id_kobj_attr.attr; + if (data->agent_type_mask) { + init_attribute_ro(agent_types); + data->uncore_attrs[index++] = 
&data->agent_types_kobj_attr.attr; + } + if (topology_max_dies_per_package() > 1 && + data->agent_type_mask & AGENT_TYPE_CORE) { + init_attribute_ro(die_id); + data->uncore_attrs[index++] = &data->die_id_kobj_attr.attr; + } + } + + data->uncore_attrs[index++] = &data->max_freq_khz_kobj_attr.attr; + data->uncore_attrs[index++] = &data->min_freq_khz_kobj_attr.attr; + data->uncore_attrs[index++] = &data->initial_min_freq_khz_kobj_attr.attr; + data->uncore_attrs[index++] = &data->initial_max_freq_khz_kobj_attr.attr; + + ret = uncore_read(data, &val, UNCORE_INDEX_CURRENT_FREQ); + if (!ret) + data->uncore_attrs[index++] = &data->current_freq_khz_kobj_attr.attr; + + ret = uncore_read(data, &val, UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD); + if (!ret) { + init_attribute_rw(elc_low_threshold_percent); + init_attribute_rw(elc_high_threshold_percent); + init_attribute_rw(elc_high_threshold_enable); + init_attribute_rw(elc_floor_freq_khz); + + data->uncore_attrs[index++] = &data->elc_low_threshold_percent_kobj_attr.attr; + data->uncore_attrs[index++] = &data->elc_high_threshold_percent_kobj_attr.attr; + data->uncore_attrs[index++] = + &data->elc_high_threshold_enable_kobj_attr.attr; + data->uncore_attrs[index++] = &data->elc_floor_freq_khz_kobj_attr.attr; + } + data->uncore_attrs[index] = NULL; data->uncore_attr_group.name = name; @@ -187,12 +263,25 @@ int uncore_freq_add_entry(struct uncore_data *data, int cpu) goto uncore_unlock; } - sprintf(data->name, "package_%02d_die_%02d", data->package_id, data->die_id); + if (data->domain_id != UNCORE_DOMAIN_ID_INVALID) { + ret = ida_alloc(&intel_uncore_ida, GFP_KERNEL); + if (ret < 0) + goto uncore_unlock; - uncore_read(data, &data->initial_min_freq_khz, &data->initial_max_freq_khz); + data->instance_id = ret; + sprintf(data->name, "uncore%02d", ret); + } else { + sprintf(data->name, "package_%02d_die_%02d", data->package_id, data->die_id); + } + + uncore_read(data, &data->initial_min_freq_khz, UNCORE_INDEX_MIN_FREQ); + uncore_read(data, &data->initial_max_freq_khz, UNCORE_INDEX_MAX_FREQ); ret = create_attr_group(data, data->name); - if (!ret) { + if (ret) { + if (data->domain_id != UNCORE_DOMAIN_ID_INVALID) + ida_free(&intel_uncore_ida, data->instance_id); + } else { data->control_cpu = cpu; data->valid = true; } @@ -202,7 +291,7 @@ uncore_unlock: return ret; } -EXPORT_SYMBOL_NS_GPL(uncore_freq_add_entry, INTEL_UNCORE_FREQUENCY); +EXPORT_SYMBOL_NS_GPL(uncore_freq_add_entry, "INTEL_UNCORE_FREQUENCY"); void uncore_freq_remove_die_entry(struct uncore_data *data) { @@ -210,30 +299,39 @@ void uncore_freq_remove_die_entry(struct uncore_data *data) delete_attr_group(data, data->name); data->control_cpu = -1; data->valid = false; + if (data->domain_id != UNCORE_DOMAIN_ID_INVALID) + ida_free(&intel_uncore_ida, data->instance_id); + mutex_unlock(&uncore_lock); } -EXPORT_SYMBOL_NS_GPL(uncore_freq_remove_die_entry, INTEL_UNCORE_FREQUENCY); +EXPORT_SYMBOL_NS_GPL(uncore_freq_remove_die_entry, "INTEL_UNCORE_FREQUENCY"); -int uncore_freq_common_init(int (*read_control_freq)(struct uncore_data *data, unsigned int *min, unsigned int *max), - int (*write_control_freq)(struct uncore_data *data, unsigned int input, unsigned int set_max), - int (*read_freq)(struct uncore_data *data, unsigned int *freq)) +int uncore_freq_common_init(int (*read)(struct uncore_data *data, unsigned int *value, + enum uncore_index index), + int (*write)(struct uncore_data *data, unsigned int input, + enum uncore_index index)) { mutex_lock(&uncore_lock); - uncore_read = read_control_freq; - uncore_write 
= write_control_freq; - uncore_read_freq = read_freq; + uncore_read = read; + uncore_write = write; - if (!uncore_root_kobj) - uncore_root_kobj = kobject_create_and_add("intel_uncore_frequency", - &cpu_subsys.dev_root->kobj); + if (!uncore_root_kobj) { + struct device *dev_root = bus_get_dev_root(&cpu_subsys); + + if (dev_root) { + uncore_root_kobj = kobject_create_and_add("intel_uncore_frequency", + &dev_root->kobj); + put_device(dev_root); + } + } if (uncore_root_kobj) ++uncore_instance_count; mutex_unlock(&uncore_lock); return uncore_root_kobj ? 0 : -ENOMEM; } -EXPORT_SYMBOL_NS_GPL(uncore_freq_common_init, INTEL_UNCORE_FREQUENCY); +EXPORT_SYMBOL_NS_GPL(uncore_freq_common_init, "INTEL_UNCORE_FREQUENCY"); void uncore_freq_common_exit(void) { @@ -245,7 +343,7 @@ void uncore_freq_common_exit(void) } mutex_unlock(&uncore_lock); } -EXPORT_SYMBOL_NS_GPL(uncore_freq_common_exit, INTEL_UNCORE_FREQUENCY); +EXPORT_SYMBOL_NS_GPL(uncore_freq_common_exit, "INTEL_UNCORE_FREQUENCY"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h index f5dcfa2fb285..0abe850ef54e 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h @@ -11,6 +11,18 @@ #include <linux/device.h> +/* + * Define uncore agents, which are under uncore frequency control. + * Defined in the same order as specified in the TPMI UFS Specifications. + * It is possible that a common uncore frequency control applies to more + * than one hardware agent, so these defines are used as a bit mask. + */ + +#define AGENT_TYPE_CORE 0x01 +#define AGENT_TYPE_CACHE 0x02 +#define AGENT_TYPE_MEMORY 0x04 +#define AGENT_TYPE_IO 0x08 + /** * struct uncore_data - Encapsulate all uncore data * @stored_uncore_data: Last user changed MSR 620 value, which will be restored @@ -21,13 +33,29 @@ * @valid: Mark the data valid/invalid * @package_id: Package id for this instance * @die_id: Die id for this instance + * @domain_id: Power domain id for this instance + * @cluster_id: Cluster id in a domain + * @instance_id: Unique instance id to append to directory name * @name: Sysfs entry name for this instance + * @agent_type_mask: Bit mask of all hardware agents for this domain * @uncore_attr_group: Attribute group storage - * @max_freq_khz_dev_attr: Storage for device attribute max_freq_khz - * @mix_freq_khz_dev_attr: Storage for device attribute min_freq_khz - * @initial_max_freq_khz_dev_attr: Storage for device attribute initial_max_freq_khz - * @initial_min_freq_khz_dev_attr: Storage for device attribute initial_min_freq_khz - * @current_freq_khz_dev_attr: Storage for device attribute current_freq_khz + * @max_freq_khz_kobj_attr: Storage for kobject attribute max_freq_khz + * @min_freq_khz_kobj_attr: Storage for kobject attribute min_freq_khz + * @initial_max_freq_khz_kobj_attr: Storage for kobject attribute initial_max_freq_khz + * @initial_min_freq_khz_kobj_attr: Storage for kobject attribute initial_min_freq_khz + * @current_freq_khz_kobj_attr: Storage for kobject attribute current_freq_khz + * @domain_id_kobj_attr: Storage for kobject attribute domain_id + * @fabric_cluster_id_kobj_attr: Storage for kobject attribute fabric_cluster_id + * @package_id_kobj_attr: Storage for kobject attribute package_id + * @elc_low_threshold_percent_kobj_attr: + * Storage for kobject attribute elc_low_threshold_percent + *
@elc_high_threshold_percent_kobj_attr: + * Storage for kobject attribute elc_high_threshold_percent + * @elc_high_threshold_enable_kobj_attr: + * Storage for kobject attribute elc_high_threshold_enable + * @elc_floor_freq_khz_kobj_attr: Storage for kobject attribute elc_floor_freq_khz + * @agent_types_kobj_attr: Storage for kobject attribute agent_types + * @die_id_kobj_attr: Attribute storage for die_id information * @uncore_attrs: Attribute storage for group creation * * This structure is used to encapsulate all data related to uncore sysfs @@ -41,20 +69,47 @@ struct uncore_data { bool valid; int package_id; int die_id; + int domain_id; + int cluster_id; + int instance_id; char name[32]; + u16 agent_type_mask; struct attribute_group uncore_attr_group; - struct device_attribute max_freq_khz_dev_attr; - struct device_attribute min_freq_khz_dev_attr; - struct device_attribute initial_max_freq_khz_dev_attr; - struct device_attribute initial_min_freq_khz_dev_attr; - struct device_attribute current_freq_khz_dev_attr; - struct attribute *uncore_attrs[6]; + struct kobj_attribute max_freq_khz_kobj_attr; + struct kobj_attribute min_freq_khz_kobj_attr; + struct kobj_attribute initial_max_freq_khz_kobj_attr; + struct kobj_attribute initial_min_freq_khz_kobj_attr; + struct kobj_attribute current_freq_khz_kobj_attr; + struct kobj_attribute domain_id_kobj_attr; + struct kobj_attribute fabric_cluster_id_kobj_attr; + struct kobj_attribute package_id_kobj_attr; + struct kobj_attribute elc_low_threshold_percent_kobj_attr; + struct kobj_attribute elc_high_threshold_percent_kobj_attr; + struct kobj_attribute elc_high_threshold_enable_kobj_attr; + struct kobj_attribute elc_floor_freq_khz_kobj_attr; + struct kobj_attribute agent_types_kobj_attr; + struct kobj_attribute die_id_kobj_attr; + struct attribute *uncore_attrs[15]; +}; + +#define UNCORE_DOMAIN_ID_INVALID -1 + +enum uncore_index { + UNCORE_INDEX_MIN_FREQ, + UNCORE_INDEX_MAX_FREQ, + UNCORE_INDEX_CURRENT_FREQ, + UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD, + UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD, + UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE, + UNCORE_INDEX_EFF_LAT_CTRL_FREQ, + UNCORE_INDEX_DIE_ID, }; -int uncore_freq_common_init(int (*read_control_freq)(struct uncore_data *data, unsigned int *min, unsigned int *max), - int (*write_control_freq)(struct uncore_data *data, unsigned int input, unsigned int min_max), - int (*uncore_read_freq)(struct uncore_data *data, unsigned int *freq)); +int uncore_freq_common_init(int (*read)(struct uncore_data *data, unsigned int *value, + enum uncore_index index), + int (*write)(struct uncore_data *data, unsigned int input, + enum uncore_index index)); void uncore_freq_common_exit(void); int uncore_freq_add_entry(struct uncore_data *data, int cpu); void uncore_freq_remove_die_entry(struct uncore_data *data); diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c new file mode 100644 index 000000000000..1237d9570886 --- /dev/null +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c @@ -0,0 +1,765 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * uncore-frequency-tpmi: Uncore frequency scaling using TPMI + * + * Copyright (c) 2023, Intel Corporation. + * All Rights Reserved. + * + * The hardware interface to read/write is basically a substitution for + * MSRs 0x620 and 0x621. + * There are specific MMIO offsets and bits to get/set the minimum and + * maximum uncore ratio, similar to the MSRs.
+ * The uncore MSRs are package scoped, but TPMI allows newer + * generations of CPUs to have multiple uncore controls at the + * uncore-cluster level. Each package can have multiple power domains, + * which in turn can have multiple clusters. + * Here the number of power domains equals the number of resources in + * this aux device. There are offsets and bits to discover the number + * of clusters and the offset of each cluster's controls. + * + */ + +#include <linux/auxiliary_bus.h> +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/intel_tpmi.h> +#include <linux/intel_vsec.h> +#include <linux/io.h> +#include <linux/module.h> + +#include "../tpmi_power_domains.h" +#include "uncore-frequency-common.h" + +#define UNCORE_MAJOR_VERSION 0 +#define UNCORE_MINOR_VERSION 2 +#define UNCORE_ELC_SUPPORTED_VERSION 2 +#define UNCORE_HEADER_INDEX 0 +#define UNCORE_FABRIC_CLUSTER_OFFSET 8 + +/* status + control + adv_ctl1 + adv_ctl2 */ +#define UNCORE_FABRIC_CLUSTER_SIZE (4 * 8) + +#define UNCORE_STATUS_INDEX 0 +#define UNCORE_CONTROL_INDEX 8 + +#define UNCORE_FREQ_KHZ_MULTIPLIER 100000 + +struct tpmi_uncore_struct; + +/* Information for each cluster */ +struct tpmi_uncore_cluster_info { + bool root_domain; + bool elc_supported; + u8 __iomem *cluster_base; + u16 cdie_id; + struct uncore_data uncore_data; + struct tpmi_uncore_struct *uncore_root; +}; + +/* Information for each power domain */ +struct tpmi_uncore_power_domain_info { + u8 __iomem *uncore_base; + int ufs_header_ver; + int cluster_count; + struct tpmi_uncore_cluster_info *cluster_infos; +}; + +/* Information for all power domains in a package */ +struct tpmi_uncore_struct { + int power_domain_count; + int max_ratio; + int min_ratio; + struct tpmi_uncore_power_domain_info *pd_info; + struct tpmi_uncore_cluster_info root_cluster; + bool write_blocked; +}; + +/* Bit definitions for STATUS register */ +#define UNCORE_CURRENT_RATIO_MASK GENMASK_ULL(6, 0) + +/* Bit definitions for CONTROL register */ +#define UNCORE_MAX_RATIO_MASK GENMASK_ULL(14, 8) +#define UNCORE_MIN_RATIO_MASK GENMASK_ULL(21, 15) +#define UNCORE_EFF_LAT_CTRL_RATIO_MASK GENMASK_ULL(28, 22) +#define UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK GENMASK_ULL(38, 32) +#define UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE BIT(39) +#define UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK GENMASK_ULL(46, 40) + +/* Helper function to read MMIO offset for max/min control frequency */ +static void read_control_freq(struct tpmi_uncore_cluster_info *cluster_info, + unsigned int *value, enum uncore_index index) +{ + u64 control; + + control = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX); + if (index == UNCORE_INDEX_MAX_FREQ) + *value = FIELD_GET(UNCORE_MAX_RATIO_MASK, control) * UNCORE_FREQ_KHZ_MULTIPLIER; + else + *value = FIELD_GET(UNCORE_MIN_RATIO_MASK, control) * UNCORE_FREQ_KHZ_MULTIPLIER; +} + +/* Helper function to read efficiency latency control values over MMIO */ +static int read_eff_lat_ctrl(struct uncore_data *data, unsigned int *val, enum uncore_index index) +{ + struct tpmi_uncore_cluster_info *cluster_info; + u64 ctrl; + + cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data); + if (cluster_info->root_domain) + return -ENODATA; + + if (!cluster_info->elc_supported) + return -EOPNOTSUPP; + + ctrl = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX); + + switch (index) { + case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD: + *val = FIELD_GET(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK, ctrl); + *val *= 100; + *val = DIV_ROUND_UP(*val,
FIELD_MAX(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK)); + break; + + case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD: + *val = FIELD_GET(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK, ctrl); + *val *= 100; + *val = DIV_ROUND_UP(*val, FIELD_MAX(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK)); + break; + + case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE: + *val = FIELD_GET(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE, ctrl); + break; + case UNCORE_INDEX_EFF_LAT_CTRL_FREQ: + *val = FIELD_GET(UNCORE_EFF_LAT_CTRL_RATIO_MASK, ctrl) * UNCORE_FREQ_KHZ_MULTIPLIER; + break; + + default: + return -EOPNOTSUPP; + } + + return 0; +} + +#define UNCORE_MAX_RATIO FIELD_MAX(UNCORE_MAX_RATIO_MASK) + +/* Helper for sysfs read for max/min frequencies. Called under mutex locks */ +static int uncore_read_control_freq(struct uncore_data *data, unsigned int *value, + enum uncore_index index) +{ + struct tpmi_uncore_cluster_info *cluster_info; + + cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data); + + if (cluster_info->root_domain) { + struct tpmi_uncore_struct *uncore_root = cluster_info->uncore_root; + unsigned int min, max, v; + int i; + + min = UNCORE_MAX_RATIO * UNCORE_FREQ_KHZ_MULTIPLIER; + max = 0; + + /* + * Get the max/min by looking at each cluster. Get the lowest + * min and highest max. + */ + for (i = 0; i < uncore_root->power_domain_count; ++i) { + int j; + + for (j = 0; j < uncore_root->pd_info[i].cluster_count; ++j) { + read_control_freq(&uncore_root->pd_info[i].cluster_infos[j], + &v, index); + if (v < min) + min = v; + if (v > max) + max = v; + } + } + + if (index == UNCORE_INDEX_MIN_FREQ) + *value = min; + else + *value = max; + + return 0; + } + + read_control_freq(cluster_info, value, index); + + return 0; +} + +/* Helper function for writing efficiency latency control values over MMIO */ +static int write_eff_lat_ctrl(struct uncore_data *data, unsigned int val, enum uncore_index index) +{ + struct tpmi_uncore_cluster_info *cluster_info; + struct tpmi_uncore_struct *uncore_root; + u64 control; + + cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data); + uncore_root = cluster_info->uncore_root; + + if (uncore_root->write_blocked) + return -EPERM; + + if (cluster_info->root_domain) + return -ENODATA; + + if (!cluster_info->elc_supported) + return -EOPNOTSUPP; + + switch (index) { + case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD: + if (val > 100) + return -EINVAL; + break; + + case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD: + if (val > 100) + return -EINVAL; + break; + + case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE: + if (val > 1) + return -EINVAL; + break; + + case UNCORE_INDEX_EFF_LAT_CTRL_FREQ: + val /= UNCORE_FREQ_KHZ_MULTIPLIER; + if (val > FIELD_MAX(UNCORE_EFF_LAT_CTRL_RATIO_MASK)) + return -EINVAL; + break; + + default: + return -EOPNOTSUPP; + } + + control = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX); + + switch (index) { + case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD: + val *= FIELD_MAX(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK); + val /= 100; + control &= ~UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK; + control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK, val); + break; + + case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD: + val *= FIELD_MAX(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK); + val /= 100; + control &= ~UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK; + control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK, val); + break; + + case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE: + control &= 
~UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE; + control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE, val); + break; + + case UNCORE_INDEX_EFF_LAT_CTRL_FREQ: + control &= ~UNCORE_EFF_LAT_CTRL_RATIO_MASK; + control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_RATIO_MASK, val); + break; + + default: + break; + } + + writeq(control, cluster_info->cluster_base + UNCORE_CONTROL_INDEX); + + return 0; +} + +/* Helper function to write MMIO offset for max/min control frequency */ +static void write_control_freq(struct tpmi_uncore_cluster_info *cluster_info, unsigned int input, + unsigned int index) +{ + u64 control; + + control = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX); + + if (index == UNCORE_INDEX_MAX_FREQ) { + control &= ~UNCORE_MAX_RATIO_MASK; + control |= FIELD_PREP(UNCORE_MAX_RATIO_MASK, input); + } else { + control &= ~UNCORE_MIN_RATIO_MASK; + control |= FIELD_PREP(UNCORE_MIN_RATIO_MASK, input); + } + + writeq(control, (cluster_info->cluster_base + UNCORE_CONTROL_INDEX)); +} + +/* Helper for sysfs write for max/min frequencies. Called under mutex locks */ +static int uncore_write_control_freq(struct uncore_data *data, unsigned int input, + enum uncore_index index) +{ + struct tpmi_uncore_cluster_info *cluster_info; + struct tpmi_uncore_struct *uncore_root; + + input /= UNCORE_FREQ_KHZ_MULTIPLIER; + if (!input || input > UNCORE_MAX_RATIO) + return -EINVAL; + + cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data); + uncore_root = cluster_info->uncore_root; + + if (uncore_root->write_blocked) + return -EPERM; + + /* Update each cluster in a package */ + if (cluster_info->root_domain) { + struct tpmi_uncore_struct *uncore_root = cluster_info->uncore_root; + int i; + + for (i = 0; i < uncore_root->power_domain_count; ++i) { + int j; + + for (j = 0; j < uncore_root->pd_info[i].cluster_count; ++j) + write_control_freq(&uncore_root->pd_info[i].cluster_infos[j], + input, index); + } + + if (index == UNCORE_INDEX_MAX_FREQ) + uncore_root->max_ratio = input; + else + uncore_root->min_ratio = input; + + return 0; + } + + if (index == UNCORE_INDEX_MAX_FREQ && uncore_root->max_ratio && + uncore_root->max_ratio < input) + return -EINVAL; + + if (index == UNCORE_INDEX_MIN_FREQ && uncore_root->min_ratio && + uncore_root->min_ratio > input) + return -EINVAL; + + write_control_freq(cluster_info, input, index); + + return 0; +} + +/* Helper for sysfs read for the current uncore frequency. 
Called under mutex locks */ +static int uncore_read_freq(struct uncore_data *data, unsigned int *freq) +{ + struct tpmi_uncore_cluster_info *cluster_info; + u64 status; + + cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data); + if (cluster_info->root_domain) + return -ENODATA; + + status = readq((u8 __iomem *)cluster_info->cluster_base + UNCORE_STATUS_INDEX); + *freq = FIELD_GET(UNCORE_CURRENT_RATIO_MASK, status) * UNCORE_FREQ_KHZ_MULTIPLIER; + + return 0; +} + +/* + * Agent types as per the TPMI UFS Specification for UFS_STATUS + * Agent Type - Core Bit: 23 + * Agent Type - Cache Bit: 24 + * Agent Type - Memory Bit: 25 + * Agent Type - IO Bit: 26 + */ + +#define UNCORE_AGENT_TYPES GENMASK_ULL(26, 23) + +/* Helper function to read agent type over MMIO and set the agent type mask */ +static void uncore_set_agent_type(struct tpmi_uncore_cluster_info *cluster_info) +{ + u64 status; + + status = readq((u8 __iomem *)cluster_info->cluster_base + UNCORE_STATUS_INDEX); + cluster_info->uncore_data.agent_type_mask = FIELD_GET(UNCORE_AGENT_TYPES, status); +} + +#define MAX_PARTITIONS 2 + +/* IO domain ID start index for a partition */ +static u8 io_die_start[MAX_PARTITIONS]; + +/* Next IO domain ID index after the current partition IO die IDs */ +static u8 io_die_index_next; + +/* Lock to protect io_die_start, io_die_index_next */ +static DEFINE_MUTEX(domain_lock); + +static void set_domain_id(int id, int num_resources, + struct oobmsm_plat_info *plat_info, + struct tpmi_uncore_cluster_info *cluster_info) +{ + u8 part_io_index, cdie_range, pkg_io_index, max_dies; + + if (plat_info->partition >= MAX_PARTITIONS) { + cluster_info->uncore_data.domain_id = id; + return; + } + + if (cluster_info->uncore_data.agent_type_mask & AGENT_TYPE_CORE) { + cluster_info->uncore_data.domain_id = cluster_info->cdie_id; + return; + } + + /* Unlikely but cdie_mask may have holes, so take range */ + cdie_range = fls(plat_info->cdie_mask) - ffs(plat_info->cdie_mask) + 1; + max_dies = topology_max_dies_per_package(); + + /* + * If the CPU doesn't enumerate dies, then use the current cdie range + * as the max. + */ + if (cdie_range > max_dies) + max_dies = cdie_range; + + guard(mutex)(&domain_lock); + + if (!io_die_index_next) + io_die_index_next = max_dies; + + if (!io_die_start[plat_info->partition]) { + io_die_start[plat_info->partition] = io_die_index_next; + /* + * The number of IO dies = num_resources - cdie_range, hence + * io_die_index_next for the next partition is set after the + * IO dies in the current partition. + */ + io_die_index_next += (num_resources - cdie_range); + } + + /* + * Index from the IO die start within the partition: + * This is the first valid domain after the cdies. + * For example, if the current resource index is 5 and the cdies end + * at index 3 (cdie_cnt = 4), then the IO-only index is 5 - 4 = 1. + */ + part_io_index = id - cdie_range; + + /* + * Add to the IO die start index for this partition in this package + * to make it unique in the package. + */ + pkg_io_index = io_die_start[plat_info->partition] + part_io_index; + + /* Assign this to domain ID */ + cluster_info->uncore_data.domain_id = pkg_io_index; +} + +/* Callback for sysfs read for TPMI uncore values. Called under mutex locks.
*/ +static int uncore_read(struct uncore_data *data, unsigned int *value, enum uncore_index index) +{ + struct tpmi_uncore_cluster_info *cluster_info; + int ret; + + switch (index) { + case UNCORE_INDEX_MIN_FREQ: + case UNCORE_INDEX_MAX_FREQ: + return uncore_read_control_freq(data, value, index); + + case UNCORE_INDEX_CURRENT_FREQ: + return uncore_read_freq(data, value); + + case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD: + case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD: + case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE: + case UNCORE_INDEX_EFF_LAT_CTRL_FREQ: + return read_eff_lat_ctrl(data, value, index); + + case UNCORE_INDEX_DIE_ID: + cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data); + ret = tpmi_get_linux_die_id(cluster_info->uncore_data.package_id, + cluster_info->cdie_id); + if (ret < 0) + return ret; + + *value = ret; + return 0; + + default: + break; + } + + return -EOPNOTSUPP; +} + +/* Callback for sysfs write for TPMI uncore data. Called under mutex locks. */ +static int uncore_write(struct uncore_data *data, unsigned int value, enum uncore_index index) +{ + switch (index) { + case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD: + case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD: + case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE: + case UNCORE_INDEX_EFF_LAT_CTRL_FREQ: + return write_eff_lat_ctrl(data, value, index); + + case UNCORE_INDEX_MIN_FREQ: + case UNCORE_INDEX_MAX_FREQ: + return uncore_write_control_freq(data, value, index); + + default: + break; + } + + return -EOPNOTSUPP; +} + +static void remove_cluster_entries(struct tpmi_uncore_struct *tpmi_uncore) +{ + int i; + + for (i = 0; i < tpmi_uncore->power_domain_count; ++i) { + struct tpmi_uncore_power_domain_info *pd_info; + int j; + + pd_info = &tpmi_uncore->pd_info[i]; + if (!pd_info->uncore_base) + continue; + + for (j = 0; j < pd_info->cluster_count; ++j) { + struct tpmi_uncore_cluster_info *cluster_info; + + cluster_info = &pd_info->cluster_infos[j]; + uncore_freq_remove_die_entry(&cluster_info->uncore_data); + } + } +} + +static void set_cdie_id(int domain_id, struct tpmi_uncore_cluster_info *cluster_info, + struct oobmsm_plat_info *plat_info) +{ + + cluster_info->cdie_id = domain_id; + + if (plat_info->cdie_mask && cluster_info->uncore_data.agent_type_mask & AGENT_TYPE_CORE) + cluster_info->cdie_id = domain_id + ffs(plat_info->cdie_mask) - 1; +} + +#define UNCORE_VERSION_MASK GENMASK_ULL(7, 0) +#define UNCORE_LOCAL_FABRIC_CLUSTER_ID_MASK GENMASK_ULL(15, 8) +#define UNCORE_CLUSTER_OFF_MASK GENMASK_ULL(7, 0) +#define UNCORE_MAX_CLUSTER_PER_DOMAIN 8 + +static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id) +{ + bool read_blocked = 0, write_blocked = 0; + struct oobmsm_plat_info *plat_info; + struct tpmi_uncore_struct *tpmi_uncore; + bool uncore_sysfs_added = false; + int ret, i, pkg = 0; + int num_resources; + + ret = tpmi_get_feature_status(auxdev, TPMI_ID_UNCORE, &read_blocked, &write_blocked); + if (ret) + dev_info(&auxdev->dev, "Can't read feature status: ignoring blocked status\n"); + + if (read_blocked) { + dev_info(&auxdev->dev, "Firmware has blocked reads, exiting\n"); + return -ENODEV; + } + + /* Get number of power domains, which is equal to number of resources */ + num_resources = tpmi_get_resource_count(auxdev); + if (!num_resources) + return -EINVAL; + + /* Register callbacks to uncore core */ + ret = uncore_freq_common_init(uncore_read, uncore_write); + if (ret) + return ret; + + /* Allocate uncore instance per package */ + tpmi_uncore = 
devm_kzalloc(&auxdev->dev, sizeof(*tpmi_uncore), GFP_KERNEL); + if (!tpmi_uncore) { + ret = -ENOMEM; + goto err_rem_common; + } + + /* Allocate memory for all power domains in a package */ + tpmi_uncore->pd_info = devm_kcalloc(&auxdev->dev, num_resources, + sizeof(*tpmi_uncore->pd_info), + GFP_KERNEL); + if (!tpmi_uncore->pd_info) { + ret = -ENOMEM; + goto err_rem_common; + } + + tpmi_uncore->power_domain_count = num_resources; + tpmi_uncore->write_blocked = write_blocked; + + /* Get the package ID from the TPMI core */ + plat_info = tpmi_get_platform_data(auxdev); + if (unlikely(!plat_info)) { + dev_info(&auxdev->dev, "Platform information is NULL\n"); + ret = -ENODEV; + goto err_rem_common; + } + + pkg = plat_info->package_id; + + for (i = 0; i < num_resources; ++i) { + struct tpmi_uncore_power_domain_info *pd_info; + struct resource *res; + u64 cluster_offset; + u8 cluster_mask; + int mask, j; + u64 header; + + res = tpmi_get_resource_at_index(auxdev, i); + if (!res) + continue; + + pd_info = &tpmi_uncore->pd_info[i]; + + pd_info->uncore_base = devm_ioremap_resource(&auxdev->dev, res); + if (IS_ERR(pd_info->uncore_base)) { + ret = PTR_ERR(pd_info->uncore_base); + /* + * Set to NULL so that cleanup can still remove other + * entries already created, if any, by + * remove_cluster_entries() + */ + pd_info->uncore_base = NULL; + goto remove_clusters; + } + + /* Check for version and skip this resource if there is a mismatch */ + header = readq(pd_info->uncore_base); + pd_info->ufs_header_ver = header & UNCORE_VERSION_MASK; + + if (pd_info->ufs_header_ver == TPMI_VERSION_INVALID) + continue; + + if (TPMI_MAJOR_VERSION(pd_info->ufs_header_ver) != UNCORE_MAJOR_VERSION) { + dev_err(&auxdev->dev, "Uncore: Unsupported major version:%lx\n", + TPMI_MAJOR_VERSION(pd_info->ufs_header_ver)); + ret = -ENODEV; + goto remove_clusters; + } + + if (TPMI_MINOR_VERSION(pd_info->ufs_header_ver) > UNCORE_MINOR_VERSION) + dev_info(&auxdev->dev, "Uncore: Ignore: Unsupported minor version:%lx\n", + TPMI_MINOR_VERSION(pd_info->ufs_header_ver)); + + /* Get Cluster ID Mask */ + cluster_mask = FIELD_GET(UNCORE_LOCAL_FABRIC_CLUSTER_ID_MASK, header); + if (!cluster_mask) { + dev_info(&auxdev->dev, "Uncore: Invalid cluster mask:%x\n", cluster_mask); + continue; + } + + /* Find out number of clusters in this resource */ + pd_info->cluster_count = hweight8(cluster_mask); + + pd_info->cluster_infos = devm_kcalloc(&auxdev->dev, pd_info->cluster_count, + sizeof(struct tpmi_uncore_cluster_info), + GFP_KERNEL); + if (!pd_info->cluster_infos) { + ret = -ENOMEM; + goto remove_clusters; + } + /* + * Each byte in the register points to the status and control + * registers belonging to cluster ids 0-7.
+ */ + cluster_offset = readq(pd_info->uncore_base + + UNCORE_FABRIC_CLUSTER_OFFSET); + + for (j = 0; j < pd_info->cluster_count; ++j) { + struct tpmi_uncore_cluster_info *cluster_info; + + /* Get the offset for this cluster */ + mask = (cluster_offset & UNCORE_CLUSTER_OFF_MASK); + /* Offset in QWORD, so change to bytes */ + mask <<= 3; + + cluster_info = &pd_info->cluster_infos[j]; + + cluster_info->cluster_base = pd_info->uncore_base + mask; + + uncore_set_agent_type(cluster_info); + + cluster_info->uncore_data.package_id = pkg; + /* There are no dies like Cascade Lake */ + cluster_info->uncore_data.die_id = 0; + cluster_info->uncore_data.cluster_id = j; + + set_cdie_id(i, cluster_info, plat_info); + + set_domain_id(i, num_resources, plat_info, cluster_info); + + cluster_info->uncore_root = tpmi_uncore; + + if (TPMI_MINOR_VERSION(pd_info->ufs_header_ver) >= UNCORE_ELC_SUPPORTED_VERSION) + cluster_info->elc_supported = true; + + ret = uncore_freq_add_entry(&cluster_info->uncore_data, 0); + if (ret) { + cluster_info->cluster_base = NULL; + goto remove_clusters; + } + /* Point to next cluster offset */ + cluster_offset >>= UNCORE_MAX_CLUSTER_PER_DOMAIN; + uncore_sysfs_added = true; + } + } + + if (!uncore_sysfs_added) { + ret = -ENODEV; + goto remove_clusters; + } + + auxiliary_set_drvdata(auxdev, tpmi_uncore); + + if (topology_max_dies_per_package() > 1 || plat_info->partition) + return 0; + + tpmi_uncore->root_cluster.root_domain = true; + tpmi_uncore->root_cluster.uncore_root = tpmi_uncore; + + tpmi_uncore->root_cluster.uncore_data.package_id = pkg; + tpmi_uncore->root_cluster.uncore_data.domain_id = UNCORE_DOMAIN_ID_INVALID; + ret = uncore_freq_add_entry(&tpmi_uncore->root_cluster.uncore_data, 0); + if (ret) + goto remove_clusters; + + return 0; + +remove_clusters: + remove_cluster_entries(tpmi_uncore); +err_rem_common: + uncore_freq_common_exit(); + + return ret; +} + +static void uncore_remove(struct auxiliary_device *auxdev) +{ + struct tpmi_uncore_struct *tpmi_uncore = auxiliary_get_drvdata(auxdev); + + if (tpmi_uncore->root_cluster.root_domain) + uncore_freq_remove_die_entry(&tpmi_uncore->root_cluster.uncore_data); + + remove_cluster_entries(tpmi_uncore); + + uncore_freq_common_exit(); +} + +static const struct auxiliary_device_id intel_uncore_id_table[] = { + { .name = "intel_vsec.tpmi-uncore" }, + {} +}; +MODULE_DEVICE_TABLE(auxiliary, intel_uncore_id_table); + +static struct auxiliary_driver intel_uncore_aux_driver = { + .id_table = intel_uncore_id_table, + .remove = uncore_remove, + .probe = uncore_probe, +}; + +module_auxiliary_driver(intel_uncore_aux_driver); + +MODULE_IMPORT_NS("INTEL_TPMI"); +MODULE_IMPORT_NS("INTEL_UNCORE_FREQUENCY"); +MODULE_IMPORT_NS("INTEL_TPMI_POWER_DOMAIN"); +MODULE_DESCRIPTION("Intel TPMI UFS Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c index 00ac7e381441..0dfc552b2802 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c @@ -14,12 +14,14 @@ * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com> */ +#include <linux/bitfield.h> #include <linux/cpu.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/suspend.h> #include <asm/cpu_device_id.h> #include <asm/intel-family.h> +#include <asm/msr.h> #include "uncore-frequency-common.h" @@ -36,8 +38,13 @@ static enum cpuhp_state uncore_hp_state __read_mostly; 
#define MSR_UNCORE_PERF_STATUS 0x621 #define UNCORE_FREQ_KHZ_MULTIPLIER 100000 -static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min, - unsigned int *max) +#define UNCORE_MAX_RATIO_MASK GENMASK_ULL(6, 0) +#define UNCORE_MIN_RATIO_MASK GENMASK_ULL(14, 8) + +#define UNCORE_CURRENT_RATIO_MASK GENMASK_ULL(6, 0) + +static int uncore_read_control_freq(struct uncore_data *data, unsigned int *value, + enum uncore_index index) { u64 cap; int ret; @@ -45,42 +52,44 @@ static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min, if (data->control_cpu < 0) return -ENXIO; - ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap); + ret = rdmsrq_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap); if (ret) return ret; - *max = (cap & 0x7F) * UNCORE_FREQ_KHZ_MULTIPLIER; - *min = ((cap & GENMASK(14, 8)) >> 8) * UNCORE_FREQ_KHZ_MULTIPLIER; + if (index == UNCORE_INDEX_MAX_FREQ) + *value = FIELD_GET(UNCORE_MAX_RATIO_MASK, cap) * UNCORE_FREQ_KHZ_MULTIPLIER; + else + *value = FIELD_GET(UNCORE_MIN_RATIO_MASK, cap) * UNCORE_FREQ_KHZ_MULTIPLIER; return 0; } static int uncore_write_control_freq(struct uncore_data *data, unsigned int input, - unsigned int min_max) + enum uncore_index index) { int ret; u64 cap; input /= UNCORE_FREQ_KHZ_MULTIPLIER; - if (!input || input > 0x7F) + if (!input || input > FIELD_MAX(UNCORE_MAX_RATIO_MASK)) return -EINVAL; if (data->control_cpu < 0) return -ENXIO; - ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap); + ret = rdmsrq_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap); if (ret) return ret; - if (min_max) { - cap &= ~0x7F; - cap |= input; + if (index == UNCORE_INDEX_MAX_FREQ) { + cap &= ~UNCORE_MAX_RATIO_MASK; + cap |= FIELD_PREP(UNCORE_MAX_RATIO_MASK, input); } else { - cap &= ~GENMASK(14, 8); - cap |= (input << 8); + cap &= ~UNCORE_MIN_RATIO_MASK; + cap |= FIELD_PREP(UNCORE_MIN_RATIO_MASK, input); } - ret = wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, cap); + ret = wrmsrq_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, cap); if (ret) return ret; @@ -97,15 +106,32 @@ static int uncore_read_freq(struct uncore_data *data, unsigned int *freq) if (data->control_cpu < 0) return -ENXIO; - ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_PERF_STATUS, &ratio); + ret = rdmsrq_on_cpu(data->control_cpu, MSR_UNCORE_PERF_STATUS, &ratio); if (ret) return ret; - *freq = (ratio & 0x7F) * UNCORE_FREQ_KHZ_MULTIPLIER; + *freq = FIELD_GET(UNCORE_CURRENT_RATIO_MASK, ratio) * UNCORE_FREQ_KHZ_MULTIPLIER; return 0; } +static int uncore_read(struct uncore_data *data, unsigned int *value, enum uncore_index index) +{ + switch (index) { + case UNCORE_INDEX_MIN_FREQ: + case UNCORE_INDEX_MAX_FREQ: + return uncore_read_control_freq(data, value, index); + + case UNCORE_INDEX_CURRENT_FREQ: + return uncore_read_freq(data, value); + + default: + break; + } + + return -EOPNOTSUPP; +} + /* Caller provides protection */ static struct uncore_data *uncore_get_instance(unsigned int cpu) { @@ -121,23 +147,29 @@ static int uncore_event_cpu_online(unsigned int cpu) { struct uncore_data *data; int target; + int ret; /* Check if there is an online cpu in the package for uncore MSR */ target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu)); if (target < nr_cpu_ids) return 0; - /* Use this CPU on this die as a control CPU */ - cpumask_set_cpu(cpu, &uncore_cpu_mask); - data = uncore_get_instance(cpu); if (!data) return 0; data->package_id = topology_physical_package_id(cpu); data->die_id = topology_die_id(cpu); 
+ data->domain_id = UNCORE_DOMAIN_ID_INVALID; - return uncore_freq_add_entry(data, cpu); + ret = uncore_freq_add_entry(data, cpu); + if (ret) + return ret; + + /* Use this CPU on this die as a control CPU */ + cpumask_set_cpu(cpu, &uncore_cpu_mask); + + return 0; } static int uncore_event_cpu_offline(unsigned int cpu) @@ -181,7 +213,7 @@ static int uncore_pm_notify(struct notifier_block *nb, unsigned long mode, if (!data || !data->valid || !data->stored_uncore_data) return 0; - wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, + wrmsrq_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, data->stored_uncore_data); } break; @@ -196,14 +228,38 @@ static struct notifier_block uncore_pm_nb = { }; static const struct x86_cpu_id intel_uncore_cpu_ids[] = { - X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G, NULL), - X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, NULL), - X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, NULL), - X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL), - X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, NULL), - X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, NULL), - X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, NULL), - X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, NULL), + X86_MATCH_VFM(INTEL_BROADWELL_G, NULL), + X86_MATCH_VFM(INTEL_BROADWELL_X, NULL), + X86_MATCH_VFM(INTEL_BROADWELL_D, NULL), + X86_MATCH_VFM(INTEL_SKYLAKE_X, NULL), + X86_MATCH_VFM(INTEL_ICELAKE_X, NULL), + X86_MATCH_VFM(INTEL_ICELAKE_D, NULL), + X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, NULL), + X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, NULL), + X86_MATCH_VFM(INTEL_KABYLAKE, NULL), + X86_MATCH_VFM(INTEL_KABYLAKE_L, NULL), + X86_MATCH_VFM(INTEL_COMETLAKE, NULL), + X86_MATCH_VFM(INTEL_COMETLAKE_L, NULL), + X86_MATCH_VFM(INTEL_CANNONLAKE_L, NULL), + X86_MATCH_VFM(INTEL_ICELAKE, NULL), + X86_MATCH_VFM(INTEL_ICELAKE_L, NULL), + X86_MATCH_VFM(INTEL_ROCKETLAKE, NULL), + X86_MATCH_VFM(INTEL_TIGERLAKE, NULL), + X86_MATCH_VFM(INTEL_TIGERLAKE_L, NULL), + X86_MATCH_VFM(INTEL_ALDERLAKE, NULL), + X86_MATCH_VFM(INTEL_ALDERLAKE_L, NULL), + X86_MATCH_VFM(INTEL_RAPTORLAKE, NULL), + X86_MATCH_VFM(INTEL_RAPTORLAKE_P, NULL), + X86_MATCH_VFM(INTEL_RAPTORLAKE_S, NULL), + X86_MATCH_VFM(INTEL_METEORLAKE, NULL), + X86_MATCH_VFM(INTEL_METEORLAKE_L, NULL), + X86_MATCH_VFM(INTEL_ARROWLAKE, NULL), + X86_MATCH_VFM(INTEL_ARROWLAKE_H, NULL), + X86_MATCH_VFM(INTEL_LUNARLAKE_M, NULL), + X86_MATCH_VFM(INTEL_PANTHERLAKE_L, NULL), + X86_MATCH_VFM(INTEL_WILDCATLAKE_L, NULL), + X86_MATCH_VFM(INTEL_NOVALAKE, NULL), + X86_MATCH_VFM(INTEL_NOVALAKE_L, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_uncore_cpu_ids); @@ -221,14 +277,13 @@ static int __init intel_uncore_init(void) return -ENODEV; uncore_max_entries = topology_max_packages() * - topology_max_die_per_package(); + topology_max_dies_per_package(); uncore_instances = kcalloc(uncore_max_entries, sizeof(*uncore_instances), GFP_KERNEL); if (!uncore_instances) return -ENOMEM; - ret = uncore_freq_common_init(uncore_read_control_freq, uncore_write_control_freq, - uncore_read_freq); + ret = uncore_freq_common_init(uncore_read, uncore_write_control_freq); if (ret) goto err_free; @@ -271,6 +326,6 @@ static void __exit intel_uncore_exit(void) } module_exit(intel_uncore_exit) -MODULE_IMPORT_NS(INTEL_UNCORE_FREQUENCY); +MODULE_IMPORT_NS("INTEL_UNCORE_FREQUENCY"); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Intel Uncore Frequency Limits Driver"); diff --git a/drivers/platform/x86/intel/vbtn.c b/drivers/platform/x86/intel/vbtn.c index c5e4e35c8d20..232cd12e3c9f 100644 --- a/drivers/platform/x86/intel/vbtn.c +++ b/drivers/platform/x86/intel/vbtn.c @@ -7,11 
+7,13 @@ */ #include <linux/acpi.h> +#include <linux/cleanup.h> #include <linux/dmi.h> #include <linux/input.h> #include <linux/input/sparse-keymap.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/suspend.h> #include "../dual_accel_detect.h" @@ -24,6 +26,7 @@ #define VGBS_TABLET_MODE_FLAGS (VGBS_TABLET_MODE_FLAG | VGBS_TABLET_MODE_FLAG_ALT) +MODULE_DESCRIPTION("Intel Virtual Button driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("AceLan Kao"); @@ -65,6 +68,7 @@ static const struct key_entry intel_vbtn_switchmap[] = { }; struct intel_vbtn_priv { + struct mutex mutex; /* Avoid notify_handler() racing with itself */ struct input_dev *buttons_dev; struct input_dev *switches_dev; bool dual_accel; @@ -73,10 +77,10 @@ struct intel_vbtn_priv { bool wakeup_mode; }; -static void detect_tablet_mode(struct platform_device *device) +static void detect_tablet_mode(struct device *dev) { - struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev); - acpi_handle handle = ACPI_HANDLE(&device->dev); + struct intel_vbtn_priv *priv = dev_get_drvdata(dev); + acpi_handle handle = ACPI_HANDLE(dev); unsigned long long vgbs; acpi_status status; int m; @@ -89,6 +93,8 @@ static void detect_tablet_mode(struct platform_device *device) input_report_switch(priv->switches_dev, SW_TABLET_MODE, m); m = (vgbs & VGBS_DOCK_MODE_FLAG) ? 1 : 0; input_report_switch(priv->switches_dev, SW_DOCK, m); + + input_sync(priv->switches_dev); } /* @@ -134,8 +140,6 @@ static int intel_vbtn_input_setup(struct platform_device *device) priv->switches_dev->id.bustype = BUS_HOST; if (priv->has_switches) { - detect_tablet_mode(device); - ret = input_register_device(priv->switches_dev); if (ret) return ret; @@ -154,9 +158,12 @@ static void notify_handler(acpi_handle handle, u32 event, void *context) bool autorelease; int ret; + guard(mutex)(&priv->mutex); + if ((ke = sparse_keymap_entry_from_scancode(priv->buttons_dev, event))) { if (!priv->has_buttons) { - dev_warn(&device->dev, "Warning: received a button event on a device without buttons, please report this.\n"); + dev_warn(&device->dev, "Warning: received 0x%02x button event on a device without buttons, please report this.\n", + event); return; } input_dev = priv->buttons_dev; @@ -256,9 +263,6 @@ static const struct dmi_system_id dmi_switches_allow_list[] = { static bool intel_vbtn_has_switches(acpi_handle handle, bool dual_accel) { - unsigned long long vgbs; - acpi_status status; - /* See dual_accel_detect.h for more info */ if (dual_accel) return false; @@ -266,8 +270,7 @@ static bool intel_vbtn_has_switches(acpi_handle handle, bool dual_accel) if (!dmi_check_system(dmi_switches_allow_list)) return false; - status = acpi_evaluate_integer(handle, "VGBS", NULL, &vgbs); - return ACPI_SUCCESS(status); + return acpi_has_method(handle, "VGBS"); } static int intel_vbtn_probe(struct platform_device *device) @@ -292,6 +295,10 @@ static int intel_vbtn_probe(struct platform_device *device) return -ENOMEM; dev_set_drvdata(&device->dev, priv); + err = devm_mutex_init(&device->dev, &priv->mutex); + if (err) + return err; + priv->dual_accel = dual_accel; priv->has_buttons = has_buttons; priv->has_switches = has_switches; @@ -314,6 +321,9 @@ static int intel_vbtn_probe(struct platform_device *device) if (ACPI_FAILURE(status)) dev_err(&device->dev, "Error VBDL failed with ACPI status %d\n", status); } + // Check switches after buttons since VBDL may have side effects. 
+ if (has_switches) + detect_tablet_mode(&device->dev); device_init_wakeup(&device->dev, true); /* @@ -325,18 +335,12 @@ static int intel_vbtn_probe(struct platform_device *device) return 0; } -static int intel_vbtn_remove(struct platform_device *device) +static void intel_vbtn_remove(struct platform_device *device) { acpi_handle handle = ACPI_HANDLE(&device->dev); device_init_wakeup(&device->dev, false); acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler); - - /* - * Even if we failed to shut off the event stream, we can still - * safely detach from the device. - */ - return 0; } static int intel_vbtn_pm_prepare(struct device *dev) @@ -358,7 +362,13 @@ static void intel_vbtn_pm_complete(struct device *dev) static int intel_vbtn_pm_resume(struct device *dev) { + struct intel_vbtn_priv *priv = dev_get_drvdata(dev); + intel_vbtn_pm_complete(dev); + + if (priv->has_switches) + detect_tablet_mode(dev); + return 0; } diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c index bb81b8b1f7e9..ecfc7703f201 100644 --- a/drivers/platform/x86/intel/vsec.c +++ b/drivers/platform/x86/intel/vsec.c @@ -15,22 +15,18 @@ #include <linux/auxiliary_bus.h> #include <linux/bits.h> +#include <linux/bitops.h> +#include <linux/bug.h> +#include <linux/cleanup.h> #include <linux/delay.h> -#include <linux/kernel.h> #include <linux/idr.h> +#include <linux/log2.h> +#include <linux/intel_vsec.h> +#include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/types.h> -#include "vsec.h" - -/* Intel DVSEC offsets */ -#define INTEL_DVSEC_ENTRIES 0xA -#define INTEL_DVSEC_SIZE 0xB -#define INTEL_DVSEC_TABLE 0xC -#define INTEL_DVSEC_TABLE_BAR(x) ((x) & GENMASK(2, 0)) -#define INTEL_DVSEC_TABLE_OFFSET(x) ((x) & GENMASK(31, 3)) -#define TABLE_OFFSET_SHIFT 3 #define PMT_XA_START 0 #define PMT_XA_MAX INT_MAX #define PMT_XA_LIMIT XA_LIMIT(PMT_XA_START, PMT_XA_MAX) @@ -39,38 +35,18 @@ static DEFINE_IDA(intel_vsec_ida); static DEFINE_IDA(intel_vsec_sdsi_ida); static DEFINE_XARRAY_ALLOC(auxdev_array); -/** - * struct intel_vsec_header - Common fields of Intel VSEC and DVSEC registers. 
- * @rev: Revision ID of the VSEC/DVSEC register space - * @length: Length of the VSEC/DVSEC register space - * @id: ID of the feature - * @num_entries: Number of instances of the feature - * @entry_size: Size of the discovery table for each feature - * @tbir: BAR containing the discovery tables - * @offset: BAR offset of start of the first discovery table - */ -struct intel_vsec_header { - u8 rev; - u16 length; - u16 id; - u8 num_entries; - u8 entry_size; - u8 tbir; - u32 offset; +enum vsec_device_state { + STATE_NOT_FOUND, + STATE_REGISTERED, + STATE_SKIP, }; -enum intel_vsec_id { - VSEC_ID_TELEMETRY = 2, - VSEC_ID_WATCHER = 3, - VSEC_ID_CRASHLOG = 4, - VSEC_ID_SDSI = 65, -}; - -static enum intel_vsec_id intel_vsec_allow_list[] = { - VSEC_ID_TELEMETRY, - VSEC_ID_WATCHER, - VSEC_ID_CRASHLOG, - VSEC_ID_SDSI, +struct vsec_priv { + struct intel_vsec_platform_info *info; + struct device *suppliers[VSEC_FEATURE_COUNT]; + struct oobmsm_plat_info plat_info; + enum vsec_device_state state[VSEC_FEATURE_COUNT]; + unsigned long found_caps; }; static const char *intel_vsec_name(enum intel_vsec_id id) @@ -88,31 +64,32 @@ static const char *intel_vsec_name(enum intel_vsec_id id) case VSEC_ID_SDSI: return "sdsi"; + case VSEC_ID_TPMI: + return "tpmi"; + + case VSEC_ID_DISCOVERY: + return "discovery"; + default: return NULL; } } -static bool intel_vsec_allowed(u16 id) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(intel_vsec_allow_list); i++) - if (intel_vsec_allow_list[i] == id) - return true; - - return false; -} - -static bool intel_vsec_disabled(u16 id, unsigned long quirks) +static bool intel_vsec_supported(u16 id, unsigned long caps) { switch (id) { + case VSEC_ID_TELEMETRY: + return !!(caps & VSEC_CAP_TELEMETRY); case VSEC_ID_WATCHER: - return !!(quirks & VSEC_QUIRK_NO_WATCHER); - + return !!(caps & VSEC_CAP_WATCHER); case VSEC_ID_CRASHLOG: - return !!(quirks & VSEC_QUIRK_NO_CRASHLOG); - + return !!(caps & VSEC_CAP_CRASHLOG); + case VSEC_ID_SDSI: + return !!(caps & VSEC_CAP_SDSI); + case VSEC_ID_TPMI: + return !!(caps & VSEC_CAP_TPMI); + case VSEC_ID_DISCOVERY: + return !!(caps & VSEC_CAP_DISCOVERY); default: return false; } @@ -128,65 +105,188 @@ static void intel_vsec_dev_release(struct device *dev) { struct intel_vsec_device *intel_vsec_dev = dev_to_ivdev(dev); + xa_erase(&auxdev_array, intel_vsec_dev->id); + ida_free(intel_vsec_dev->ida, intel_vsec_dev->auxdev.id); + kfree(intel_vsec_dev->resource); kfree(intel_vsec_dev); } -static int intel_vsec_add_aux(struct pci_dev *pdev, struct intel_vsec_device *intel_vsec_dev, - const char *name) +static const struct vsec_feature_dependency * +get_consumer_dependencies(struct vsec_priv *priv, int cap_id) +{ + const struct vsec_feature_dependency *deps = priv->info->deps; + int consumer_id = priv->info->num_deps; + + if (!deps) + return NULL; + + while (consumer_id--) + if (deps[consumer_id].feature == BIT(cap_id)) + return &deps[consumer_id]; + + return NULL; +} + +static bool vsec_driver_present(int cap_id) +{ + unsigned long bit = BIT(cap_id); + + switch (bit) { + case VSEC_CAP_TELEMETRY: + return IS_ENABLED(CONFIG_INTEL_PMT_TELEMETRY); + case VSEC_CAP_WATCHER: + return IS_ENABLED(CONFIG_INTEL_PMT_WATCHER); + case VSEC_CAP_CRASHLOG: + return IS_ENABLED(CONFIG_INTEL_PMT_CRASHLOG); + case VSEC_CAP_SDSI: + return IS_ENABLED(CONFIG_INTEL_SDSI); + case VSEC_CAP_TPMI: + return IS_ENABLED(CONFIG_INTEL_TPMI); + case VSEC_CAP_DISCOVERY: + return IS_ENABLED(CONFIG_INTEL_PMT_DISCOVERY); + default: + return false; + } +} + +/* + * Although pci_device_id table is 
available in the pdev, this prototype is + * necessary because the code using it can be called by an exported API that + * might pass a different pdev. + */ +static const struct pci_device_id intel_vsec_pci_ids[]; + +static int intel_vsec_link_devices(struct pci_dev *pdev, struct device *dev, + int consumer_id) +{ + const struct vsec_feature_dependency *deps; + enum vsec_device_state *state; + struct device **suppliers; + struct vsec_priv *priv; + int supplier_id; + + if (!consumer_id) + return 0; + + if (!pci_match_id(intel_vsec_pci_ids, pdev)) + return 0; + + priv = pci_get_drvdata(pdev); + state = priv->state; + suppliers = priv->suppliers; + + priv->suppliers[consumer_id] = dev; + + deps = get_consumer_dependencies(priv, consumer_id); + if (!deps) + return 0; + + for_each_set_bit(supplier_id, &deps->supplier_bitmap, VSEC_FEATURE_COUNT) { + struct device_link *link; + + if (state[supplier_id] != STATE_REGISTERED || + !vsec_driver_present(supplier_id)) + continue; + + if (!suppliers[supplier_id]) { + dev_err(dev, "Bad supplier list\n"); + return -EINVAL; + } + + link = device_link_add(dev, suppliers[supplier_id], + DL_FLAG_AUTOPROBE_CONSUMER); + if (!link) + return -EINVAL; + } + + return 0; +} + +int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent, + struct intel_vsec_device *intel_vsec_dev, + const char *name) { struct auxiliary_device *auxdev = &intel_vsec_dev->auxdev; int ret, id; - ret = ida_alloc(intel_vsec_dev->ida, GFP_KERNEL); + if (!parent) + return -EINVAL; + + ret = xa_alloc(&auxdev_array, &intel_vsec_dev->id, intel_vsec_dev, + PMT_XA_LIMIT, GFP_KERNEL); if (ret < 0) { + kfree(intel_vsec_dev->resource); kfree(intel_vsec_dev); return ret; } - auxdev->id = ret; + id = ida_alloc(intel_vsec_dev->ida, GFP_KERNEL); + if (id < 0) { + xa_erase(&auxdev_array, intel_vsec_dev->id); + kfree(intel_vsec_dev->resource); + kfree(intel_vsec_dev); + return id; + } + + auxdev->id = id; auxdev->name = name; - auxdev->dev.parent = &pdev->dev; + auxdev->dev.parent = parent; auxdev->dev.release = intel_vsec_dev_release; ret = auxiliary_device_init(auxdev); if (ret < 0) { - ida_free(intel_vsec_dev->ida, auxdev->id); - kfree(intel_vsec_dev->resource); - kfree(intel_vsec_dev); + intel_vsec_dev_release(&auxdev->dev); return ret; } - ret = auxiliary_device_add(auxdev); - if (ret < 0) { - auxiliary_device_uninit(auxdev); - return ret; - } + /* + * Assign a name now to ensure that the device link doesn't contain + * a null string for the consumer name. This is a problem when a supplier + * supplies more than one consumer and can lead to a duplicate name error + * when the link is created in sysfs. 
+ */ + ret = dev_set_name(&auxdev->dev, "%s.%s.%d", KBUILD_MODNAME, auxdev->name, + auxdev->id); + if (ret) + goto cleanup_aux; - ret = devm_add_action_or_reset(&pdev->dev, intel_vsec_remove_aux, - auxdev); - if (ret < 0) - return ret; + ret = intel_vsec_link_devices(pdev, &auxdev->dev, intel_vsec_dev->cap_id); + if (ret) + goto cleanup_aux; - /* Add auxdev to list */ - ret = xa_alloc(&auxdev_array, &id, intel_vsec_dev, PMT_XA_LIMIT, - GFP_KERNEL); + ret = auxiliary_device_add(auxdev); if (ret) - return ret; + goto cleanup_aux; - return 0; + return devm_add_action_or_reset(parent, intel_vsec_remove_aux, + auxdev); + +cleanup_aux: + auxiliary_device_uninit(auxdev); + return ret; } +EXPORT_SYMBOL_NS_GPL(intel_vsec_add_aux, "INTEL_VSEC"); static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *header, - struct intel_vsec_platform_info *info) + struct intel_vsec_platform_info *info, + unsigned long cap_id) { - struct intel_vsec_device *intel_vsec_dev; - struct resource *res, *tmp; + struct intel_vsec_device __free(kfree) *intel_vsec_dev = NULL; + struct resource __free(kfree) *res = NULL; + struct resource *tmp; + struct device *parent; unsigned long quirks = info->quirks; + u64 base_addr; int i; - if (!intel_vsec_allowed(header->id) || intel_vsec_disabled(header->id, quirks)) + if (info->parent) + parent = info->parent; + else + parent = &pdev->dev; + + if (!intel_vsec_supported(header->id, info->caps)) return -EINVAL; if (!header->num_entries) { @@ -204,52 +304,168 @@ static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *he return -ENOMEM; res = kcalloc(header->num_entries, sizeof(*res), GFP_KERNEL); - if (!res) { - kfree(intel_vsec_dev); + if (!res) return -ENOMEM; - } if (quirks & VSEC_QUIRK_TABLE_SHIFT) header->offset >>= TABLE_OFFSET_SHIFT; + if (info->base_addr) + base_addr = info->base_addr; + else + base_addr = pdev->resource[header->tbir].start; + /* * The DVSEC/VSEC contains the starting offset and count for a block of * discovery tables. Create a resource array of these tables to the * auxiliary device driver. 
*/ for (i = 0, tmp = res; i < header->num_entries; i++, tmp++) { - tmp->start = pdev->resource[header->tbir].start + - header->offset + i * (header->entry_size * sizeof(u32)); + tmp->start = base_addr + header->offset + i * (header->entry_size * sizeof(u32)); tmp->end = tmp->start + (header->entry_size * sizeof(u32)) - 1; tmp->flags = IORESOURCE_MEM; + + /* Check resource is not in use */ + if (!request_mem_region(tmp->start, resource_size(tmp), "")) + return -EBUSY; + + release_mem_region(tmp->start, resource_size(tmp)); } intel_vsec_dev->pcidev = pdev; - intel_vsec_dev->resource = res; + intel_vsec_dev->resource = no_free_ptr(res); intel_vsec_dev->num_resources = header->num_entries; - intel_vsec_dev->info = info; + intel_vsec_dev->quirks = info->quirks; + intel_vsec_dev->base_addr = info->base_addr; + intel_vsec_dev->priv_data = info->priv_data; + intel_vsec_dev->cap_id = cap_id; if (header->id == VSEC_ID_SDSI) intel_vsec_dev->ida = &intel_vsec_sdsi_ida; else intel_vsec_dev->ida = &intel_vsec_ida; - return intel_vsec_add_aux(pdev, intel_vsec_dev, intel_vsec_name(header->id)); + /* + * Pass the ownership of intel_vsec_dev and resource within it to + * intel_vsec_add_aux() + */ + return intel_vsec_add_aux(pdev, parent, no_free_ptr(intel_vsec_dev), + intel_vsec_name(header->id)); +} + +static bool suppliers_ready(struct vsec_priv *priv, + const struct vsec_feature_dependency *consumer_deps, + int cap_id) +{ + enum vsec_device_state *state = priv->state; + int supplier_id; + + if (WARN_ON_ONCE(consumer_deps->feature != BIT(cap_id))) + return false; + + /* + * Verify that all required suppliers have been found. Return false + * immediately if any are still missing. + */ + for_each_set_bit(supplier_id, &consumer_deps->supplier_bitmap, VSEC_FEATURE_COUNT) { + if (state[supplier_id] == STATE_SKIP) + continue; + + if (state[supplier_id] == STATE_NOT_FOUND) + return false; + } + + /* + * All suppliers have been found and the consumer is ready to be + * registered. + */ + return true; +} + +static int get_cap_id(u32 header_id, unsigned long *cap_id) +{ + switch (header_id) { + case VSEC_ID_TELEMETRY: + *cap_id = ilog2(VSEC_CAP_TELEMETRY); + break; + case VSEC_ID_WATCHER: + *cap_id = ilog2(VSEC_CAP_WATCHER); + break; + case VSEC_ID_CRASHLOG: + *cap_id = ilog2(VSEC_CAP_CRASHLOG); + break; + case VSEC_ID_SDSI: + *cap_id = ilog2(VSEC_CAP_SDSI); + break; + case VSEC_ID_TPMI: + *cap_id = ilog2(VSEC_CAP_TPMI); + break; + case VSEC_ID_DISCOVERY: + *cap_id = ilog2(VSEC_CAP_DISCOVERY); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int intel_vsec_register_device(struct pci_dev *pdev, + struct intel_vsec_header *header, + struct intel_vsec_platform_info *info) +{ + const struct vsec_feature_dependency *consumer_deps; + struct vsec_priv *priv; + unsigned long cap_id; + int ret; + + ret = get_cap_id(header->id, &cap_id); + if (ret) + return ret; + + /* + * Only track dependencies for devices probed by the VSEC driver. + * For others using the exported APIs, add the device directly. 
+ */ + if (!pci_match_id(intel_vsec_pci_ids, pdev)) + return intel_vsec_add_dev(pdev, header, info, cap_id); + + priv = pci_get_drvdata(pdev); + if (priv->state[cap_id] == STATE_REGISTERED || + priv->state[cap_id] == STATE_SKIP) + return -EEXIST; + + priv->found_caps |= BIT(cap_id); + + if (!vsec_driver_present(cap_id)) { + priv->state[cap_id] = STATE_SKIP; + return -ENODEV; + } + + consumer_deps = get_consumer_dependencies(priv, cap_id); + if (!consumer_deps || suppliers_ready(priv, consumer_deps, cap_id)) { + ret = intel_vsec_add_dev(pdev, header, info, cap_id); + if (ret) + priv->state[cap_id] = STATE_SKIP; + else + priv->state[cap_id] = STATE_REGISTERED; + + return ret; + } + + return -EAGAIN; } static bool intel_vsec_walk_header(struct pci_dev *pdev, struct intel_vsec_platform_info *info) { - struct intel_vsec_header **header = info->capabilities; + struct intel_vsec_header **header = info->headers; bool have_devices = false; int ret; for ( ; *header; header++) { - ret = intel_vsec_add_dev(pdev, *header, info); - if (ret) - dev_info(&pdev->dev, "Could not add device for DVSEC id %d\n", - (*header)->id); - else + ret = intel_vsec_register_device(pdev, *header, info); + if (!ret) have_devices = true; } @@ -296,7 +512,7 @@ static bool intel_vsec_walk_dvsec(struct pci_dev *pdev, pci_read_config_dword(pdev, pos + PCI_DVSEC_HEADER2, &hdr); header.id = PCI_DVSEC_HEADER2_ID(hdr); - ret = intel_vsec_add_dev(pdev, &header, info); + ret = intel_vsec_register_device(pdev, &header, info); if (ret) continue; @@ -341,7 +557,7 @@ static bool intel_vsec_walk_vsec(struct pci_dev *pdev, header.tbir = INTEL_DVSEC_TABLE_BAR(table); header.offset = INTEL_DVSEC_TABLE_OFFSET(table); - ret = intel_vsec_add_dev(pdev, &header, info); + ret = intel_vsec_register_device(pdev, &header, info); if (ret) continue; @@ -351,11 +567,69 @@ static bool intel_vsec_walk_vsec(struct pci_dev *pdev, return have_devices; } +int intel_vsec_register(struct pci_dev *pdev, + struct intel_vsec_platform_info *info) +{ + if (!pdev || !info || !info->headers) + return -EINVAL; + + if (!intel_vsec_walk_header(pdev, info)) + return -ENODEV; + else + return 0; +} +EXPORT_SYMBOL_NS_GPL(intel_vsec_register, "INTEL_VSEC"); + +static bool intel_vsec_get_features(struct pci_dev *pdev, + struct intel_vsec_platform_info *info) +{ + bool found = false; + + /* + * Both DVSEC and VSEC capabilities can exist on the same device, + * so both intel_vsec_walk_dvsec() and intel_vsec_walk_vsec() must be + * called independently. Additionally, intel_vsec_walk_header() is + * needed for devices that do not have VSEC/DVSEC but provide the + * information via device_data. 
+ */ + if (intel_vsec_walk_dvsec(pdev, info)) + found = true; + + if (intel_vsec_walk_vsec(pdev, info)) + found = true; + + if (info && (info->quirks & VSEC_QUIRK_NO_DVSEC) && + intel_vsec_walk_header(pdev, info)) + found = true; + + return found; +} + +static void intel_vsec_skip_missing_dependencies(struct pci_dev *pdev) +{ + struct vsec_priv *priv = pci_get_drvdata(pdev); + const struct vsec_feature_dependency *deps = priv->info->deps; + int consumer_id = priv->info->num_deps; + + while (consumer_id--) { + int supplier_id; + + deps = &priv->info->deps[consumer_id]; + + for_each_set_bit(supplier_id, &deps->supplier_bitmap, VSEC_FEATURE_COUNT) { + if (!(BIT(supplier_id) & priv->found_caps)) + priv->state[supplier_id] = STATE_SKIP; + } + } +} + static int intel_vsec_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct intel_vsec_platform_info *info; - bool have_devices = false; - int ret; + struct vsec_priv *priv; + int num_caps, ret; + int run_once = 0; + bool found_any = false; ret = pcim_enable_device(pdev); if (ret) @@ -366,30 +640,64 @@ static int intel_vsec_pci_probe(struct pci_dev *pdev, const struct pci_device_id if (!info) return -EINVAL; - if (intel_vsec_walk_dvsec(pdev, info)) - have_devices = true; + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; - if (intel_vsec_walk_vsec(pdev, info)) - have_devices = true; + priv->info = info; + pci_set_drvdata(pdev, priv); - if (info && (info->quirks & VSEC_QUIRK_NO_DVSEC) && - intel_vsec_walk_header(pdev, info)) - have_devices = true; + num_caps = hweight_long(info->caps); + while (num_caps--) { + found_any |= intel_vsec_get_features(pdev, info); - if (!have_devices) + if (priv->found_caps == info->caps) + break; + + if (!run_once) { + intel_vsec_skip_missing_dependencies(pdev); + run_once = 1; + } + } + + if (!found_any) return -ENODEV; return 0; } -/* TGL info */ -static const struct intel_vsec_platform_info tgl_info = { - .quirks = VSEC_QUIRK_NO_WATCHER | VSEC_QUIRK_NO_CRASHLOG | - VSEC_QUIRK_TABLE_SHIFT | VSEC_QUIRK_EARLY_HW, -}; +int intel_vsec_set_mapping(struct oobmsm_plat_info *plat_info, + struct intel_vsec_device *vsec_dev) +{ + struct vsec_priv *priv; + + priv = pci_get_drvdata(vsec_dev->pcidev); + if (!priv) + return -EINVAL; + + priv->plat_info = *plat_info; + + return 0; +} +EXPORT_SYMBOL_NS_GPL(intel_vsec_set_mapping, "INTEL_VSEC"); + +struct oobmsm_plat_info *intel_vsec_get_mapping(struct pci_dev *pdev) +{ + struct vsec_priv *priv; + + if (!pci_match_id(intel_vsec_pci_ids, pdev)) + return ERR_PTR(-EINVAL); + + priv = pci_get_drvdata(pdev); + if (!priv) + return ERR_PTR(-EINVAL); + + return &priv->plat_info; +} +EXPORT_SYMBOL_NS_GPL(intel_vsec_get_mapping, "INTEL_VSEC"); /* DG1 info */ -static struct intel_vsec_header dg1_telemetry = { +static struct intel_vsec_header dg1_header = { .length = 0x10, .id = 2, .num_entries = 1, @@ -398,27 +706,78 @@ static struct intel_vsec_header dg1_telemetry = { .offset = 0x466000, }; -static struct intel_vsec_header *dg1_capabilities[] = { - &dg1_telemetry, +static struct intel_vsec_header *dg1_headers[] = { + &dg1_header, NULL }; static const struct intel_vsec_platform_info dg1_info = { - .capabilities = dg1_capabilities, + .caps = VSEC_CAP_TELEMETRY, + .headers = dg1_headers, .quirks = VSEC_QUIRK_NO_DVSEC | VSEC_QUIRK_EARLY_HW, }; +/* MTL info */ +static const struct intel_vsec_platform_info mtl_info = { + .caps = VSEC_CAP_TELEMETRY, +}; + +static const struct vsec_feature_dependency oobmsm_deps[] = { + { + .feature = 
VSEC_CAP_TELEMETRY, + .supplier_bitmap = VSEC_CAP_DISCOVERY | VSEC_CAP_TPMI, + }, +}; + +/* OOBMSM info */ +static const struct intel_vsec_platform_info oobmsm_info = { + .caps = VSEC_CAP_TELEMETRY | VSEC_CAP_SDSI | VSEC_CAP_TPMI | + VSEC_CAP_DISCOVERY, + .deps = oobmsm_deps, + .num_deps = ARRAY_SIZE(oobmsm_deps), +}; + +/* DMR OOBMSM info */ +static const struct intel_vsec_platform_info dmr_oobmsm_info = { + .caps = VSEC_CAP_TELEMETRY | VSEC_CAP_TPMI | VSEC_CAP_DISCOVERY, + .deps = oobmsm_deps, + .num_deps = ARRAY_SIZE(oobmsm_deps), +}; + +/* TGL info */ +static const struct intel_vsec_platform_info tgl_info = { + .caps = VSEC_CAP_TELEMETRY, + .quirks = VSEC_QUIRK_TABLE_SHIFT | VSEC_QUIRK_EARLY_HW, +}; + +/* LNL info */ +static const struct intel_vsec_platform_info lnl_info = { + .caps = VSEC_CAP_TELEMETRY | VSEC_CAP_WATCHER, +}; + #define PCI_DEVICE_ID_INTEL_VSEC_ADL 0x467d #define PCI_DEVICE_ID_INTEL_VSEC_DG1 0x490e +#define PCI_DEVICE_ID_INTEL_VSEC_MTL_M 0x7d0d +#define PCI_DEVICE_ID_INTEL_VSEC_MTL_S 0xad0d #define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM 0x09a7 +#define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM_DMR 0x09a1 #define PCI_DEVICE_ID_INTEL_VSEC_RPL 0xa77d #define PCI_DEVICE_ID_INTEL_VSEC_TGL 0x9a0d +#define PCI_DEVICE_ID_INTEL_VSEC_LNL_M 0x647d +#define PCI_DEVICE_ID_INTEL_VSEC_PTL 0xb07d +#define PCI_DEVICE_ID_INTEL_VSEC_WCL 0xfd7d static const struct pci_device_id intel_vsec_pci_ids[] = { { PCI_DEVICE_DATA(INTEL, VSEC_ADL, &tgl_info) }, { PCI_DEVICE_DATA(INTEL, VSEC_DG1, &dg1_info) }, - { PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM, &(struct intel_vsec_platform_info) {}) }, + { PCI_DEVICE_DATA(INTEL, VSEC_MTL_M, &mtl_info) }, + { PCI_DEVICE_DATA(INTEL, VSEC_MTL_S, &mtl_info) }, + { PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM, &oobmsm_info) }, + { PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM_DMR, &dmr_oobmsm_info) }, { PCI_DEVICE_DATA(INTEL, VSEC_RPL, &tgl_info) }, { PCI_DEVICE_DATA(INTEL, VSEC_TGL, &tgl_info) }, + { PCI_DEVICE_DATA(INTEL, VSEC_LNL_M, &lnl_info) }, + { PCI_DEVICE_DATA(INTEL, VSEC_PTL, &mtl_info) }, + { PCI_DEVICE_DATA(INTEL, VSEC_WCL, &mtl_info) }, { } }; MODULE_DEVICE_TABLE(pci, intel_vsec_pci_ids); diff --git a/drivers/platform/x86/intel/vsec.h b/drivers/platform/x86/intel/vsec.h deleted file mode 100644 index 3deeb05cf394..000000000000 --- a/drivers/platform/x86/intel/vsec.h +++ /dev/null @@ -1,52 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _VSEC_H -#define _VSEC_H - -#include <linux/auxiliary_bus.h> -#include <linux/bits.h> - -struct pci_dev; -struct resource; - -enum intel_vsec_quirks { - /* Watcher feature not supported */ - VSEC_QUIRK_NO_WATCHER = BIT(0), - - /* Crashlog feature not supported */ - VSEC_QUIRK_NO_CRASHLOG = BIT(1), - - /* Use shift instead of mask to read discovery table offset */ - VSEC_QUIRK_TABLE_SHIFT = BIT(2), - - /* DVSEC not present (provided in driver data) */ - VSEC_QUIRK_NO_DVSEC = BIT(3), - - /* Platforms requiring quirk in the auxiliary driver */ - VSEC_QUIRK_EARLY_HW = BIT(4), -}; - -/* Platform specific data */ -struct intel_vsec_platform_info { - struct intel_vsec_header **capabilities; - unsigned long quirks; -}; - -struct intel_vsec_device { - struct auxiliary_device auxdev; - struct pci_dev *pcidev; - struct resource *resource; - struct ida *ida; - struct intel_vsec_platform_info *info; - int num_resources; -}; - -static inline struct intel_vsec_device *dev_to_ivdev(struct device *dev) -{ - return container_of(dev, struct intel_vsec_device, auxdev.dev); -} - -static inline struct intel_vsec_device *auxdev_to_ivdev(struct auxiliary_device *auxdev) -{ - 
return container_of(auxdev, struct intel_vsec_device, auxdev); -} -#endif diff --git a/drivers/platform/x86/intel/vsec_tpmi.c b/drivers/platform/x86/intel/vsec_tpmi.c new file mode 100644 index 000000000000..7748b5557a18 --- /dev/null +++ b/drivers/platform/x86/intel/vsec_tpmi.c @@ -0,0 +1,861 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Driver to enumerate TPMI features and create devices + * + * Copyright (c) 2023, Intel Corporation. + * All Rights Reserved. + * + * The TPMI (Topology Aware Register and PM Capsule Interface) provides a + * flexible, extendable and PCIe enumerable MMIO interface for PM features. + * + * For example, Intel RAPL (Running Average Power Limit) provides an MMIO + * interface using TPMI. This has an advantage over the traditional MSR + * (Model Specific Register) interface, where a thread needs to be scheduled + * on the target CPU to read or write. Also, the RAPL features vary between + * CPU models and hence need a lot of model-specific code. Here TPMI provides an + * architectural interface by providing hierarchical tables and fields, + * which will not need any model-specific implementation. + * + * The TPMI interface uses a PCI VSEC structure to expose the location of + * the MMIO region. + * + * This VSEC structure is present in the PCI configuration space of the + * Intel Out-of-Band (OOB) device, which is handled by the Intel VSEC + * driver. The Intel VSEC driver parses VSEC structures present in the PCI + * configuration space of the given device and creates an auxiliary device + * object for each of them. In particular, it creates an auxiliary device + * object representing TPMI that can be bound by an auxiliary driver. + * + * This TPMI driver will bind to the TPMI auxiliary device object created + * by the Intel VSEC driver. + * + * The TPMI specification defines a PFS (PM Feature Structure) table. + * This table is present in the TPMI MMIO region. The starting address + * of the PFS is derived from the tBIR (Bar Indicator Register) and "Address" + * field from the VSEC header. + * + * Each TPMI PM feature has one entry in the PFS with a unique TPMI + * ID and its access details. The TPMI driver creates device nodes + * for the supported PM features. + * + * The names of the devices created by the TPMI driver start with the + * "intel_vsec.tpmi-" prefix, followed by the specific name of the + * given PM feature (for example, "intel_vsec.tpmi-rapl.0"). + * + * The device nodes are created using the interface "intel_vsec_add_aux()" + * provided by the Intel VSEC driver. + */ + +#include <linux/auxiliary_bus.h> +#include <linux/bitfield.h> +#include <linux/debugfs.h> +#include <linux/delay.h> +#include <linux/intel_tpmi.h> +#include <linux/intel_vsec.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/security.h> +#include <linux/sizes.h> +#include <linux/string_helpers.h> + +/** + * struct intel_tpmi_pfs_entry - TPMI PM Feature Structure (PFS) entry + * @tpmi_id: TPMI feature identifier (what the feature is and its data format). + * @num_entries: Number of feature interface instances present in the PFS. + * This represents the maximum number of Power domains in the SoC. + * @entry_size: Interface instance entry size in 32-bit words. + * @cap_offset: Offset from the PM_Features base address to the base of the PM VSEC + * register bank in KB. + * @attribute: Feature attribute: 0=BIOS. 1=OS. 2-3=Reserved. + * @reserved: Bits for use in the future.
+ * + * Represents one TPMI feature entry data in the PFS retrieved as is + * from the hardware. + */ +struct intel_tpmi_pfs_entry { + u64 tpmi_id:8; + u64 num_entries:8; + u64 entry_size:16; + u64 cap_offset:16; + u64 attribute:2; + u64 reserved:14; +} __packed; + +/** + * struct intel_tpmi_pm_feature - TPMI PM Feature information for a TPMI ID + * @pfs_header: PFS header retrieved from the hardware. + * @vsec_offset: Starting MMIO address for this feature in bytes. Essentially + * this offset = "Address" from VSEC header + PFS Capability + * offset for this feature entry. + * @vsec_dev: Pointer to intel_vsec_device structure for this TPMI device + * + * Represents TPMI instance information for one TPMI ID. + */ +struct intel_tpmi_pm_feature { + struct intel_tpmi_pfs_entry pfs_header; + u64 vsec_offset; + struct intel_vsec_device *vsec_dev; +}; + +/** + * struct intel_tpmi_info - TPMI information for all IDs in an instance + * @tpmi_features: Pointer to a list of TPMI feature instances + * @vsec_dev: Pointer to intel_vsec_device structure for this TPMI device + * @feature_count: Number of TPMI instances pointed to by tpmi_features + * @pfs_start: Start of PFS offset for the TPMI instances in this device + * @plat_info: Stores platform info which can be used by the client drivers + * @tpmi_control_mem: Memory mapped IO for getting control information + * @dbgfs_dir: debugfs entry pointer + * + * Stores the information for all TPMI devices enumerated from a single PCI device. + */ +struct intel_tpmi_info { + struct intel_tpmi_pm_feature *tpmi_features; + struct intel_vsec_device *vsec_dev; + int feature_count; + u64 pfs_start; + struct oobmsm_plat_info plat_info; + void __iomem *tpmi_control_mem; + struct dentry *dbgfs_dir; +}; + +/** + * struct tpmi_info_header - CPU package ID to PCI device mapping information + * @fn: PCI function number + * @dev: PCI device number + * @bus: PCI bus number + * @pkg: CPU Package id + * @segment: PCI segment id + * @partition: Package Partition id + * @cdie_mask: Bitmap of compute dies in the current partition + * @reserved: Reserved for future use + * @lock: When set to 1 the register is locked and becomes read-only + * until next reset. Not for use by the OS driver. + * + * The structure to read hardware provided mapping information. + */ +struct tpmi_info_header { + u64 fn:3; + u64 dev:5; + u64 bus:8; + u64 pkg:8; + u64 segment:8; + u64 partition:2; + u64 cdie_mask:16; + u64 reserved:13; + u64 lock:1; +} __packed; + +/** + * struct tpmi_feature_state - Structure to read hardware state of a feature + * @enabled: Enable state of a feature, 1: enabled, 0: disabled + * @reserved_1: Reserved for future use + * @write_blocked: Writes are blocked, meaning all write operations are ignored + * @read_blocked: Reads are blocked, meaning reads will return 0xFFs + * @pcs_select: Interface used by out of band software, not used in OS + * @reserved_2: Reserved for future use + * @id: TPMI ID of the feature + * @reserved_3: Reserved for future use + * @locked: When set to 1, OS can't change this register. + * + * The structure is used to read the hardware state of a TPMI feature. This + * information is used for debug and restricting operations for this feature. + */ +struct tpmi_feature_state { + u32 enabled:1; + u32 reserved_1:3; + u32 write_blocked:1; + u32 read_blocked:1; + u32 pcs_select:1; + u32 reserved_2:1; + u32 id:8; + u32 reserved_3:15; + u32 locked:1; +} __packed; + +/* + * The size from hardware is in u32 units.
This size comes from trusted hardware, + * but it is better to verify it on pre-silicon platforms. Set the size to 0 when it is invalid. + */ +#define TPMI_GET_SINGLE_ENTRY_SIZE(pfs) \ +({ \ + pfs->pfs_header.entry_size > SZ_1K ? 0 : pfs->pfs_header.entry_size << 2; \ +}) + +/* Used during auxbus device creation */ +static DEFINE_IDA(intel_vsec_tpmi_ida); + +struct oobmsm_plat_info *tpmi_get_platform_data(struct auxiliary_device *auxdev) +{ + struct intel_vsec_device *vsec_dev = auxdev_to_ivdev(auxdev); + + return vsec_dev->priv_data; +} +EXPORT_SYMBOL_NS_GPL(tpmi_get_platform_data, "INTEL_TPMI"); + +int tpmi_get_resource_count(struct auxiliary_device *auxdev) +{ + struct intel_vsec_device *vsec_dev = auxdev_to_ivdev(auxdev); + + if (vsec_dev) + return vsec_dev->num_resources; + + return 0; +} +EXPORT_SYMBOL_NS_GPL(tpmi_get_resource_count, "INTEL_TPMI"); + +struct resource *tpmi_get_resource_at_index(struct auxiliary_device *auxdev, int index) +{ + struct intel_vsec_device *vsec_dev = auxdev_to_ivdev(auxdev); + + if (vsec_dev && index < vsec_dev->num_resources) + return &vsec_dev->resource[index]; + + return NULL; +} +EXPORT_SYMBOL_NS_GPL(tpmi_get_resource_at_index, "INTEL_TPMI"); + +/* TPMI Control Interface */ + +#define TPMI_CONTROL_STATUS_OFFSET 0x00 +#define TPMI_COMMAND_OFFSET 0x08 +#define TMPI_CONTROL_DATA_VAL_OFFSET 0x0c + +/* + * The spec calls for a maximum of 1 second to get ownership in the worst + * case. Poll at 10 ms intervals and repeat for up to 1 second. + */ +#define TPMI_CONTROL_TIMEOUT_US (10 * USEC_PER_MSEC) +#define TPMI_CONTROL_TIMEOUT_MAX_US (1 * USEC_PER_SEC) + +#define TPMI_RB_TIMEOUT_US (10 * USEC_PER_MSEC) +#define TPMI_RB_TIMEOUT_MAX_US USEC_PER_SEC + +/* TPMI Control status register defines */ + +#define TPMI_CONTROL_STATUS_RB BIT_ULL(0) + +#define TPMI_CONTROL_STATUS_OWNER GENMASK_ULL(5, 4) +#define TPMI_OWNER_NONE 0 +#define TPMI_OWNER_IN_BAND 1 + +#define TPMI_CONTROL_STATUS_CPL BIT_ULL(6) +#define TPMI_CONTROL_STATUS_RESULT GENMASK_ULL(15, 8) +#define TPMI_CONTROL_STATUS_LEN GENMASK_ULL(31, 16) + +#define TPMI_CMD_PKT_LEN 2 +#define TPMI_CMD_STATUS_SUCCESS 0x40 + +/* TPMI command data registers */ +#define TMPI_CONTROL_DATA_CMD GENMASK_ULL(7, 0) +#define TPMI_CONTROL_DATA_VAL_FEATURE GENMASK_ULL(48, 40) + +/* Command to send via control interface */ +#define TPMI_CONTROL_GET_STATE_CMD 0x10 + +#define TPMI_CONTROL_CMD_MASK GENMASK_ULL(48, 40) + +#define TPMI_CMD_LEN_MASK GENMASK_ULL(18, 16) + +/* Mutex to complete get feature status without interruption */ +static DEFINE_MUTEX(tpmi_dev_lock); + +static int tpmi_wait_for_owner(struct intel_tpmi_info *tpmi_info, u8 owner) +{ + u64 control; + + return readq_poll_timeout(tpmi_info->tpmi_control_mem + TPMI_CONTROL_STATUS_OFFSET, + control, owner == FIELD_GET(TPMI_CONTROL_STATUS_OWNER, control), + TPMI_CONTROL_TIMEOUT_US, TPMI_CONTROL_TIMEOUT_MAX_US); +} + +static int tpmi_read_feature_status(struct intel_tpmi_info *tpmi_info, int feature_id, + struct tpmi_feature_state *feature_state) +{ + u64 control, data; + int ret; + + if (!tpmi_info->tpmi_control_mem) + return -EFAULT; + + mutex_lock(&tpmi_dev_lock); + + /* Wait for owner bit set to 0 (none) */ + ret = tpmi_wait_for_owner(tpmi_info, TPMI_OWNER_NONE); + if (ret) + goto err_unlock; + + /* Set command id to 0x10 for TPMI_GET_STATE */ + data = FIELD_PREP(TMPI_CONTROL_DATA_CMD, TPMI_CONTROL_GET_STATE_CMD); + + /* 32 bits for DATA offset and +8 for feature_id field */ + data |= FIELD_PREP(TPMI_CONTROL_DATA_VAL_FEATURE, feature_id); + + /* Write at command offset for qword access */
writeq(data, tpmi_info->tpmi_control_mem + TPMI_COMMAND_OFFSET); + + /* Wait for owner bit set to in-band */ + ret = tpmi_wait_for_owner(tpmi_info, TPMI_OWNER_IN_BAND); + if (ret) + goto err_unlock; + + /* Set Run Busy and packet length of 2 dwords */ + control = TPMI_CONTROL_STATUS_RB; + control |= FIELD_PREP(TPMI_CONTROL_STATUS_LEN, TPMI_CMD_PKT_LEN); + + /* Write at status offset for qword access */ + writeq(control, tpmi_info->tpmi_control_mem + TPMI_CONTROL_STATUS_OFFSET); + + /* Wait for Run Busy clear */ + ret = readq_poll_timeout(tpmi_info->tpmi_control_mem + TPMI_CONTROL_STATUS_OFFSET, + control, !(control & TPMI_CONTROL_STATUS_RB), + TPMI_RB_TIMEOUT_US, TPMI_RB_TIMEOUT_MAX_US); + if (ret) + goto done_proc; + + control = FIELD_GET(TPMI_CONTROL_STATUS_RESULT, control); + if (control != TPMI_CMD_STATUS_SUCCESS) { + ret = -EBUSY; + goto done_proc; + } + + /* Response is ready */ + memcpy_fromio(feature_state, tpmi_info->tpmi_control_mem + TMPI_CONTROL_DATA_VAL_OFFSET, + sizeof(*feature_state)); + + ret = 0; + +done_proc: + /* Set CPL "completion" bit */ + writeq(TPMI_CONTROL_STATUS_CPL, tpmi_info->tpmi_control_mem + TPMI_CONTROL_STATUS_OFFSET); + +err_unlock: + mutex_unlock(&tpmi_dev_lock); + + return ret; +} + +int tpmi_get_feature_status(struct auxiliary_device *auxdev, + int feature_id, bool *read_blocked, bool *write_blocked) +{ + struct intel_vsec_device *intel_vsec_dev = dev_to_ivdev(auxdev->dev.parent); + struct intel_tpmi_info *tpmi_info = auxiliary_get_drvdata(&intel_vsec_dev->auxdev); + struct tpmi_feature_state feature_state; + int ret; + + ret = tpmi_read_feature_status(tpmi_info, feature_id, &feature_state); + if (ret) + return ret; + + *read_blocked = feature_state.read_blocked; + *write_blocked = feature_state.write_blocked; + + return 0; +} +EXPORT_SYMBOL_NS_GPL(tpmi_get_feature_status, "INTEL_TPMI"); + +struct dentry *tpmi_get_debugfs_dir(struct auxiliary_device *auxdev) +{ + struct intel_vsec_device *intel_vsec_dev = dev_to_ivdev(auxdev->dev.parent); + struct intel_tpmi_info *tpmi_info = auxiliary_get_drvdata(&intel_vsec_dev->auxdev); + + return tpmi_info->dbgfs_dir; +} +EXPORT_SYMBOL_NS_GPL(tpmi_get_debugfs_dir, "INTEL_TPMI"); + +static int tpmi_pfs_dbg_show(struct seq_file *s, void *unused) +{ + struct intel_tpmi_info *tpmi_info = s->private; + int locked, disabled, read_blocked, write_blocked; + struct tpmi_feature_state feature_state; + struct intel_tpmi_pm_feature *pfs; + int ret, i; + + seq_printf(s, "tpmi PFS start offset 0x%llx\n", tpmi_info->pfs_start); + seq_puts(s, "tpmi_id\t\tentries\t\tsize\t\tcap_offset\tattribute\tvsec_offset\tlocked\tdisabled\tread_blocked\twrite_blocked\n"); + for (i = 0; i < tpmi_info->feature_count; ++i) { + pfs = &tpmi_info->tpmi_features[i]; + ret = tpmi_read_feature_status(tpmi_info, pfs->pfs_header.tpmi_id, &feature_state); + if (ret) { + locked = 'U'; + disabled = 'U'; + read_blocked = 'U'; + write_blocked = 'U'; + } else { + disabled = feature_state.enabled ? 'N' : 'Y'; + locked = feature_state.locked ? 'Y' : 'N'; + read_blocked = feature_state.read_blocked ? 'Y' : 'N'; + write_blocked = feature_state.write_blocked ?
'Y' : 'N'; + } + seq_printf(s, "0x%02x\t\t0x%02x\t\t0x%04x\t\t0x%04x\t\t0x%02x\t\t0x%016llx\t%c\t%c\t\t%c\t\t%c\n", + pfs->pfs_header.tpmi_id, pfs->pfs_header.num_entries, + pfs->pfs_header.entry_size, pfs->pfs_header.cap_offset, + pfs->pfs_header.attribute, pfs->vsec_offset, locked, disabled, + read_blocked, write_blocked); + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(tpmi_pfs_dbg); + +#define MEM_DUMP_COLUMN_COUNT 8 + +static int tpmi_mem_dump_show(struct seq_file *s, void *unused) +{ + size_t row_size = MEM_DUMP_COLUMN_COUNT * sizeof(u32); + struct intel_tpmi_pm_feature *pfs = s->private; + int count, ret = 0; + void __iomem *mem; + u32 size; + u64 off; + u8 *buffer; + + size = TPMI_GET_SINGLE_ENTRY_SIZE(pfs); + if (!size) + return -EIO; + + buffer = kmalloc(size, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + off = pfs->vsec_offset; + + mutex_lock(&tpmi_dev_lock); + + for (count = 0; count < pfs->pfs_header.num_entries; ++count) { + seq_printf(s, "TPMI Instance:%d offset:0x%llx\n", count, off); + + mem = ioremap(off, size); + if (!mem) { + ret = -ENOMEM; + break; + } + + memcpy_fromio(buffer, mem, size); + + seq_hex_dump(s, " ", DUMP_PREFIX_OFFSET, row_size, sizeof(u32), buffer, size, + false); + + iounmap(mem); + + off += size; + } + + mutex_unlock(&tpmi_dev_lock); + + kfree(buffer); + + return ret; +} +DEFINE_SHOW_ATTRIBUTE(tpmi_mem_dump); + +static ssize_t mem_write(struct file *file, const char __user *userbuf, size_t len, loff_t *ppos) +{ + struct seq_file *m = file->private_data; + struct intel_tpmi_pm_feature *pfs = m->private; + u32 addr, value, punit, size; + u32 num_elems, *array; + void __iomem *mem; + int ret; + + size = TPMI_GET_SINGLE_ENTRY_SIZE(pfs); + if (!size) + return -EIO; + + ret = parse_int_array_user(userbuf, len, (int **)&array); + if (ret < 0) + return ret; + + num_elems = *array; + if (num_elems != 3) { + ret = -EINVAL; + goto exit_write; + } + + punit = array[1]; + addr = array[2]; + value = array[3]; + + if (punit >= pfs->pfs_header.num_entries) { + ret = -EINVAL; + goto exit_write; + } + + if (addr >= size) { + ret = -EINVAL; + goto exit_write; + } + + mutex_lock(&tpmi_dev_lock); + + mem = ioremap(pfs->vsec_offset + punit * size, size); + if (!mem) { + ret = -ENOMEM; + goto unlock_mem_write; + } + + writel(value, mem + addr); + + iounmap(mem); + + ret = len; + +unlock_mem_write: + mutex_unlock(&tpmi_dev_lock); + +exit_write: + kfree(array); + + return ret; +} + +static int mem_write_show(struct seq_file *s, void *unused) +{ + return 0; +} + +static int mem_write_open(struct inode *inode, struct file *file) +{ + return single_open(file, mem_write_show, inode->i_private); +} + +static const struct file_operations mem_write_ops = { + .open = mem_write_open, + .read = seq_read, + .write = mem_write, + .llseek = seq_lseek, + .release = single_release, +}; + +#define tpmi_to_dev(info) (&info->vsec_dev->pcidev->dev) + +static void tpmi_dbgfs_register(struct intel_tpmi_info *tpmi_info) +{ + char name[64]; + int i; + + snprintf(name, sizeof(name), "tpmi-%s", dev_name(tpmi_to_dev(tpmi_info))); + tpmi_info->dbgfs_dir = debugfs_create_dir(name, NULL); + + debugfs_create_file("pfs_dump", 0444, tpmi_info->dbgfs_dir, tpmi_info, &tpmi_pfs_dbg_fops); + + for (i = 0; i < tpmi_info->feature_count; ++i) { + struct intel_tpmi_pm_feature *pfs; + struct dentry *dir; + + pfs = &tpmi_info->tpmi_features[i]; + snprintf(name, sizeof(name), "tpmi-id-%02x", pfs->pfs_header.tpmi_id); + dir = debugfs_create_dir(name, tpmi_info->dbgfs_dir); + + debugfs_create_file("mem_dump", 0444, dir, 
pfs, &tpmi_mem_dump_fops); + debugfs_create_file("mem_write", 0644, dir, pfs, &mem_write_ops); + } +} + +static void tpmi_set_control_base(struct auxiliary_device *auxdev, + struct intel_tpmi_info *tpmi_info, + struct intel_tpmi_pm_feature *pfs) +{ + void __iomem *mem; + u32 size; + + size = TPMI_GET_SINGLE_ENTRY_SIZE(pfs); + if (!size) + return; + + mem = devm_ioremap(&auxdev->dev, pfs->vsec_offset, size); + if (!mem) + return; + + /* mem is pointing to TPMI CONTROL base */ + tpmi_info->tpmi_control_mem = mem; +} + +static const char *intel_tpmi_name(enum intel_tpmi_id id) +{ + switch (id) { + case TPMI_ID_RAPL: + return "rapl"; + case TPMI_ID_PEM: + return "pem"; + case TPMI_ID_UNCORE: + return "uncore"; + case TPMI_ID_SST: + return "sst"; + case TPMI_ID_PLR: + return "plr"; + default: + return NULL; + } +} + +/* String length for tpmi-"feature_name" (up to 8 bytes) */ +#define TPMI_FEATURE_NAME_LEN 14 + +static int tpmi_create_device(struct intel_tpmi_info *tpmi_info, + struct intel_tpmi_pm_feature *pfs, + u64 pfs_start) +{ + struct intel_vsec_device *vsec_dev = tpmi_info->vsec_dev; + char feature_id_name[TPMI_FEATURE_NAME_LEN]; + struct intel_vsec_device *feature_vsec_dev; + struct tpmi_feature_state feature_state; + struct resource *res, *tmp; + const char *name; + int i, ret; + + ret = tpmi_read_feature_status(tpmi_info, pfs->pfs_header.tpmi_id, &feature_state); + if (ret) + return ret; + + /* + * If the feature is not enabled, return -EOPNOTSUPP so that the caller + * continues to look at other features in the PFS. This will not cause + * the driver load to fail. + */ + if (!feature_state.enabled) + return -EOPNOTSUPP; + + name = intel_tpmi_name(pfs->pfs_header.tpmi_id); + if (!name) + return -EOPNOTSUPP; + + res = kcalloc(pfs->pfs_header.num_entries, sizeof(*res), GFP_KERNEL); + if (!res) + return -ENOMEM; + + feature_vsec_dev = kzalloc(sizeof(*feature_vsec_dev), GFP_KERNEL); + if (!feature_vsec_dev) { + kfree(res); + return -ENOMEM; + } + + snprintf(feature_id_name, sizeof(feature_id_name), "tpmi-%s", name); + + for (i = 0, tmp = res; i < pfs->pfs_header.num_entries; i++, tmp++) { + u64 entry_size_bytes = pfs->pfs_header.entry_size * sizeof(u32); + + tmp->start = pfs->vsec_offset + entry_size_bytes * i; + tmp->end = tmp->start + entry_size_bytes - 1; + tmp->flags = IORESOURCE_MEM; + } + + feature_vsec_dev->pcidev = vsec_dev->pcidev; + feature_vsec_dev->resource = res; + feature_vsec_dev->num_resources = pfs->pfs_header.num_entries; + feature_vsec_dev->priv_data = &tpmi_info->plat_info; + feature_vsec_dev->priv_data_size = sizeof(tpmi_info->plat_info); + feature_vsec_dev->ida = &intel_vsec_tpmi_ida; + + /* + * intel_vsec_add_aux() is resource managed, no explicit + * delete is required on error or on module unload. + * feature_vsec_dev and res memory are also freed as part of + * device deletion. + */ + return intel_vsec_add_aux(vsec_dev->pcidev, &vsec_dev->auxdev.dev, + feature_vsec_dev, feature_id_name); +} + +static int tpmi_create_devices(struct intel_tpmi_info *tpmi_info) +{ + struct intel_vsec_device *vsec_dev = tpmi_info->vsec_dev; + int ret, i; + + for (i = 0; i < vsec_dev->num_resources; i++) { + ret = tpmi_create_device(tpmi_info, &tpmi_info->tpmi_features[i], + tpmi_info->pfs_start); + /* + * Fail the driver load if a supported feature fails to create + * its device; features that are not supported (-EOPNOTSUPP) + * are skipped. Since intel_vsec_add_aux() + * is resource managed, no clean up is required for the + * successfully created devices.
+ */ + if (ret && ret != -EOPNOTSUPP) + return ret; + } + + return 0; +} + +#define TPMI_INFO_BUS_INFO_OFFSET 0x08 +#define TPMI_INFO_MAJOR_VERSION 0x00 +#define TPMI_INFO_MINOR_VERSION 0x02 + +static int tpmi_process_info(struct intel_tpmi_info *tpmi_info, + struct intel_tpmi_pm_feature *pfs) +{ + struct tpmi_info_header header; + void __iomem *info_mem; + u64 feature_header; + int ret = 0; + + info_mem = ioremap(pfs->vsec_offset, pfs->pfs_header.entry_size * sizeof(u32)); + if (!info_mem) + return -ENOMEM; + + feature_header = readq(info_mem); + if (TPMI_MAJOR_VERSION(feature_header) != TPMI_INFO_MAJOR_VERSION) { + ret = -ENODEV; + goto error_info_header; + } + + memcpy_fromio(&header, info_mem + TPMI_INFO_BUS_INFO_OFFSET, sizeof(header)); + + tpmi_info->plat_info.package_id = header.pkg; + tpmi_info->plat_info.bus_number = header.bus; + tpmi_info->plat_info.device_number = header.dev; + tpmi_info->plat_info.function_number = header.fn; + + if (TPMI_MINOR_VERSION(feature_header) >= TPMI_INFO_MINOR_VERSION) { + tpmi_info->plat_info.cdie_mask = header.cdie_mask; + tpmi_info->plat_info.partition = header.partition; + tpmi_info->plat_info.segment = header.segment; + } + +error_info_header: + iounmap(info_mem); + + return ret; +} + +static int tpmi_fetch_pfs_header(struct intel_tpmi_pm_feature *pfs, u64 start, int size) +{ + void __iomem *pfs_mem; + + pfs_mem = ioremap(start, size); + if (!pfs_mem) + return -ENOMEM; + + memcpy_fromio(&pfs->pfs_header, pfs_mem, sizeof(pfs->pfs_header)); + + iounmap(pfs_mem); + + return 0; +} + +#define TPMI_CAP_OFFSET_UNIT 1024 + +static int intel_vsec_tpmi_init(struct auxiliary_device *auxdev) +{ + struct intel_vsec_device *vsec_dev = auxdev_to_ivdev(auxdev); + struct pci_dev *pci_dev = vsec_dev->pcidev; + struct intel_tpmi_info *tpmi_info; + u64 pfs_start = 0; + int ret, i; + + tpmi_info = devm_kzalloc(&auxdev->dev, sizeof(*tpmi_info), GFP_KERNEL); + if (!tpmi_info) + return -ENOMEM; + + tpmi_info->vsec_dev = vsec_dev; + tpmi_info->feature_count = vsec_dev->num_resources; + tpmi_info->plat_info.bus_number = pci_dev->bus->number; + + tpmi_info->tpmi_features = devm_kcalloc(&auxdev->dev, vsec_dev->num_resources, + sizeof(*tpmi_info->tpmi_features), + GFP_KERNEL); + if (!tpmi_info->tpmi_features) + return -ENOMEM; + + for (i = 0; i < vsec_dev->num_resources; i++) { + struct intel_tpmi_pm_feature *pfs; + struct resource *res; + u64 res_start; + int size, ret; + + pfs = &tpmi_info->tpmi_features[i]; + pfs->vsec_dev = vsec_dev; + + res = &vsec_dev->resource[i]; + if (!res) + continue; + + res_start = res->start; + size = resource_size(res); + if (size < 0) + continue; + + ret = tpmi_fetch_pfs_header(pfs, res_start, size); + if (ret) + continue; + + if (!pfs_start) + pfs_start = res_start; + + pfs->vsec_offset = pfs_start + pfs->pfs_header.cap_offset * TPMI_CAP_OFFSET_UNIT; + + /* + * Process TPMI_INFO to get the PCI device to CPU package ID + * mapping. Device nodes for TPMI features are not created in + * this for loop, so the mapping information will be available + * when the actual device nodes are created outside this + * loop via tpmi_create_devices().
+ */ + if (pfs->pfs_header.tpmi_id == TPMI_INFO_ID) { + ret = tpmi_process_info(tpmi_info, pfs); + if (ret) + return ret; + + ret = intel_vsec_set_mapping(&tpmi_info->plat_info, vsec_dev); + if (ret) + return ret; + } + + if (pfs->pfs_header.tpmi_id == TPMI_CONTROL_ID) + tpmi_set_control_base(auxdev, tpmi_info, pfs); + } + + tpmi_info->pfs_start = pfs_start; + + auxiliary_set_drvdata(auxdev, tpmi_info); + + ret = tpmi_create_devices(tpmi_info); + if (ret) + return ret; + + /* + * Allow debugfs when the security policy allows it. Everything this + * debugfs interface provides can also be done via /dev/mem access. If + * the /dev/mem interface is locked, don't allow debugfs to present any + * information. Also check for CAP_SYS_RAWIO, as the /dev/mem interface + * requires it. + */ + if (!security_locked_down(LOCKDOWN_DEV_MEM) && capable(CAP_SYS_RAWIO)) + tpmi_dbgfs_register(tpmi_info); + + return 0; +} + +static int tpmi_probe(struct auxiliary_device *auxdev, + const struct auxiliary_device_id *id) +{ + return intel_vsec_tpmi_init(auxdev); +} + +static void tpmi_remove(struct auxiliary_device *auxdev) +{ + struct intel_tpmi_info *tpmi_info = auxiliary_get_drvdata(auxdev); + + debugfs_remove_recursive(tpmi_info->dbgfs_dir); +} + +static const struct auxiliary_device_id tpmi_id_table[] = { + { .name = "intel_vsec.tpmi" }, + {} +}; +MODULE_DEVICE_TABLE(auxiliary, tpmi_id_table); + +static struct auxiliary_driver tpmi_aux_driver = { + .id_table = tpmi_id_table, + .probe = tpmi_probe, + .remove = tpmi_remove, +}; + +module_auxiliary_driver(tpmi_aux_driver); + +MODULE_IMPORT_NS("INTEL_VSEC"); +MODULE_DESCRIPTION("Intel TPMI enumeration module"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/intel/wmi/sbl-fw-update.c b/drivers/platform/x86/intel/wmi/sbl-fw-update.c index 3c86e0108a24..75c82c08117f 100644 --- a/drivers/platform/x86/intel/wmi/sbl-fw-update.c +++ b/drivers/platform/x86/intel/wmi/sbl-fw-update.c @@ -25,19 +25,14 @@ static int get_fwu_request(struct device *dev, u32 *out) { - struct acpi_buffer result = {ACPI_ALLOCATE_BUFFER, NULL}; union acpi_object *obj; - acpi_status status; - status = wmi_query_block(INTEL_WMI_SBL_GUID, 0, &result); - if (ACPI_FAILURE(status)) { - dev_err(dev, "wmi_query_block failed\n"); + obj = wmidev_block_query(to_wmi_device(dev), 0); + if (!obj) return -ENODEV; - } - obj = (union acpi_object *)result.pointer; - if (!obj || obj->type != ACPI_TYPE_INTEGER) { - dev_warn(dev, "wmi_query_block returned invalid value\n"); + if (obj->type != ACPI_TYPE_INTEGER) { + dev_warn(dev, "wmidev_block_query returned invalid value\n"); kfree(obj); return -EINVAL; } @@ -58,9 +53,9 @@ static int set_fwu_request(struct device *dev, u32 in) input.length = sizeof(u32); input.pointer = &value; - status = wmi_set_block(INTEL_WMI_SBL_GUID, 0, &input); + status = wmidev_block_set(to_wmi_device(dev), 0, &input); if (ACPI_FAILURE(status)) { - dev_err(dev, "wmi_set_block failed\n"); + dev_err(dev, "wmidev_block_set failed\n"); return -ENODEV; } @@ -136,6 +131,7 @@ static struct wmi_driver intel_wmi_sbl_fw_update_driver = { .probe = intel_wmi_sbl_fw_update_probe, .remove = intel_wmi_sbl_fw_update_remove, .id_table = intel_wmi_sbl_id_table, + .no_singleton = true, }; module_wmi_driver(intel_wmi_sbl_fw_update_driver); diff --git a/drivers/platform/x86/intel/wmi/thunderbolt.c b/drivers/platform/x86/intel/wmi/thunderbolt.c index fc333ff82d1e..08df560a2c7a 100644 --- a/drivers/platform/x86/intel/wmi/thunderbolt.c +++ b/drivers/platform/x86/intel/wmi/thunderbolt.c @@ -32,8 +32,7 @@ static ssize_t
force_power_store(struct device *dev, mode = hex_to_bin(buf[0]); dev_dbg(dev, "force_power: storing %#x\n", mode); if (mode == 0 || mode == 1) { - status = wmi_evaluate_method(INTEL_WMI_THUNDERBOLT_GUID, 0, 1, - &input, NULL); + status = wmidev_evaluate_method(to_wmi_device(dev), 0, 1, &input, NULL); if (ACPI_FAILURE(status)) { dev_dbg(dev, "force_power: failed to evaluate ACPI method\n"); return -ENODEV; @@ -64,6 +63,7 @@ static struct wmi_driver intel_wmi_thunderbolt_driver = { .dev_groups = tbt_groups, }, .id_table = intel_wmi_thunderbolt_id_table, + .no_singleton = true, }; module_wmi_driver(intel_wmi_thunderbolt_driver);
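A few sketches of the kernel idioms this series leans on follow, in diff order. First, the uncore-frequency rework: the open-coded 0x7F and GENMASK(14, 8) arithmetic is replaced with named masks and the FIELD_GET()/FIELD_PREP() helpers from <linux/bitfield.h>. A minimal sketch of the resulting decode/encode pattern; the mask and multiplier values are taken from the hunk, while the helper function names are illustrative, not part of the patch.

#include <linux/bitfield.h>
#include <linux/bits.h>

#define UNCORE_MAX_RATIO_MASK		GENMASK_ULL(6, 0)
#define UNCORE_MIN_RATIO_MASK		GENMASK_ULL(14, 8)
#define UNCORE_FREQ_KHZ_MULTIPLIER	100000

/* Decode the max-ratio field of MSR_UNCORE_RATIO_LIMIT into kHz */
static unsigned int uncore_max_khz(u64 cap)
{
	return FIELD_GET(UNCORE_MAX_RATIO_MASK, cap) * UNCORE_FREQ_KHZ_MULTIPLIER;
}

/* Update the min-ratio field without disturbing the rest of the MSR value */
static u64 uncore_set_min_ratio(u64 cap, unsigned int ratio)
{
	cap &= ~UNCORE_MIN_RATIO_MASK;
	return cap | FIELD_PREP(UNCORE_MIN_RATIO_MASK, ratio);
}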
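The vbtn.c change serializes notify_handler() with guard(mutex) from <linux/cleanup.h>: the lock is dropped automatically on every return path, so none of the early returns in the handler needs an explicit mutex_unlock(). A minimal sketch of the idiom, with made-up function and lock names:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);

static int demo_handler(int event)
{
	guard(mutex)(&demo_lock);	/* held from here until any return */

	if (event < 0)
		return -EINVAL;		/* unlocked automatically */

	/* ... handle the event under the lock ... */
	return 0;			/* unlocked automatically */
}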
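detect_tablet_mode() now ends with input_sync(), which matters because input events are batched per device and only delivered to readers once a sync event closes the packet. A sketch of the reporting pattern (device registration omitted; the helper name is hypothetical):

#include <linux/input.h>

static void report_tablet_mode(struct input_dev *idev, bool tablet)
{
	input_report_switch(idev, SW_TABLET_MODE, tablet);
	input_sync(idev);	/* flush the queued event packet to readers */
}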
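In vsec.c, intel_vsec_link_devices() encodes the new feature dependencies as driver-core device links. With DL_FLAG_AUTOPROBE_CONSUMER the driver core defers probing of the consumer until the supplier's driver has bound, which is how, for instance, telemetry can be held off until TPMI and discovery are up on OOBMSM parts. A hedged sketch of a single link (both device pointers assumed valid and registered; the function name is illustrative):

#include <linux/device.h>

static int demo_make_dependent(struct device *consumer, struct device *supplier)
{
	struct device_link *link;

	/* Defer the consumer's probe until the supplier's driver is bound */
	link = device_link_add(consumer, supplier, DL_FLAG_AUTOPROBE_CONSUMER);
	if (!link)
		return -EINVAL;

	return 0;
}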
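intel_vsec_add_dev() is reworked around scope-based cleanup: pointers declared with __free(kfree) are freed automatically on any early return, and no_free_ptr() disarms that cleanup once ownership is handed over (here, into intel_vsec_add_aux()). A minimal sketch with hypothetical names:

#include <linux/cleanup.h>
#include <linux/slab.h>

static int demo_make_buffer(size_t n, u32 **out)
{
	u32 __free(kfree) *buf = kcalloc(n, sizeof(*buf), GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	if (n > 64)
		return -EINVAL;		/* buf is kfree()d automatically here */

	*out = no_free_ptr(buf);	/* ownership transferred, cleanup disarmed */
	return 0;
}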
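The same function also gained a check that each discovery-table range is actually free, implemented by requesting and immediately releasing the region. Note this detects a conflict at enumeration time only; it does not keep the region reserved. A sketch of the check (helper name assumed):

#include <linux/ioport.h>

/* Returns true if [start, start + n) is not claimed by another driver */
static bool demo_mem_region_free(resource_size_t start, resource_size_t n)
{
	if (!request_mem_region(start, n, ""))
		return false;

	release_mem_region(start, n);
	return true;
}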
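get_cap_id() maps a VSEC_CAP_* flag to an index with ilog2(), and the dependency code maps back with BIT(). That round trip is only valid because every VSEC_CAP_* value is a single bit; the actual values live in <linux/intel_vsec.h>, not in this diff, so the flag below is an assumed stand-in:

#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/log2.h>

#define DEMO_CAP_TELEMETRY	BIT(0)	/* assumed single-bit flag */

static void demo_cap_roundtrip(void)
{
	unsigned long cap_id = ilog2(DEMO_CAP_TELEMETRY);	/* mask -> index */

	WARN_ON(BIT(cap_id) != DEMO_CAP_TELEMETRY);		/* index -> mask */
}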
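Two unit conversions in vsec_tpmi.c are easy to misread: entry_size is in 32-bit words, so the byte size is entry_size << 2 (with anything above 1K words rejected as a pre-silicon sanity check), and cap_offset is in KB, hence TPMI_CAP_OFFSET_UNIT == 1024. Both restated as plain helpers, with names invented for illustration:

#include <linux/sizes.h>
#include <linux/types.h>

/* entry_size is in u32 words; 0 means invalid, do not map */
static u32 demo_tpmi_entry_bytes(u32 entry_size_words)
{
	return entry_size_words > SZ_1K ? 0 : entry_size_words << 2;
}

/* cap_offset is in KB from the start of the PFS */
static u64 demo_tpmi_feature_base(u64 pfs_start, u16 cap_offset)
{
	return pfs_start + (u64)cap_offset * 1024;
}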
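Finally, tpmi_read_feature_status() is a textbook MMIO mailbox transaction: wait for ownership, write the command, set run-busy with the packet length, poll for run-busy to clear, check the result field, read the response, then write the completion bit. The waits are built on readq_poll_timeout() from <linux/iopoll.h>; a sketch of one such wait, with the bit position taken from the hunk and everything else assumed:

#include <linux/bits.h>
#include <linux/iopoll.h>
#include <linux/time64.h>

#define DEMO_STATUS_RB	BIT_ULL(0)	/* run-busy bit, as in the TPMI hunk */

/* Poll every 10 ms, giving the hardware up to 1 s to clear run-busy */
static int demo_wait_rb_clear(void __iomem *status_reg)
{
	u64 control;

	return readq_poll_timeout(status_reg, control,
				  !(control & DEMO_STATUS_RB),
				  10 * USEC_PER_MSEC, USEC_PER_SEC);
}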
