Diffstat (limited to 'drivers/platform/x86/intel')
-rw-r--r--  drivers/platform/x86/intel/Kconfig | 14
-rw-r--r--  drivers/platform/x86/intel/Makefile | 64
-rw-r--r--  drivers/platform/x86/intel/bxtwc_tmu.c | 24
-rw-r--r--  drivers/platform/x86/intel/bytcrc_pwrsrc.c | 81
-rw-r--r--  drivers/platform/x86/intel/chtdc_ti_pwrbtn.c | 2
-rw-r--r--  drivers/platform/x86/intel/chtwc_int33fe.c | 8
-rw-r--r--  drivers/platform/x86/intel/hid.c | 33
-rw-r--r--  drivers/platform/x86/intel/ifs/core.c | 49
-rw-r--r--  drivers/platform/x86/intel/ifs/ifs.h | 97
-rw-r--r--  drivers/platform/x86/intel/ifs/load.c | 44
-rw-r--r--  drivers/platform/x86/intel/ifs/runtest.c | 363
-rw-r--r--  drivers/platform/x86/intel/int0002_vgpio.c | 18
-rw-r--r--  drivers/platform/x86/intel/int1092/intel_sar.c | 2
-rw-r--r--  drivers/platform/x86/intel/int3472/Makefile | 9
-rw-r--r--  drivers/platform/x86/intel/int3472/common.c | 9
-rw-r--r--  drivers/platform/x86/intel/int3472/discrete.c | 116
-rw-r--r--  drivers/platform/x86/intel/int3472/tps68470.c | 3
-rw-r--r--  drivers/platform/x86/intel/mrfld_pwrbtn.c | 2
-rw-r--r--  drivers/platform/x86/intel/oaktrail.c | 5
-rw-r--r--  drivers/platform/x86/intel/plr_tpmi.c | 354
-rw-r--r--  drivers/platform/x86/intel/pmc/arl.c | 7
-rw-r--r--  drivers/platform/x86/intel/pmc/cnp.c | 53
-rw-r--r--  drivers/platform/x86/intel/pmc/core.c | 436
-rw-r--r--  drivers/platform/x86/intel/pmc/core.h | 28
-rw-r--r--  drivers/platform/x86/intel/pmc/core_ssram.c | 18
-rw-r--r--  drivers/platform/x86/intel/pmc/lnl.c | 520
-rw-r--r--  drivers/platform/x86/intel/pmc/mtl.c | 3
-rw-r--r--  drivers/platform/x86/intel/pmc/pltdrv.c | 17
-rw-r--r--  drivers/platform/x86/intel/pmt/class.c | 44
-rw-r--r--  drivers/platform/x86/intel/pmt/class.h | 10
-rw-r--r--  drivers/platform/x86/intel/pmt/crashlog.c | 4
-rw-r--r--  drivers/platform/x86/intel/pmt/telemetry.c | 28
-rw-r--r--  drivers/platform/x86/intel/punit_ipc.c | 33
-rw-r--r--  drivers/platform/x86/intel/rst.c | 2
-rw-r--r--  drivers/platform/x86/intel/sdsi.c | 153
-rw-r--r--  drivers/platform/x86/intel/smartconnect.c | 2
-rw-r--r--  drivers/platform/x86/intel/speed_select_if/isst_if_common.c | 120
-rw-r--r--  drivers/platform/x86/intel/speed_select_if/isst_if_common.h | 3
-rw-r--r--  drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c | 4
-rw-r--r--  drivers/platform/x86/intel/speed_select_if/isst_tpmi.c | 2
-rw-r--r--  drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c | 406
-rw-r--r--  drivers/platform/x86/intel/telemetry/debugfs.c | 4
-rw-r--r--  drivers/platform/x86/intel/telemetry/pltdrv.c | 6
-rw-r--r--  drivers/platform/x86/intel/tpmi_power_domains.c | 237
-rw-r--r--  drivers/platform/x86/intel/tpmi_power_domains.h | 18
-rw-r--r--  drivers/platform/x86/intel/turbo_max_3.c | 4
-rw-r--r--  drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c | 129
-rw-r--r--  drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h | 30
-rw-r--r--  drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c | 279
-rw-r--r--  drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c | 112
-rw-r--r--  drivers/platform/x86/intel/vbtn.c | 26
-rw-r--r--  drivers/platform/x86/intel/vsec.c | 32
-rw-r--r--  drivers/platform/x86/intel/vsec.h | 108
-rw-r--r--  drivers/platform/x86/intel/vsec_tpmi.c (renamed from drivers/platform/x86/intel/tpmi.c) | 74
-rw-r--r--  drivers/platform/x86/intel/wmi/sbl-fw-update.c | 1
-rw-r--r--  drivers/platform/x86/intel/wmi/thunderbolt.c | 1
56 files changed, 3061 insertions(+), 1190 deletions(-)
diff --git a/drivers/platform/x86/intel/Kconfig b/drivers/platform/x86/intel/Kconfig
index e9dc0c021029..19a2246f2770 100644
--- a/drivers/platform/x86/intel/Kconfig
+++ b/drivers/platform/x86/intel/Kconfig
@@ -62,7 +62,7 @@ config INTEL_INT0002_VGPIO
config INTEL_OAKTRAIL
tristate "Intel Oaktrail Platform Extras"
- depends on ACPI
+ depends on ACPI_EC
depends on ACPI_VIDEO || ACPI_VIDEO=n
depends on RFKILL && BACKLIGHT_CLASS_DEVICE && ACPI
help
@@ -83,6 +83,7 @@ config INTEL_BXTWC_PMIC_TMU
config INTEL_BYTCRC_PWRSRC
tristate "Intel Bay Trail Crystal Cove power source driver"
depends on INTEL_SOC_PMIC
+ depends on POWER_SUPPLY
help
This option adds a power source driver for Crystal Cove PMICs
on Intel Bay Trail devices.
@@ -192,10 +193,14 @@ config INTEL_SMARTCONNECT
This driver checks to determine whether the device has Intel Smart
Connect enabled, and if so disables it.
+config INTEL_TPMI_POWER_DOMAINS
+ tristate
+
config INTEL_TPMI
tristate "Intel Topology Aware Register and PM Capsule Interface (TPMI)"
depends on INTEL_VSEC
depends on X86_64
+ select INTEL_TPMI_POWER_DOMAINS
help
The Intel Topology Aware Register and PM Capsule Interface (TPMI),
provides enumerable MMIO interface for power management features.
@@ -205,6 +210,13 @@ config INTEL_TPMI
To compile this driver as a module, choose M here: the module will
be called intel_vsec_tpmi.
+config INTEL_PLR_TPMI
+ tristate "Intel SoC TPMI Power Limit Reasons driver"
+ depends on INTEL_TPMI
+ help
+ This driver provides the TPMI power limit reasons status information
+ via debugfs files.
+
config INTEL_TURBO_MAX_3
bool "Intel Turbo Boost Max Technology 3.0 enumeration driver"
depends on X86_64 && SCHED_MC_PRIO
diff --git a/drivers/platform/x86/intel/Makefile b/drivers/platform/x86/intel/Makefile
index c1d5fe05e3f3..78acb414e154 100644
--- a/drivers/platform/x86/intel/Makefile
+++ b/drivers/platform/x86/intel/Makefile
@@ -17,46 +17,40 @@ obj-$(CONFIG_INTEL_UNCORE_FREQ_CONTROL) += uncore-frequency/
# Intel input drivers
-intel-hid-y := hid.o
-obj-$(CONFIG_INTEL_HID_EVENT) += intel-hid.o
-intel-vbtn-y := vbtn.o
-obj-$(CONFIG_INTEL_VBTN) += intel-vbtn.o
+intel-target-$(CONFIG_INTEL_HID_EVENT) += hid.o
+intel-target-$(CONFIG_INTEL_VBTN) += vbtn.o
# Intel miscellaneous drivers
-obj-$(CONFIG_INTEL_ISHTP_ECLITE) += ishtp_eclite.o
-intel_int0002_vgpio-y := int0002_vgpio.o
-obj-$(CONFIG_INTEL_INT0002_VGPIO) += intel_int0002_vgpio.o
-intel_oaktrail-y := oaktrail.o
-obj-$(CONFIG_INTEL_OAKTRAIL) += intel_oaktrail.o
-intel_sdsi-y := sdsi.o
-obj-$(CONFIG_INTEL_SDSI) += intel_sdsi.o
-intel_vsec-y := vsec.o
-obj-$(CONFIG_INTEL_VSEC) += intel_vsec.o
+intel-target-$(CONFIG_INTEL_INT0002_VGPIO) += int0002_vgpio.o
+intel-target-$(CONFIG_INTEL_ISHTP_ECLITE) += ishtp_eclite.o
+intel-target-$(CONFIG_INTEL_OAKTRAIL) += oaktrail.o
+intel-target-$(CONFIG_INTEL_SDSI) += sdsi.o
+intel-target-$(CONFIG_INTEL_VSEC) += vsec.o
# Intel PMIC / PMC / P-Unit drivers
-intel_bxtwc_tmu-y := bxtwc_tmu.o
-obj-$(CONFIG_INTEL_BXTWC_PMIC_TMU) += intel_bxtwc_tmu.o
-intel_crystal_cove_charger-y := crystal_cove_charger.o
-obj-$(CONFIG_X86_ANDROID_TABLETS) += intel_crystal_cove_charger.o
-intel_bytcrc_pwrsrc-y := bytcrc_pwrsrc.o
-obj-$(CONFIG_INTEL_BYTCRC_PWRSRC) += intel_bytcrc_pwrsrc.o
-intel_chtdc_ti_pwrbtn-y := chtdc_ti_pwrbtn.o
-obj-$(CONFIG_INTEL_CHTDC_TI_PWRBTN) += intel_chtdc_ti_pwrbtn.o
-intel_chtwc_int33fe-y := chtwc_int33fe.o
-obj-$(CONFIG_INTEL_CHTWC_INT33FE) += intel_chtwc_int33fe.o
-intel_mrfld_pwrbtn-y := mrfld_pwrbtn.o
-obj-$(CONFIG_INTEL_MRFLD_PWRBTN) += intel_mrfld_pwrbtn.o
-intel_punit_ipc-y := punit_ipc.o
-obj-$(CONFIG_INTEL_PUNIT_IPC) += intel_punit_ipc.o
+intel-target-$(CONFIG_INTEL_BYTCRC_PWRSRC) += bytcrc_pwrsrc.o
+intel-target-$(CONFIG_INTEL_BXTWC_PMIC_TMU) += bxtwc_tmu.o
+intel-target-$(CONFIG_INTEL_CHTDC_TI_PWRBTN) += chtdc_ti_pwrbtn.o
+intel-target-$(CONFIG_INTEL_CHTWC_INT33FE) += chtwc_int33fe.o
+intel-target-$(CONFIG_X86_ANDROID_TABLETS) += crystal_cove_charger.o
+intel-target-$(CONFIG_INTEL_MRFLD_PWRBTN) += mrfld_pwrbtn.o
+intel-target-$(CONFIG_INTEL_PUNIT_IPC) += punit_ipc.o
# TPMI drivers
-intel_vsec_tpmi-y := tpmi.o
-obj-$(CONFIG_INTEL_TPMI) += intel_vsec_tpmi.o
+intel-target-$(CONFIG_INTEL_PLR_TPMI) += plr_tpmi.o
+intel-target-$(CONFIG_INTEL_TPMI_POWER_DOMAINS) += tpmi_power_domains.o
+intel-target-$(CONFIG_INTEL_TPMI) += vsec_tpmi.o
# Intel Uncore drivers
-intel-rst-y := rst.o
-obj-$(CONFIG_INTEL_RST) += intel-rst.o
-intel-smartconnect-y := smartconnect.o
-obj-$(CONFIG_INTEL_SMARTCONNECT) += intel-smartconnect.o
-intel_turbo_max_3-y := turbo_max_3.o
-obj-$(CONFIG_INTEL_TURBO_MAX_3) += intel_turbo_max_3.o
+intel-target-$(CONFIG_INTEL_RST) += rst.o
+intel-target-$(CONFIG_INTEL_SMARTCONNECT) += smartconnect.o
+intel-target-$(CONFIG_INTEL_TURBO_MAX_3) += turbo_max_3.o
+
+# Add 'intel' prefix to each module listed in intel-target-*
+define INTEL_OBJ_TARGET
+intel-$(1)-y := $(1).o
+obj-$(2) += intel-$(1).o
+endef
+
+$(foreach target, $(basename $(intel-target-y)), $(eval $(call INTEL_OBJ_TARGET,$(target),y)))
+$(foreach target, $(basename $(intel-target-m)), $(eval $(call INTEL_OBJ_TARGET,$(target),m)))
diff --git a/drivers/platform/x86/intel/bxtwc_tmu.c b/drivers/platform/x86/intel/bxtwc_tmu.c
index d0e2a3c293b0..99437b2ccc25 100644
--- a/drivers/platform/x86/intel/bxtwc_tmu.c
+++ b/drivers/platform/x86/intel/bxtwc_tmu.c
@@ -48,9 +48,8 @@ static irqreturn_t bxt_wcove_tmu_irq_handler(int irq, void *data)
static int bxt_wcove_tmu_probe(struct platform_device *pdev)
{
struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
- struct regmap_irq_chip_data *regmap_irq_chip;
struct wcove_tmu *wctmu;
- int ret, virq, irq;
+ int ret;
wctmu = devm_kzalloc(&pdev->dev, sizeof(*wctmu), GFP_KERNEL);
if (!wctmu)
@@ -59,27 +58,18 @@ static int bxt_wcove_tmu_probe(struct platform_device *pdev)
wctmu->dev = &pdev->dev;
wctmu->regmap = pmic->regmap;
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
+ wctmu->irq = platform_get_irq(pdev, 0);
+ if (wctmu->irq < 0)
+ return wctmu->irq;
- regmap_irq_chip = pmic->irq_chip_data_tmu;
- virq = regmap_irq_get_virq(regmap_irq_chip, irq);
- if (virq < 0) {
- dev_err(&pdev->dev,
- "failed to get virtual interrupt=%d\n", irq);
- return virq;
- }
-
- ret = devm_request_threaded_irq(&pdev->dev, virq,
+ ret = devm_request_threaded_irq(&pdev->dev, wctmu->irq,
NULL, bxt_wcove_tmu_irq_handler,
IRQF_ONESHOT, "bxt_wcove_tmu", wctmu);
if (ret) {
dev_err(&pdev->dev, "request irq failed: %d,virq: %d\n",
- ret, virq);
+ ret, wctmu->irq);
return ret;
}
- wctmu->irq = virq;
/* Unmask TMU second level Wake & System alarm */
regmap_update_bits(wctmu->regmap, BXTWC_MTMUIRQ_REG,
@@ -131,7 +121,7 @@ MODULE_DEVICE_TABLE(platform, bxt_wcove_tmu_id_table);
static struct platform_driver bxt_wcove_tmu_driver = {
.probe = bxt_wcove_tmu_probe,
- .remove_new = bxt_wcove_tmu_remove,
+ .remove = bxt_wcove_tmu_remove,
.driver = {
.name = "bxt_wcove_tmu",
.pm = &bxtwc_tmu_pm_ops,
diff --git a/drivers/platform/x86/intel/bytcrc_pwrsrc.c b/drivers/platform/x86/intel/bytcrc_pwrsrc.c
index 418b71af27ff..68ac040082df 100644
--- a/drivers/platform/x86/intel/bytcrc_pwrsrc.c
+++ b/drivers/platform/x86/intel/bytcrc_pwrsrc.c
@@ -8,13 +8,22 @@
* Copyright (C) 2013 Intel Corporation
*/
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/debugfs.h>
+#include <linux/interrupt.h>
#include <linux/mfd/intel_soc_pmic.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/property.h>
#include <linux/regmap.h>
+#define CRYSTALCOVE_PWRSRC_IRQ 0x03
#define CRYSTALCOVE_SPWRSRC_REG 0x1E
+#define CRYSTALCOVE_SPWRSRC_USB BIT(0)
+#define CRYSTALCOVE_SPWRSRC_DC BIT(1)
+#define CRYSTALCOVE_SPWRSRC_BATTERY BIT(2)
#define CRYSTALCOVE_RESETSRC0_REG 0x20
#define CRYSTALCOVE_RESETSRC1_REG 0x21
#define CRYSTALCOVE_WAKESRC_REG 0x22
@@ -22,6 +31,7 @@
struct crc_pwrsrc_data {
struct regmap *regmap;
struct dentry *debug_dentry;
+ struct power_supply *psy;
unsigned int resetsrc0;
unsigned int resetsrc1;
unsigned int wakesrc;
@@ -118,13 +128,60 @@ static int crc_pwrsrc_read_and_clear(struct crc_pwrsrc_data *data,
return regmap_write(data->regmap, reg, *val);
}
+static irqreturn_t crc_pwrsrc_irq_handler(int irq, void *_data)
+{
+ struct crc_pwrsrc_data *data = _data;
+ unsigned int irq_mask;
+
+ if (regmap_read(data->regmap, CRYSTALCOVE_PWRSRC_IRQ, &irq_mask))
+ return IRQ_NONE;
+
+ regmap_write(data->regmap, CRYSTALCOVE_PWRSRC_IRQ, irq_mask);
+
+ power_supply_changed(data->psy);
+ return IRQ_HANDLED;
+}
+
+static int crc_pwrsrc_psy_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct crc_pwrsrc_data *data = power_supply_get_drvdata(psy);
+ unsigned int pwrsrc;
+ int ret;
+
+ if (psp != POWER_SUPPLY_PROP_ONLINE)
+ return -EINVAL;
+
+ ret = regmap_read(data->regmap, CRYSTALCOVE_SPWRSRC_REG, &pwrsrc);
+ if (ret)
+ return ret;
+
+ val->intval = !!(pwrsrc & (CRYSTALCOVE_SPWRSRC_USB |
+ CRYSTALCOVE_SPWRSRC_DC));
+ return 0;
+}
+
+static const enum power_supply_property crc_pwrsrc_psy_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+};
+
+static const struct power_supply_desc crc_pwrsrc_psy_desc = {
+ .name = "crystal_cove_pwrsrc",
+ .type = POWER_SUPPLY_TYPE_MAINS,
+ .properties = crc_pwrsrc_psy_props,
+ .num_properties = ARRAY_SIZE(crc_pwrsrc_psy_props),
+ .get_property = crc_pwrsrc_psy_get_property,
+};
+
static int crc_pwrsrc_probe(struct platform_device *pdev)
{
struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
struct crc_pwrsrc_data *data;
- int ret;
+ int irq, ret;
- data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -149,6 +206,24 @@ static int crc_pwrsrc_probe(struct platform_device *pdev)
if (ret)
return ret;
+ if (device_property_read_bool(dev->parent, "linux,register-pwrsrc-power_supply")) {
+ struct power_supply_config psy_cfg = { .drv_data = data };
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ data->psy = devm_power_supply_register(dev, &crc_pwrsrc_psy_desc, &psy_cfg);
+ if (IS_ERR(data->psy))
+ return dev_err_probe(dev, PTR_ERR(data->psy), "registering power-supply\n");
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ crc_pwrsrc_irq_handler,
+ IRQF_ONESHOT, KBUILD_MODNAME, data);
+ if (ret)
+ return dev_err_probe(dev, ret, "requesting IRQ\n");
+ }
+
data->debug_dentry = debugfs_create_dir(KBUILD_MODNAME, NULL);
debugfs_create_file("pwrsrc", 0444, data->debug_dentry, data, &pwrsrc_fops);
debugfs_create_file("resetsrc", 0444, data->debug_dentry, data, &resetsrc_fops);
@@ -167,7 +242,7 @@ static void crc_pwrsrc_remove(struct platform_device *pdev)
static struct platform_driver crc_pwrsrc_driver = {
.probe = crc_pwrsrc_probe,
- .remove_new = crc_pwrsrc_remove,
+ .remove = crc_pwrsrc_remove,
.driver = {
.name = "crystal_cove_pwrsrc",
},
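For reference, here is a minimal standalone sketch (not part of the patch itself) of how the ONLINE value reported by crc_pwrsrc_psy_get_property() above follows from the SPWRSRC bits; the register readout used is a hypothetical example value.

#include <stdint.h>
#include <stdio.h>

#define CRYSTALCOVE_SPWRSRC_USB		(1u << 0)
#define CRYSTALCOVE_SPWRSRC_DC		(1u << 1)
#define CRYSTALCOVE_SPWRSRC_BATTERY	(1u << 2)

int main(void)
{
	/* Hypothetical SPWRSRC readout: running on battery with USB power plugged in */
	uint32_t pwrsrc = CRYSTALCOVE_SPWRSRC_BATTERY | CRYSTALCOVE_SPWRSRC_USB;

	/* Online means any mains-style source (USB or DC) is present, battery alone is not */
	int online = !!(pwrsrc & (CRYSTALCOVE_SPWRSRC_USB | CRYSTALCOVE_SPWRSRC_DC));

	printf("POWER_SUPPLY_PROP_ONLINE = %d\n", online);	/* prints 1 */
	return 0;
}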
diff --git a/drivers/platform/x86/intel/chtdc_ti_pwrbtn.c b/drivers/platform/x86/intel/chtdc_ti_pwrbtn.c
index 615f8d1a0c68..53f01e198047 100644
--- a/drivers/platform/x86/intel/chtdc_ti_pwrbtn.c
+++ b/drivers/platform/x86/intel/chtdc_ti_pwrbtn.c
@@ -84,7 +84,7 @@ static struct platform_driver chtdc_ti_pwrbtn_driver = {
.name = KBUILD_MODNAME,
},
.probe = chtdc_ti_pwrbtn_probe,
- .remove_new = chtdc_ti_pwrbtn_remove,
+ .remove = chtdc_ti_pwrbtn_remove,
.id_table = chtdc_ti_pwrbtn_id_table,
};
module_platform_driver(chtdc_ti_pwrbtn_driver);
diff --git a/drivers/platform/x86/intel/chtwc_int33fe.c b/drivers/platform/x86/intel/chtwc_int33fe.c
index 93f75ba1dafd..29e8b5432f4c 100644
--- a/drivers/platform/x86/intel/chtwc_int33fe.c
+++ b/drivers/platform/x86/intel/chtwc_int33fe.c
@@ -270,7 +270,7 @@ cht_int33fe_register_max17047(struct device *dev, struct cht_int33fe_data *data)
}
memset(&board_info, 0, sizeof(board_info));
- strscpy(board_info.type, "max17047", I2C_NAME_SIZE);
+ strscpy(board_info.type, "max17047");
board_info.dev_name = "max17047";
board_info.fwnode = fwnode;
data->battery_fg = i2c_acpi_new_device(dev, 1, &board_info);
@@ -361,7 +361,7 @@ static int cht_int33fe_typec_probe(struct platform_device *pdev)
}
memset(&board_info, 0, sizeof(board_info));
- strscpy(board_info.type, "typec_fusb302", I2C_NAME_SIZE);
+ strscpy(board_info.type, "typec_fusb302");
board_info.dev_name = "fusb302";
board_info.fwnode = fwnode;
board_info.irq = fusb302_irq;
@@ -381,7 +381,7 @@ static int cht_int33fe_typec_probe(struct platform_device *pdev)
memset(&board_info, 0, sizeof(board_info));
board_info.dev_name = "pi3usb30532";
board_info.fwnode = fwnode;
- strscpy(board_info.type, "pi3usb30532", I2C_NAME_SIZE);
+ strscpy(board_info.type, "pi3usb30532");
data->pi3usb30532 = i2c_acpi_new_device(dev, 3, &board_info);
if (IS_ERR(data->pi3usb30532)) {
@@ -427,7 +427,7 @@ static struct platform_driver cht_int33fe_typec_driver = {
.acpi_match_table = ACPI_PTR(cht_int33fe_acpi_ids),
},
.probe = cht_int33fe_typec_probe,
- .remove_new = cht_int33fe_typec_remove,
+ .remove = cht_int33fe_typec_remove,
};
module_platform_driver(cht_int33fe_typec_driver);
diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c
index 7457ca2b27a6..88a1a9ff2f34 100644
--- a/drivers/platform/x86/intel/hid.c
+++ b/drivers/platform/x86/intel/hid.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/string_choices.h>
#include <linux/suspend.h>
#include "../dual_accel_detect.h"
@@ -38,6 +39,7 @@ MODULE_PARM_DESC(enable_sw_tablet_mode,
/* When NOT in tablet mode, VGBS returns with the flag 0x40 */
#define TABLET_MODE_FLAG BIT(6)
+MODULE_DESCRIPTION("Intel HID Event hotkey driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alex Hung");
@@ -49,6 +51,8 @@ static const struct acpi_device_id intel_hid_ids[] = {
{"INTC1076", 0},
{"INTC1077", 0},
{"INTC1078", 0},
+ {"INTC107B", 0},
+ {"INTC10CB", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, intel_hid_ids);
@@ -115,6 +119,13 @@ static const struct dmi_system_id button_array_table[] = {
},
},
{
+ .ident = "Lenovo ThinkPad X12 Detachable Gen 1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X12 Detachable Gen 1"),
+ },
+ },
+ {
.ident = "Lenovo ThinkPad X1 Tablet Gen 2",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
@@ -128,6 +139,13 @@ static const struct dmi_system_id button_array_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"),
},
},
+ {
+ .ident = "Microsoft Surface Go 4",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 4"),
+ },
+ },
{ }
};
@@ -328,10 +346,8 @@ static int intel_hid_set_enable(struct device *device, bool enable)
acpi_handle handle = ACPI_HANDLE(device);
/* Enable|disable features - power button is always enabled */
- if (!intel_hid_execute_method(handle, INTEL_HID_DSM_HDSM_FN,
- enable)) {
- dev_warn(device, "failed to %sable hotkeys\n",
- enable ? "en" : "dis");
+ if (!intel_hid_execute_method(handle, INTEL_HID_DSM_HDSM_FN, enable)) {
+ dev_warn(device, "failed to %s hotkeys\n", str_enable_disable(enable));
return -EIO;
}
@@ -504,6 +520,7 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
struct platform_device *device = context;
struct intel_hid_priv *priv = dev_get_drvdata(&device->dev);
unsigned long long ev_index;
+ struct key_entry *ke;
int err;
/*
@@ -545,11 +562,15 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
if (event == 0xc0 || !priv->array)
return;
- if (!sparse_keymap_entry_from_scancode(priv->array, event)) {
+ ke = sparse_keymap_entry_from_scancode(priv->array, event);
+ if (!ke) {
dev_info(&device->dev, "unknown event 0x%x\n", event);
return;
}
+ if (ke->type == KE_IGNORE)
+ return;
+
wakeup:
pm_wakeup_hard_event(&device->dev);
@@ -740,7 +761,7 @@ static struct platform_driver intel_hid_pl_driver = {
.pm = &intel_hid_pl_pm_ops,
},
.probe = intel_hid_probe,
- .remove_new = intel_hid_remove,
+ .remove = intel_hid_remove,
};
/*
diff --git a/drivers/platform/x86/intel/ifs/core.c b/drivers/platform/x86/intel/ifs/core.c
index 7b11198d85a1..1ae50702bdb7 100644
--- a/drivers/platform/x86/intel/ifs/core.c
+++ b/drivers/platform/x86/intel/ifs/core.c
@@ -11,16 +11,16 @@
#include "ifs.h"
-#define X86_MATCH(model, array_gen) \
- X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, \
- INTEL_FAM6_##model, X86_FEATURE_CORE_CAPABILITIES, array_gen)
+#define X86_MATCH(vfm, array_gen) \
+ X86_MATCH_VFM_FEATURE(vfm, X86_FEATURE_CORE_CAPABILITIES, array_gen)
static const struct x86_cpu_id ifs_cpu_ids[] __initconst = {
- X86_MATCH(SAPPHIRERAPIDS_X, ARRAY_GEN0),
- X86_MATCH(EMERALDRAPIDS_X, ARRAY_GEN0),
- X86_MATCH(GRANITERAPIDS_X, ARRAY_GEN0),
- X86_MATCH(GRANITERAPIDS_D, ARRAY_GEN0),
- X86_MATCH(ATOM_CRESTMONT_X, ARRAY_GEN1),
+ X86_MATCH(INTEL_SAPPHIRERAPIDS_X, ARRAY_GEN0),
+ X86_MATCH(INTEL_EMERALDRAPIDS_X, ARRAY_GEN0),
+ X86_MATCH(INTEL_GRANITERAPIDS_X, ARRAY_GEN0),
+ X86_MATCH(INTEL_GRANITERAPIDS_D, ARRAY_GEN0),
+ X86_MATCH(INTEL_ATOM_CRESTMONT_X, ARRAY_GEN1),
+ X86_MATCH(INTEL_ATOM_DARKMONT_X, ARRAY_GEN1),
{}
};
MODULE_DEVICE_TABLE(x86cpu, ifs_cpu_ids);
@@ -33,6 +33,7 @@ bool *ifs_pkg_auth;
static const struct ifs_test_caps scan_test = {
.integrity_cap_bit = MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT,
.test_num = IFS_TYPE_SAF,
+ .image_suffix = "scan",
};
static const struct ifs_test_caps array_test = {
@@ -40,9 +41,32 @@ static const struct ifs_test_caps array_test = {
.test_num = IFS_TYPE_ARRAY_BIST,
};
+static const struct ifs_test_msrs scan_msrs = {
+ .copy_hashes = MSR_COPY_SCAN_HASHES,
+ .copy_hashes_status = MSR_SCAN_HASHES_STATUS,
+ .copy_chunks = MSR_AUTHENTICATE_AND_COPY_CHUNK,
+ .copy_chunks_status = MSR_CHUNKS_AUTHENTICATION_STATUS,
+ .test_ctrl = MSR_SAF_CTRL,
+};
+
+static const struct ifs_test_msrs sbaf_msrs = {
+ .copy_hashes = MSR_COPY_SBAF_HASHES,
+ .copy_hashes_status = MSR_SBAF_HASHES_STATUS,
+ .copy_chunks = MSR_AUTHENTICATE_AND_COPY_SBAF_CHUNK,
+ .copy_chunks_status = MSR_SBAF_CHUNKS_AUTHENTICATION_STATUS,
+ .test_ctrl = MSR_SBAF_CTRL,
+};
+
+static const struct ifs_test_caps sbaf_test = {
+ .integrity_cap_bit = MSR_INTEGRITY_CAPS_SBAF_BIT,
+ .test_num = IFS_TYPE_SBAF,
+ .image_suffix = "sbft",
+};
+
static struct ifs_device ifs_devices[] = {
[IFS_TYPE_SAF] = {
.test_caps = &scan_test,
+ .test_msrs = &scan_msrs,
.misc = {
.name = "intel_ifs_0",
.minor = MISC_DYNAMIC_MINOR,
@@ -57,6 +81,15 @@ static struct ifs_device ifs_devices[] = {
.groups = plat_ifs_array_groups,
},
},
+ [IFS_TYPE_SBAF] = {
+ .test_caps = &sbaf_test,
+ .test_msrs = &sbaf_msrs,
+ .misc = {
+ .name = "intel_ifs_2",
+ .minor = MISC_DYNAMIC_MINOR,
+ .groups = plat_ifs_groups,
+ },
+ },
};
#define IFS_NUMTESTS ARRAY_SIZE(ifs_devices)
diff --git a/drivers/platform/x86/intel/ifs/ifs.h b/drivers/platform/x86/intel/ifs/ifs.h
index 56b9f3e3cf76..f369fb0d3d82 100644
--- a/drivers/platform/x86/intel/ifs/ifs.h
+++ b/drivers/platform/x86/intel/ifs/ifs.h
@@ -23,12 +23,14 @@
* IFS Image
* ---------
*
- * Intel provides a firmware file containing the scan tests via
- * github [#f1]_. Similar to microcode there is a separate file for each
+ * Intel provides firmware files containing the scan tests via the webpage [#f1]_.
+ * Look under "In-Field Scan Test Images Download" section towards the
+ * end of the page. Similar to microcode, there are separate files for each
* family-model-stepping. IFS Images are not applicable for some test types.
* Wherever applicable the sysfs directory would provide a "current_batch" file
* (see below) for loading the image.
*
+ * .. [#f1] https://intel.com/InFieldScan
*
* IFS Image Loading
* -----------------
@@ -125,12 +127,38 @@
* 2) Hardware allows for some number of cores to be tested in parallel.
* The driver does not make use of this, it only tests one core at a time.
*
- * .. [#f1] https://github.com/intel/TBD
+ * Structural Based Functional Test at Field (SBAF):
+ * -------------------------------------------------
+ *
+ * SBAF is a new type of testing that provides comprehensive core test
+ * coverage complementing Scan at Field (SAF) testing. SBAF mimics the
+ * manufacturing screening environment and leverages the same test suite.
+ * It makes use of Design For Test (DFT) observation sites and features
+ * to maximize coverage in minimum time.
+ *
+ * Similar to the SAF test, SBAF isolates the core under test from the
+ * rest of the system during execution. Upon completion, the core
+ * seamlessly resets to its pre-test state and resumes normal operation.
+ * Any machine checks or hangs encountered during the test are confined to
+ * the isolated core, preventing disruption to the overall system.
+ *
+ * Like the SAF test, the SBAF test is also divided into multiple batches,
+ * and each batch test can take hundreds of milliseconds (100-200 ms) to
+ * complete. If such a lengthy interruption is undesirable, it is
+ * recommended to relocate the time-sensitive applications to other cores.
*/
#include <linux/device.h>
#include <linux/miscdevice.h>
#define MSR_ARRAY_BIST 0x00000105
+
+#define MSR_COPY_SBAF_HASHES 0x000002b8
+#define MSR_SBAF_HASHES_STATUS 0x000002b9
+#define MSR_AUTHENTICATE_AND_COPY_SBAF_CHUNK 0x000002ba
+#define MSR_SBAF_CHUNKS_AUTHENTICATION_STATUS 0x000002bb
+#define MSR_ACTIVATE_SBAF 0x000002bc
+#define MSR_SBAF_STATUS 0x000002bd
+
#define MSR_COPY_SCAN_HASHES 0x000002c2
#define MSR_SCAN_HASHES_STATUS 0x000002c3
#define MSR_AUTHENTICATE_AND_COPY_CHUNK 0x000002c4
@@ -140,6 +168,7 @@
#define MSR_ARRAY_TRIGGER 0x000002d6
#define MSR_ARRAY_STATUS 0x000002d7
#define MSR_SAF_CTRL 0x000004f0
+#define MSR_SBAF_CTRL 0x000004f8
#define SCAN_NOT_TESTED 0
#define SCAN_TEST_PASS 1
@@ -147,6 +176,7 @@
#define IFS_TYPE_SAF 0
#define IFS_TYPE_ARRAY_BIST 1
+#define IFS_TYPE_SBAF 2
#define ARRAY_GEN0 0
#define ARRAY_GEN1 1
@@ -196,7 +226,8 @@ union ifs_chunks_auth_status_gen2 {
u16 valid_chunks;
u16 total_chunks;
u32 error_code :8;
- u32 rsvd2 :24;
+ u32 rsvd2 :8;
+ u32 max_bundle :16;
};
};
@@ -253,6 +284,34 @@ union ifs_array {
};
};
+/* MSR_ACTIVATE_SBAF bit fields */
+union ifs_sbaf {
+ u64 data;
+ struct {
+ u32 bundle_idx :9;
+ u32 rsvd1 :5;
+ u32 pgm_idx :2;
+ u32 rsvd2 :16;
+ u32 delay :31;
+ u32 sigmce :1;
+ };
+};
+
+/* MSR_SBAF_STATUS bit fields */
+union ifs_sbaf_status {
+ u64 data;
+ struct {
+ u32 bundle_idx :9;
+ u32 rsvd1 :5;
+ u32 pgm_idx :2;
+ u32 rsvd2 :16;
+ u32 error_code :8;
+ u32 rsvd3 :21;
+ u32 test_fail :1;
+ u32 sbaf_status :2;
+ };
+};
+
/*
* Driver populated error-codes
* 0xFD: Test timed out before completing all the chunks.
@@ -261,9 +320,28 @@ union ifs_array {
#define IFS_SW_TIMEOUT 0xFD
#define IFS_SW_PARTIAL_COMPLETION 0xFE
+#define IFS_SUFFIX_SZ 5
+
struct ifs_test_caps {
int integrity_cap_bit;
int test_num;
+ char image_suffix[IFS_SUFFIX_SZ];
+};
+
+/**
+ * struct ifs_test_msrs - MSRs used in IFS tests
+ * @copy_hashes: Copy test hash data
+ * @copy_hashes_status: Status of copied test hash data
+ * @copy_chunks: Copy chunks of the test data
+ * @copy_chunks_status: Status of the copied test data chunks
+ * @test_ctrl: Control the test attributes
+ */
+struct ifs_test_msrs {
+ u32 copy_hashes;
+ u32 copy_hashes_status;
+ u32 copy_chunks;
+ u32 copy_chunks_status;
+ u32 test_ctrl;
};
/**
@@ -278,6 +356,7 @@ struct ifs_test_caps {
* @generation: IFS test generation enumerated by hardware
* @chunk_size: size of a test chunk
* @array_gen: test generation of array test
+ * @max_bundle: maximum bundle index
*/
struct ifs_data {
int loaded_version;
@@ -290,6 +369,7 @@ struct ifs_data {
u32 generation;
u32 chunk_size;
u32 array_gen;
+ u32 max_bundle;
};
struct ifs_work {
@@ -299,6 +379,7 @@ struct ifs_work {
struct ifs_device {
const struct ifs_test_caps *test_caps;
+ const struct ifs_test_msrs *test_msrs;
struct ifs_data rw_data;
struct miscdevice misc;
};
@@ -319,6 +400,14 @@ static inline const struct ifs_test_caps *ifs_get_test_caps(struct device *dev)
return d->test_caps;
}
+static inline const struct ifs_test_msrs *ifs_get_test_msrs(struct device *dev)
+{
+ struct miscdevice *m = dev_get_drvdata(dev);
+ struct ifs_device *d = container_of(m, struct ifs_device, misc);
+
+ return d->test_msrs;
+}
+
extern bool *ifs_pkg_auth;
int ifs_load_firmware(struct device *dev);
int do_core_test(int cpu, struct device *dev);
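As a rough illustration of the new ifs_sbaf_status layout above, the standalone userspace sketch below (not part of the patch) re-declares the union and decodes a raw status word. The raw value is hypothetical, and the bitfield packing assumes the usual x86-64 GCC/Clang ABI.

#include <stdint.h>
#include <stdio.h>

union ifs_sbaf_status {
	uint64_t data;
	struct {
		uint32_t bundle_idx	:9;
		uint32_t rsvd1		:5;
		uint32_t pgm_idx	:2;
		uint32_t rsvd2		:16;
		uint32_t error_code	:8;
		uint32_t rsvd3		:21;
		uint32_t test_fail	:1;
		uint32_t sbaf_status	:2;
	};
};

int main(void)
{
	/* Hypothetical readout: bundle 7, error_code 0x9, no test failure */
	union ifs_sbaf_status st = { .data = 0x0000000900000007ULL };

	printf("bundle=%u pgm=%u err=0x%x fail=%u status=%u\n",
	       st.bundle_idx, st.pgm_idx, st.error_code,
	       st.test_fail, st.sbaf_status);
	return 0;
}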
diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c
index 2cf3b4a8813f..de54bd1a5970 100644
--- a/drivers/platform/x86/intel/ifs/load.c
+++ b/drivers/platform/x86/intel/ifs/load.c
@@ -118,15 +118,17 @@ static void copy_hashes_authenticate_chunks(struct work_struct *work)
union ifs_scan_hashes_status hashes_status;
union ifs_chunks_auth_status chunk_status;
struct device *dev = local_work->dev;
+ const struct ifs_test_msrs *msrs;
int i, num_chunks, chunk_size;
struct ifs_data *ifsd;
u64 linear_addr, base;
u32 err_code;
ifsd = ifs_get_data(dev);
+ msrs = ifs_get_test_msrs(dev);
/* run scan hash copy */
- wrmsrl(MSR_COPY_SCAN_HASHES, ifs_hash_ptr);
- rdmsrl(MSR_SCAN_HASHES_STATUS, hashes_status.data);
+ wrmsrl(msrs->copy_hashes, ifs_hash_ptr);
+ rdmsrl(msrs->copy_hashes_status, hashes_status.data);
/* enumerate the scan image information */
num_chunks = hashes_status.num_chunks;
@@ -147,8 +149,8 @@ static void copy_hashes_authenticate_chunks(struct work_struct *work)
linear_addr = base + i * chunk_size;
linear_addr |= i;
- wrmsrl(MSR_AUTHENTICATE_AND_COPY_CHUNK, linear_addr);
- rdmsrl(MSR_CHUNKS_AUTHENTICATION_STATUS, chunk_status.data);
+ wrmsrl(msrs->copy_chunks, linear_addr);
+ rdmsrl(msrs->copy_chunks_status, chunk_status.data);
ifsd->valid_chunks = chunk_status.valid_chunks;
err_code = chunk_status.error_code;
@@ -180,6 +182,7 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev)
union ifs_scan_hashes_status_gen2 hashes_status;
union ifs_chunks_auth_status_gen2 chunk_status;
u32 err_code, valid_chunks, total_chunks;
+ const struct ifs_test_msrs *msrs;
int i, num_chunks, chunk_size;
union meta_data *ifs_meta;
int starting_chunk_nr;
@@ -189,10 +192,11 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev)
int retry_count;
ifsd = ifs_get_data(dev);
+ msrs = ifs_get_test_msrs(dev);
if (need_copy_scan_hashes(ifsd)) {
- wrmsrl(MSR_COPY_SCAN_HASHES, ifs_hash_ptr);
- rdmsrl(MSR_SCAN_HASHES_STATUS, hashes_status.data);
+ wrmsrl(msrs->copy_hashes, ifs_hash_ptr);
+ rdmsrl(msrs->copy_hashes_status, hashes_status.data);
/* enumerate the scan image information */
chunk_size = hashes_status.chunk_size * SZ_1K;
@@ -212,8 +216,8 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev)
}
if (ifsd->generation >= IFS_GEN_STRIDE_AWARE) {
- wrmsrl(MSR_SAF_CTRL, INVALIDATE_STRIDE);
- rdmsrl(MSR_CHUNKS_AUTHENTICATION_STATUS, chunk_status.data);
+ wrmsrl(msrs->test_ctrl, INVALIDATE_STRIDE);
+ rdmsrl(msrs->copy_chunks_status, chunk_status.data);
if (chunk_status.valid_chunks != 0) {
dev_err(dev, "Couldn't invalidate installed stride - %d\n",
chunk_status.valid_chunks);
@@ -233,8 +237,10 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev)
chunk_table[0] = starting_chunk_nr + i;
chunk_table[1] = linear_addr;
do {
- wrmsrl(MSR_AUTHENTICATE_AND_COPY_CHUNK, (u64)chunk_table);
- rdmsrl(MSR_CHUNKS_AUTHENTICATION_STATUS, chunk_status.data);
+ local_irq_disable();
+ wrmsrl(msrs->copy_chunks, (u64)chunk_table);
+ local_irq_enable();
+ rdmsrl(msrs->copy_chunks_status, chunk_status.data);
err_code = chunk_status.error_code;
} while (err_code == AUTH_INTERRUPTED_ERROR && --retry_count);
@@ -255,20 +261,22 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev)
return -EIO;
}
ifsd->valid_chunks = valid_chunks;
+ ifsd->max_bundle = chunk_status.max_bundle;
return 0;
}
static int validate_ifs_metadata(struct device *dev)
{
+ const struct ifs_test_caps *test = ifs_get_test_caps(dev);
struct ifs_data *ifsd = ifs_get_data(dev);
union meta_data *ifs_meta;
char test_file[64];
int ret = -EINVAL;
- snprintf(test_file, sizeof(test_file), "%02x-%02x-%02x-%02x.scan",
+ snprintf(test_file, sizeof(test_file), "%02x-%02x-%02x-%02x.%s",
boot_cpu_data.x86, boot_cpu_data.x86_model,
- boot_cpu_data.x86_stepping, ifsd->cur_batch);
+ boot_cpu_data.x86_stepping, ifsd->cur_batch, test->image_suffix);
ifs_meta = (union meta_data *)find_meta_data(ifs_header_ptr, META_TYPE_IFS);
if (!ifs_meta) {
@@ -298,6 +306,12 @@ static int validate_ifs_metadata(struct device *dev)
return ret;
}
+ if (ifs_meta->test_type != test->test_num) {
+ dev_warn(dev, "Metadata test_type %d mismatches with device type\n",
+ ifs_meta->test_type);
+ return ret;
+ }
+
return 0;
}
@@ -383,11 +397,11 @@ int ifs_load_firmware(struct device *dev)
unsigned int expected_size;
const struct firmware *fw;
char scan_path[64];
- int ret = -EINVAL;
+ int ret;
- snprintf(scan_path, sizeof(scan_path), "intel/ifs_%d/%02x-%02x-%02x-%02x.scan",
+ snprintf(scan_path, sizeof(scan_path), "intel/ifs_%d/%02x-%02x-%02x-%02x.%s",
test->test_num, boot_cpu_data.x86, boot_cpu_data.x86_model,
- boot_cpu_data.x86_stepping, ifsd->cur_batch);
+ boot_cpu_data.x86_stepping, ifsd->cur_batch, test->image_suffix);
ret = request_firmware_direct(&fw, scan_path, dev);
if (ret) {
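To make the new per-test firmware naming concrete, here is a standalone sketch (not part of the patch) of the path that the snprintf() in ifs_load_firmware() produces once the image_suffix is factored in. The family/model/stepping, batch number and device index used here are hypothetical example values.

#include <stdio.h>

int main(void)
{
	char scan_path[64];
	int test_num = 2;	/* e.g. the SBAF misc device, hypothetical */
	unsigned int family = 0x6, model = 0xaf, stepping = 0x3, batch = 0x1;

	snprintf(scan_path, sizeof(scan_path), "intel/ifs_%d/%02x-%02x-%02x-%02x.%s",
		 test_num, family, model, stepping, batch, "sbft");

	/* prints: intel/ifs_2/06-af-03-01.sbft */
	printf("%s\n", scan_path);
	return 0;
}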
diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c
index 13ecd55c6668..f978dd05d4d8 100644
--- a/drivers/platform/x86/intel/ifs/runtest.c
+++ b/drivers/platform/x86/intel/ifs/runtest.c
@@ -23,6 +23,19 @@
/* Max retries on the same chunk */
#define MAX_IFS_RETRIES 5
+struct run_params {
+ struct ifs_data *ifsd;
+ union ifs_scan *activate;
+ union ifs_status status;
+};
+
+struct sbaf_run_params {
+ struct ifs_data *ifsd;
+ int *retry_cnt;
+ union ifs_sbaf *activate;
+ union ifs_sbaf_status status;
+};
+
/*
* Number of TSC cycles that a logical CPU will wait for the other
* logical CPU on the core in the WRMSR(ACTIVATE_SCAN).
@@ -63,6 +76,19 @@ static const char * const scan_test_status[] = {
static void message_not_tested(struct device *dev, int cpu, union ifs_status status)
{
+ struct ifs_data *ifsd = ifs_get_data(dev);
+
+ /*
+ * control_error is set when the microcode runs into a problem
+ * loading the image from the reserved BIOS memory, or it has
+ * been corrupted. Reloading the image may fix this issue.
+ */
+ if (status.control_error) {
+ dev_warn(dev, "CPU(s) %*pbl: Scan controller error. Batch: %02x version: 0x%x\n",
+ cpumask_pr_args(cpu_smt_mask(cpu)), ifsd->cur_batch, ifsd->loaded_version);
+ return;
+ }
+
if (status.error_code < ARRAY_SIZE(scan_test_status)) {
dev_info(dev, "CPU(s) %*pbl: SCAN operation did not start. %s\n",
cpumask_pr_args(cpu_smt_mask(cpu)),
@@ -85,16 +111,6 @@ static void message_fail(struct device *dev, int cpu, union ifs_status status)
struct ifs_data *ifsd = ifs_get_data(dev);
/*
- * control_error is set when the microcode runs into a problem
- * loading the image from the reserved BIOS memory, or it has
- * been corrupted. Reloading the image may fix this issue.
- */
- if (status.control_error) {
- dev_err(dev, "CPU(s) %*pbl: could not execute from loaded scan image. Batch: %02x version: 0x%x\n",
- cpumask_pr_args(cpu_smt_mask(cpu)), ifsd->cur_batch, ifsd->loaded_version);
- }
-
- /*
* signature_error is set when the output from the scan chains does not
* match the expected signature. This might be a transient problem (e.g.
* due to a bit flip from an alpha particle or neutron). If the problem
@@ -134,19 +150,57 @@ static bool can_restart(union ifs_status status)
return false;
}
+#define SPINUNIT 100 /* 100 nsec */
+static atomic_t array_cpus_in;
+static atomic_t scan_cpus_in;
+static atomic_t sbaf_cpus_in;
+
+/*
+ * Simplified cpu sibling rendezvous loop based on microcode loader __wait_for_cpus()
+ */
+static void wait_for_sibling_cpu(atomic_t *t, long long timeout)
+{
+ int cpu = smp_processor_id();
+ const struct cpumask *smt_mask = cpu_smt_mask(cpu);
+ int all_cpus = cpumask_weight(smt_mask);
+
+ atomic_inc(t);
+ while (atomic_read(t) < all_cpus) {
+ if (timeout < SPINUNIT)
+ return;
+ ndelay(SPINUNIT);
+ timeout -= SPINUNIT;
+ touch_nmi_watchdog();
+ }
+}
+
/*
* Execute the scan. Called "simultaneously" on all threads of a core
* at high priority using the stop_cpus mechanism.
*/
static int doscan(void *data)
{
- int cpu = smp_processor_id();
- u64 *msrs = data;
+ int cpu = smp_processor_id(), start, stop;
+ struct run_params *params = data;
+ union ifs_status status;
+ struct ifs_data *ifsd;
int first;
+ ifsd = params->ifsd;
+
+ if (ifsd->generation) {
+ start = params->activate->gen2.start;
+ stop = params->activate->gen2.stop;
+ } else {
+ start = params->activate->gen0.start;
+ stop = params->activate->gen0.stop;
+ }
+
/* Only the first logical CPU on a core reports result */
first = cpumask_first(cpu_smt_mask(cpu));
+ wait_for_sibling_cpu(&scan_cpus_in, NSEC_PER_SEC);
+
/*
* This WRMSR will wait for other HT threads to also write
* to this MSR (at most for activate.delay cycles). Then it
@@ -155,12 +209,14 @@ static int doscan(void *data)
* take up to 200 milliseconds (in the case where all chunks
* are processed in a single pass) before it retires.
*/
- wrmsrl(MSR_ACTIVATE_SCAN, msrs[0]);
+ wrmsrl(MSR_ACTIVATE_SCAN, params->activate->data);
+ rdmsrl(MSR_SCAN_STATUS, status.data);
- if (cpu == first) {
- /* Pass back the result of the scan */
- rdmsrl(MSR_SCAN_STATUS, msrs[1]);
- }
+ trace_ifs_status(ifsd->cur_batch, start, stop, status.data);
+
+ /* Pass back the result of the scan */
+ if (cpu == first)
+ params->status = status;
return 0;
}
@@ -173,13 +229,13 @@ static int doscan(void *data)
*/
static void ifs_test_core(int cpu, struct device *dev)
{
+ union ifs_status status = {};
union ifs_scan activate;
- union ifs_status status;
unsigned long timeout;
struct ifs_data *ifsd;
int to_start, to_stop;
int status_chunk;
- u64 msrvals[2];
+ struct run_params params;
int retries;
ifsd = ifs_get_data(dev);
@@ -190,6 +246,8 @@ static void ifs_test_core(int cpu, struct device *dev)
to_start = 0;
to_stop = ifsd->valid_chunks - 1;
+ params.ifsd = ifs_get_data(dev);
+
if (ifsd->generation) {
activate.gen2.start = to_start;
activate.gen2.stop = to_stop;
@@ -207,12 +265,11 @@ static void ifs_test_core(int cpu, struct device *dev)
break;
}
- msrvals[0] = activate.data;
- stop_core_cpuslocked(cpu, doscan, msrvals);
-
- status.data = msrvals[1];
+ params.activate = &activate;
+ atomic_set(&scan_cpus_in, 0);
+ stop_core_cpuslocked(cpu, doscan, &params);
- trace_ifs_status(cpu, to_start, to_stop, status.data);
+ status = params.status;
/* Some cases can be retried, give up for others */
if (!can_restart(status))
@@ -239,10 +296,10 @@ static void ifs_test_core(int cpu, struct device *dev)
/* Update status for this core */
ifsd->scan_details = status.data;
- if (status.control_error || status.signature_error) {
+ if (status.signature_error) {
ifsd->status = SCAN_TEST_FAIL;
message_fail(dev, cpu, status);
- } else if (status.error_code) {
+ } else if (status.control_error || status.error_code) {
ifsd->status = SCAN_NOT_TESTED;
message_not_tested(dev, cpu, status);
} else {
@@ -250,34 +307,14 @@ static void ifs_test_core(int cpu, struct device *dev)
}
}
-#define SPINUNIT 100 /* 100 nsec */
-static atomic_t array_cpus_out;
-
-/*
- * Simplified cpu sibling rendezvous loop based on microcode loader __wait_for_cpus()
- */
-static void wait_for_sibling_cpu(atomic_t *t, long long timeout)
-{
- int cpu = smp_processor_id();
- const struct cpumask *smt_mask = cpu_smt_mask(cpu);
- int all_cpus = cpumask_weight(smt_mask);
-
- atomic_inc(t);
- while (atomic_read(t) < all_cpus) {
- if (timeout < SPINUNIT)
- return;
- ndelay(SPINUNIT);
- timeout -= SPINUNIT;
- touch_nmi_watchdog();
- }
-}
-
static int do_array_test(void *data)
{
union ifs_array *command = data;
int cpu = smp_processor_id();
int first;
+ wait_for_sibling_cpu(&array_cpus_in, NSEC_PER_SEC);
+
/*
* Only one logical CPU on a core needs to trigger the Array test via MSR write.
*/
@@ -289,9 +326,6 @@ static int do_array_test(void *data)
rdmsrl(MSR_ARRAY_BIST, command->data);
}
- /* Tests complete faster if the sibling is spinning here */
- wait_for_sibling_cpu(&array_cpus_out, NSEC_PER_SEC);
-
return 0;
}
@@ -312,7 +346,7 @@ static void ifs_array_test_core(int cpu, struct device *dev)
timed_out = true;
break;
}
- atomic_set(&array_cpus_out, 0);
+ atomic_set(&array_cpus_in, 0);
stop_core_cpuslocked(cpu, do_array_test, &command);
if (command.ctrl_result)
@@ -361,6 +395,225 @@ static void ifs_array_test_gen1(int cpu, struct device *dev)
ifsd->status = SCAN_TEST_PASS;
}
+#define SBAF_STATUS_PASS 0
+#define SBAF_STATUS_SIGN_FAIL 1
+#define SBAF_STATUS_INTR 2
+#define SBAF_STATUS_TEST_FAIL 3
+
+enum sbaf_status_err_code {
+ IFS_SBAF_NO_ERROR = 0,
+ IFS_SBAF_OTHER_THREAD_COULD_NOT_JOIN = 1,
+ IFS_SBAF_INTERRUPTED_BEFORE_RENDEZVOUS = 2,
+ IFS_SBAF_UNASSIGNED_ERROR_CODE3 = 3,
+ IFS_SBAF_INVALID_BUNDLE_INDEX = 4,
+ IFS_SBAF_MISMATCH_ARGS_BETWEEN_THREADS = 5,
+ IFS_SBAF_CORE_NOT_CAPABLE_CURRENTLY = 6,
+ IFS_SBAF_UNASSIGNED_ERROR_CODE7 = 7,
+ IFS_SBAF_EXCEED_NUMBER_OF_THREADS_CONCURRENT = 8,
+ IFS_SBAF_INTERRUPTED_DURING_EXECUTION = 9,
+ IFS_SBAF_INVALID_PROGRAM_INDEX = 0xA,
+ IFS_SBAF_CORRUPTED_CHUNK = 0xB,
+ IFS_SBAF_DID_NOT_START = 0xC,
+};
+
+static const char * const sbaf_test_status[] = {
+ [IFS_SBAF_NO_ERROR] = "SBAF no error",
+ [IFS_SBAF_OTHER_THREAD_COULD_NOT_JOIN] = "Other thread could not join.",
+ [IFS_SBAF_INTERRUPTED_BEFORE_RENDEZVOUS] = "Interrupt occurred prior to SBAF coordination.",
+ [IFS_SBAF_UNASSIGNED_ERROR_CODE3] = "Unassigned error code 0x3",
+ [IFS_SBAF_INVALID_BUNDLE_INDEX] = "Non-valid sbaf bundles. Reload test image",
+ [IFS_SBAF_MISMATCH_ARGS_BETWEEN_THREADS] = "Mismatch in arguments between threads T0/T1.",
+ [IFS_SBAF_CORE_NOT_CAPABLE_CURRENTLY] = "Core not capable of performing SBAF currently",
+ [IFS_SBAF_UNASSIGNED_ERROR_CODE7] = "Unassigned error code 0x7",
+ [IFS_SBAF_EXCEED_NUMBER_OF_THREADS_CONCURRENT] = "Exceeded number of Logical Processors (LP) allowed to run Scan-At-Field concurrently",
+ [IFS_SBAF_INTERRUPTED_DURING_EXECUTION] = "Interrupt occurred prior to SBAF start",
+ [IFS_SBAF_INVALID_PROGRAM_INDEX] = "SBAF program index not valid",
+ [IFS_SBAF_CORRUPTED_CHUNK] = "SBAF operation aborted due to corrupted chunk",
+ [IFS_SBAF_DID_NOT_START] = "SBAF operation did not start",
+};
+
+static void sbaf_message_not_tested(struct device *dev, int cpu, u64 status_data)
+{
+ union ifs_sbaf_status status = (union ifs_sbaf_status)status_data;
+
+ if (status.error_code < ARRAY_SIZE(sbaf_test_status)) {
+ dev_info(dev, "CPU(s) %*pbl: SBAF operation did not start. %s\n",
+ cpumask_pr_args(cpu_smt_mask(cpu)),
+ sbaf_test_status[status.error_code]);
+ } else if (status.error_code == IFS_SW_TIMEOUT) {
+ dev_info(dev, "CPU(s) %*pbl: software timeout during scan\n",
+ cpumask_pr_args(cpu_smt_mask(cpu)));
+ } else if (status.error_code == IFS_SW_PARTIAL_COMPLETION) {
+ dev_info(dev, "CPU(s) %*pbl: %s\n",
+ cpumask_pr_args(cpu_smt_mask(cpu)),
+ "Not all SBAF bundles executed. Maximum forward progress retries exceeded");
+ } else {
+ dev_info(dev, "CPU(s) %*pbl: SBAF unknown status %llx\n",
+ cpumask_pr_args(cpu_smt_mask(cpu)), status.data);
+ }
+}
+
+static void sbaf_message_fail(struct device *dev, int cpu, union ifs_sbaf_status status)
+{
+ /* Failed signature check is set when SBAF signature did not match the expected value */
+ if (status.sbaf_status == SBAF_STATUS_SIGN_FAIL) {
+ dev_err(dev, "CPU(s) %*pbl: Failed signature check\n",
+ cpumask_pr_args(cpu_smt_mask(cpu)));
+ }
+
+ /* Failed to reach end of test */
+ if (status.sbaf_status == SBAF_STATUS_TEST_FAIL) {
+ dev_err(dev, "CPU(s) %*pbl: Failed to complete test\n",
+ cpumask_pr_args(cpu_smt_mask(cpu)));
+ }
+}
+
+static bool sbaf_bundle_completed(union ifs_sbaf_status status)
+{
+ return !(status.sbaf_status || status.error_code);
+}
+
+static bool sbaf_can_restart(union ifs_sbaf_status status)
+{
+ enum sbaf_status_err_code err_code = status.error_code;
+
+ /* Signature for chunk is bad, or scan test failed */
+ if (status.sbaf_status == SBAF_STATUS_SIGN_FAIL ||
+ status.sbaf_status == SBAF_STATUS_TEST_FAIL)
+ return false;
+
+ switch (err_code) {
+ case IFS_SBAF_NO_ERROR:
+ case IFS_SBAF_OTHER_THREAD_COULD_NOT_JOIN:
+ case IFS_SBAF_INTERRUPTED_BEFORE_RENDEZVOUS:
+ case IFS_SBAF_EXCEED_NUMBER_OF_THREADS_CONCURRENT:
+ case IFS_SBAF_INTERRUPTED_DURING_EXECUTION:
+ return true;
+ case IFS_SBAF_UNASSIGNED_ERROR_CODE3:
+ case IFS_SBAF_INVALID_BUNDLE_INDEX:
+ case IFS_SBAF_MISMATCH_ARGS_BETWEEN_THREADS:
+ case IFS_SBAF_CORE_NOT_CAPABLE_CURRENTLY:
+ case IFS_SBAF_UNASSIGNED_ERROR_CODE7:
+ case IFS_SBAF_INVALID_PROGRAM_INDEX:
+ case IFS_SBAF_CORRUPTED_CHUNK:
+ case IFS_SBAF_DID_NOT_START:
+ break;
+ }
+ return false;
+}
+
+/*
+ * Execute the SBAF test. Called "simultaneously" on all threads of a core
+ * at high priority using the stop_cpus mechanism.
+ */
+static int dosbaf(void *data)
+{
+ struct sbaf_run_params *run_params = data;
+ int cpu = smp_processor_id();
+ union ifs_sbaf_status status;
+ struct ifs_data *ifsd;
+ int first;
+
+ ifsd = run_params->ifsd;
+
+ /* Only the first logical CPU on a core reports result */
+ first = cpumask_first(cpu_smt_mask(cpu));
+ wait_for_sibling_cpu(&sbaf_cpus_in, NSEC_PER_SEC);
+
+ /*
+ * This WRMSR will wait for other HT threads to also write
+ * to this MSR (at most for activate.delay cycles). Then it
+ * starts scan of each requested bundle. The core test happens
+ * during the "execution" of the WRMSR.
+ */
+ wrmsrl(MSR_ACTIVATE_SBAF, run_params->activate->data);
+ rdmsrl(MSR_SBAF_STATUS, status.data);
+ trace_ifs_sbaf(ifsd->cur_batch, *run_params->activate, status);
+
+ /* Pass back the result of the test */
+ if (cpu == first)
+ run_params->status = status;
+
+ return 0;
+}
+
+static void ifs_sbaf_test_core(int cpu, struct device *dev)
+{
+ struct sbaf_run_params run_params;
+ union ifs_sbaf_status status = {};
+ union ifs_sbaf activate;
+ unsigned long timeout;
+ struct ifs_data *ifsd;
+ int stop_bundle;
+ int retries;
+
+ ifsd = ifs_get_data(dev);
+
+ activate.data = 0;
+ activate.delay = IFS_THREAD_WAIT;
+
+ timeout = jiffies + 2 * HZ;
+ retries = MAX_IFS_RETRIES;
+ activate.bundle_idx = 0;
+ stop_bundle = ifsd->max_bundle;
+
+ while (activate.bundle_idx <= stop_bundle) {
+ if (time_after(jiffies, timeout)) {
+ status.error_code = IFS_SW_TIMEOUT;
+ break;
+ }
+
+ atomic_set(&sbaf_cpus_in, 0);
+
+ run_params.ifsd = ifsd;
+ run_params.activate = &activate;
+ run_params.retry_cnt = &retries;
+ stop_core_cpuslocked(cpu, dosbaf, &run_params);
+
+ status = run_params.status;
+
+ if (sbaf_bundle_completed(status)) {
+ activate.bundle_idx = status.bundle_idx + 1;
+ activate.pgm_idx = 0;
+ retries = MAX_IFS_RETRIES;
+ continue;
+ }
+
+ /* Some cases can be retried, give up for others */
+ if (!sbaf_can_restart(status))
+ break;
+
+ if (status.pgm_idx == activate.pgm_idx) {
+ /* If no progress retry */
+ if (--retries == 0) {
+ if (status.error_code == IFS_NO_ERROR)
+ status.error_code = IFS_SW_PARTIAL_COMPLETION;
+ break;
+ }
+ } else {
+ /* if some progress, more pgms remaining in bundle, reset retries */
+ retries = MAX_IFS_RETRIES;
+ activate.bundle_idx = status.bundle_idx;
+ activate.pgm_idx = status.pgm_idx;
+ }
+ }
+
+ /* Update status for this core */
+ ifsd->scan_details = status.data;
+
+ if (status.sbaf_status == SBAF_STATUS_SIGN_FAIL ||
+ status.sbaf_status == SBAF_STATUS_TEST_FAIL) {
+ ifsd->status = SCAN_TEST_FAIL;
+ sbaf_message_fail(dev, cpu, status);
+ } else if (status.error_code || status.sbaf_status == SBAF_STATUS_INTR ||
+ (activate.bundle_idx < stop_bundle)) {
+ ifsd->status = SCAN_NOT_TESTED;
+ sbaf_message_not_tested(dev, cpu, status.data);
+ } else {
+ ifsd->status = SCAN_TEST_PASS;
+ }
+}
+
/*
* Initiate per core test. It wakes up work queue threads on the target cpu and
* its sibling cpu. Once all sibling threads wake up, the scan test gets executed and
@@ -394,6 +647,12 @@ int do_core_test(int cpu, struct device *dev)
else
ifs_array_test_gen1(cpu, dev);
break;
+ case IFS_TYPE_SBAF:
+ if (!ifsd->loaded)
+ ret = -EPERM;
+ else
+ ifs_sbaf_test_core(cpu, dev);
+ break;
default:
ret = -EINVAL;
}
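The rendezvous pattern that the reworked wait_for_sibling_cpu() applies before the scan, array and SBAF MSR writes can be illustrated with the standalone userspace analogue below (not kernel code): each thread bumps a shared atomic counter and spins until all expected participants have checked in or a time budget runs out. Thread count and timings are arbitrary demo values.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

#define NTHREADS 2
#define SPIN_NS  100			/* poll interval, loosely mirrors SPINUNIT */

static atomic_int cpus_in;

static void rendezvous(long long timeout_ns)
{
	atomic_fetch_add(&cpus_in, 1);
	while (atomic_load(&cpus_in) < NTHREADS) {
		if (timeout_ns < SPIN_NS)
			return;			/* give up waiting, proceed anyway */
		nanosleep(&(struct timespec){ .tv_nsec = SPIN_NS }, NULL);
		timeout_ns -= SPIN_NS;
	}
}

static void *worker(void *arg)
{
	(void)arg;
	rendezvous(1000000000LL);		/* ~1 s budget, like NSEC_PER_SEC */
	printf("passed the rendezvous\n");
	return NULL;
}

int main(void)
{
	pthread_t t[NTHREADS];

	for (int i = 0; i < NTHREADS; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (int i = 0; i < NTHREADS; i++)
		pthread_join(t[i], NULL);
	return 0;
}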
diff --git a/drivers/platform/x86/intel/int0002_vgpio.c b/drivers/platform/x86/intel/int0002_vgpio.c
index 527d8fbc7cc1..3b48cd7a4075 100644
--- a/drivers/platform/x86/intel/int0002_vgpio.c
+++ b/drivers/platform/x86/intel/int0002_vgpio.c
@@ -83,8 +83,12 @@ static void int0002_irq_ack(struct irq_data *data)
static void int0002_irq_unmask(struct irq_data *data)
{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ irq_hw_number_t hwirq = irqd_to_hwirq(data);
u32 gpe_en_reg;
+ gpiochip_enable_irq(gc, hwirq);
+
gpe_en_reg = inl(GPE0A_EN_PORT);
gpe_en_reg |= GPE0A_PME_B0_EN_BIT;
outl(gpe_en_reg, GPE0A_EN_PORT);
@@ -92,11 +96,15 @@ static void int0002_irq_unmask(struct irq_data *data)
static void int0002_irq_mask(struct irq_data *data)
{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ irq_hw_number_t hwirq = irqd_to_hwirq(data);
u32 gpe_en_reg;
gpe_en_reg = inl(GPE0A_EN_PORT);
gpe_en_reg &= ~GPE0A_PME_B0_EN_BIT;
outl(gpe_en_reg, GPE0A_EN_PORT);
+
+ gpiochip_disable_irq(gc, hwirq);
}
static int int0002_irq_set_wake(struct irq_data *data, unsigned int on)
@@ -140,12 +148,14 @@ static bool int0002_check_wake(void *data)
return (gpe_sts_reg & GPE0A_PME_B0_STS_BIT);
}
-static struct irq_chip int0002_irqchip = {
+static const struct irq_chip int0002_irqchip = {
.name = DRV_NAME,
.irq_ack = int0002_irq_ack,
.irq_mask = int0002_irq_mask,
.irq_unmask = int0002_irq_unmask,
.irq_set_wake = int0002_irq_set_wake,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static void int0002_init_irq_valid_mask(struct gpio_chip *chip,
@@ -203,7 +213,7 @@ static int int0002_probe(struct platform_device *pdev)
}
girq = &chip->irq;
- girq->chip = &int0002_irqchip;
+ gpio_irq_chip_set_chip(girq, &int0002_irqchip);
/* This let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
@@ -266,13 +276,13 @@ static const struct acpi_device_id int0002_acpi_ids[] = {
MODULE_DEVICE_TABLE(acpi, int0002_acpi_ids);
static struct platform_driver int0002_driver = {
- .driver = {
+ .driver = {
.name = DRV_NAME,
.acpi_match_table = int0002_acpi_ids,
.pm = &int0002_pm_ops,
},
.probe = int0002_probe,
- .remove_new = int0002_remove,
+ .remove = int0002_remove,
};
module_platform_driver(int0002_driver);
diff --git a/drivers/platform/x86/intel/int1092/intel_sar.c b/drivers/platform/x86/intel/int1092/intel_sar.c
index 6246c066ade2..e526841aff60 100644
--- a/drivers/platform/x86/intel/int1092/intel_sar.c
+++ b/drivers/platform/x86/intel/int1092/intel_sar.c
@@ -308,7 +308,7 @@ static void sar_remove(struct platform_device *device)
static struct platform_driver sar_driver = {
.probe = sar_probe,
- .remove_new = sar_remove,
+ .remove = sar_remove,
.driver = {
.name = DRVNAME,
.acpi_match_table = ACPI_PTR(sar_device_ids)
diff --git a/drivers/platform/x86/intel/int3472/Makefile b/drivers/platform/x86/intel/int3472/Makefile
index 9f16cb514397..a8aba07bf1dc 100644
--- a/drivers/platform/x86/intel/int3472/Makefile
+++ b/drivers/platform/x86/intel/int3472/Makefile
@@ -1,4 +1,7 @@
obj-$(CONFIG_INTEL_SKL_INT3472) += intel_skl_int3472_discrete.o \
- intel_skl_int3472_tps68470.o
-intel_skl_int3472_discrete-y := discrete.o clk_and_regulator.o led.o common.o
-intel_skl_int3472_tps68470-y := tps68470.o tps68470_board_data.o common.o
+ intel_skl_int3472_tps68470.o \
+ intel_skl_int3472_common.o
+intel_skl_int3472_discrete-y := discrete.o clk_and_regulator.o led.o
+intel_skl_int3472_tps68470-y := tps68470.o tps68470_board_data.o
+
+intel_skl_int3472_common-y += common.o
diff --git a/drivers/platform/x86/intel/int3472/common.c b/drivers/platform/x86/intel/int3472/common.c
index 9db2bb0bbba4..1638be8fa71e 100644
--- a/drivers/platform/x86/intel/int3472/common.c
+++ b/drivers/platform/x86/intel/int3472/common.c
@@ -29,6 +29,7 @@ union acpi_object *skl_int3472_get_acpi_buffer(struct acpi_device *adev, char *i
return obj;
}
+EXPORT_SYMBOL_GPL(skl_int3472_get_acpi_buffer);
int skl_int3472_fill_cldb(struct acpi_device *adev, struct int3472_cldb *cldb)
{
@@ -52,6 +53,7 @@ out_free_obj:
kfree(obj);
return ret;
}
+EXPORT_SYMBOL_GPL(skl_int3472_fill_cldb);
/* sensor_adev_ret may be NULL, name_ret must not be NULL */
int skl_int3472_get_sensor_adev_and_name(struct device *dev,
@@ -68,6 +70,8 @@ int skl_int3472_get_sensor_adev_and_name(struct device *dev,
return -ENODEV;
}
+ dev_dbg(dev, "Sensor name %s\n", acpi_dev_name(sensor));
+
*name_ret = devm_kasprintf(dev, GFP_KERNEL, I2C_DEV_NAME_FORMAT,
acpi_dev_name(sensor));
if (!*name_ret)
@@ -80,3 +84,8 @@ int skl_int3472_get_sensor_adev_and_name(struct device *dev,
return ret;
}
+EXPORT_SYMBOL_GPL(skl_int3472_get_sensor_adev_and_name);
+
+MODULE_DESCRIPTION("Intel SkyLake INT3472 ACPI Device Driver library");
+MODULE_AUTHOR("Daniel Scally <djrscally@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c
index 07b302e09340..092252eb95a8 100644
--- a/drivers/platform/x86/intel/int3472/discrete.c
+++ b/drivers/platform/x86/intel/int3472/discrete.c
@@ -2,6 +2,7 @@
/* Author: Dan Scally <djrscally@gmail.com> */
#include <linux/acpi.h>
+#include <linux/array_size.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
@@ -11,6 +12,7 @@
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
+#include <linux/string_choices.h>
#include <linux/uuid.h>
#include "common.h"
@@ -54,7 +56,7 @@ static void skl_int3472_log_sensor_module_name(struct int3472_discrete_device *i
static int skl_int3472_fill_gpiod_lookup(struct gpiod_lookup *table_entry,
struct acpi_resource_gpio *agpio,
- const char *func, u32 polarity)
+ const char *func, unsigned long gpio_flags)
{
char *path = agpio->resource_source.string_ptr;
struct acpi_device *adev;
@@ -69,18 +71,14 @@ static int skl_int3472_fill_gpiod_lookup(struct gpiod_lookup *table_entry,
if (!adev)
return -ENODEV;
- table_entry->key = acpi_dev_name(adev);
- table_entry->chip_hwnum = agpio->pin_table[0];
- table_entry->con_id = func;
- table_entry->idx = 0;
- table_entry->flags = polarity;
+ *table_entry = GPIO_LOOKUP(acpi_dev_name(adev), agpio->pin_table[0], func, gpio_flags);
return 0;
}
static int skl_int3472_map_gpio_to_sensor(struct int3472_discrete_device *int3472,
struct acpi_resource_gpio *agpio,
- const char *func, u32 polarity)
+ const char *func, unsigned long gpio_flags)
{
int ret;
@@ -90,7 +88,7 @@ static int skl_int3472_map_gpio_to_sensor(struct int3472_discrete_device *int347
}
ret = skl_int3472_fill_gpiod_lookup(&int3472->gpios.table[int3472->n_sensor_gpios],
- agpio, func, polarity);
+ agpio, func, gpio_flags);
if (ret)
return ret;
@@ -103,7 +101,7 @@ static int skl_int3472_map_gpio_to_sensor(struct int3472_discrete_device *int347
static struct gpio_desc *
skl_int3472_gpiod_get_from_temp_lookup(struct int3472_discrete_device *int3472,
struct acpi_resource_gpio *agpio,
- const char *func, u32 polarity)
+ const char *func, unsigned long gpio_flags)
{
struct gpio_desc *desc;
int ret;
@@ -114,7 +112,7 @@ skl_int3472_gpiod_get_from_temp_lookup(struct int3472_discrete_device *int3472,
return ERR_PTR(-ENOMEM);
lookup->dev_id = dev_name(int3472->dev);
- ret = skl_int3472_fill_gpiod_lookup(&lookup->table[0], agpio, func, polarity);
+ ret = skl_int3472_fill_gpiod_lookup(&lookup->table[0], agpio, func, gpio_flags);
if (ret)
return ERR_PTR(ret);
@@ -125,32 +123,76 @@ skl_int3472_gpiod_get_from_temp_lookup(struct int3472_discrete_device *int3472,
return desc;
}
-static void int3472_get_func_and_polarity(u8 type, const char **func, u32 *polarity)
+/**
+ * struct int3472_gpio_map - Map GPIOs to whatever is expected by the
+ * sensor driver (as in DT bindings)
+ * @hid: The ACPI HID of the device without the instance number e.g. INT347E
+ * @type_from: The GPIO type from ACPI ?SDT
+ * @type_to: The assigned GPIO type, typically same as @type_from
+ * @func: The function, e.g. "enable"
+ * @polarity_low: GPIO_ACTIVE_LOW true if the @polarity_low is true,
+ * GPIO_ACTIVE_HIGH otherwise
+ */
+struct int3472_gpio_map {
+ const char *hid;
+ u8 type_from;
+ u8 type_to;
+ bool polarity_low;
+ const char *func;
+};
+
+static const struct int3472_gpio_map int3472_gpio_map[] = {
+ { "INT347E", INT3472_GPIO_TYPE_RESET, INT3472_GPIO_TYPE_RESET, false, "enable" },
+};
+
+static void int3472_get_func_and_polarity(struct acpi_device *adev, u8 *type,
+ const char **func, unsigned long *gpio_flags)
{
- switch (type) {
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(int3472_gpio_map); i++) {
+ /*
+ * Map the firmware-provided GPIO to whatever a driver expects
+ * (as in DT bindings). First check if the type matches with the
+ * GPIO map, then further check that the device _HID matches.
+ */
+ if (*type != int3472_gpio_map[i].type_from)
+ continue;
+
+ if (!acpi_dev_hid_uid_match(adev, int3472_gpio_map[i].hid, NULL))
+ continue;
+
+ *type = int3472_gpio_map[i].type_to;
+ *gpio_flags = int3472_gpio_map[i].polarity_low ?
+ GPIO_ACTIVE_LOW : GPIO_ACTIVE_HIGH;
+ *func = int3472_gpio_map[i].func;
+ return;
+ }
+
+ switch (*type) {
case INT3472_GPIO_TYPE_RESET:
*func = "reset";
- *polarity = GPIO_ACTIVE_LOW;
+ *gpio_flags = GPIO_ACTIVE_LOW;
break;
case INT3472_GPIO_TYPE_POWERDOWN:
*func = "powerdown";
- *polarity = GPIO_ACTIVE_LOW;
+ *gpio_flags = GPIO_ACTIVE_LOW;
break;
case INT3472_GPIO_TYPE_CLK_ENABLE:
*func = "clk-enable";
- *polarity = GPIO_ACTIVE_HIGH;
+ *gpio_flags = GPIO_ACTIVE_HIGH;
break;
case INT3472_GPIO_TYPE_PRIVACY_LED:
*func = "privacy-led";
- *polarity = GPIO_ACTIVE_HIGH;
+ *gpio_flags = GPIO_ACTIVE_HIGH;
break;
case INT3472_GPIO_TYPE_POWER_ENABLE:
*func = "power-enable";
- *polarity = GPIO_ACTIVE_HIGH;
+ *gpio_flags = GPIO_ACTIVE_HIGH;
break;
default:
*func = "unknown";
- *polarity = GPIO_ACTIVE_HIGH;
+ *gpio_flags = GPIO_ACTIVE_HIGH;
break;
}
}
@@ -181,11 +223,11 @@ static void int3472_get_func_and_polarity(u8 type, const char **func, u32 *polar
* to create clocks and regulators via the usual frameworks.
*
* Return:
- * * 1 - To continue the loop
- * * 0 - When all resources found are handled properly.
- * * -EINVAL - If the resource is not a GPIO IO resource
- * * -ENODEV - If the resource has no corresponding _DSM entry
- * * -Other - Errors propagated from one of the sub-functions.
+ * * 1 - Continue the loop without adding a copy of the resource to
+ *     the list passed to acpi_dev_get_resources()
+ * * 0 - Continue the loop after adding a copy of the resource to
+ *     the list passed to acpi_dev_get_resources()
+ * * -errno - Error, break loop
*/
static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
void *data)
@@ -197,7 +239,7 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
struct gpio_desc *gpio;
const char *err_msg;
const char *func;
- u32 polarity;
+ unsigned long gpio_flags;
int ret;
if (!acpi_gpio_get_io_resource(ares, &agpio))
@@ -220,26 +262,26 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
type = FIELD_GET(INT3472_GPIO_DSM_TYPE, obj->integer.value);
- int3472_get_func_and_polarity(type, &func, &polarity);
+ int3472_get_func_and_polarity(int3472->sensor, &type, &func, &gpio_flags);
pin = FIELD_GET(INT3472_GPIO_DSM_PIN, obj->integer.value);
- if (pin != agpio->pin_table[0])
- dev_warn(int3472->dev, "%s %s pin number mismatch _DSM %d resource %d\n",
- func, agpio->resource_source.string_ptr, pin,
- agpio->pin_table[0]);
+ /* Pin field is not really used under Windows and wraps around at 8 bits */
+ if (pin != (agpio->pin_table[0] & 0xff))
+ dev_dbg(int3472->dev, FW_BUG "%s %s pin number mismatch _DSM %d resource %d\n",
+ func, agpio->resource_source.string_ptr, pin, agpio->pin_table[0]);
active_value = FIELD_GET(INT3472_GPIO_DSM_SENSOR_ON_VAL, obj->integer.value);
if (!active_value)
- polarity ^= GPIO_ACTIVE_LOW;
+ gpio_flags ^= GPIO_ACTIVE_LOW;
dev_dbg(int3472->dev, "%s %s pin %d active-%s\n", func,
agpio->resource_source.string_ptr, agpio->pin_table[0],
- (polarity == GPIO_ACTIVE_HIGH) ? "high" : "low");
+ str_high_low(gpio_flags == GPIO_ACTIVE_HIGH));
switch (type) {
case INT3472_GPIO_TYPE_RESET:
case INT3472_GPIO_TYPE_POWERDOWN:
- ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, func, polarity);
+ ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, func, gpio_flags);
if (ret)
err_msg = "Failed to map GPIO pin to sensor\n";
@@ -247,7 +289,7 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
case INT3472_GPIO_TYPE_CLK_ENABLE:
case INT3472_GPIO_TYPE_PRIVACY_LED:
case INT3472_GPIO_TYPE_POWER_ENABLE:
- gpio = skl_int3472_gpiod_get_from_temp_lookup(int3472, agpio, func, polarity);
+ gpio = skl_int3472_gpiod_get_from_temp_lookup(int3472, agpio, func, gpio_flags);
if (IS_ERR(gpio)) {
ret = PTR_ERR(gpio);
err_msg = "Failed to get GPIO\n";
@@ -292,7 +334,8 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
if (ret < 0)
return dev_err_probe(int3472->dev, ret, err_msg);
- return ret;
+ /* Tell acpi_dev_get_resources() to not make a copy of the resource */
+ return 1;
}
static int skl_int3472_parse_crs(struct int3472_discrete_device *int3472)
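
The new return value follows the acpi_dev_get_resources() preproc callback contract: a
negative value aborts the walk, 0 lets the core add a copy of the resource to the list,
and a positive value skips the copy. A minimal sketch of such a callback (example_preproc
is a hypothetical handler, not part of this driver):

	/* Sketch: keep GPIO IO resources out of the returned resource list. */
	static int example_preproc(struct acpi_resource *ares, void *data)
	{
		struct acpi_resource_gpio *agpio;

		if (!acpi_gpio_get_io_resource(ares, &agpio))
			return 0;	/* not a GPIO IO resource: let the core copy it */

		/* handle the GPIO here ... */
		return 1;		/* handled: do not add a copy to the list */
	}

	/* Typical call site: ret = acpi_dev_get_resources(adev, &list, example_preproc, data); */
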
@@ -339,6 +382,9 @@ static int skl_int3472_discrete_probe(struct platform_device *pdev)
struct int3472_cldb cldb;
int ret;
+ if (!adev)
+ return -ENODEV;
+
ret = skl_int3472_fill_cldb(adev, &cldb);
if (ret) {
dev_err(&pdev->dev, "Couldn't fill CLDB structure\n");
@@ -395,7 +441,7 @@ static struct platform_driver int3472_discrete = {
.acpi_match_table = int3472_device_id,
},
.probe = skl_int3472_discrete_probe,
- .remove_new = skl_int3472_discrete_remove,
+ .remove = skl_int3472_discrete_remove,
};
module_platform_driver(int3472_discrete);
diff --git a/drivers/platform/x86/intel/int3472/tps68470.c b/drivers/platform/x86/intel/int3472/tps68470.c
index 1e107fd49f82..81ac4c691963 100644
--- a/drivers/platform/x86/intel/int3472/tps68470.c
+++ b/drivers/platform/x86/intel/int3472/tps68470.c
@@ -152,6 +152,9 @@ static int skl_int3472_tps68470_probe(struct i2c_client *client)
int ret;
int i;
+ if (!adev)
+ return -ENODEV;
+
n_consumers = skl_int3472_fill_clk_pdata(&client->dev, &clk_pdata);
if (n_consumers < 0)
return n_consumers;
diff --git a/drivers/platform/x86/intel/mrfld_pwrbtn.c b/drivers/platform/x86/intel/mrfld_pwrbtn.c
index 549a3f586f3b..6c43f801c467 100644
--- a/drivers/platform/x86/intel/mrfld_pwrbtn.c
+++ b/drivers/platform/x86/intel/mrfld_pwrbtn.c
@@ -97,7 +97,7 @@ static struct platform_driver mrfld_pwrbtn_driver = {
.name = "mrfld_bcove_pwrbtn",
},
.probe = mrfld_pwrbtn_probe,
- .remove_new = mrfld_pwrbtn_remove,
+ .remove = mrfld_pwrbtn_remove,
.id_table = mrfld_pwrbtn_id_table,
};
module_platform_driver(mrfld_pwrbtn_driver);
diff --git a/drivers/platform/x86/intel/oaktrail.c b/drivers/platform/x86/intel/oaktrail.c
index fa720967e69b..265cef327b4f 100644
--- a/drivers/platform/x86/intel/oaktrail.c
+++ b/drivers/platform/x86/intel/oaktrail.c
@@ -28,7 +28,6 @@
#include <linux/backlight.h>
#include <linux/dmi.h>
#include <linux/err.h>
-#include <linux/fb.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -250,7 +249,7 @@ static int oaktrail_backlight_init(void)
oaktrail_bl_device = bd;
bd->props.brightness = get_backlight_brightness(bd);
- bd->props.power = FB_BLANK_UNBLANK;
+ bd->props.power = BACKLIGHT_POWER_ON;
backlight_update_status(bd);
return 0;
@@ -365,7 +364,7 @@ static void __exit oaktrail_cleanup(void)
module_init(oaktrail_init);
module_exit(oaktrail_cleanup);
-MODULE_AUTHOR("Yin Kangkai (kangkai.yin@intel.com)");
+MODULE_AUTHOR("Yin Kangkai <kangkai.yin@intel.com>");
MODULE_DESCRIPTION("Intel Oaktrail Platform ACPI Extras");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel/plr_tpmi.c b/drivers/platform/x86/intel/plr_tpmi.c
new file mode 100644
index 000000000000..2b55347a5a93
--- /dev/null
+++ b/drivers/platform/x86/intel/plr_tpmi.c
@@ -0,0 +1,354 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Performance Limit Reasons via TPMI
+ *
+ * Copyright (c) 2024, Intel Corporation.
+ */
+
+#include <linux/array_size.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gfp_types.h>
+#include <linux/intel_tpmi.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kstrtox.h>
+#include <linux/lockdep.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/seq_file.h>
+#include <linux/sprintf.h>
+#include <linux/types.h>
+
+#include "tpmi_power_domains.h"
+
+#define PLR_HEADER 0x00
+#define PLR_MAILBOX_INTERFACE 0x08
+#define PLR_MAILBOX_DATA 0x10
+#define PLR_DIE_LEVEL 0x18
+
+#define PLR_MODULE_ID_MASK GENMASK_ULL(19, 12)
+#define PLR_RUN_BUSY BIT_ULL(63)
+
+#define PLR_COMMAND_WRITE 1
+
+#define PLR_INVALID GENMASK_ULL(63, 0)
+
+#define PLR_TIMEOUT_US 5
+#define PLR_TIMEOUT_MAX_US 1000
+
+#define PLR_COARSE_REASON_BITS 32
+
+struct tpmi_plr;
+
+struct tpmi_plr_die {
+ void __iomem *base;
+ struct mutex lock; /* Protect access to PLR mailbox */
+ int package_id;
+ int die_id;
+ struct tpmi_plr *plr;
+};
+
+struct tpmi_plr {
+ struct dentry *dbgfs_dir;
+ struct tpmi_plr_die *die_info;
+ int num_dies;
+ struct auxiliary_device *auxdev;
+};
+
+static const char * const plr_coarse_reasons[] = {
+ "FREQUENCY",
+ "CURRENT",
+ "POWER",
+ "THERMAL",
+ "PLATFORM",
+ "MCP",
+ "RAS",
+ "MISC",
+ "QOS",
+ "DFC",
+};
+
+static const char * const plr_fine_reasons[] = {
+ "FREQUENCY_CDYN0",
+ "FREQUENCY_CDYN1",
+ "FREQUENCY_CDYN2",
+ "FREQUENCY_CDYN3",
+ "FREQUENCY_CDYN4",
+ "FREQUENCY_CDYN5",
+ "FREQUENCY_FCT",
+ "FREQUENCY_PCS_TRL",
+ "CURRENT_MTPMAX",
+ "POWER_FAST_RAPL",
+ "POWER_PKG_PL1_MSR_TPMI",
+ "POWER_PKG_PL1_MMIO",
+ "POWER_PKG_PL1_PCS",
+ "POWER_PKG_PL2_MSR_TPMI",
+ "POWER_PKG_PL2_MMIO",
+ "POWER_PKG_PL2_PCS",
+ "POWER_PLATFORM_PL1_MSR_TPMI",
+ "POWER_PLATFORM_PL1_MMIO",
+ "POWER_PLATFORM_PL1_PCS",
+ "POWER_PLATFORM_PL2_MSR_TPMI",
+ "POWER_PLATFORM_PL2_MMIO",
+ "POWER_PLATFORM_PL2_PCS",
+ "UNKNOWN(22)",
+ "THERMAL_PER_CORE",
+ "DFC_UFS",
+ "PLATFORM_PROCHOT",
+ "PLATFORM_HOT_VR",
+ "UNKNOWN(27)",
+ "UNKNOWN(28)",
+ "MISC_PCS_PSTATE",
+};
+
+static u64 plr_read(struct tpmi_plr_die *plr_die, int offset)
+{
+ return readq(plr_die->base + offset);
+}
+
+static void plr_write(u64 val, struct tpmi_plr_die *plr_die, int offset)
+{
+ writeq(val, plr_die->base + offset);
+}
+
+static int plr_read_cpu_status(struct tpmi_plr_die *plr_die, int cpu,
+ u64 *status)
+{
+ u64 regval;
+ int ret;
+
+ lockdep_assert_held(&plr_die->lock);
+
+ regval = FIELD_PREP(PLR_MODULE_ID_MASK, tpmi_get_punit_core_number(cpu));
+ regval |= PLR_RUN_BUSY;
+
+ plr_write(regval, plr_die, PLR_MAILBOX_INTERFACE);
+
+ ret = readq_poll_timeout(plr_die->base + PLR_MAILBOX_INTERFACE, regval,
+ !(regval & PLR_RUN_BUSY), PLR_TIMEOUT_US,
+ PLR_TIMEOUT_MAX_US);
+ if (ret)
+ return ret;
+
+ *status = plr_read(plr_die, PLR_MAILBOX_DATA);
+
+ return 0;
+}
+
+static int plr_clear_cpu_status(struct tpmi_plr_die *plr_die, int cpu)
+{
+ u64 regval;
+
+ lockdep_assert_held(&plr_die->lock);
+
+ regval = FIELD_PREP(PLR_MODULE_ID_MASK, tpmi_get_punit_core_number(cpu));
+ regval |= PLR_RUN_BUSY | PLR_COMMAND_WRITE;
+
+ plr_write(0, plr_die, PLR_MAILBOX_DATA);
+
+ plr_write(regval, plr_die, PLR_MAILBOX_INTERFACE);
+
+ return readq_poll_timeout(plr_die->base + PLR_MAILBOX_INTERFACE, regval,
+ !(regval & PLR_RUN_BUSY), PLR_TIMEOUT_US,
+ PLR_TIMEOUT_MAX_US);
+}
+
+static void plr_print_bits(struct seq_file *s, u64 val, int bits)
+{
+ const unsigned long mask[] = { BITMAP_FROM_U64(val) };
+ int bit, index;
+
+ for_each_set_bit(bit, mask, bits) {
+ const char *str = NULL;
+
+ if (bit < PLR_COARSE_REASON_BITS) {
+ if (bit < ARRAY_SIZE(plr_coarse_reasons))
+ str = plr_coarse_reasons[bit];
+ } else {
+ index = bit - PLR_COARSE_REASON_BITS;
+ if (index < ARRAY_SIZE(plr_fine_reasons))
+ str = plr_fine_reasons[index];
+ }
+
+ if (str)
+ seq_printf(s, " %s", str);
+ else
+ seq_printf(s, " UNKNOWN(%d)", bit);
+ }
+
+ if (!val)
+ seq_puts(s, " none");
+
+ seq_putc(s, '\n');
+}
+
+static int plr_status_show(struct seq_file *s, void *unused)
+{
+ struct tpmi_plr_die *plr_die = s->private;
+ int ret;
+ u64 val;
+
+ val = plr_read(plr_die, PLR_DIE_LEVEL);
+ seq_puts(s, "cpus");
+ plr_print_bits(s, val, 32);
+
+ guard(mutex)(&plr_die->lock);
+
+ for (int cpu = 0; cpu < nr_cpu_ids; cpu++) {
+ if (plr_die->die_id != tpmi_get_power_domain_id(cpu))
+ continue;
+
+ if (plr_die->package_id != topology_physical_package_id(cpu))
+ continue;
+
+ seq_printf(s, "cpu%d", cpu);
+ ret = plr_read_cpu_status(plr_die, cpu, &val);
+ if (ret) {
+ dev_err(&plr_die->plr->auxdev->dev, "Failed to read PLR for cpu %d, ret=%d\n",
+ cpu, ret);
+ return ret;
+ }
+
+ plr_print_bits(s, val, 64);
+ }
+
+ return 0;
+}
+
+static ssize_t plr_status_write(struct file *filp, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = filp->private_data;
+ struct tpmi_plr_die *plr_die = s->private;
+ bool val;
+ int ret;
+
+ ret = kstrtobool_from_user(ubuf, count, &val);
+ if (ret)
+ return ret;
+
+ if (val != 0)
+ return -EINVAL;
+
+ plr_write(0, plr_die, PLR_DIE_LEVEL);
+
+ guard(mutex)(&plr_die->lock);
+
+ for (int cpu = 0; cpu < nr_cpu_ids; cpu++) {
+ if (plr_die->die_id != tpmi_get_power_domain_id(cpu))
+ continue;
+
+ if (plr_die->package_id != topology_physical_package_id(cpu))
+ continue;
+
+ plr_clear_cpu_status(plr_die, cpu);
+ }
+
+ return count;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(plr_status);
+
+static int intel_plr_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id)
+{
+ struct intel_tpmi_plat_info *plat_info;
+ struct dentry *dentry;
+ int i, num_resources;
+ struct resource *res;
+ struct tpmi_plr *plr;
+ void __iomem *base;
+ char name[17];
+ int err;
+
+ plat_info = tpmi_get_platform_data(auxdev);
+ if (!plat_info)
+ return dev_err_probe(&auxdev->dev, -EINVAL, "No platform info\n");
+
+ dentry = tpmi_get_debugfs_dir(auxdev);
+ if (!dentry)
+ return dev_err_probe(&auxdev->dev, -ENODEV, "No TPMI debugfs directory.\n");
+
+ num_resources = tpmi_get_resource_count(auxdev);
+ if (!num_resources)
+ return -EINVAL;
+
+ plr = devm_kzalloc(&auxdev->dev, sizeof(*plr), GFP_KERNEL);
+ if (!plr)
+ return -ENOMEM;
+
+ plr->die_info = devm_kcalloc(&auxdev->dev, num_resources, sizeof(*plr->die_info),
+ GFP_KERNEL);
+ if (!plr->die_info)
+ return -ENOMEM;
+
+ plr->num_dies = num_resources;
+ plr->dbgfs_dir = debugfs_create_dir("plr", dentry);
+ plr->auxdev = auxdev;
+
+ for (i = 0; i < num_resources; i++) {
+ res = tpmi_get_resource_at_index(auxdev, i);
+ if (!res) {
+ err = dev_err_probe(&auxdev->dev, -EINVAL, "No resource\n");
+ goto err;
+ }
+
+ base = devm_ioremap_resource(&auxdev->dev, res);
+ if (IS_ERR(base)) {
+ err = PTR_ERR(base);
+ goto err;
+ }
+
+ plr->die_info[i].base = base;
+ plr->die_info[i].package_id = plat_info->package_id;
+ plr->die_info[i].die_id = i;
+ plr->die_info[i].plr = plr;
+ mutex_init(&plr->die_info[i].lock);
+
+ if (plr_read(&plr->die_info[i], PLR_HEADER) == PLR_INVALID)
+ continue;
+
+ snprintf(name, sizeof(name), "domain%d", i);
+
+ dentry = debugfs_create_dir(name, plr->dbgfs_dir);
+ debugfs_create_file("status", 0444, dentry, &plr->die_info[i],
+ &plr_status_fops);
+ }
+
+ auxiliary_set_drvdata(auxdev, plr);
+
+ return 0;
+
+err:
+ debugfs_remove_recursive(plr->dbgfs_dir);
+ return err;
+}
+
+static void intel_plr_remove(struct auxiliary_device *auxdev)
+{
+ struct tpmi_plr *plr = auxiliary_get_drvdata(auxdev);
+
+ debugfs_remove_recursive(plr->dbgfs_dir);
+}
+
+static const struct auxiliary_device_id intel_plr_id_table[] = {
+ { .name = "intel_vsec.tpmi-plr" },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, intel_plr_id_table);
+
+static struct auxiliary_driver intel_plr_aux_driver = {
+ .id_table = intel_plr_id_table,
+ .remove = intel_plr_remove,
+ .probe = intel_plr_probe,
+};
+module_auxiliary_driver(intel_plr_aux_driver);
+
+MODULE_IMPORT_NS("INTEL_TPMI");
+MODULE_IMPORT_NS("INTEL_TPMI_POWER_DOMAIN");
+MODULE_DESCRIPTION("Intel TPMI PLR Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel/pmc/arl.c b/drivers/platform/x86/intel/pmc/arl.c
index 683ae828276b..05dec4f5019f 100644
--- a/drivers/platform/x86/intel/pmc/arl.c
+++ b/drivers/platform/x86/intel/pmc/arl.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* This file contains platform specific structure definitions
- * and init function used by Meteor Lake PCH.
+ * and init function used by Arrow Lake PCH.
*
* Copyright (c) 2022, Intel Corporation.
* All Rights Reserved.
@@ -673,6 +673,7 @@ static struct pmc_info arl_pmc_info_list[] = {
};
#define ARL_NPU_PCI_DEV 0xad1d
+#define ARL_GNA_PCI_DEV 0xae4c
/*
* Set power state of select devices that do not have drivers to D3
* so that they do not block Package C entry.
@@ -680,14 +681,14 @@ static struct pmc_info arl_pmc_info_list[] = {
static void arl_d3_fixup(void)
{
pmc_core_set_device_d3(ARL_NPU_PCI_DEV);
+ pmc_core_set_device_d3(ARL_GNA_PCI_DEV);
}
static int arl_resume(struct pmc_dev *pmcdev)
{
arl_d3_fixup();
- pmc_core_send_ltr_ignore(pmcdev, 3, 0);
- return pmc_core_resume_common(pmcdev);
+ return cnl_resume(pmcdev);
}
int arl_core_init(struct pmc_dev *pmcdev)
diff --git a/drivers/platform/x86/intel/pmc/cnp.c b/drivers/platform/x86/intel/pmc/cnp.c
index dd72974bf71e..fc5193fdf8a8 100644
--- a/drivers/platform/x86/intel/pmc/cnp.c
+++ b/drivers/platform/x86/intel/pmc/cnp.c
@@ -8,6 +8,8 @@
*
*/
+#include <linux/smp.h>
+#include <linux/suspend.h>
#include "core.h"
/* Cannon Lake: PGD PFET Enable Ack Status Register(s) bitmap */
@@ -204,8 +206,57 @@ const struct pmc_reg_map cnp_reg_map = {
.etr3_offset = ETR3_OFFSET,
};
+
+/*
+ * Disable C1 auto-demotion
+ *
+ * Aggressive C1 auto-demotion may lead to failure to enter the deepest C-state
+ * during suspend-to-idle, causing high power consumption. To prevent this, we
+ * disable C1 auto-demotion during suspend and re-enable on resume.
+ *
+ * Note that, although MSR_PKG_CST_CONFIG_CONTROL has 'package' in its name, it
+ * is actually a per-core MSR on client platforms, affecting only a single CPU.
+ * Therefore, it must be configured on all online CPUs. The online CPU mask
+ * does not change during suspend/resume because user space is frozen then.
+ */
+
+static DEFINE_PER_CPU(u64, pkg_cst_config);
+
+static void disable_c1_auto_demote(void *unused)
+{
+ int cpunum = smp_processor_id();
+ u64 val;
+
+ rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, val);
+ per_cpu(pkg_cst_config, cpunum) = val;
+ val &= ~NHM_C1_AUTO_DEMOTE;
+ wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, val);
+
+ pr_debug("%s: cpu:%d cst %llx\n", __func__, cpunum, val);
+}
+
+static void restore_c1_auto_demote(void *unused)
+{
+ int cpunum = smp_processor_id();
+
+ wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, per_cpu(pkg_cst_config, cpunum));
+
+ pr_debug("%s: cpu:%d cst %llx\n", __func__, cpunum,
+ per_cpu(pkg_cst_config, cpunum));
+}
+
+static void s2idle_cpu_quirk(smp_call_func_t func)
+{
+ if (pm_suspend_via_firmware())
+ return;
+
+ on_each_cpu(func, NULL, true);
+}
+
void cnl_suspend(struct pmc_dev *pmcdev)
{
+ s2idle_cpu_quirk(disable_c1_auto_demote);
+
/*
* Due to a hardware limitation, the GBE LTR blocks PC10
* when a cable is attached. To unblock PC10 during suspend,
@@ -216,6 +267,8 @@ void cnl_suspend(struct pmc_dev *pmcdev)
int cnl_resume(struct pmc_dev *pmcdev)
{
+ s2idle_cpu_quirk(restore_c1_auto_demote);
+
pmc_core_send_ltr_ignore(pmcdev, 3, 0);
return pmc_core_resume_common(pmcdev);
diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c
index 8f9c036809c7..1ee0fb5f8250 100644
--- a/drivers/platform/x86/intel/pmc/core.c
+++ b/drivers/platform/x86/intel/pmc/core.c
@@ -22,6 +22,7 @@
#include <linux/suspend.h>
#include <linux/units.h>
+#include <asm/cpuid.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/msr.h>
@@ -87,35 +88,26 @@ static int set_etr3(struct pmc_dev *pmcdev)
struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
const struct pmc_reg_map *map = pmc->map;
u32 reg;
- int err;
if (!map->etr3_offset)
return -EOPNOTSUPP;
- mutex_lock(&pmcdev->lock);
+ guard(mutex)(&pmcdev->lock);
/* check if CF9 is locked */
reg = pmc_core_reg_read(pmc, map->etr3_offset);
- if (reg & ETR3_CF9LOCK) {
- err = -EACCES;
- goto out_unlock;
- }
+ if (reg & ETR3_CF9LOCK)
+ return -EACCES;
/* write CF9 global reset bit */
reg |= ETR3_CF9GR;
pmc_core_reg_write(pmc, map->etr3_offset, reg);
reg = pmc_core_reg_read(pmc, map->etr3_offset);
- if (!(reg & ETR3_CF9GR)) {
- err = -EIO;
- goto out_unlock;
- }
-
- err = 0;
+ if (!(reg & ETR3_CF9GR))
+ return -EIO;
-out_unlock:
- mutex_unlock(&pmcdev->lock);
- return err;
+ return 0;
}
static umode_t etr3_is_visible(struct kobject *kobj,
struct attribute *attr,
@@ -127,9 +119,8 @@ static umode_t etr3_is_visible(struct kobject *kobj,
const struct pmc_reg_map *map = pmc->map;
u32 reg;
- mutex_lock(&pmcdev->lock);
- reg = pmc_core_reg_read(pmc, map->etr3_offset);
- mutex_unlock(&pmcdev->lock);
+ scoped_guard(mutex, &pmcdev->lock)
+ reg = pmc_core_reg_read(pmc, map->etr3_offset);
return reg & ETR3_CF9LOCK ? attr->mode & (SYSFS_PREALLOC | 0444) : attr->mode;
}
@@ -145,12 +136,10 @@ static ssize_t etr3_show(struct device *dev,
if (!map->etr3_offset)
return -EOPNOTSUPP;
- mutex_lock(&pmcdev->lock);
-
- reg = pmc_core_reg_read(pmc, map->etr3_offset);
- reg &= ETR3_CF9GR | ETR3_CF9LOCK;
-
- mutex_unlock(&pmcdev->lock);
+ scoped_guard(mutex, &pmcdev->lock) {
+ reg = pmc_core_reg_read(pmc, map->etr3_offset);
+ reg &= ETR3_CF9GR | ETR3_CF9LOCK;
+ }
return sysfs_emit(buf, "0x%08x", reg);
}
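
The conversions in this file rely on the scope-based lock guards from <linux/cleanup.h>:
guard(mutex)(&lock) takes the mutex and releases it automatically when the enclosing scope
is left, and scoped_guard(mutex, &lock) does the same for an explicit block. A minimal
sketch of the pattern (example_update and example_lock are hypothetical, not from this
driver):

	#include <linux/cleanup.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(example_lock);

	static int example_update(u32 *val, u32 new)
	{
		guard(mutex)(&example_lock);	/* unlocked on every return path */

		if (*val == new)
			return -EEXIST;		/* early return: still unlocks */

		*val = new;
		return 0;
	}
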
@@ -257,9 +246,9 @@ static void pmc_core_slps0_display(struct pmc *pmc, struct device *dev,
}
}
-static int pmc_core_lpm_get_arr_size(const struct pmc_bit_map **maps)
+static unsigned int pmc_core_lpm_get_arr_size(const struct pmc_bit_map **maps)
{
- int idx;
+ unsigned int idx;
for (idx = 0; maps[idx]; idx++)
;/* Nothing */
@@ -272,8 +261,8 @@ static void pmc_core_lpm_display(struct pmc *pmc, struct device *dev,
const char *str,
const struct pmc_bit_map **maps)
{
- int index, idx, len = 32, bit_mask, arr_size;
- u32 *lpm_regs;
+ unsigned int index, idx, len = 32, arr_size;
+ u32 bit_mask, *lpm_regs;
arr_size = pmc_core_lpm_get_arr_size(maps);
lpm_regs = kmalloc_array(arr_size, sizeof(*lpm_regs), GFP_KERNEL);
@@ -326,13 +315,13 @@ static void pmc_core_display_map(struct seq_file *s, int index, int idx, int ip,
static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
- int i;
+ unsigned int i;
for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
struct pmc *pmc = pmcdev->pmcs[i];
const struct pmc_bit_map **maps;
u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES];
- int index, iter, idx, ip = 0;
+ unsigned int index, iter, idx, ip = 0;
if (!pmc)
continue;
@@ -391,7 +380,8 @@ static int pmc_core_mphy_pg_show(struct seq_file *s, void *unused)
const struct pmc_bit_map *map = pmc->map->mphy_sts;
u32 mphy_core_reg_low, mphy_core_reg_high;
u32 val_low, val_high;
- int index, err = 0;
+ unsigned int index;
+ int err = 0;
if (pmcdev->pmc_xram_read_bit) {
seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
@@ -401,20 +391,18 @@ static int pmc_core_mphy_pg_show(struct seq_file *s, void *unused)
mphy_core_reg_low = (SPT_PMC_MPHY_CORE_STS_0 << 16);
mphy_core_reg_high = (SPT_PMC_MPHY_CORE_STS_1 << 16);
- mutex_lock(&pmcdev->lock);
+ guard(mutex)(&pmcdev->lock);
- if (pmc_core_send_msg(pmc, &mphy_core_reg_low) != 0) {
- err = -EBUSY;
- goto out_unlock;
- }
+ err = pmc_core_send_msg(pmc, &mphy_core_reg_low);
+ if (err)
+ return err;
msleep(10);
val_low = pmc_core_reg_read(pmc, SPT_PMC_MFPMC_OFFSET);
- if (pmc_core_send_msg(pmc, &mphy_core_reg_high) != 0) {
- err = -EBUSY;
- goto out_unlock;
- }
+ err = pmc_core_send_msg(pmc, &mphy_core_reg_high);
+ if (err)
+ return err;
msleep(10);
val_high = pmc_core_reg_read(pmc, SPT_PMC_MFPMC_OFFSET);
@@ -433,9 +421,7 @@ static int pmc_core_mphy_pg_show(struct seq_file *s, void *unused)
"Power gated");
}
-out_unlock:
- mutex_unlock(&pmcdev->lock);
- return err;
+ return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_mphy_pg);
@@ -445,7 +431,8 @@ static int pmc_core_pll_show(struct seq_file *s, void *unused)
struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
const struct pmc_bit_map *map = pmc->map->pll_sts;
u32 mphy_common_reg, val;
- int index, err = 0;
+ unsigned int index;
+ int err = 0;
if (pmcdev->pmc_xram_read_bit) {
seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
@@ -453,12 +440,11 @@ static int pmc_core_pll_show(struct seq_file *s, void *unused)
}
mphy_common_reg = (SPT_PMC_MPHY_COM_STS_0 << 16);
- mutex_lock(&pmcdev->lock);
+ guard(mutex)(&pmcdev->lock);
- if (pmc_core_send_msg(pmc, &mphy_common_reg) != 0) {
- err = -EBUSY;
- goto out_unlock;
- }
+ err = pmc_core_send_msg(pmc, &mphy_common_reg);
+ if (err)
+ return err;
/* Observed PMC HW response latency for MTPMC-MFPMC is ~10 ms */
msleep(10);
@@ -470,9 +456,7 @@ static int pmc_core_pll_show(struct seq_file *s, void *unused)
map[index].bit_mask & val ? "Active" : "Idle");
}
-out_unlock:
- mutex_unlock(&pmcdev->lock);
- return err;
+ return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_pll);
@@ -481,7 +465,8 @@ int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore)
struct pmc *pmc;
const struct pmc_reg_map *map;
u32 reg;
- int pmc_index, ltr_index;
+ unsigned int pmc_index;
+ int ltr_index;
ltr_index = value;
/* For platforms with multiple pmcs, ltr index value given by user
@@ -511,7 +496,7 @@ int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore)
pr_debug("ltr_ignore for pmc%d: ltr_index:%d\n", pmc_index, ltr_index);
- mutex_lock(&pmcdev->lock);
+ guard(mutex)(&pmcdev->lock);
reg = pmc_core_reg_read(pmc, map->ltr_ignore_offset);
if (ignore)
@@ -520,48 +505,56 @@ int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore)
reg &= ~BIT(ltr_index);
pmc_core_reg_write(pmc, map->ltr_ignore_offset, reg);
- mutex_unlock(&pmcdev->lock);
-
return 0;
}
-static ssize_t pmc_core_ltr_ignore_write(struct file *file,
- const char __user *userbuf,
- size_t count, loff_t *ppos)
+static ssize_t pmc_core_ltr_write(struct pmc_dev *pmcdev,
+ const char __user *userbuf,
+ size_t count, int ignore)
{
- struct seq_file *s = file->private_data;
- struct pmc_dev *pmcdev = s->private;
- u32 buf_size, value;
+ u32 value;
int err;
- buf_size = min_t(u32, count, 64);
-
- err = kstrtou32_from_user(userbuf, buf_size, 10, &value);
+ err = kstrtou32_from_user(userbuf, count, 10, &value);
if (err)
return err;
- err = pmc_core_send_ltr_ignore(pmcdev, value, 1);
+ err = pmc_core_send_ltr_ignore(pmcdev, value, ignore);
+
+ return err ?: count;
+}
+
+static ssize_t pmc_core_ltr_ignore_write(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct pmc_dev *pmcdev = s->private;
- return err == 0 ? count : err;
+ return pmc_core_ltr_write(pmcdev, userbuf, count, 1);
}
static int pmc_core_ltr_ignore_show(struct seq_file *s, void *unused)
{
return 0;
}
+DEFINE_SHOW_STORE_ATTRIBUTE(pmc_core_ltr_ignore);
-static int pmc_core_ltr_ignore_open(struct inode *inode, struct file *file)
+static ssize_t pmc_core_ltr_restore_write(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
{
- return single_open(file, pmc_core_ltr_ignore_show, inode->i_private);
+ struct seq_file *s = file->private_data;
+ struct pmc_dev *pmcdev = s->private;
+
+ return pmc_core_ltr_write(pmcdev, userbuf, count, 0);
}
-static const struct file_operations pmc_core_ltr_ignore_ops = {
- .open = pmc_core_ltr_ignore_open,
- .read = seq_read,
- .write = pmc_core_ltr_ignore_write,
- .llseek = seq_lseek,
- .release = single_release,
-};
+static int pmc_core_ltr_restore_show(struct seq_file *s, void *unused)
+{
+ return 0;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(pmc_core_ltr_restore);
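
DEFINE_SHOW_STORE_ATTRIBUTE() from <linux/seq_file.h> replaces the hand-rolled
file_operations removed above: given <name>_show() and <name>_write(), it generates a
single_open()-based <name>_fops. Roughly (a sketch of the expansion; the macro definition
is authoritative):

	static int pmc_core_ltr_ignore_open(struct inode *inode, struct file *file)
	{
		return single_open(file, pmc_core_ltr_ignore_show, inode->i_private);
	}

	static const struct file_operations pmc_core_ltr_ignore_fops = {
		.owner		= THIS_MODULE,
		.open		= pmc_core_ltr_ignore_open,
		.read		= seq_read,
		.write		= pmc_core_ltr_ignore_write,
		.llseek		= seq_lseek,
		.release	= single_release,
	};
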
static void pmc_core_slps0_dbg_latch(struct pmc_dev *pmcdev, bool reset)
{
@@ -569,10 +562,10 @@ static void pmc_core_slps0_dbg_latch(struct pmc_dev *pmcdev, bool reset)
const struct pmc_reg_map *map = pmc->map;
u32 fd;
- mutex_lock(&pmcdev->lock);
+ guard(mutex)(&pmcdev->lock);
if (!reset && !slps0_dbg_latch)
- goto out_unlock;
+ return;
fd = pmc_core_reg_read(pmc, map->slps0_dbg_offset);
if (reset)
@@ -582,9 +575,6 @@ static void pmc_core_slps0_dbg_latch(struct pmc_dev *pmcdev, bool reset)
pmc_core_reg_write(pmc, map->slps0_dbg_offset, fd);
slps0_dbg_latch = false;
-
-out_unlock:
- mutex_unlock(&pmcdev->lock);
}
static int pmc_core_slps0_dbg_show(struct seq_file *s, void *unused)
@@ -636,20 +626,32 @@ static u32 convert_ltr_scale(u32 val)
static int pmc_core_ltr_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
- u64 decoded_snoop_ltr, decoded_non_snoop_ltr;
- u32 ltr_raw_data, scale, val;
+ u64 decoded_snoop_ltr, decoded_non_snoop_ltr, val;
+ u32 ltr_raw_data, scale;
u16 snoop_ltr, nonsnoop_ltr;
- int i, index, ltr_index = 0;
+ unsigned int i, index, ltr_index = 0;
for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
- struct pmc *pmc = pmcdev->pmcs[i];
+ struct pmc *pmc;
const struct pmc_bit_map *map;
+ u32 ltr_ign_reg;
+ pmc = pmcdev->pmcs[i];
if (!pmc)
continue;
+ scoped_guard(mutex, &pmcdev->lock)
+ ltr_ign_reg = pmc_core_reg_read(pmc, pmc->map->ltr_ignore_offset);
+
map = pmc->map->ltr_show_sts;
for (index = 0; map[index].name; index++) {
+ bool ltr_ign_data;
+
+ if (index > pmc->map->ltr_ignore_max)
+ ltr_ign_data = false;
+ else
+ ltr_ign_data = ltr_ign_reg & BIT(index);
+
decoded_snoop_ltr = decoded_non_snoop_ltr = 0;
ltr_raw_data = pmc_core_reg_read(pmc,
map[index].bit_mask);
@@ -667,10 +669,10 @@ static int pmc_core_ltr_show(struct seq_file *s, void *unused)
decoded_snoop_ltr = val * convert_ltr_scale(scale);
}
- seq_printf(s, "%d\tPMC%d:%-32s\tLTR: RAW: 0x%-16x\tNon-Snoop(ns): %-16llu\tSnoop(ns): %-16llu\n",
+ seq_printf(s, "%d\tPMC%d:%-32s\tLTR: RAW: 0x%-16x\tNon-Snoop(ns): %-16llu\tSnoop(ns): %-16llu\tLTR_IGNORE: %d\n",
ltr_index, i, map[index].name, ltr_raw_data,
decoded_non_snoop_ltr,
- decoded_snoop_ltr);
+ decoded_snoop_ltr, ltr_ign_data);
ltr_index++;
}
}
@@ -678,6 +680,84 @@ static int pmc_core_ltr_show(struct seq_file *s, void *unused)
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_ltr);
+static int pmc_core_s0ix_blocker_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ unsigned int pmcidx;
+
+ for (pmcidx = 0; pmcidx < ARRAY_SIZE(pmcdev->pmcs); pmcidx++) {
+ const struct pmc_bit_map **maps;
+ unsigned int arr_size, r_idx;
+ u32 offset, counter;
+ struct pmc *pmc;
+
+ pmc = pmcdev->pmcs[pmcidx];
+ if (!pmc)
+ continue;
+ maps = pmc->map->s0ix_blocker_maps;
+ offset = pmc->map->s0ix_blocker_offset;
+ arr_size = pmc_core_lpm_get_arr_size(maps);
+
+ for (r_idx = 0; r_idx < arr_size; r_idx++) {
+ const struct pmc_bit_map *map;
+
+ for (map = maps[r_idx]; map->name; map++) {
+ if (!map->blk)
+ continue;
+ counter = pmc_core_reg_read(pmc, offset);
+ seq_printf(s, "PMC%d:%-30s %-30d\n", pmcidx,
+ map->name, counter);
+ offset += map->blk * S0IX_BLK_SIZE;
+ }
+ }
+ }
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_s0ix_blocker);
+
+static void pmc_core_ltr_ignore_all(struct pmc_dev *pmcdev)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); i++) {
+ struct pmc *pmc;
+ u32 ltr_ign;
+
+ pmc = pmcdev->pmcs[i];
+ if (!pmc)
+ continue;
+
+ guard(mutex)(&pmcdev->lock);
+ pmc->ltr_ign = pmc_core_reg_read(pmc, pmc->map->ltr_ignore_offset);
+
+ /* ltr_ignore_max is the maximum index value for the LTR ignore register */
+ ltr_ign = pmc->ltr_ign | GENMASK(pmc->map->ltr_ignore_max, 0);
+ pmc_core_reg_write(pmc, pmc->map->ltr_ignore_offset, ltr_ign);
+ }
+
+ /*
+ * Ignoring the ME LTR during suspend prevents platforms with an ADL PCH
+ * from reaching deeper S0ix substates, so do not ignore it here.
+ */
+ pmc_core_send_ltr_ignore(pmcdev, 6, 0);
+}
+
+static void pmc_core_ltr_restore_all(struct pmc_dev *pmcdev)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); i++) {
+ struct pmc *pmc;
+
+ pmc = pmcdev->pmcs[i];
+ if (!pmc)
+ continue;
+
+ guard(mutex)(&pmcdev->lock);
+ pmc_core_reg_write(pmc, pmc->map->ltr_ignore_offset, pmc->ltr_ign);
+ }
+}
+
static inline u64 adjust_lpm_residency(struct pmc *pmc, u32 offset,
const int lpm_adj_x2)
{
@@ -692,11 +772,11 @@ static int pmc_core_substate_res_show(struct seq_file *s, void *unused)
struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
const int lpm_adj_x2 = pmc->map->lpm_res_counter_step_x2;
u32 offset = pmc->map->lpm_residency_offset;
- int i, mode;
+ int mode;
seq_printf(s, "%-10s %-15s\n", "Substate", "Residency");
- pmc_for_each_mode(i, mode, pmcdev) {
+ pmc_for_each_mode(mode, pmcdev) {
seq_printf(s, "%-10s %-15llu\n", pmc_lpm_modes[mode],
adjust_lpm_residency(pmc, offset + (4 * mode), lpm_adj_x2));
}
@@ -708,7 +788,7 @@ DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_res);
static int pmc_core_substate_sts_regs_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
- int i;
+ unsigned int i;
for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
struct pmc *pmc = pmcdev->pmcs[i];
@@ -729,7 +809,7 @@ DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_sts_regs);
static int pmc_core_substate_l_sts_regs_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
- int i;
+ unsigned int i;
for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
struct pmc *pmc = pmcdev->pmcs[i];
@@ -750,21 +830,24 @@ DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_l_sts_regs);
static void pmc_core_substate_req_header_show(struct seq_file *s, int pmc_index)
{
struct pmc_dev *pmcdev = s->private;
- int i, mode;
+ int mode;
seq_printf(s, "%30s |", "Element");
- pmc_for_each_mode(i, mode, pmcdev)
+ pmc_for_each_mode(mode, pmcdev)
seq_printf(s, " %9s |", pmc_lpm_modes[mode]);
- seq_printf(s, " %9s |\n", "Status");
+ seq_printf(s, " %9s |", "Status");
+ seq_printf(s, " %11s |\n", "Live Status");
}
static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
u32 sts_offset;
+ u32 sts_offset_live;
u32 *lpm_req_regs;
- int num_maps, mp, pmc_index;
+ unsigned int mp, pmc_index;
+ int num_maps;
for (pmc_index = 0; pmc_index < ARRAY_SIZE(pmcdev->pmcs); ++pmc_index) {
struct pmc *pmc = pmcdev->pmcs[pmc_index];
@@ -776,6 +859,7 @@ static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused)
maps = pmc->map->lpm_sts;
num_maps = pmc->map->lpm_num_maps;
sts_offset = pmc->map->lpm_status_offset;
+ sts_offset_live = pmc->map->lpm_live_status_offset;
lpm_req_regs = pmc->lpm_req_regs;
/*
@@ -793,20 +877,24 @@ static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused)
for (mp = 0; mp < num_maps; mp++) {
u32 req_mask = 0;
u32 lpm_status;
+ u32 lpm_status_live;
const struct pmc_bit_map *map;
- int mode, idx, i, len = 32;
+ int mode, i, len = 32;
/*
* Capture the requirements and create a mask so that we only
* show an element if it's required for at least one of the
* enabled low power modes
*/
- pmc_for_each_mode(idx, mode, pmcdev)
+ pmc_for_each_mode(mode, pmcdev)
req_mask |= lpm_req_regs[mp + (mode * num_maps)];
/* Get the last latched status for this map */
lpm_status = pmc_core_reg_read(pmc, sts_offset + (mp * 4));
+ /* Get the runtime status for this map */
+ lpm_status_live = pmc_core_reg_read(pmc, sts_offset_live + (mp * 4));
+
/* Loop over elements in this map */
map = maps[mp];
for (i = 0; map[i].name && i < len; i++) {
@@ -824,7 +912,7 @@ static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused)
seq_printf(s, "pmc%d: %26s |", pmc_index, map[i].name);
/* Loop over the enabled states and display if required */
- pmc_for_each_mode(idx, mode, pmcdev) {
+ pmc_for_each_mode(mode, pmcdev) {
bool required = lpm_req_regs[mp + (mode * num_maps)] &
bit_mask;
seq_printf(s, " %9s |", required ? "Required" : " ");
@@ -833,6 +921,9 @@ static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused)
/* In Status column, show the last captured state of this agent */
seq_printf(s, " %9s |", lpm_status & bit_mask ? "Yes" : " ");
+ /* In Live status column, show the live state of this agent */
+ seq_printf(s, " %11s |", lpm_status_live & bit_mask ? "Yes" : " ");
+
seq_puts(s, "\n");
}
}
@@ -845,13 +936,13 @@ static unsigned int pmc_core_get_crystal_freq(void)
{
unsigned int eax_denominator, ebx_numerator, ecx_hz, edx;
- if (boot_cpu_data.cpuid_level < 0x15)
+ if (boot_cpu_data.cpuid_level < CPUID_LEAF_TSC)
return 0;
eax_denominator = ebx_numerator = ecx_hz = edx = 0;
- /* CPUID 15H TSC/Crystal ratio, plus optionally Crystal Hz */
- cpuid(0x15, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);
+ /* TSC/Crystal ratio, plus optionally Crystal Hz */
+ cpuid(CPUID_LEAF_TSC, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);
if (ebx_numerator == 0 || eax_denominator == 0)
return 0;
@@ -888,7 +979,7 @@ static int pmc_core_lpm_latch_mode_show(struct seq_file *s, void *unused)
struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
bool c10;
u32 reg;
- int idx, mode;
+ int mode;
reg = pmc_core_reg_read(pmc, pmc->map->lpm_sts_latch_en_offset);
if (reg & LPM_STS_LATCH_MODE) {
@@ -899,7 +990,7 @@ static int pmc_core_lpm_latch_mode_show(struct seq_file *s, void *unused)
c10 = true;
}
- pmc_for_each_mode(idx, mode, pmcdev) {
+ pmc_for_each_mode(mode, pmcdev) {
if ((BIT(mode) & reg) && !c10)
seq_printf(s, " [%s]", pmc_lpm_modes[mode]);
else
@@ -920,7 +1011,7 @@ static ssize_t pmc_core_lpm_latch_mode_write(struct file *file,
struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
bool clear = false, c10 = false;
unsigned char buf[8];
- int idx, m, mode;
+ int m, mode;
u32 reg;
if (count > sizeof(buf) - 1)
@@ -938,7 +1029,7 @@ static ssize_t pmc_core_lpm_latch_mode_write(struct file *file,
mode = sysfs_match_string(pmc_lpm_modes, buf);
/* Check string matches enabled mode */
- pmc_for_each_mode(idx, m, pmcdev)
+ pmc_for_each_mode(m, pmcdev)
if (mode == m)
break;
@@ -952,26 +1043,22 @@ static ssize_t pmc_core_lpm_latch_mode_write(struct file *file,
}
if (clear) {
- mutex_lock(&pmcdev->lock);
+ guard(mutex)(&pmcdev->lock);
reg = pmc_core_reg_read(pmc, pmc->map->etr3_offset);
reg |= ETR3_CLEAR_LPM_EVENTS;
pmc_core_reg_write(pmc, pmc->map->etr3_offset, reg);
- mutex_unlock(&pmcdev->lock);
-
return count;
}
if (c10) {
- mutex_lock(&pmcdev->lock);
+ guard(mutex)(&pmcdev->lock);
reg = pmc_core_reg_read(pmc, pmc->map->lpm_sts_latch_en_offset);
reg &= ~LPM_STS_LATCH_MODE;
pmc_core_reg_write(pmc, pmc->map->lpm_sts_latch_en_offset, reg);
- mutex_unlock(&pmcdev->lock);
-
return count;
}
@@ -980,9 +1067,8 @@ static ssize_t pmc_core_lpm_latch_mode_write(struct file *file,
* and clear everything else.
*/
reg = LPM_STS_LATCH_MODE | BIT(mode);
- mutex_lock(&pmcdev->lock);
+ guard(mutex)(&pmcdev->lock);
pmc_core_reg_write(pmc, pmc->map->lpm_sts_latch_en_offset, reg);
- mutex_unlock(&pmcdev->lock);
return count;
}
@@ -993,7 +1079,7 @@ static int pmc_core_pkgc_show(struct seq_file *s, void *unused)
struct pmc *pmc = s->private;
const struct pmc_bit_map *map = pmc->map->msr_sts;
u64 pcstate_count;
- int index;
+ unsigned int index;
for (index = 0; map[index].name ; index++) {
if (rdmsrl_safe(map[index].bit_mask, &pcstate_count))
@@ -1011,7 +1097,7 @@ DEFINE_SHOW_ATTRIBUTE(pmc_core_pkgc);
static bool pmc_core_pri_verify(u32 lpm_pri, u8 *mode_order)
{
- int i, j;
+ unsigned int i, j;
if (!lpm_pri)
return false;
@@ -1046,7 +1132,8 @@ void pmc_core_get_low_power_modes(struct pmc_dev *pmcdev)
u8 mode_order[LPM_MAX_NUM_MODES];
u32 lpm_pri;
u32 lpm_en;
- int mode, i, p;
+ unsigned int i;
+ int mode, p;
/* Use LPM Maps to indicate support for substates */
if (!pmc->map->lpm_num_maps)
@@ -1171,7 +1258,6 @@ static bool pmc_core_is_pson_residency_enabled(struct pmc_dev *pmcdev)
return val == 1;
}
-
static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
{
debugfs_remove_recursive(pmcdev->dbgfs_dir);
@@ -1193,10 +1279,15 @@ static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
pmcdev, &pmc_core_ppfear_fops);
debugfs_create_file("ltr_ignore", 0644, dir, pmcdev,
- &pmc_core_ltr_ignore_ops);
+ &pmc_core_ltr_ignore_fops);
+
+ debugfs_create_file("ltr_restore", 0200, dir, pmcdev, &pmc_core_ltr_restore_fops);
debugfs_create_file("ltr_show", 0444, dir, pmcdev, &pmc_core_ltr_fops);
+ if (primary_pmc->map->s0ix_blocker_maps)
+ debugfs_create_file("s0ix_blocker", 0444, dir, pmcdev, &pmc_core_s0ix_blocker_fops);
+
debugfs_create_file("package_cstate_show", 0444, dir, primary_pmc,
&pmc_core_pkgc_fops);
@@ -1255,29 +1346,29 @@ static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
}
static const struct x86_cpu_id intel_pmc_core_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, spt_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, spt_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, spt_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, spt_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, cnp_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, icl_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_NNPI, icl_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, cnp_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, cnp_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, tgl_l_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, tgl_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, tgl_l_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, icl_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, tgl_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, tgl_l_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, tgl_l_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, adl_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, tgl_l_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, adl_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, adl_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, mtl_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(ARROWLAKE, arl_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(LUNARLAKE_M, lnl_core_init),
+ X86_MATCH_VFM(INTEL_SKYLAKE_L, spt_core_init),
+ X86_MATCH_VFM(INTEL_SKYLAKE, spt_core_init),
+ X86_MATCH_VFM(INTEL_KABYLAKE_L, spt_core_init),
+ X86_MATCH_VFM(INTEL_KABYLAKE, spt_core_init),
+ X86_MATCH_VFM(INTEL_CANNONLAKE_L, cnp_core_init),
+ X86_MATCH_VFM(INTEL_ICELAKE_L, icl_core_init),
+ X86_MATCH_VFM(INTEL_ICELAKE_NNPI, icl_core_init),
+ X86_MATCH_VFM(INTEL_COMETLAKE, cnp_core_init),
+ X86_MATCH_VFM(INTEL_COMETLAKE_L, cnp_core_init),
+ X86_MATCH_VFM(INTEL_TIGERLAKE_L, tgl_l_core_init),
+ X86_MATCH_VFM(INTEL_TIGERLAKE, tgl_core_init),
+ X86_MATCH_VFM(INTEL_ATOM_TREMONT, tgl_l_core_init),
+ X86_MATCH_VFM(INTEL_ATOM_TREMONT_L, icl_core_init),
+ X86_MATCH_VFM(INTEL_ROCKETLAKE, tgl_core_init),
+ X86_MATCH_VFM(INTEL_ALDERLAKE_L, tgl_l_core_init),
+ X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, tgl_l_core_init),
+ X86_MATCH_VFM(INTEL_ALDERLAKE, adl_core_init),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_P, tgl_l_core_init),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE, adl_core_init),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_S, adl_core_init),
+ X86_MATCH_VFM(INTEL_METEORLAKE_L, mtl_core_init),
+ X86_MATCH_VFM(INTEL_ARROWLAKE, arl_core_init),
+ X86_MATCH_VFM(INTEL_LUNARLAKE_M, lnl_core_init),
{}
};
@@ -1335,7 +1426,7 @@ static void pmc_core_do_dmi_quirks(struct pmc *pmc)
static void pmc_core_clean_structure(struct platform_device *pdev)
{
struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
- int i;
+ unsigned int i;
for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
struct pmc *pmc = pmcdev->pmcs[i];
@@ -1389,6 +1480,15 @@ static int pmc_core_probe(struct platform_device *pdev)
return -ENOMEM;
pmcdev->pmcs[PMC_IDX_MAIN] = primary_pmc;
+ /* The last element in msr_map is empty */
+ pmcdev->num_of_pkgc = ARRAY_SIZE(msr_map) - 1;
+ pmcdev->pkgc_res_cnt = devm_kcalloc(&pdev->dev,
+ pmcdev->num_of_pkgc,
+ sizeof(*pmcdev->pkgc_res_cnt),
+ GFP_KERNEL);
+ if (!pmcdev->pkgc_res_cnt)
+ return -ENOMEM;
+
/*
* Coffee Lake has CPU ID of Kaby Lake and Cannon Lake PCH. So here
* Sunrisepoint PCH regmap can't be used. Use Cannon Lake PCH regmap
@@ -1428,21 +1528,31 @@ static bool warn_on_s0ix_failures;
module_param(warn_on_s0ix_failures, bool, 0644);
MODULE_PARM_DESC(warn_on_s0ix_failures, "Check and warn for S0ix failures");
+static bool ltr_ignore_all_suspend = true;
+module_param(ltr_ignore_all_suspend, bool, 0644);
+MODULE_PARM_DESC(ltr_ignore_all_suspend, "Ignore all LTRs during suspend");
+
static __maybe_unused int pmc_core_suspend(struct device *dev)
{
struct pmc_dev *pmcdev = dev_get_drvdata(dev);
struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
+ unsigned int i;
if (pmcdev->suspend)
pmcdev->suspend(pmcdev);
+ if (ltr_ignore_all_suspend)
+ pmc_core_ltr_ignore_all(pmcdev);
+
/* Check if the suspend will actually use S0ix */
if (pm_suspend_via_firmware())
return 0;
- /* Save PC10 residency for checking later */
- if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pmcdev->pc10_counter))
- return -EIO;
+ /* Save PKGC residency for checking later */
+ for (i = 0; i < pmcdev->num_of_pkgc; i++) {
+ if (rdmsrl_safe(msr_map[i].bit_mask, &pmcdev->pkgc_res_cnt[i]))
+ return -EIO;
+ }
/* Save S0ix residency for checking later */
if (pmc_core_dev_state_get(pmc, &pmcdev->s0ix_counter))
@@ -1451,14 +1561,15 @@ static __maybe_unused int pmc_core_suspend(struct device *dev)
return 0;
}
-static inline bool pmc_core_is_pc10_failed(struct pmc_dev *pmcdev)
+static inline bool pmc_core_is_deepest_pkgc_failed(struct pmc_dev *pmcdev)
{
- u64 pc10_counter;
+ u32 deepest_pkgc_msr = msr_map[pmcdev->num_of_pkgc - 1].bit_mask;
+ u64 deepest_pkgc_residency;
- if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pc10_counter))
+ if (rdmsrl_safe(deepest_pkgc_msr, &deepest_pkgc_residency))
return false;
- if (pc10_counter == pmcdev->pc10_counter)
+ if (deepest_pkgc_residency == pmcdev->pkgc_res_cnt[pmcdev->num_of_pkgc - 1])
return true;
return false;
@@ -1485,7 +1596,7 @@ int pmc_core_resume_common(struct pmc_dev *pmcdev)
struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
const struct pmc_bit_map **maps = pmc->map->lpm_sts;
int offset = pmc->map->lpm_status_offset;
- int i;
+ unsigned int i;
/* Check if the suspend used S0ix */
if (pm_suspend_via_firmware())
@@ -1497,10 +1608,22 @@ int pmc_core_resume_common(struct pmc_dev *pmcdev)
if (!warn_on_s0ix_failures)
return 0;
- if (pmc_core_is_pc10_failed(pmcdev)) {
- /* S0ix failed because of PC10 entry failure */
- dev_info(dev, "CPU did not enter PC10!!! (PC10 cnt=0x%llx)\n",
- pmcdev->pc10_counter);
+ if (pmc_core_is_deepest_pkgc_failed(pmcdev)) {
+ /* S0ix failed because of deepest PKGC entry failure */
+ dev_info(dev, "CPU did not enter %s!!! (%s cnt=0x%llx)\n",
+ msr_map[pmcdev->num_of_pkgc - 1].name,
+ msr_map[pmcdev->num_of_pkgc - 1].name,
+ pmcdev->pkgc_res_cnt[pmcdev->num_of_pkgc - 1]);
+
+ for (i = 0; i < pmcdev->num_of_pkgc; i++) {
+ u64 pc_cnt;
+
+ if (!rdmsrl_safe(msr_map[i].bit_mask, &pc_cnt)) {
+ dev_info(dev, "Prev %s cnt = 0x%llx, Current %s cnt = 0x%llx\n",
+ msr_map[i].name, pmcdev->pkgc_res_cnt[i],
+ msr_map[i].name, pc_cnt);
+ }
+ }
return 0;
}
@@ -1527,6 +1650,9 @@ static __maybe_unused int pmc_core_resume(struct device *dev)
{
struct pmc_dev *pmcdev = dev_get_drvdata(dev);
+ if (ltr_ignore_all_suspend)
+ pmc_core_ltr_restore_all(pmcdev);
+
if (pmcdev->resume)
return pmcdev->resume(pmcdev);
@@ -1551,7 +1677,7 @@ static struct platform_driver pmc_core_driver = {
.dev_groups = pmc_dev_groups,
},
.probe = pmc_core_probe,
- .remove_new = pmc_core_remove,
+ .remove = pmc_core_remove,
};
module_platform_driver(pmc_core_driver);
diff --git a/drivers/platform/x86/intel/pmc/core.h b/drivers/platform/x86/intel/pmc/core.h
index 54137faaae2b..b9d3291d0bf2 100644
--- a/drivers/platform/x86/intel/pmc/core.h
+++ b/drivers/platform/x86/intel/pmc/core.h
@@ -22,6 +22,7 @@ struct telem_endpoint;
#define PMC_BASE_ADDR_DEFAULT 0xFE000000
#define MAX_NUM_PMC 3
+#define S0IX_BLK_SIZE 4
/* Sunrise Point Power Management Controller PCI Device ID */
#define SPT_PMC_PCI_DEVICE_ID 0x9d21
@@ -282,12 +283,14 @@ enum ppfear_regs {
#define LNL_PMC_LTR_OSSE 0x1B88
#define LNL_NUM_IP_IGN_ALLOWED 27
#define LNL_PPFEAR_NUM_ENTRIES 12
+#define LNL_S0IX_BLOCKER_OFFSET 0x2004
extern const char *pmc_lpm_modes[];
struct pmc_bit_map {
const char *name;
u32 bit_mask;
+ u8 blk;
};
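
The new blk member tells the S0ix blocker code how many 4-byte (S0IX_BLK_SIZE) counter
slots an entry occupies; entries with blk == 0 expose power-gate status only and are
skipped when walking the blocker counter space. Example entries, mirroring the LNL tables
later in the series (sketch only):

	{"XHCI_PGD0_PG_STS",    BIT(3), 1},	/* owns one 4-byte blocker counter slot */
	{"ESPISPI_PGD0_PG_STS", BIT(2), 0},	/* power-gate status only, no counter */
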
/**
@@ -298,6 +301,7 @@ struct pmc_bit_map {
* @pll_sts: Maps name of PLL to corresponding bit status
* @slps0_dbg_maps: Array of SLP_S0_DBG* registers containing debug info
* @ltr_show_sts: Maps PCH IP Names to their MMIO register offsets
+ * @s0ix_blocker_maps: Maps name of IP block to S0ix blocker counter
* @slp_s0_offset: PWRMBASE offset to read SLP_S0 residency
* @ltr_ignore_offset: PWRMBASE offset to read/write LTR ignore bit
* @regmap_length: Length of memory to map from PWRMBASE address to access
@@ -307,6 +311,7 @@ struct pmc_bit_map {
* @pm_cfg_offset: PWRMBASE offset to PM_CFG register
* @pm_read_disable_bit: Bit index to read PMC_READ_DISABLE
* @slps0_dbg_offset: PWRMBASE offset to SLP_S0_DEBUG_REG*
 * @s0ix_blocker_offset: PWRMBASE offset to S0ix blocker counter
*
* Each PCH has unique set of register offsets and bit indexes. This structure
* captures them to have a common implementation.
@@ -319,6 +324,7 @@ struct pmc_reg_map {
const struct pmc_bit_map *ltr_show_sts;
const struct pmc_bit_map *msr_sts;
const struct pmc_bit_map **lpm_sts;
+ const struct pmc_bit_map **s0ix_blocker_maps;
const u32 slp_s0_offset;
const int slp_s0_res_counter_step;
const u32 ltr_ignore_offset;
@@ -330,6 +336,7 @@ struct pmc_reg_map {
const u32 slps0_dbg_offset;
const u32 ltr_ignore_max;
const u32 pm_vric1_offset;
+ const u32 s0ix_blocker_offset;
/* Low Power Mode registers */
const int lpm_num_maps;
const int lpm_num_modes;
@@ -365,6 +372,7 @@ struct pmc_info {
* @map: pointer to pmc_reg_map struct that contains platform
* specific attributes
* @lpm_req_regs: List of substate requirements
+ * @ltr_ign: Holds LTR ignore data while suspended
*
* pmc contains info about one power management controller device.
*/
@@ -373,6 +381,7 @@ struct pmc {
void __iomem *regbase;
const struct pmc_reg_map *map;
u32 *lpm_req_regs;
+ u32 ltr_ign;
};
/**
@@ -385,7 +394,8 @@ struct pmc {
* @pmc_xram_read_bit: flag to indicate whether PMC XRAM shadow registers
* used to read MPHY PG and PLL status are available
 * @mutex_lock: mutex to complete one transaction
- * @pc10_counter: PC10 residency counter
+ * @pkgc_res_cnt: Array of PKGC residency counters
+ * @num_of_pkgc: Number of PKGC residency counters
* @s0ix_counter: S0ix residency (step adjusted)
* @num_lpm_modes: Count of enabled modes
* @lpm_en_modes: Array of enabled modes from lowest to highest priority
@@ -403,13 +413,15 @@ struct pmc_dev {
int pmc_xram_read_bit;
struct mutex lock; /* generic mutex lock for PMC Core */
- u64 pc10_counter;
u64 s0ix_counter;
int num_lpm_modes;
int lpm_en_modes[LPM_MAX_NUM_MODES];
void (*suspend)(struct pmc_dev *pmcdev);
int (*resume)(struct pmc_dev *pmcdev);
+ u64 *pkgc_res_cnt;
+ u8 num_of_pkgc;
+
bool has_die_c6;
u32 die_c6_offset;
struct telem_endpoint *punit_ep;
@@ -532,8 +544,10 @@ extern const struct pmc_bit_map lnl_vnn_req_status_2_map[];
extern const struct pmc_bit_map lnl_vnn_req_status_3_map[];
extern const struct pmc_bit_map lnl_vnn_misc_status_map[];
extern const struct pmc_bit_map *lnl_lpm_maps[];
+extern const struct pmc_bit_map *lnl_blk_maps[];
extern const struct pmc_bit_map lnl_pfear_map[];
extern const struct pmc_bit_map *ext_lnl_pfear_map[];
+extern const struct pmc_bit_map lnl_signal_status_map[];
/* ARL */
extern const struct pmc_bit_map arl_socs_ltr_show_map[];
@@ -592,10 +606,12 @@ int lnl_core_init(struct pmc_dev *pmcdev);
void cnl_suspend(struct pmc_dev *pmcdev);
int cnl_resume(struct pmc_dev *pmcdev);
-#define pmc_for_each_mode(i, mode, pmcdev) \
- for (i = 0, mode = pmcdev->lpm_en_modes[i]; \
- i < pmcdev->num_lpm_modes; \
- i++, mode = pmcdev->lpm_en_modes[i])
+#define pmc_for_each_mode(mode, pmcdev) \
+ for (unsigned int __i = 0, __cond; \
+ __cond = __i < (pmcdev)->num_lpm_modes, \
+ __cond && ((mode) = (pmcdev)->lpm_en_modes[__i]), \
+ __cond; \
+ __i++)
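
The rewritten macro declares its own internal index (__i), so callers now supply only the
mode variable; the extra index parameter dropped throughout core.c and core_ssram.c is
gone. Typical usage stays a plain loop, e.g. (sketch matching the updated callers):

	int mode;

	pmc_for_each_mode(mode, pmcdev)
		seq_printf(s, "%-10s\n", pmc_lpm_modes[mode]);
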
#define DEFINE_PMC_CORE_ATTR_WRITE(__name) \
static int __name ## _open(struct inode *inode, struct file *file) \
diff --git a/drivers/platform/x86/intel/pmc/core_ssram.c b/drivers/platform/x86/intel/pmc/core_ssram.c
index 1bde86c54eb9..739569803017 100644
--- a/drivers/platform/x86/intel/pmc/core_ssram.c
+++ b/drivers/platform/x86/intel/pmc/core_ssram.c
@@ -9,11 +9,11 @@
*/
#include <linux/cleanup.h>
+#include <linux/intel_vsec.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include "core.h"
-#include "../vsec.h"
#include "../pmt/telemetry.h"
#define SSRAM_HDR_SIZE 0x100
@@ -29,7 +29,7 @@
#define LPM_REG_COUNT 28
#define LPM_MODE_OFFSET 1
-DEFINE_FREE(pmc_core_iounmap, void __iomem *, iounmap(_T));
+DEFINE_FREE(pmc_core_iounmap, void __iomem *, if (_T) iounmap(_T))
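
The added NULL check matters because the cleanup action runs via the __free() attribute
from <linux/cleanup.h>: a pointer declared with __free(pmc_core_iounmap) is handed to the
action unconditionally when it goes out of scope, including on paths where ioremap()
failed. A minimal sketch of the pattern (example_map_and_read is hypothetical, assuming
this DEFINE_FREE()):

	static int example_map_and_read(resource_size_t base, u32 *out)
	{
		void __iomem *regs __free(pmc_core_iounmap) = ioremap(base, 0x100);

		if (!regs)
			return -ENOMEM;	/* cleanup runs with NULL, hence the if (_T) guard */

		*out = readl(regs);
		return 0;		/* iounmap(regs) happens automatically here */
	}
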
static u32 pmc_core_find_guid(struct pmc_info *list, const struct pmc_reg_map *map)
{
@@ -45,7 +45,7 @@ static int pmc_core_get_lpm_req(struct pmc_dev *pmcdev, struct pmc *pmc)
struct telem_endpoint *ep;
const u8 *lpm_indices;
int num_maps, mode_offset = 0;
- int ret, mode, i;
+ int ret, mode;
int lpm_size;
u32 guid;
@@ -116,7 +116,7 @@ static int pmc_core_get_lpm_req(struct pmc_dev *pmcdev, struct pmc *pmc)
*
*/
mode_offset = LPM_HEADER_OFFSET + LPM_MODE_OFFSET;
- pmc_for_each_mode(i, mode, pmcdev) {
+ pmc_for_each_mode(mode, pmcdev) {
u32 *req_offset = pmc->lpm_req_regs + (mode * num_maps);
int m;
@@ -262,13 +262,19 @@ pmc_core_ssram_get_pmc(struct pmc_dev *pmcdev, int pmc_idx, u32 offset)
ssram_base = ssram_pcidev->resource[0].start;
tmp_ssram = ioremap(ssram_base, SSRAM_HDR_SIZE);
+ if (!tmp_ssram)
+ return -ENOMEM;
if (pmc_idx != PMC_IDX_MAIN) {
/*
* The secondary PMC BARS (which are behind hidden PCI devices)
* are read from fixed offsets in MMIO of the primary PMC BAR.
+ * If a device is not present, the value will be 0.
*/
ssram_base = get_base(tmp_ssram, offset);
+ if (!ssram_base)
+ return 0;
+
ssram = ioremap(ssram_base, SSRAM_HDR_SIZE);
if (!ssram)
return -ENOMEM;
@@ -322,5 +328,5 @@ release_dev:
return ret;
}
-MODULE_IMPORT_NS(INTEL_VSEC);
-MODULE_IMPORT_NS(INTEL_PMT_TELEMETRY);
+MODULE_IMPORT_NS("INTEL_VSEC");
+MODULE_IMPORT_NS("INTEL_PMT_TELEMETRY");
diff --git a/drivers/platform/x86/intel/pmc/lnl.c b/drivers/platform/x86/intel/pmc/lnl.c
index abad17cdd3d7..be029f12cdf4 100644
--- a/drivers/platform/x86/intel/pmc/lnl.c
+++ b/drivers/platform/x86/intel/pmc/lnl.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* This file contains platform specific structure definitions
- * and init function used by Meteor Lake PCH.
+ * and init function used by Lunar Lake PCH.
*
* Copyright (c) 2022, Intel Corporation.
* All Rights Reserved.
@@ -13,21 +13,6 @@
#include "core.h"
-#define SOCM_LPM_REQ_GUID 0x11594920
-
-#define PMC_DEVID_SOCM 0xa87f
-
-static const u8 LNL_LPM_REG_INDEX[] = {0, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20};
-
-static struct pmc_info lnl_pmc_info_list[] = {
- {
- .guid = SOCM_LPM_REQ_GUID,
- .devid = PMC_DEVID_SOCM,
- .map = &lnl_socm_reg_map,
- },
- {}
-};
-
const struct pmc_bit_map lnl_ltr_show_map[] = {
{"SOUTHPORT_A", CNP_PMC_LTR_SPA},
{"SOUTHPORT_B", CNP_PMC_LTR_SPB},
@@ -71,264 +56,296 @@ const struct pmc_bit_map lnl_ltr_show_map[] = {
};
const struct pmc_bit_map lnl_power_gating_status_0_map[] = {
- {"PMC_PGD0_PG_STS", BIT(0)},
- {"FUSE_OSSE_PGD0_PG_STS", BIT(1)},
- {"ESPISPI_PGD0_PG_STS", BIT(2)},
- {"XHCI_PGD0_PG_STS", BIT(3)},
- {"SPA_PGD0_PG_STS", BIT(4)},
- {"SPB_PGD0_PG_STS", BIT(5)},
- {"SPR16B0_PGD0_PG_STS", BIT(6)},
- {"GBE_PGD0_PG_STS", BIT(7)},
- {"SBR8B7_PGD0_PG_STS", BIT(8)},
- {"SBR8B6_PGD0_PG_STS", BIT(9)},
- {"SBR16B1_PGD0_PG_STS", BIT(10)},
- {"SBR8B8_PGD0_PG_STS", BIT(11)},
- {"ESE_PGD3_PG_STS", BIT(12)},
- {"D2D_DISP_PGD0_PG_STS", BIT(13)},
- {"LPSS_PGD0_PG_STS", BIT(14)},
- {"LPC_PGD0_PG_STS", BIT(15)},
- {"SMB_PGD0_PG_STS", BIT(16)},
- {"ISH_PGD0_PG_STS", BIT(17)},
- {"SBR8B2_PGD0_PG_STS", BIT(18)},
- {"NPK_PGD0_PG_STS", BIT(19)},
- {"D2D_NOC_PGD0_PG_STS", BIT(20)},
- {"SAFSS_PGD0_PG_STS", BIT(21)},
- {"FUSE_PGD0_PG_STS", BIT(22)},
- {"D2D_DISP_PGD1_PG_STS", BIT(23)},
- {"MPFPW1_PGD0_PG_STS", BIT(24)},
- {"XDCI_PGD0_PG_STS", BIT(25)},
- {"EXI_PGD0_PG_STS", BIT(26)},
- {"CSE_PGD0_PG_STS", BIT(27)},
- {"KVMCC_PGD0_PG_STS", BIT(28)},
- {"PMT_PGD0_PG_STS", BIT(29)},
- {"CLINK_PGD0_PG_STS", BIT(30)},
- {"PTIO_PGD0_PG_STS", BIT(31)},
+ {"PMC_PGD0_PG_STS", BIT(0), 0},
+ {"FUSE_OSSE_PGD0_PG_STS", BIT(1), 0},
+ {"ESPISPI_PGD0_PG_STS", BIT(2), 0},
+ {"XHCI_PGD0_PG_STS", BIT(3), 1},
+ {"SPA_PGD0_PG_STS", BIT(4), 1},
+ {"SPB_PGD0_PG_STS", BIT(5), 1},
+ {"SPR16B0_PGD0_PG_STS", BIT(6), 0},
+ {"GBE_PGD0_PG_STS", BIT(7), 1},
+ {"SBR8B7_PGD0_PG_STS", BIT(8), 0},
+ {"SBR8B6_PGD0_PG_STS", BIT(9), 0},
+ {"SBR16B1_PGD0_PG_STS", BIT(10), 0},
+ {"SBR8B8_PGD0_PG_STS", BIT(11), 0},
+ {"ESE_PGD3_PG_STS", BIT(12), 1},
+ {"D2D_DISP_PGD0_PG_STS", BIT(13), 1},
+ {"LPSS_PGD0_PG_STS", BIT(14), 1},
+ {"LPC_PGD0_PG_STS", BIT(15), 0},
+ {"SMB_PGD0_PG_STS", BIT(16), 0},
+ {"ISH_PGD0_PG_STS", BIT(17), 0},
+ {"SBR8B2_PGD0_PG_STS", BIT(18), 0},
+ {"NPK_PGD0_PG_STS", BIT(19), 0},
+ {"D2D_NOC_PGD0_PG_STS", BIT(20), 0},
+ {"SAFSS_PGD0_PG_STS", BIT(21), 0},
+ {"FUSE_PGD0_PG_STS", BIT(22), 0},
+ {"D2D_DISP_PGD1_PG_STS", BIT(23), 1},
+ {"MPFPW1_PGD0_PG_STS", BIT(24), 0},
+ {"XDCI_PGD0_PG_STS", BIT(25), 1},
+ {"EXI_PGD0_PG_STS", BIT(26), 0},
+ {"CSE_PGD0_PG_STS", BIT(27), 1},
+ {"KVMCC_PGD0_PG_STS", BIT(28), 1},
+ {"PMT_PGD0_PG_STS", BIT(29), 1},
+ {"CLINK_PGD0_PG_STS", BIT(30), 1},
+ {"PTIO_PGD0_PG_STS", BIT(31), 1},
{}
};
const struct pmc_bit_map lnl_power_gating_status_1_map[] = {
- {"USBR0_PGD0_PG_STS", BIT(0)},
- {"SUSRAM_PGD0_PG_STS", BIT(1)},
- {"SMT1_PGD0_PG_STS", BIT(2)},
- {"U3FPW1_PGD0_PG_STS", BIT(3)},
- {"SMS2_PGD0_PG_STS", BIT(4)},
- {"SMS1_PGD0_PG_STS", BIT(5)},
- {"CSMERTC_PGD0_PG_STS", BIT(6)},
- {"CSMEPSF_PGD0_PG_STS", BIT(7)},
- {"FIA_PG_PGD0_PG_STS", BIT(8)},
- {"SBR16B4_PGD0_PG_STS", BIT(9)},
- {"P2SB8B_PGD0_PG_STS", BIT(10)},
- {"DBG_SBR_PGD0_PG_STS", BIT(11)},
- {"SBR8B9_PGD0_PG_STS", BIT(12)},
- {"OSSE_SMT1_PGD0_PG_STS", BIT(13)},
- {"SBR8B10_PGD0_PG_STS", BIT(14)},
- {"SBR16B3_PGD0_PG_STS", BIT(15)},
- {"G5FPW1_PGD0_PG_STS", BIT(16)},
- {"SBRG_PGD0_PG_STS", BIT(17)},
- {"PSF4_PGD0_PG_STS", BIT(18)},
- {"CNVI_PGD0_PG_STS", BIT(19)},
- {"USFX2_PGD0_PG_STS", BIT(20)},
- {"ENDBG_PGD0_PG_STS", BIT(21)},
- {"FIACPCB_P5X4_PGD0_PG_STS", BIT(22)},
- {"SBR8B3_PGD0_PG_STS", BIT(23)},
- {"SBR8B0_PGD0_PG_STS", BIT(24)},
- {"NPK_PGD1_PG_STS", BIT(25)},
- {"OSSE_HOTHAM_PGD0_PG_STS", BIT(26)},
- {"D2D_NOC_PGD2_PG_STS", BIT(27)},
- {"SBR8B1_PGD0_PG_STS", BIT(28)},
- {"PSF6_PGD0_PG_STS", BIT(29)},
- {"PSF7_PGD0_PG_STS", BIT(30)},
- {"FIA_U_PGD0_PG_STS", BIT(31)},
+ {"USBR0_PGD0_PG_STS", BIT(0), 1},
+ {"SUSRAM_PGD0_PG_STS", BIT(1), 1},
+ {"SMT1_PGD0_PG_STS", BIT(2), 1},
+ {"U3FPW1_PGD0_PG_STS", BIT(3), 0},
+ {"SMS2_PGD0_PG_STS", BIT(4), 1},
+ {"SMS1_PGD0_PG_STS", BIT(5), 1},
+ {"CSMERTC_PGD0_PG_STS", BIT(6), 0},
+ {"CSMEPSF_PGD0_PG_STS", BIT(7), 0},
+ {"FIA_PG_PGD0_PG_STS", BIT(8), 0},
+ {"SBR16B4_PGD0_PG_STS", BIT(9), 0},
+ {"P2SB8B_PGD0_PG_STS", BIT(10), 1},
+ {"DBG_SBR_PGD0_PG_STS", BIT(11), 0},
+ {"SBR8B9_PGD0_PG_STS", BIT(12), 0},
+ {"OSSE_SMT1_PGD0_PG_STS", BIT(13), 1},
+ {"SBR8B10_PGD0_PG_STS", BIT(14), 0},
+ {"SBR16B3_PGD0_PG_STS", BIT(15), 0},
+ {"G5FPW1_PGD0_PG_STS", BIT(16), 0},
+ {"SBRG_PGD0_PG_STS", BIT(17), 0},
+ {"PSF4_PGD0_PG_STS", BIT(18), 0},
+ {"CNVI_PGD0_PG_STS", BIT(19), 0},
+ {"USFX2_PGD0_PG_STS", BIT(20), 1},
+ {"ENDBG_PGD0_PG_STS", BIT(21), 0},
+ {"FIACPCB_P5X4_PGD0_PG_STS", BIT(22), 0},
+ {"SBR8B3_PGD0_PG_STS", BIT(23), 0},
+ {"SBR8B0_PGD0_PG_STS", BIT(24), 0},
+ {"NPK_PGD1_PG_STS", BIT(25), 0},
+ {"OSSE_HOTHAM_PGD0_PG_STS", BIT(26), 1},
+ {"D2D_NOC_PGD2_PG_STS", BIT(27), 1},
+ {"SBR8B1_PGD0_PG_STS", BIT(28), 0},
+ {"PSF6_PGD0_PG_STS", BIT(29), 0},
+ {"PSF7_PGD0_PG_STS", BIT(30), 0},
+ {"FIA_U_PGD0_PG_STS", BIT(31), 0},
{}
};
const struct pmc_bit_map lnl_power_gating_status_2_map[] = {
- {"PSF8_PGD0_PG_STS", BIT(0)},
- {"SBR16B2_PGD0_PG_STS", BIT(1)},
- {"D2D_IPU_PGD0_PG_STS", BIT(2)},
- {"FIACPCB_U_PGD0_PG_STS", BIT(3)},
- {"TAM_PGD0_PG_STS", BIT(4)},
- {"D2D_NOC_PGD1_PG_STS", BIT(5)},
- {"TBTLSX_PGD0_PG_STS", BIT(6)},
- {"THC0_PGD0_PG_STS", BIT(7)},
- {"THC1_PGD0_PG_STS", BIT(8)},
- {"PMC_PGD0_PG_STS", BIT(9)},
- {"SBR8B5_PGD0_PG_STS", BIT(10)},
- {"UFSPW1_PGD0_PG_STS", BIT(11)},
- {"DBC_PGD0_PG_STS", BIT(12)},
- {"TCSS_PGD0_PG_STS", BIT(13)},
- {"FIA_P5X4_PGD0_PG_STS", BIT(14)},
- {"DISP_PGA_PGD0_PG_STS", BIT(15)},
- {"DISP_PSF_PGD0_PG_STS", BIT(16)},
- {"PSF0_PGD0_PG_STS", BIT(17)},
- {"P2SB16B_PGD0_PG_STS", BIT(18)},
- {"ACE_PGD0_PG_STS", BIT(19)},
- {"ACE_PGD1_PG_STS", BIT(20)},
- {"ACE_PGD2_PG_STS", BIT(21)},
- {"ACE_PGD3_PG_STS", BIT(22)},
- {"ACE_PGD4_PG_STS", BIT(23)},
- {"ACE_PGD5_PG_STS", BIT(24)},
- {"ACE_PGD6_PG_STS", BIT(25)},
- {"ACE_PGD7_PG_STS", BIT(26)},
- {"ACE_PGD8_PG_STS", BIT(27)},
- {"ACE_PGD9_PG_STS", BIT(28)},
- {"ACE_PGD10_PG_STS", BIT(29)},
- {"FIACPCB_PG_PGD0_PG_STS", BIT(30)},
- {"OSSE_PGD0_PG_STS", BIT(31)},
+ {"PSF8_PGD0_PG_STS", BIT(0), 0},
+ {"SBR16B2_PGD0_PG_STS", BIT(1), 0},
+ {"D2D_IPU_PGD0_PG_STS", BIT(2), 1},
+ {"FIACPCB_U_PGD0_PG_STS", BIT(3), 0},
+ {"TAM_PGD0_PG_STS", BIT(4), 1},
+ {"D2D_NOC_PGD1_PG_STS", BIT(5), 1},
+ {"TBTLSX_PGD0_PG_STS", BIT(6), 1},
+ {"THC0_PGD0_PG_STS", BIT(7), 1},
+ {"THC1_PGD0_PG_STS", BIT(8), 1},
+ {"PMC_PGD0_PG_STS", BIT(9), 0},
+ {"SBR8B5_PGD0_PG_STS", BIT(10), 0},
+ {"UFSPW1_PGD0_PG_STS", BIT(11), 0},
+ {"DBC_PGD0_PG_STS", BIT(12), 0},
+ {"TCSS_PGD0_PG_STS", BIT(13), 0},
+ {"FIA_P5X4_PGD0_PG_STS", BIT(14), 0},
+ {"DISP_PGA_PGD0_PG_STS", BIT(15), 0},
+ {"DISP_PSF_PGD0_PG_STS", BIT(16), 0},
+ {"PSF0_PGD0_PG_STS", BIT(17), 0},
+ {"P2SB16B_PGD0_PG_STS", BIT(18), 1},
+ {"ACE_PGD0_PG_STS", BIT(19), 0},
+ {"ACE_PGD1_PG_STS", BIT(20), 0},
+ {"ACE_PGD2_PG_STS", BIT(21), 0},
+ {"ACE_PGD3_PG_STS", BIT(22), 0},
+ {"ACE_PGD4_PG_STS", BIT(23), 0},
+ {"ACE_PGD5_PG_STS", BIT(24), 0},
+ {"ACE_PGD6_PG_STS", BIT(25), 0},
+ {"ACE_PGD7_PG_STS", BIT(26), 0},
+ {"ACE_PGD8_PG_STS", BIT(27), 0},
+ {"ACE_PGD9_PG_STS", BIT(28), 0},
+ {"ACE_PGD10_PG_STS", BIT(29), 0},
+ {"FIACPCB_PG_PGD0_PG_STS", BIT(30), 0},
+ {"OSSE_PGD0_PG_STS", BIT(31), 1},
{}
};
const struct pmc_bit_map lnl_d3_status_0_map[] = {
- {"LPSS_D3_STS", BIT(3)},
- {"XDCI_D3_STS", BIT(4)},
- {"XHCI_D3_STS", BIT(5)},
- {"SPA_D3_STS", BIT(12)},
- {"SPB_D3_STS", BIT(13)},
- {"OSSE_D3_STS", BIT(15)},
- {"ESPISPI_D3_STS", BIT(18)},
- {"PSTH_D3_STS", BIT(21)},
+ {"LPSS_D3_STS", BIT(3), 1},
+ {"XDCI_D3_STS", BIT(4), 1},
+ {"XHCI_D3_STS", BIT(5), 1},
+ {"SPA_D3_STS", BIT(12), 0},
+ {"SPB_D3_STS", BIT(13), 0},
+ {"OSSE_D3_STS", BIT(15), 0},
+ {"ESPISPI_D3_STS", BIT(18), 0},
+ {"PSTH_D3_STS", BIT(21), 0},
{}
};
const struct pmc_bit_map lnl_d3_status_1_map[] = {
- {"OSSE_SMT1_D3_STS", BIT(7)},
- {"GBE_D3_STS", BIT(19)},
- {"ITSS_D3_STS", BIT(23)},
- {"CNVI_D3_STS", BIT(27)},
- {"UFSX2_D3_STS", BIT(28)},
- {"OSSE_HOTHAM_D3_STS", BIT(31)},
+ {"OSSE_SMT1_D3_STS", BIT(7), 0},
+ {"GBE_D3_STS", BIT(19), 0},
+ {"ITSS_D3_STS", BIT(23), 0},
+ {"CNVI_D3_STS", BIT(27), 0},
+ {"UFSX2_D3_STS", BIT(28), 1},
+ {"OSSE_HOTHAM_D3_STS", BIT(31), 0},
{}
};
const struct pmc_bit_map lnl_d3_status_2_map[] = {
- {"ESE_D3_STS", BIT(0)},
- {"CSMERTC_D3_STS", BIT(1)},
- {"SUSRAM_D3_STS", BIT(2)},
- {"CSE_D3_STS", BIT(4)},
- {"KVMCC_D3_STS", BIT(5)},
- {"USBR0_D3_STS", BIT(6)},
- {"ISH_D3_STS", BIT(7)},
- {"SMT1_D3_STS", BIT(8)},
- {"SMT2_D3_STS", BIT(9)},
- {"SMT3_D3_STS", BIT(10)},
- {"OSSE_SMT2_D3_STS", BIT(13)},
- {"CLINK_D3_STS", BIT(14)},
- {"PTIO_D3_STS", BIT(16)},
- {"PMT_D3_STS", BIT(17)},
- {"SMS1_D3_STS", BIT(18)},
- {"SMS2_D3_STS", BIT(19)},
+ {"ESE_D3_STS", BIT(0), 0},
+ {"CSMERTC_D3_STS", BIT(1), 0},
+ {"SUSRAM_D3_STS", BIT(2), 0},
+ {"CSE_D3_STS", BIT(4), 0},
+ {"KVMCC_D3_STS", BIT(5), 0},
+ {"USBR0_D3_STS", BIT(6), 0},
+ {"ISH_D3_STS", BIT(7), 0},
+ {"SMT1_D3_STS", BIT(8), 0},
+ {"SMT2_D3_STS", BIT(9), 0},
+ {"SMT3_D3_STS", BIT(10), 0},
+ {"OSSE_SMT2_D3_STS", BIT(13), 0},
+ {"CLINK_D3_STS", BIT(14), 0},
+ {"PTIO_D3_STS", BIT(16), 0},
+ {"PMT_D3_STS", BIT(17), 0},
+ {"SMS1_D3_STS", BIT(18), 0},
+ {"SMS2_D3_STS", BIT(19), 0},
{}
};
const struct pmc_bit_map lnl_d3_status_3_map[] = {
- {"THC0_D3_STS", BIT(14)},
- {"THC1_D3_STS", BIT(15)},
- {"OSSE_SMT3_D3_STS", BIT(21)},
- {"ACE_D3_STS", BIT(23)},
+ {"THC0_D3_STS", BIT(14), 1},
+ {"THC1_D3_STS", BIT(15), 1},
+ {"OSSE_SMT3_D3_STS", BIT(21), 0},
+ {"ACE_D3_STS", BIT(23), 0},
{}
};
const struct pmc_bit_map lnl_vnn_req_status_0_map[] = {
- {"LPSS_VNN_REQ_STS", BIT(3)},
- {"OSSE_VNN_REQ_STS", BIT(15)},
- {"ESPISPI_VNN_REQ_STS", BIT(18)},
+ {"LPSS_VNN_REQ_STS", BIT(3), 1},
+ {"OSSE_VNN_REQ_STS", BIT(15), 1},
+ {"ESPISPI_VNN_REQ_STS", BIT(18), 1},
{}
};
const struct pmc_bit_map lnl_vnn_req_status_1_map[] = {
- {"NPK_VNN_REQ_STS", BIT(4)},
- {"OSSE_SMT1_VNN_REQ_STS", BIT(7)},
- {"DFXAGG_VNN_REQ_STS", BIT(8)},
- {"EXI_VNN_REQ_STS", BIT(9)},
- {"P2D_VNN_REQ_STS", BIT(18)},
- {"GBE_VNN_REQ_STS", BIT(19)},
- {"SMB_VNN_REQ_STS", BIT(25)},
- {"LPC_VNN_REQ_STS", BIT(26)},
+ {"NPK_VNN_REQ_STS", BIT(4), 1},
+ {"OSSE_SMT1_VNN_REQ_STS", BIT(7), 1},
+ {"DFXAGG_VNN_REQ_STS", BIT(8), 0},
+ {"EXI_VNN_REQ_STS", BIT(9), 1},
+ {"P2D_VNN_REQ_STS", BIT(18), 1},
+ {"GBE_VNN_REQ_STS", BIT(19), 1},
+ {"SMB_VNN_REQ_STS", BIT(25), 1},
+ {"LPC_VNN_REQ_STS", BIT(26), 0},
{}
};
const struct pmc_bit_map lnl_vnn_req_status_2_map[] = {
- {"eSE_VNN_REQ_STS", BIT(0)},
- {"CSMERTC_VNN_REQ_STS", BIT(1)},
- {"CSE_VNN_REQ_STS", BIT(4)},
- {"ISH_VNN_REQ_STS", BIT(7)},
- {"SMT1_VNN_REQ_STS", BIT(8)},
- {"CLINK_VNN_REQ_STS", BIT(14)},
- {"SMS1_VNN_REQ_STS", BIT(18)},
- {"SMS2_VNN_REQ_STS", BIT(19)},
- {"GPIOCOM4_VNN_REQ_STS", BIT(20)},
- {"GPIOCOM3_VNN_REQ_STS", BIT(21)},
- {"GPIOCOM2_VNN_REQ_STS", BIT(22)},
- {"GPIOCOM1_VNN_REQ_STS", BIT(23)},
- {"GPIOCOM0_VNN_REQ_STS", BIT(24)},
+ {"eSE_VNN_REQ_STS", BIT(0), 1},
+ {"CSMERTC_VNN_REQ_STS", BIT(1), 1},
+ {"CSE_VNN_REQ_STS", BIT(4), 1},
+ {"ISH_VNN_REQ_STS", BIT(7), 1},
+ {"SMT1_VNN_REQ_STS", BIT(8), 1},
+ {"CLINK_VNN_REQ_STS", BIT(14), 1},
+ {"SMS1_VNN_REQ_STS", BIT(18), 1},
+ {"SMS2_VNN_REQ_STS", BIT(19), 1},
+ {"GPIOCOM4_VNN_REQ_STS", BIT(20), 1},
+ {"GPIOCOM3_VNN_REQ_STS", BIT(21), 1},
+ {"GPIOCOM2_VNN_REQ_STS", BIT(22), 0},
+ {"GPIOCOM1_VNN_REQ_STS", BIT(23), 1},
+ {"GPIOCOM0_VNN_REQ_STS", BIT(24), 1},
{}
};
const struct pmc_bit_map lnl_vnn_req_status_3_map[] = {
- {"DISP_SHIM_VNN_REQ_STS", BIT(2)},
- {"DTS0_VNN_REQ_STS", BIT(7)},
- {"GPIOCOM5_VNN_REQ_STS", BIT(11)},
+ {"DISP_SHIM_VNN_REQ_STS", BIT(2), 0},
+ {"DTS0_VNN_REQ_STS", BIT(7), 0},
+ {"GPIOCOM5_VNN_REQ_STS", BIT(11), 2},
{}
};
const struct pmc_bit_map lnl_vnn_misc_status_map[] = {
- {"CPU_C10_REQ_STS", BIT(0)},
- {"TS_OFF_REQ_STS", BIT(1)},
- {"PNDE_MET_REQ_STS", BIT(2)},
- {"PCIE_DEEP_PM_REQ_STS", BIT(3)},
- {"PMC_CLK_THROTTLE_EN_REQ_STS", BIT(4)},
- {"NPK_VNNAON_REQ_STS", BIT(5)},
- {"VNN_SOC_REQ_STS", BIT(6)},
- {"ISH_VNNAON_REQ_STS", BIT(7)},
- {"D2D_NOC_CFI_QACTIVE_REQ_STS", BIT(8)},
- {"D2D_NOC_GPSB_QACTIVE_REQ_STS", BIT(9)},
- {"D2D_NOC_IPU_QACTIVE_REQ_STS", BIT(10)},
- {"PLT_GREATER_REQ_STS", BIT(11)},
- {"PCIE_CLKREQ_REQ_STS", BIT(12)},
- {"PMC_IDLE_FB_OCP_REQ_STS", BIT(13)},
- {"PM_SYNC_STATES_REQ_STS", BIT(14)},
- {"EA_REQ_STS", BIT(15)},
- {"MPHY_CORE_OFF_REQ_STS", BIT(16)},
- {"BRK_EV_EN_REQ_STS", BIT(17)},
- {"AUTO_DEMO_EN_REQ_STS", BIT(18)},
- {"ITSS_CLK_SRC_REQ_STS", BIT(19)},
- {"LPC_CLK_SRC_REQ_STS", BIT(20)},
- {"ARC_IDLE_REQ_STS", BIT(21)},
- {"MPHY_SUS_REQ_STS", BIT(22)},
- {"FIA_DEEP_PM_REQ_STS", BIT(23)},
- {"UXD_CONNECTED_REQ_STS", BIT(24)},
- {"ARC_INTERRUPT_WAKE_REQ_STS", BIT(25)},
- {"D2D_NOC_DISP_DDI_QACTIVE_REQ_STS", BIT(26)},
- {"PRE_WAKE0_REQ_STS", BIT(27)},
- {"PRE_WAKE1_REQ_STS", BIT(28)},
- {"PRE_WAKE2_EN_REQ_STS", BIT(29)},
- {"WOV_REQ_STS", BIT(30)},
- {"D2D_NOC_DISP_EDP_QACTIVE_REQ_STS_31", BIT(31)},
+ {"CPU_C10_REQ_STS", BIT(0), 0},
+ {"TS_OFF_REQ_STS", BIT(1), 0},
+ {"PNDE_MET_REQ_STS", BIT(2), 1},
+ {"PCIE_DEEP_PM_REQ_STS", BIT(3), 0},
+ {"PMC_CLK_THROTTLE_EN_REQ_STS", BIT(4), 0},
+ {"NPK_VNNAON_REQ_STS", BIT(5), 0},
+ {"VNN_SOC_REQ_STS", BIT(6), 1},
+ {"ISH_VNNAON_REQ_STS", BIT(7), 0},
+ {"D2D_NOC_CFI_QACTIVE_REQ_STS", BIT(8), 1},
+ {"D2D_NOC_GPSB_QACTIVE_REQ_STS", BIT(9), 1},
+ {"D2D_NOC_IPU_QACTIVE_REQ_STS", BIT(10), 1},
+ {"PLT_GREATER_REQ_STS", BIT(11), 1},
+ {"PCIE_CLKREQ_REQ_STS", BIT(12), 0},
+ {"PMC_IDLE_FB_OCP_REQ_STS", BIT(13), 0},
+ {"PM_SYNC_STATES_REQ_STS", BIT(14), 0},
+ {"EA_REQ_STS", BIT(15), 0},
+ {"MPHY_CORE_OFF_REQ_STS", BIT(16), 0},
+ {"BRK_EV_EN_REQ_STS", BIT(17), 0},
+ {"AUTO_DEMO_EN_REQ_STS", BIT(18), 0},
+ {"ITSS_CLK_SRC_REQ_STS", BIT(19), 1},
+ {"LPC_CLK_SRC_REQ_STS", BIT(20), 0},
+ {"ARC_IDLE_REQ_STS", BIT(21), 0},
+ {"MPHY_SUS_REQ_STS", BIT(22), 0},
+ {"FIA_DEEP_PM_REQ_STS", BIT(23), 0},
+ {"UXD_CONNECTED_REQ_STS", BIT(24), 1},
+ {"ARC_INTERRUPT_WAKE_REQ_STS", BIT(25), 0},
+ {"D2D_NOC_DISP_DDI_QACTIVE_REQ_STS", BIT(26), 1},
+ {"PRE_WAKE0_REQ_STS", BIT(27), 1},
+ {"PRE_WAKE1_REQ_STS", BIT(28), 1},
+ {"PRE_WAKE2_EN_REQ_STS", BIT(29), 1},
+ {"WOV_REQ_STS", BIT(30), 0},
+ {"D2D_NOC_DISP_EDP_QACTIVE_REQ_STS_31", BIT(31), 1},
{}
};
const struct pmc_bit_map lnl_clocksource_status_map[] = {
- {"AON2_OFF_STS", BIT(0)},
- {"AON3_OFF_STS", BIT(1)},
- {"AON4_OFF_STS", BIT(2)},
- {"AON5_OFF_STS", BIT(3)},
- {"AON1_OFF_STS", BIT(4)},
- {"MPFPW1_0_PLL_OFF_STS", BIT(6)},
- {"USB3_PLL_OFF_STS", BIT(8)},
- {"AON3_SPL_OFF_STS", BIT(9)},
- {"G5FPW1_PLL_OFF_STS", BIT(15)},
- {"XTAL_AGGR_OFF_STS", BIT(17)},
- {"USB2_PLL_OFF_STS", BIT(18)},
- {"SAF_PLL_OFF_STS", BIT(19)},
- {"SE_TCSS_PLL_OFF_STS", BIT(20)},
- {"DDI_PLL_OFF_STS", BIT(21)},
- {"FILTER_PLL_OFF_STS", BIT(22)},
- {"ACE_PLL_OFF_STS", BIT(24)},
- {"FABRIC_PLL_OFF_STS", BIT(25)},
- {"SOC_PLL_OFF_STS", BIT(26)},
- {"REF_OFF_STS", BIT(28)},
- {"IMG_OFF_STS", BIT(29)},
- {"RTC_PLL_OFF_STS", BIT(31)},
+ {"AON2_OFF_STS", BIT(0), 0},
+ {"AON3_OFF_STS", BIT(1), 1},
+ {"AON4_OFF_STS", BIT(2), 1},
+ {"AON5_OFF_STS", BIT(3), 1},
+ {"AON1_OFF_STS", BIT(4), 0},
+ {"MPFPW1_0_PLL_OFF_STS", BIT(6), 1},
+ {"USB3_PLL_OFF_STS", BIT(8), 1},
+ {"AON3_SPL_OFF_STS", BIT(9), 1},
+ {"G5FPW1_PLL_OFF_STS", BIT(15), 1},
+ {"XTAL_AGGR_OFF_STS", BIT(17), 1},
+ {"USB2_PLL_OFF_STS", BIT(18), 0},
+ {"SAF_PLL_OFF_STS", BIT(19), 1},
+ {"SE_TCSS_PLL_OFF_STS", BIT(20), 1},
+ {"DDI_PLL_OFF_STS", BIT(21), 1},
+ {"FILTER_PLL_OFF_STS", BIT(22), 1},
+ {"ACE_PLL_OFF_STS", BIT(24), 0},
+ {"FABRIC_PLL_OFF_STS", BIT(25), 1},
+ {"SOC_PLL_OFF_STS", BIT(26), 1},
+ {"REF_OFF_STS", BIT(28), 1},
+ {"IMG_OFF_STS", BIT(29), 1},
+ {"RTC_PLL_OFF_STS", BIT(31), 0},
+ {}
+};
+
+const struct pmc_bit_map lnl_signal_status_map[] = {
+ {"LSX_Wake0_STS", BIT(0), 0},
+ {"LSX_Wake1_STS", BIT(1), 0},
+ {"LSX_Wake2_STS", BIT(2), 0},
+ {"LSX_Wake3_STS", BIT(3), 0},
+ {"LSX_Wake4_STS", BIT(4), 0},
+ {"LSX_Wake5_STS", BIT(5), 0},
+ {"LSX_Wake6_STS", BIT(6), 0},
+ {"LSX_Wake7_STS", BIT(7), 0},
+ {"LPSS_Wake0_STS", BIT(8), 1},
+ {"LPSS_Wake1_STS", BIT(9), 1},
+ {"Int_Timer_SS_Wake0_STS", BIT(10), 1},
+ {"Int_Timer_SS_Wake1_STS", BIT(11), 1},
+ {"Int_Timer_SS_Wake2_STS", BIT(12), 1},
+ {"Int_Timer_SS_Wake3_STS", BIT(13), 1},
+ {"Int_Timer_SS_Wake4_STS", BIT(14), 1},
+ {"Int_Timer_SS_Wake5_STS", BIT(15), 1},
+ {}
+};
+
+const struct pmc_bit_map lnl_rsc_status_map[] = {
+ {"Memory", 0, 1},
+ {"PSF0", 0, 1},
+ {"PSF4", 0, 1},
+ {"PSF6", 0, 1},
+ {"PSF7", 0, 1},
+ {"PSF8", 0, 1},
+ {"SAF_CFI_LINK", 0, 1},
+ {"SBR", 0, 1},
{}
};
@@ -346,7 +363,26 @@ const struct pmc_bit_map *lnl_lpm_maps[] = {
lnl_vnn_req_status_2_map,
lnl_vnn_req_status_3_map,
lnl_vnn_misc_status_map,
- mtl_socm_signal_status_map,
+ lnl_signal_status_map,
+ NULL
+};
+
+const struct pmc_bit_map *lnl_blk_maps[] = {
+ lnl_power_gating_status_0_map,
+ lnl_power_gating_status_1_map,
+ lnl_power_gating_status_2_map,
+ lnl_rsc_status_map,
+ lnl_vnn_req_status_0_map,
+ lnl_vnn_req_status_1_map,
+ lnl_vnn_req_status_2_map,
+ lnl_vnn_req_status_3_map,
+ lnl_d3_status_0_map,
+ lnl_d3_status_1_map,
+ lnl_d3_status_2_map,
+ lnl_d3_status_3_map,
+ lnl_clocksource_status_map,
+ lnl_vnn_misc_status_map,
+ lnl_signal_status_map,
NULL
};
@@ -490,7 +526,8 @@ const struct pmc_reg_map lnl_socm_reg_map = {
.lpm_sts = lnl_lpm_maps,
.lpm_status_offset = MTL_LPM_STATUS_OFFSET,
.lpm_live_status_offset = MTL_LPM_LIVE_STATUS_OFFSET,
- .lpm_reg_index = LNL_LPM_REG_INDEX,
+ .s0ix_blocker_maps = lnl_blk_maps,
+ .s0ix_blocker_offset = LNL_S0IX_BLOCKER_OFFSET,
};
#define LNL_NPU_PCI_DEV 0x643e
@@ -509,41 +546,26 @@ static void lnl_d3_fixup(void)
static int lnl_resume(struct pmc_dev *pmcdev)
{
lnl_d3_fixup();
- pmc_core_send_ltr_ignore(pmcdev, 3, 0);
- return pmc_core_resume_common(pmcdev);
+ return cnl_resume(pmcdev);
}
int lnl_core_init(struct pmc_dev *pmcdev)
{
int ret;
- int func = 2;
- bool ssram_init = true;
struct pmc *pmc = pmcdev->pmcs[PMC_IDX_SOC];
lnl_d3_fixup();
pmcdev->suspend = cnl_suspend;
pmcdev->resume = lnl_resume;
- pmcdev->regmap_list = lnl_pmc_info_list;
- ret = pmc_core_ssram_init(pmcdev, func);
-
- /* If regbase not assigned, set map and discover using legacy method */
- if (ret) {
- ssram_init = false;
- pmc->map = &lnl_socm_reg_map;
- ret = get_primary_reg_base(pmc);
- if (ret)
- return ret;
- }
- pmc_core_get_low_power_modes(pmcdev);
+ pmc->map = &lnl_socm_reg_map;
+ ret = get_primary_reg_base(pmc);
+ if (ret)
+ return ret;
- if (ssram_init) {
- ret = pmc_core_ssram_get_lpm_reqs(pmcdev);
- if (ret)
- return ret;
- }
+ pmc_core_get_low_power_modes(pmcdev);
return 0;
}
diff --git a/drivers/platform/x86/intel/pmc/mtl.c b/drivers/platform/x86/intel/pmc/mtl.c
index c7d15d864039..02949fed76e9 100644
--- a/drivers/platform/x86/intel/pmc/mtl.c
+++ b/drivers/platform/x86/intel/pmc/mtl.c
@@ -986,9 +986,8 @@ static void mtl_d3_fixup(void)
static int mtl_resume(struct pmc_dev *pmcdev)
{
mtl_d3_fixup();
- pmc_core_send_ltr_ignore(pmcdev, 3, 0);
- return pmc_core_resume_common(pmcdev);
+ return cnl_resume(pmcdev);
}
int mtl_core_init(struct pmc_dev *pmcdev)
diff --git a/drivers/platform/x86/intel/pmc/pltdrv.c b/drivers/platform/x86/intel/pmc/pltdrv.c
index ddfba38c2104..3141d6cbc41b 100644
--- a/drivers/platform/x86/intel/pmc/pltdrv.c
+++ b/drivers/platform/x86/intel/pmc/pltdrv.c
@@ -35,14 +35,14 @@ static struct platform_device *pmc_core_device;
* other list may grow, but this list should not.
*/
static const struct x86_cpu_id intel_pmc_core_platform_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &pmc_core_device),
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &pmc_core_device),
- X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &pmc_core_device),
- X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &pmc_core_device),
- X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, &pmc_core_device),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &pmc_core_device),
- X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &pmc_core_device),
- X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &pmc_core_device),
+ X86_MATCH_VFM(INTEL_SKYLAKE_L, &pmc_core_device),
+ X86_MATCH_VFM(INTEL_SKYLAKE, &pmc_core_device),
+ X86_MATCH_VFM(INTEL_KABYLAKE_L, &pmc_core_device),
+ X86_MATCH_VFM(INTEL_KABYLAKE, &pmc_core_device),
+ X86_MATCH_VFM(INTEL_CANNONLAKE_L, &pmc_core_device),
+ X86_MATCH_VFM(INTEL_ICELAKE_L, &pmc_core_device),
+ X86_MATCH_VFM(INTEL_COMETLAKE, &pmc_core_device),
+ X86_MATCH_VFM(INTEL_COMETLAKE_L, &pmc_core_device),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_platform_ids);
@@ -86,4 +86,5 @@ static void __exit pmc_core_platform_exit(void)
module_init(pmc_core_platform_init);
module_exit(pmc_core_platform_exit);
+MODULE_DESCRIPTION("Intel PMC Core platform driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/pmt/class.c b/drivers/platform/x86/intel/pmt/class.c
index 4b53940a64e2..7233b654bbad 100644
--- a/drivers/platform/x86/intel/pmt/class.c
+++ b/drivers/platform/x86/intel/pmt/class.c
@@ -9,12 +9,12 @@
*/
#include <linux/kernel.h>
+#include <linux/intel_vsec.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pci.h>
-#include "../vsec.h"
#include "class.h"
#define PMT_XA_START 1
@@ -33,7 +33,7 @@ bool intel_pmt_is_early_client_hw(struct device *dev)
*/
return !!(ivdev->quirks & VSEC_QUIRK_EARLY_HW);
}
-EXPORT_SYMBOL_NS_GPL(intel_pmt_is_early_client_hw, INTEL_PMT);
+EXPORT_SYMBOL_NS_GPL(intel_pmt_is_early_client_hw, "INTEL_PMT");
static inline int
pmt_memcpy64_fromio(void *to, const u64 __iomem *from, size_t count)
@@ -58,12 +58,30 @@ pmt_memcpy64_fromio(void *to, const u64 __iomem *from, size_t count)
return count;
}
+int pmt_telem_read_mmio(struct pci_dev *pdev, struct pmt_callbacks *cb, u32 guid, void *buf,
+ void __iomem *addr, loff_t off, u32 count)
+{
+ if (cb && cb->read_telem)
+ return cb->read_telem(pdev, guid, buf, off, count);
+
+ addr += off;
+
+ if (guid == GUID_SPR_PUNIT)
+ /* PUNIT on SPR only supports aligned 64-bit read */
+ return pmt_memcpy64_fromio(buf, addr, count);
+
+ memcpy_fromio(buf, addr, count);
+
+ return count;
+}
+EXPORT_SYMBOL_NS_GPL(pmt_telem_read_mmio, "INTEL_PMT");
+
/*
* sysfs
*/
static ssize_t
intel_pmt_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off,
+ const struct bin_attribute *attr, char *buf, loff_t off,
size_t count)
{
struct intel_pmt_entry *entry = container_of(attr,
@@ -79,18 +97,15 @@ intel_pmt_read(struct file *filp, struct kobject *kobj,
if (count > entry->size - off)
count = entry->size - off;
- if (entry->guid == GUID_SPR_PUNIT)
- /* PUNIT on SPR only supports aligned 64-bit read */
- count = pmt_memcpy64_fromio(buf, entry->base + off, count);
- else
- memcpy_fromio(buf, entry->base + off, count);
+ count = pmt_telem_read_mmio(entry->ep->pcidev, entry->cb, entry->header.guid, buf,
+ entry->base, off, count);
return count;
}
static int
intel_pmt_mmap(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, struct vm_area_struct *vma)
+ const struct bin_attribute *attr, struct vm_area_struct *vma)
{
struct intel_pmt_entry *entry = container_of(attr,
struct intel_pmt_entry,
@@ -194,7 +209,7 @@ static int intel_pmt_populate_entry(struct intel_pmt_entry *entry,
/*
* Some hardware use a different calculation for the base address
 * when access_type == ACCESS_LOCAL. On these systems
- * ACCCESS_LOCAL refers to an address in the same BAR as the
+ * ACCESS_LOCAL refers to an address in the same BAR as the
* header but at a fixed offset. But as the header address was
* supplied to the driver, we don't know which BAR it was in.
* So search for the bar whose range includes the header address.
@@ -239,6 +254,7 @@ static int intel_pmt_populate_entry(struct intel_pmt_entry *entry,
entry->guid = header->guid;
entry->size = header->size;
+ entry->cb = ivdev->priv_data;
return 0;
}
@@ -292,7 +308,7 @@ static int intel_pmt_dev_register(struct intel_pmt_entry *entry,
entry->pmt_bin_attr.attr.name = ns->name;
entry->pmt_bin_attr.attr.mode = 0440;
entry->pmt_bin_attr.mmap = intel_pmt_mmap;
- entry->pmt_bin_attr.read = intel_pmt_read;
+ entry->pmt_bin_attr.read_new = intel_pmt_read;
entry->pmt_bin_attr.size = entry->size;
ret = sysfs_create_bin_file(&dev->kobj, &entry->pmt_bin_attr);
@@ -300,7 +316,7 @@ static int intel_pmt_dev_register(struct intel_pmt_entry *entry,
goto fail_ioremap;
if (ns->pmt_add_endpoint) {
- ret = ns->pmt_add_endpoint(entry, ivdev->pcidev);
+ ret = ns->pmt_add_endpoint(ivdev, entry);
if (ret)
goto fail_add_endpoint;
}
@@ -343,7 +359,7 @@ int intel_pmt_dev_create(struct intel_pmt_entry *entry, struct intel_pmt_namespa
return intel_pmt_dev_register(entry, ns, dev);
}
-EXPORT_SYMBOL_NS_GPL(intel_pmt_dev_create, INTEL_PMT);
+EXPORT_SYMBOL_NS_GPL(intel_pmt_dev_create, "INTEL_PMT");
void intel_pmt_dev_destroy(struct intel_pmt_entry *entry,
struct intel_pmt_namespace *ns)
@@ -359,7 +375,7 @@ void intel_pmt_dev_destroy(struct intel_pmt_entry *entry,
device_unregister(dev);
xa_erase(ns->xa, entry->devid);
}
-EXPORT_SYMBOL_NS_GPL(intel_pmt_dev_destroy, INTEL_PMT);
+EXPORT_SYMBOL_NS_GPL(intel_pmt_dev_destroy, "INTEL_PMT");
static int __init pmt_class_init(void)
{
diff --git a/drivers/platform/x86/intel/pmt/class.h b/drivers/platform/x86/intel/pmt/class.h
index d23c63b73ab7..b2006d57779d 100644
--- a/drivers/platform/x86/intel/pmt/class.h
+++ b/drivers/platform/x86/intel/pmt/class.h
@@ -2,13 +2,13 @@
#ifndef _INTEL_PMT_CLASS_H
#define _INTEL_PMT_CLASS_H
+#include <linux/intel_vsec.h>
#include <linux/xarray.h>
#include <linux/types.h>
#include <linux/bits.h>
#include <linux/err.h>
#include <linux/io.h>
-#include "../vsec.h"
#include "telemetry.h"
/* PMT access types */
@@ -24,6 +24,7 @@ struct pci_dev;
struct telem_endpoint {
struct pci_dev *pcidev;
struct telem_header header;
+ struct pmt_callbacks *cb;
void __iomem *base;
bool present;
struct kref kref;
@@ -43,6 +44,7 @@ struct intel_pmt_entry {
struct kobject *kobj;
void __iomem *disc_table;
void __iomem *base;
+ struct pmt_callbacks *cb;
unsigned long base_addr;
size_t size;
u32 guid;
@@ -55,10 +57,12 @@ struct intel_pmt_namespace {
const struct attribute_group *attr_grp;
int (*pmt_header_decode)(struct intel_pmt_entry *entry,
struct device *dev);
- int (*pmt_add_endpoint)(struct intel_pmt_entry *entry,
- struct pci_dev *pdev);
+ int (*pmt_add_endpoint)(struct intel_vsec_device *ivdev,
+ struct intel_pmt_entry *entry);
};
+int pmt_telem_read_mmio(struct pci_dev *pdev, struct pmt_callbacks *cb, u32 guid, void *buf,
+ void __iomem *addr, loff_t off, u32 count);
bool intel_pmt_is_early_client_hw(struct device *dev);
int intel_pmt_dev_create(struct intel_pmt_entry *entry,
struct intel_pmt_namespace *ns,
diff --git a/drivers/platform/x86/intel/pmt/crashlog.c b/drivers/platform/x86/intel/pmt/crashlog.c
index 4014c02cafdb..6a9eb3c4b313 100644
--- a/drivers/platform/x86/intel/pmt/crashlog.c
+++ b/drivers/platform/x86/intel/pmt/crashlog.c
@@ -9,6 +9,7 @@
*/
#include <linux/auxiliary_bus.h>
+#include <linux/intel_vsec.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -16,7 +17,6 @@
#include <linux/uaccess.h>
#include <linux/overflow.h>
-#include "../vsec.h"
#include "class.h"
/* Crashlog discovery header types */
@@ -328,4 +328,4 @@ module_exit(pmt_crashlog_exit);
MODULE_AUTHOR("Alexander Duyck <alexander.h.duyck@linux.intel.com>");
MODULE_DESCRIPTION("Intel PMT Crashlog driver");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(INTEL_PMT);
+MODULE_IMPORT_NS("INTEL_PMT");
diff --git a/drivers/platform/x86/intel/pmt/telemetry.c b/drivers/platform/x86/intel/pmt/telemetry.c
index 09258564dfc4..ac3a9bdf5601 100644
--- a/drivers/platform/x86/intel/pmt/telemetry.c
+++ b/drivers/platform/x86/intel/pmt/telemetry.c
@@ -9,6 +9,7 @@
*/
#include <linux/auxiliary_bus.h>
+#include <linux/intel_vsec.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -16,7 +17,6 @@
#include <linux/uaccess.h>
#include <linux/overflow.h>
-#include "../vsec.h"
#include "class.h"
#define TELEM_SIZE_OFFSET 0x0
@@ -93,8 +93,8 @@ static int pmt_telem_header_decode(struct intel_pmt_entry *entry,
return 0;
}
-static int pmt_telem_add_endpoint(struct intel_pmt_entry *entry,
- struct pci_dev *pdev)
+static int pmt_telem_add_endpoint(struct intel_vsec_device *ivdev,
+ struct intel_pmt_entry *entry)
{
struct telem_endpoint *ep;
@@ -104,13 +104,14 @@ static int pmt_telem_add_endpoint(struct intel_pmt_entry *entry,
return -ENOMEM;
ep = entry->ep;
- ep->pcidev = pdev;
+ ep->pcidev = ivdev->pcidev;
ep->header.access_type = entry->header.access_type;
ep->header.guid = entry->header.guid;
ep->header.base_offset = entry->header.base_offset;
ep->header.size = entry->header.size;
ep->base = entry->base;
ep->present = true;
+ ep->cb = ivdev->priv_data;
kref_init(&ep->kref);
@@ -152,7 +153,7 @@ unsigned long pmt_telem_get_next_endpoint(unsigned long start)
return found_idx == start ? 0 : found_idx;
}
-EXPORT_SYMBOL_NS_GPL(pmt_telem_get_next_endpoint, INTEL_PMT_TELEMETRY);
+EXPORT_SYMBOL_NS_GPL(pmt_telem_get_next_endpoint, "INTEL_PMT_TELEMETRY");
struct telem_endpoint *pmt_telem_register_endpoint(int devid)
{
@@ -171,13 +172,13 @@ struct telem_endpoint *pmt_telem_register_endpoint(int devid)
return entry->ep;
}
-EXPORT_SYMBOL_NS_GPL(pmt_telem_register_endpoint, INTEL_PMT_TELEMETRY);
+EXPORT_SYMBOL_NS_GPL(pmt_telem_register_endpoint, "INTEL_PMT_TELEMETRY");
void pmt_telem_unregister_endpoint(struct telem_endpoint *ep)
{
kref_put(&ep->kref, pmt_telem_ep_release);
}
-EXPORT_SYMBOL_NS_GPL(pmt_telem_unregister_endpoint, INTEL_PMT_TELEMETRY);
+EXPORT_SYMBOL_NS_GPL(pmt_telem_unregister_endpoint, "INTEL_PMT_TELEMETRY");
int pmt_telem_get_endpoint_info(int devid, struct telem_endpoint_info *info)
{
@@ -203,7 +204,7 @@ unlock:
return err;
}
-EXPORT_SYMBOL_NS_GPL(pmt_telem_get_endpoint_info, INTEL_PMT_TELEMETRY);
+EXPORT_SYMBOL_NS_GPL(pmt_telem_get_endpoint_info, "INTEL_PMT_TELEMETRY");
int pmt_telem_read(struct telem_endpoint *ep, u32 id, u64 *data, u32 count)
{
@@ -218,11 +219,12 @@ int pmt_telem_read(struct telem_endpoint *ep, u32 id, u64 *data, u32 count)
if (offset + NUM_BYTES_QWORD(count) > size)
return -EINVAL;
- memcpy_fromio(data, ep->base + offset, NUM_BYTES_QWORD(count));
+ pmt_telem_read_mmio(ep->pcidev, ep->cb, ep->header.guid, data, ep->base, offset,
+ NUM_BYTES_QWORD(count));
return ep->present ? 0 : -EPIPE;
}
-EXPORT_SYMBOL_NS_GPL(pmt_telem_read, INTEL_PMT_TELEMETRY);
+EXPORT_SYMBOL_NS_GPL(pmt_telem_read, "INTEL_PMT_TELEMETRY");
int pmt_telem_read32(struct telem_endpoint *ep, u32 id, u32 *data, u32 count)
{
@@ -241,7 +243,7 @@ int pmt_telem_read32(struct telem_endpoint *ep, u32 id, u32 *data, u32 count)
return ep->present ? 0 : -EPIPE;
}
-EXPORT_SYMBOL_NS_GPL(pmt_telem_read32, INTEL_PMT_TELEMETRY);
+EXPORT_SYMBOL_NS_GPL(pmt_telem_read32, "INTEL_PMT_TELEMETRY");
struct telem_endpoint *
pmt_telem_find_and_register_endpoint(struct pci_dev *pcidev, u32 guid, u16 pos)
@@ -266,7 +268,7 @@ pmt_telem_find_and_register_endpoint(struct pci_dev *pcidev, u32 guid, u16 pos)
return ERR_PTR(-ENXIO);
}
-EXPORT_SYMBOL_NS_GPL(pmt_telem_find_and_register_endpoint, INTEL_PMT_TELEMETRY);
+EXPORT_SYMBOL_NS_GPL(pmt_telem_find_and_register_endpoint, "INTEL_PMT_TELEMETRY");
static void pmt_telem_remove(struct auxiliary_device *auxdev)
{
@@ -345,4 +347,4 @@ module_exit(pmt_telem_exit);
MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
MODULE_DESCRIPTION("Intel PMT Telemetry driver");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(INTEL_PMT);
+MODULE_IMPORT_NS("INTEL_PMT");
diff --git a/drivers/platform/x86/intel/punit_ipc.c b/drivers/platform/x86/intel/punit_ipc.c
index cd0ba84cc8e4..bafac8aa2baf 100644
--- a/drivers/platform/x86/intel/punit_ipc.c
+++ b/drivers/platform/x86/intel/punit_ipc.c
@@ -131,39 +131,6 @@ static int intel_punit_ipc_check_status(IPC_DEV *ipcdev, IPC_TYPE type)
}
/**
- * intel_punit_ipc_simple_command() - Simple IPC command
- * @cmd: IPC command code.
- * @para1: First 8bit parameter, set 0 if not used.
- * @para2: Second 8bit parameter, set 0 if not used.
- *
- * Send a IPC command to P-Unit when there is no data transaction
- *
- * Return: IPC error code or 0 on success.
- */
-int intel_punit_ipc_simple_command(int cmd, int para1, int para2)
-{
- IPC_DEV *ipcdev = punit_ipcdev;
- IPC_TYPE type;
- u32 val;
- int ret;
-
- mutex_lock(&ipcdev->lock);
-
- reinit_completion(&ipcdev->cmd_complete);
- type = (cmd & IPC_PUNIT_CMD_TYPE_MASK) >> IPC_TYPE_OFFSET;
-
- val = cmd & ~IPC_PUNIT_CMD_TYPE_MASK;
- val |= CMD_RUN | para2 << CMD_PARA2_SHIFT | para1 << CMD_PARA1_SHIFT;
- ipc_write_cmd(ipcdev, type, val);
- ret = intel_punit_ipc_check_status(ipcdev, type);
-
- mutex_unlock(&ipcdev->lock);
-
- return ret;
-}
-EXPORT_SYMBOL(intel_punit_ipc_simple_command);
-
-/**
* intel_punit_ipc_command() - IPC command with data and pointers
* @cmd: IPC command code.
* @para1: First 8bit parameter, set 0 if not used.
diff --git a/drivers/platform/x86/intel/rst.c b/drivers/platform/x86/intel/rst.c
index 35814a7707af..f3a60e14d4c1 100644
--- a/drivers/platform/x86/intel/rst.c
+++ b/drivers/platform/x86/intel/rst.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/slab.h>
+MODULE_DESCRIPTION("Intel Rapid Start Technology Driver");
MODULE_LICENSE("GPL");
static ssize_t irst_show_wakeup_events(struct device *dev,
@@ -125,7 +126,6 @@ static const struct acpi_device_id irst_ids[] = {
};
static struct acpi_driver irst_driver = {
- .owner = THIS_MODULE,
.name = "intel_rapid_start",
.class = "intel_rapid_start",
.ids = irst_ids,
diff --git a/drivers/platform/x86/intel/sdsi.c b/drivers/platform/x86/intel/sdsi.c
index 556e7c6dbb05..30d1c2caf984 100644
--- a/drivers/platform/x86/intel/sdsi.c
+++ b/drivers/platform/x86/intel/sdsi.c
@@ -12,17 +12,17 @@
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/device.h>
+#include <linux/intel_vsec.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/overflow.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/uaccess.h>
-#include "vsec.h"
-
#define ACCESS_TYPE_BARID 2
#define ACCESS_TYPE_LOCAL 3
@@ -66,6 +66,8 @@
#define CTRL_OWNER GENMASK(5, 4)
#define CTRL_COMPLETE BIT(6)
#define CTRL_READY BIT(7)
+#define CTRL_INBAND_LOCK BIT(32)
+#define CTRL_METER_ENABLE_DRAM BIT(33)
#define CTRL_STATUS GENMASK(15, 8)
#define CTRL_PACKET_SIZE GENMASK(31, 16)
#define CTRL_MSG_SIZE GENMASK(63, 48)
@@ -93,6 +95,7 @@ enum sdsi_command {
struct sdsi_mbox_info {
u64 *payload;
void *buffer;
+ u64 control_flags;
int size;
};
@@ -156,8 +159,8 @@ static int sdsi_status_to_errno(u32 status)
}
}
-static int sdsi_mbox_cmd_read(struct sdsi_priv *priv, struct sdsi_mbox_info *info,
- size_t *data_size)
+static int sdsi_mbox_poll(struct sdsi_priv *priv, struct sdsi_mbox_info *info,
+ size_t *data_size)
{
struct device *dev = priv->dev;
u32 total, loop, eom, status, message_size;
@@ -166,18 +169,10 @@ static int sdsi_mbox_cmd_read(struct sdsi_priv *priv, struct sdsi_mbox_info *inf
lockdep_assert_held(&priv->mb_lock);
- /* Format and send the read command */
- control = FIELD_PREP(CTRL_EOM, 1) |
- FIELD_PREP(CTRL_SOM, 1) |
- FIELD_PREP(CTRL_RUN_BUSY, 1) |
- FIELD_PREP(CTRL_PACKET_SIZE, info->size);
- writeq(control, priv->control_addr);
-
/* For reads, data sizes that are larger than the mailbox size are read in packets. */
total = 0;
loop = 0;
do {
- void *buf = info->buffer + (SDSI_SIZE_MAILBOX * loop);
u32 packet_size;
/* Poll on ready bit */
@@ -195,6 +190,11 @@ static int sdsi_mbox_cmd_read(struct sdsi_priv *priv, struct sdsi_mbox_info *inf
if (ret)
break;
+ if (!packet_size) {
+ sdsi_complete_transaction(priv);
+ break;
+ }
+
/* Only the last packet can be less than the mailbox size. */
if (!eom && packet_size != SDSI_SIZE_MAILBOX) {
dev_err(dev, "Invalid packet size\n");
@@ -208,9 +208,13 @@ static int sdsi_mbox_cmd_read(struct sdsi_priv *priv, struct sdsi_mbox_info *inf
break;
}
- sdsi_memcpy64_fromio(buf, priv->mbox_addr, round_up(packet_size, SDSI_SIZE_CMD));
+ if (info->buffer) {
+ void *buf = info->buffer + array_size(SDSI_SIZE_MAILBOX, loop);
- total += packet_size;
+ sdsi_memcpy64_fromio(buf, priv->mbox_addr,
+ round_up(packet_size, SDSI_SIZE_CMD));
+ total += packet_size;
+ }
sdsi_complete_transaction(priv);
} while (!eom && ++loop < MBOX_MAX_PACKETS);
@@ -230,16 +234,34 @@ static int sdsi_mbox_cmd_read(struct sdsi_priv *priv, struct sdsi_mbox_info *inf
dev_warn(dev, "Read count %u differs from expected count %u\n",
total, message_size);
- *data_size = total;
+ if (data_size)
+ *data_size = total;
return 0;
}
-static int sdsi_mbox_cmd_write(struct sdsi_priv *priv, struct sdsi_mbox_info *info)
+static int sdsi_mbox_cmd_read(struct sdsi_priv *priv, struct sdsi_mbox_info *info,
+ size_t *data_size)
+{
+ u64 control;
+
+ lockdep_assert_held(&priv->mb_lock);
+
+ /* Format and send the read command */
+ control = FIELD_PREP(CTRL_EOM, 1) |
+ FIELD_PREP(CTRL_SOM, 1) |
+ FIELD_PREP(CTRL_RUN_BUSY, 1) |
+ FIELD_PREP(CTRL_PACKET_SIZE, info->size) |
+ info->control_flags;
+ writeq(control, priv->control_addr);
+
+ return sdsi_mbox_poll(priv, info, data_size);
+}
+
+static int sdsi_mbox_cmd_write(struct sdsi_priv *priv, struct sdsi_mbox_info *info,
+ size_t *data_size)
{
u64 control;
- u32 status;
- int ret;
lockdep_assert_held(&priv->mb_lock);
@@ -252,23 +274,11 @@ static int sdsi_mbox_cmd_write(struct sdsi_priv *priv, struct sdsi_mbox_info *in
FIELD_PREP(CTRL_SOM, 1) |
FIELD_PREP(CTRL_RUN_BUSY, 1) |
FIELD_PREP(CTRL_READ_WRITE, 1) |
+ FIELD_PREP(CTRL_MSG_SIZE, info->size) |
FIELD_PREP(CTRL_PACKET_SIZE, info->size);
writeq(control, priv->control_addr);
- /* Poll on ready bit */
- ret = readq_poll_timeout(priv->control_addr, control, control & CTRL_READY,
- MBOX_POLLING_PERIOD_US, MBOX_TIMEOUT_US);
-
- if (ret)
- goto release_mbox;
-
- status = FIELD_GET(CTRL_STATUS, control);
- ret = sdsi_status_to_errno(status);
-
-release_mbox:
- sdsi_complete_transaction(priv);
-
- return ret;
+ return sdsi_mbox_poll(priv, info, data_size);
}
static int sdsi_mbox_acquire(struct sdsi_priv *priv, struct sdsi_mbox_info *info)
@@ -312,7 +322,8 @@ static int sdsi_mbox_acquire(struct sdsi_priv *priv, struct sdsi_mbox_info *info
return ret;
}
-static int sdsi_mbox_write(struct sdsi_priv *priv, struct sdsi_mbox_info *info)
+static int sdsi_mbox_write(struct sdsi_priv *priv, struct sdsi_mbox_info *info,
+ size_t *data_size)
{
int ret;
@@ -322,7 +333,7 @@ static int sdsi_mbox_write(struct sdsi_priv *priv, struct sdsi_mbox_info *info)
if (ret)
return ret;
- return sdsi_mbox_cmd_write(priv, info);
+ return sdsi_mbox_cmd_write(priv, info, data_size);
}
static int sdsi_mbox_read(struct sdsi_priv *priv, struct sdsi_mbox_info *info, size_t *data_size)
@@ -338,15 +349,24 @@ static int sdsi_mbox_read(struct sdsi_priv *priv, struct sdsi_mbox_info *info, s
return sdsi_mbox_cmd_read(priv, info, data_size);
}
+static bool sdsi_ib_locked(struct sdsi_priv *priv)
+{
+ return !!FIELD_GET(CTRL_INBAND_LOCK, readq(priv->control_addr));
+}
+
static ssize_t sdsi_provision(struct sdsi_priv *priv, char *buf, size_t count,
enum sdsi_command command)
{
- struct sdsi_mbox_info info;
+ struct sdsi_mbox_info info = {};
int ret;
if (count > (SDSI_SIZE_WRITE_MSG - SDSI_SIZE_CMD))
return -EOVERFLOW;
+ /* Make sure In-band lock is not set */
+ if (sdsi_ib_locked(priv))
+ return -EPERM;
+
/* Qword aligned message + command qword */
info.size = round_up(count, SDSI_SIZE_CMD) + SDSI_SIZE_CMD;
@@ -363,7 +383,9 @@ static ssize_t sdsi_provision(struct sdsi_priv *priv, char *buf, size_t count,
ret = mutex_lock_interruptible(&priv->mb_lock);
if (ret)
goto free_payload;
- ret = sdsi_mbox_write(priv, &info);
+
+ ret = sdsi_mbox_write(priv, &info, NULL);
+
mutex_unlock(&priv->mb_lock);
free_payload:
@@ -376,8 +398,8 @@ free_payload:
}
static ssize_t provision_akc_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off,
- size_t count)
+ const struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
struct sdsi_priv *priv = dev_get_drvdata(dev);
@@ -387,11 +409,11 @@ static ssize_t provision_akc_write(struct file *filp, struct kobject *kobj,
return sdsi_provision(priv, buf, count, SDSI_CMD_PROVISION_AKC);
}
-static BIN_ATTR_WO(provision_akc, SDSI_SIZE_WRITE_MSG);
+static const BIN_ATTR_WO(provision_akc, SDSI_SIZE_WRITE_MSG);
static ssize_t provision_cap_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off,
- size_t count)
+ const struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
struct sdsi_priv *priv = dev_get_drvdata(dev);
@@ -401,13 +423,13 @@ static ssize_t provision_cap_write(struct file *filp, struct kobject *kobj,
return sdsi_provision(priv, buf, count, SDSI_CMD_PROVISION_CAP);
}
-static BIN_ATTR_WO(provision_cap, SDSI_SIZE_WRITE_MSG);
+static const BIN_ATTR_WO(provision_cap, SDSI_SIZE_WRITE_MSG);
static ssize_t
-certificate_read(u64 command, struct sdsi_priv *priv, char *buf, loff_t off,
- size_t count)
+certificate_read(u64 command, u64 control_flags, struct sdsi_priv *priv,
+ char *buf, loff_t off, size_t count)
{
- struct sdsi_mbox_info info;
+ struct sdsi_mbox_info info = {};
size_t size;
int ret;
@@ -421,6 +443,7 @@ certificate_read(u64 command, struct sdsi_priv *priv, char *buf, loff_t off,
info.payload = &command;
info.size = sizeof(command);
+ info.control_flags = control_flags;
ret = mutex_lock_interruptible(&priv->mb_lock);
if (ret)
@@ -446,31 +469,44 @@ free_buffer:
static ssize_t
state_certificate_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off,
+ const struct bin_attribute *attr, char *buf, loff_t off,
size_t count)
{
struct device *dev = kobj_to_dev(kobj);
struct sdsi_priv *priv = dev_get_drvdata(dev);
- return certificate_read(SDSI_CMD_READ_STATE, priv, buf, off, count);
+ return certificate_read(SDSI_CMD_READ_STATE, 0, priv, buf, off, count);
}
-static BIN_ATTR_ADMIN_RO(state_certificate, SDSI_SIZE_READ_MSG);
+static const BIN_ATTR_ADMIN_RO(state_certificate, SDSI_SIZE_READ_MSG);
static ssize_t
meter_certificate_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off,
+ const struct bin_attribute *attr, char *buf, loff_t off,
size_t count)
{
struct device *dev = kobj_to_dev(kobj);
struct sdsi_priv *priv = dev_get_drvdata(dev);
- return certificate_read(SDSI_CMD_READ_METER, priv, buf, off, count);
+ return certificate_read(SDSI_CMD_READ_METER, 0, priv, buf, off, count);
+}
+static const BIN_ATTR_ADMIN_RO(meter_certificate, SDSI_SIZE_READ_MSG);
+
+static ssize_t
+meter_current_read(struct file *filp, struct kobject *kobj,
+ const struct bin_attribute *attr, char *buf, loff_t off,
+ size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct sdsi_priv *priv = dev_get_drvdata(dev);
+
+ return certificate_read(SDSI_CMD_READ_METER, CTRL_METER_ENABLE_DRAM,
+ priv, buf, off, count);
}
-static BIN_ATTR_ADMIN_RO(meter_certificate, SDSI_SIZE_READ_MSG);
+static const BIN_ATTR_ADMIN_RO(meter_current, SDSI_SIZE_READ_MSG);
static ssize_t registers_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off,
- size_t count)
+ const struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
struct sdsi_priv *priv = dev_get_drvdata(dev);
@@ -492,19 +528,20 @@ static ssize_t registers_read(struct file *filp, struct kobject *kobj,
return count;
}
-static BIN_ATTR_ADMIN_RO(registers, SDSI_SIZE_REGS);
+static const BIN_ATTR_ADMIN_RO(registers, SDSI_SIZE_REGS);
-static struct bin_attribute *sdsi_bin_attrs[] = {
+static const struct bin_attribute *const sdsi_bin_attrs[] = {
&bin_attr_registers,
&bin_attr_state_certificate,
&bin_attr_meter_certificate,
+ &bin_attr_meter_current,
&bin_attr_provision_akc,
&bin_attr_provision_cap,
NULL
};
static umode_t
-sdsi_battr_is_visible(struct kobject *kobj, struct bin_attribute *attr, int n)
+sdsi_battr_is_visible(struct kobject *kobj, const struct bin_attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
struct sdsi_priv *priv = dev_get_drvdata(dev);
@@ -517,7 +554,7 @@ sdsi_battr_is_visible(struct kobject *kobj, struct bin_attribute *attr, int n)
if (!(priv->features & SDSI_FEATURE_SDSI))
return 0;
- if (attr == &bin_attr_meter_certificate)
+ if (attr == &bin_attr_meter_certificate || attr == &bin_attr_meter_current)
return (priv->features & SDSI_FEATURE_METERING) ?
attr->attr.mode : 0;
@@ -539,7 +576,7 @@ static struct attribute *sdsi_attrs[] = {
static const struct attribute_group sdsi_group = {
.attrs = sdsi_attrs,
- .bin_attrs = sdsi_bin_attrs,
+ .bin_attrs_new = sdsi_bin_attrs,
.is_bin_visible = sdsi_battr_is_visible,
};
__ATTRIBUTE_GROUPS(sdsi);
diff --git a/drivers/platform/x86/intel/smartconnect.c b/drivers/platform/x86/intel/smartconnect.c
index 64c2dc93472f..31019a1a6d5e 100644
--- a/drivers/platform/x86/intel/smartconnect.c
+++ b/drivers/platform/x86/intel/smartconnect.c
@@ -6,6 +6,7 @@
#include <linux/acpi.h>
#include <linux/module.h>
+MODULE_DESCRIPTION("Intel Smart Connect disabling driver");
MODULE_LICENSE("GPL");
static int smartconnect_acpi_init(struct acpi_device *acpi)
@@ -32,7 +33,6 @@ static const struct acpi_device_id smartconnect_ids[] = {
MODULE_DEVICE_TABLE(acpi, smartconnect_ids);
static struct acpi_driver smartconnect_driver = {
- .owner = THIS_MODULE,
.name = "intel_smart_connect",
.class = "intel_smart_connect",
.ids = smartconnect_ids,
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
index 08df9494603c..dbcd3087aaa4 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
@@ -316,7 +316,9 @@ static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn
cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
return NULL;
- pkg_id = topology_physical_package_id(cpu);
+ pkg_id = topology_logical_package_id(cpu);
+ if (pkg_id >= topology_max_packages())
+ return NULL;
bus_number = isst_cpu_info[cpu].bus_info[bus_no];
if (bus_number < 0)
@@ -651,10 +653,6 @@ static long isst_if_def_ioctl(struct file *file, unsigned int cmd,
/* Lock to prevent module registration when already opened by user space */
static DEFINE_MUTEX(punit_misc_dev_open_lock);
-/* Lock to allow one shared misc device for all ISST interfaces */
-static DEFINE_MUTEX(punit_misc_dev_reg_lock);
-static int misc_usage_count;
-static int misc_device_ret;
static int misc_device_open;
static int isst_if_open(struct inode *inode, struct file *file)
@@ -718,53 +716,25 @@ static struct miscdevice isst_if_char_driver = {
.fops = &isst_if_char_driver_ops,
};
-static const struct x86_cpu_id hpm_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, NULL),
- {}
-};
-
static int isst_misc_reg(void)
{
- mutex_lock(&punit_misc_dev_reg_lock);
- if (misc_device_ret)
- goto unlock_exit;
-
- if (!misc_usage_count) {
- const struct x86_cpu_id *id;
-
- id = x86_match_cpu(hpm_cpu_ids);
- if (id)
- isst_hpm_support = true;
-
- misc_device_ret = isst_if_cpu_info_init();
- if (misc_device_ret)
- goto unlock_exit;
+ int ret;
- misc_device_ret = misc_register(&isst_if_char_driver);
- if (misc_device_ret) {
- isst_if_cpu_info_exit();
- goto unlock_exit;
- }
- }
- misc_usage_count++;
+ ret = isst_if_cpu_info_init();
+ if (ret)
+ return ret;
-unlock_exit:
- mutex_unlock(&punit_misc_dev_reg_lock);
+ ret = misc_register(&isst_if_char_driver);
+ if (ret)
+ isst_if_cpu_info_exit();
- return misc_device_ret;
+ return ret;
}
static void isst_misc_unreg(void)
{
- mutex_lock(&punit_misc_dev_reg_lock);
- if (misc_usage_count)
- misc_usage_count--;
- if (!misc_usage_count && !misc_device_ret) {
- misc_deregister(&isst_if_char_driver);
- isst_if_cpu_info_exit();
- }
- mutex_unlock(&punit_misc_dev_reg_lock);
+ misc_deregister(&isst_if_char_driver);
+ isst_if_cpu_info_exit();
}
/**
@@ -784,11 +754,12 @@ static void isst_misc_unreg(void)
*/
int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb)
{
- int ret;
-
if (device_type >= ISST_IF_DEV_MAX)
return -EINVAL;
+ if (device_type < ISST_IF_DEV_TPMI && isst_hpm_support)
+ return -ENODEV;
+
mutex_lock(&punit_misc_dev_open_lock);
/* Device is already open, we don't want to add new callbacks */
if (misc_device_open) {
@@ -803,15 +774,6 @@ int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb)
punit_callbacks[device_type].registered = 1;
mutex_unlock(&punit_misc_dev_open_lock);
- ret = isst_misc_reg();
- if (ret) {
- /*
- * No need of mutex as the misc device register failed
- * as no one can open device yet. Hence no contention.
- */
- punit_callbacks[device_type].registered = 0;
- return ret;
- }
return 0;
}
EXPORT_SYMBOL_GPL(isst_if_cdev_register);
@@ -827,7 +789,6 @@ EXPORT_SYMBOL_GPL(isst_if_cdev_register);
*/
void isst_if_cdev_unregister(int device_type)
{
- isst_misc_unreg();
mutex_lock(&punit_misc_dev_open_lock);
punit_callbacks[device_type].def_ioctl = NULL;
punit_callbacks[device_type].registered = 0;
@@ -837,4 +798,53 @@ void isst_if_cdev_unregister(int device_type)
}
EXPORT_SYMBOL_GPL(isst_if_cdev_unregister);
+#define SST_HPM_SUPPORTED 0x01
+#define SST_MBOX_SUPPORTED 0x02
+
+static const struct x86_cpu_id isst_cpu_ids[] = {
+ X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, SST_HPM_SUPPORTED),
+ X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, SST_HPM_SUPPORTED),
+ X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X, SST_HPM_SUPPORTED),
+ X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, 0),
+ X86_MATCH_VFM(INTEL_GRANITERAPIDS_D, SST_HPM_SUPPORTED),
+ X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, SST_HPM_SUPPORTED),
+ X86_MATCH_VFM(INTEL_ICELAKE_D, 0),
+ X86_MATCH_VFM(INTEL_ICELAKE_X, 0),
+ X86_MATCH_VFM(INTEL_PANTHERCOVE_X, SST_HPM_SUPPORTED),
+ X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, 0),
+ X86_MATCH_VFM(INTEL_SKYLAKE_X, SST_MBOX_SUPPORTED),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, isst_cpu_ids);
+
+static int __init isst_if_common_init(void)
+{
+ const struct x86_cpu_id *id;
+
+ id = x86_match_cpu(isst_cpu_ids);
+ if (!id)
+ return -ENODEV;
+
+ if (id->driver_data == SST_HPM_SUPPORTED) {
+ isst_hpm_support = true;
+ } else if (id->driver_data == SST_MBOX_SUPPORTED) {
+ u64 data;
+
+ /* Can fail only on some Skylake-X generations */
+ if (rdmsrl_safe(MSR_OS_MAILBOX_INTERFACE, &data) ||
+ rdmsrl_safe(MSR_OS_MAILBOX_DATA, &data))
+ return -ENODEV;
+ }
+
+ return isst_misc_reg();
+}
+module_init(isst_if_common_init)
+
+static void __exit isst_if_common_exit(void)
+{
+ isst_misc_unreg();
+}
+module_exit(isst_if_common_exit)
+
+MODULE_DESCRIPTION("ISST common interface module");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.h b/drivers/platform/x86/intel/speed_select_if/isst_if_common.h
index 1004f2c9cca8..378055fe1d16 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.h
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.h
@@ -16,6 +16,9 @@
#define PCI_DEVICE_ID_INTEL_RAPL_PRIO_DEVID_1 0x3251
#define PCI_DEVICE_ID_INTEL_CFG_MBOX_DEVID_1 0x3259
+#define MSR_OS_MAILBOX_INTERFACE 0xB0
+#define MSR_OS_MAILBOX_DATA 0xB1
+
/*
* Validate maximum commands in a single request.
* This is enough to handle command to every core in one ioctl, or all
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c b/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c
index 1b6eab071068..c4b7af00352b 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c
@@ -21,8 +21,6 @@
#include "isst_if_common.h"
-#define MSR_OS_MAILBOX_INTERFACE 0xB0
-#define MSR_OS_MAILBOX_DATA 0xB1
#define MSR_OS_MAILBOX_BUSY_BIT 31
/*
@@ -161,7 +159,7 @@ static struct notifier_block isst_pm_nb = {
};
static const struct x86_cpu_id isst_if_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
+ X86_MATCH_VFM(INTEL_SKYLAKE_X, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, isst_if_cpu_ids);
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi.c b/drivers/platform/x86/intel/speed_select_if/isst_tpmi.c
index 17972191538a..bcf0a5cbc68d 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_tpmi.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi.c
@@ -67,6 +67,6 @@ static struct auxiliary_driver intel_sst_aux_driver = {
module_auxiliary_driver(intel_sst_aux_driver);
-MODULE_IMPORT_NS(INTEL_TPMI_SST);
+MODULE_IMPORT_NS("INTEL_TPMI_SST");
MODULE_DESCRIPTION("Intel TPMI SST Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
index 2662fbbddf0c..9978cdd19851 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
@@ -17,12 +17,15 @@
* the hardware mapping.
*/
+#define dev_fmt(fmt) "tpmi_sst: " fmt
+
#include <linux/auxiliary_bus.h>
#include <linux/delay.h>
#include <linux/intel_tpmi.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/minmax.h>
#include <linux/module.h>
#include <uapi/linux/isst_if.h>
@@ -263,20 +266,33 @@ struct tpmi_per_power_domain_info {
bool write_blocked;
};
+/* Supported maximum partitions */
+#define SST_MAX_PARTITIONS 2
+
/**
* struct tpmi_sst_struct - Store sst info for a package
* @package_id: Package id for this aux device instance
* @number_of_power_domains: Number of power_domains pointed by power_domain_info pointer
* @power_domain_info: Pointer to power domains information
+ * @cdie_mask: Mask of compute dies present in a partition from hardware.
+ * This mask is not present in the version 1 information header.
+ * @io_dies: Number of IO dies in a partition. This will be 0 for TPMI
+ * version 1 information header.
+ * @partition_mask: Mask of all partitions.
+ * @partition_mask_current: Current partition mask as some may have been unbound.
*
 * This structure is used to store full SST information for a package.
- * Each package has a unique OOB PCI device, which enumerates TPMI.
- * Each Package will have multiple power_domains.
+ * Each package has one or multiple OOB PCI devices. Each package can contain multiple
+ * power domains.
*/
struct tpmi_sst_struct {
int package_id;
- int number_of_power_domains;
- struct tpmi_per_power_domain_info *power_domain_info;
+ struct tpmi_per_power_domain_info *power_domain_info[SST_MAX_PARTITIONS];
+ u16 cdie_mask[SST_MAX_PARTITIONS];
+ u8 number_of_power_domains[SST_MAX_PARTITIONS];
+ u8 io_dies[SST_MAX_PARTITIONS];
+ u8 partition_mask;
+ u8 partition_mask_current;
};
/**
@@ -313,12 +329,11 @@ static int sst_add_perf_profiles(struct auxiliary_device *auxdev,
struct tpmi_per_power_domain_info *pd_info,
int levels)
{
+ struct device *dev = &auxdev->dev;
u64 perf_level_offsets;
int i;
- pd_info->perf_levels = devm_kcalloc(&auxdev->dev, levels,
- sizeof(struct perf_level),
- GFP_KERNEL);
+ pd_info->perf_levels = devm_kcalloc(dev, levels, sizeof(struct perf_level), GFP_KERNEL);
if (!pd_info->perf_levels)
return 0;
@@ -349,6 +364,7 @@ static int sst_add_perf_profiles(struct auxiliary_device *auxdev,
static int sst_main(struct auxiliary_device *auxdev, struct tpmi_per_power_domain_info *pd_info)
{
+ struct device *dev = &auxdev->dev;
int i, mask, levels;
*((u64 *)&pd_info->sst_header) = readq(pd_info->sst_base);
@@ -359,13 +375,13 @@ static int sst_main(struct auxiliary_device *auxdev, struct tpmi_per_power_domai
return -ENODEV;
if (TPMI_MAJOR_VERSION(pd_info->sst_header.interface_version) != ISST_MAJOR_VERSION) {
- dev_err(&auxdev->dev, "SST: Unsupported major version:%lx\n",
+ dev_err(dev, "SST: Unsupported major version:%lx\n",
TPMI_MAJOR_VERSION(pd_info->sst_header.interface_version));
return -ENODEV;
}
if (TPMI_MINOR_VERSION(pd_info->sst_header.interface_version) != ISST_MINOR_VERSION)
- dev_info(&auxdev->dev, "SST: Ignore: Unsupported minor version:%lx\n",
+ dev_info(dev, "SST: Ignore: Unsupported minor version:%lx\n",
TPMI_MINOR_VERSION(pd_info->sst_header.interface_version));
/* Read SST CP Header */
@@ -387,6 +403,126 @@ static int sst_main(struct auxiliary_device *auxdev, struct tpmi_per_power_domai
return 0;
}
+static u8 isst_instance_count(struct tpmi_sst_struct *sst_inst)
+{
+ u8 i, max_part, count = 0;
+
+ /* Partition mask starts from bit 0 and contains 1s only */
+ max_part = hweight8(sst_inst->partition_mask);
+ for (i = 0; i < max_part; i++)
+ count += sst_inst->number_of_power_domains[i];
+
+ return count;
+}
+
+/**
+ * map_cdies() - Map user domain ID to compute domain ID
+ * @sst_inst: TPMI Instance
+ * @id: User domain ID
+ * @partition: Resolved partition
+ *
+ * Helper function to map_partition_power_domain_id() to resolve compute
+ * domain ID and partition. Use hardware provided cdie_mask for a partition
+ * as is to resolve a compute domain ID.
+ *
+ * Return: %-EINVAL on error, otherwise mapped domain ID >= 0.
+ */
+static int map_cdies(struct tpmi_sst_struct *sst_inst, u8 id, u8 *partition)
+{
+ u8 i, max_part;
+
+ max_part = hweight8(sst_inst->partition_mask);
+ for (i = 0; i < max_part; i++) {
+ if (!(sst_inst->cdie_mask[i] & BIT(id)))
+ continue;
+
+ *partition = i;
+ return id - ffs(sst_inst->cdie_mask[i]) + 1;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * map_partition_power_domain_id() - Map user domain ID to partition domain ID
+ * @sst_inst: TPMI Instance
+ * @id: User domain ID
+ * @partition: Resolved partition
+ *
+ * In a partitioned system a CPU package has two separate MMIO ranges (Under
+ * two PCI devices). But the CPU package compute die/power domain IDs are
+ * unique in a package. User space can get compute die/power domain ID from
+ * CPUID and MSR 0x54 for a CPU. So, those IDs need to be preserved even if
+ * they are present in two different partitions with its own order.
+ *
+ * For example for command ISST_IF_COUNT_TPMI_INSTANCES, the valid_mask
+ * is 111111b for a 4 compute and 2 IO dies system. This is presented as
+ * provided by the hardware in a non-partitioned system with the following
+ * order:
+ * I1-I0-C3-C2-C1-C0
+ * Here: "C": for compute and "I" for IO die.
+ * Compute dies are always present first in TPMI instances, as they have
+ * to map to the real power domain/die ID of a system. In a non-partitioned
+ * system there is no way to identify compute and IO die boundaries from
+ * this driver without reading each CPU's mapping.
+ *
+ * The same order needs to be preserved, even if those compute dies are
+ * distributed among multiple partitions. For example:
+ * Partition 1 can contain: I1-C1-C0
+ * Partition 2 can contain: I2-C3-C2
+ *
+ * This will require a conversion of user space IDs to the actual index into
+ * array of stored power domains for each partition. For the above example
+ * this function will return partition and index as follows:
+ *
+ * ============= ========= ===== ========
+ * User space ID Partition Index Die type
+ * ============= ========= ===== ========
+ * 0 0 0 Compute
+ * 1 0 1 Compute
+ * 2 1 0 Compute
+ * 3 1 1 Compute
+ * 4 0 2 IO
+ * 5 1 2 IO
+ * ============= ========= ===== ========
+ *
+ * Return: %-EINVAL on error, otherwise mapped domain ID >= 0.
+ */
+static int map_partition_power_domain_id(struct tpmi_sst_struct *sst_inst, u8 id, u8 *partition)
+{
+ u8 i, io_start_id, max_part;
+
+ *partition = 0;
+
+ /* If any PCI device for partition is unbound, treat this as failure */
+ if (sst_inst->partition_mask != sst_inst->partition_mask_current)
+ return -EINVAL;
+
+ max_part = hweight8(sst_inst->partition_mask);
+
+ /* IO Index begin here */
+ io_start_id = fls(sst_inst->cdie_mask[max_part - 1]);
+
+ if (id < io_start_id)
+ return map_cdies(sst_inst, id, partition);
+
+ for (i = 0; i < max_part; i++) {
+ u8 io_id;
+
+ io_id = id - io_start_id;
+ if (io_id < sst_inst->io_dies[i]) {
+ u8 cdie_range;
+
+ cdie_range = fls(sst_inst->cdie_mask[i]) - ffs(sst_inst->cdie_mask[i]) + 1;
+ *partition = i;
+ return cdie_range + io_id;
+ }
+ io_start_id += sst_inst->io_dies[i];
+ }
+
+ return -EINVAL;
+}
+
/*
* Map a package and power_domain id to SST information structure unique for a power_domain.
* The caller should call under isst_tpmi_dev_lock.
@@ -395,19 +531,20 @@ static struct tpmi_per_power_domain_info *get_instance(int pkg_id, int power_dom
{
struct tpmi_per_power_domain_info *power_domain_info;
struct tpmi_sst_struct *sst_inst;
+ u8 part;
- if (pkg_id < 0 || pkg_id > isst_common.max_index ||
- pkg_id >= topology_max_packages())
+ if (!in_range(pkg_id, 0, topology_max_packages()) || pkg_id > isst_common.max_index)
return NULL;
sst_inst = isst_common.sst_inst[pkg_id];
if (!sst_inst)
return NULL;
- if (power_domain_id < 0 || power_domain_id >= sst_inst->number_of_power_domains)
+ power_domain_id = map_partition_power_domain_id(sst_inst, power_domain_id, &part);
+ if (power_domain_id < 0)
return NULL;
- power_domain_info = &sst_inst->power_domain_info[power_domain_id];
+ power_domain_info = &sst_inst->power_domain_info[part][power_domain_id];
if (power_domain_info && !power_domain_info->sst_base)
return NULL;
@@ -462,10 +599,10 @@ static long isst_if_core_power_state(void __user *argp)
struct tpmi_per_power_domain_info *power_domain_info;
struct isst_core_power core_power;
- if (disable_dynamic_sst_features())
+ if (copy_from_user(&core_power, argp, sizeof(core_power)))
return -EFAULT;
- if (copy_from_user(&core_power, argp, sizeof(core_power)))
+ if (core_power.get_set && disable_dynamic_sst_features())
return -EFAULT;
power_domain_info = get_instance(core_power.socket_id, core_power.power_domain_id);
@@ -579,6 +716,7 @@ static long isst_if_clos_assoc(void __user *argp)
struct tpmi_sst_struct *sst_inst;
int offset, shift, cpu;
u64 val, mask, clos;
+ u8 part;
if (copy_from_user(&clos_assoc, ptr, sizeof(clos_assoc)))
return -EFAULT;
@@ -602,10 +740,11 @@ static long isst_if_clos_assoc(void __user *argp)
sst_inst = isst_common.sst_inst[pkg_id];
- if (clos_assoc.power_domain_id > sst_inst->number_of_power_domains)
+ punit_id = map_partition_power_domain_id(sst_inst, punit_id, &part);
+ if (punit_id < 0)
return -EINVAL;
- power_domain_info = &sst_inst->power_domain_info[punit_id];
+ power_domain_info = &sst_inst->power_domain_info[part][punit_id];
if (assoc_cmds.get_set && power_domain_info->write_blocked)
return -EPERM;
@@ -708,6 +847,8 @@ static int isst_if_get_perf_level(void __user *argp)
{
struct isst_perf_level_info perf_level;
struct tpmi_per_power_domain_info *power_domain_info;
+ unsigned long level_mask;
+ u8 level, support;
if (copy_from_user(&perf_level, argp, sizeof(perf_level)))
return -EFAULT;
@@ -727,12 +868,34 @@ static int isst_if_get_perf_level(void __user *argp)
SST_PP_FEATURE_STATE_START, SST_PP_FEATURE_STATE_WIDTH, SST_MUL_FACTOR_NONE)
perf_level.enabled = !!(power_domain_info->sst_header.cap_mask & BIT(1));
- _read_bf_level_info("bf_support", perf_level.sst_bf_support, 0, 0,
- SST_BF_FEATURE_SUPPORTED_START, SST_BF_FEATURE_SUPPORTED_WIDTH,
- SST_MUL_FACTOR_NONE);
- _read_tf_level_info("tf_support", perf_level.sst_tf_support, 0, 0,
- SST_TF_FEATURE_SUPPORTED_START, SST_TF_FEATURE_SUPPORTED_WIDTH,
- SST_MUL_FACTOR_NONE);
+ level_mask = perf_level.level_mask;
+ perf_level.sst_bf_support = 0;
+ for_each_set_bit(level, &level_mask, BITS_PER_BYTE) {
+ /*
+ * Read BF support for a level. The macro below stores the
+ * read result in the "support" variable.
+ */
+ _read_bf_level_info("bf_support", support, level, 0, SST_BF_FEATURE_SUPPORTED_START,
+ SST_BF_FEATURE_SUPPORTED_WIDTH, SST_MUL_FACTOR_NONE);
+
+ /* If supported, set the bit for this level */
+ if (support)
+ perf_level.sst_bf_support |= BIT(level);
+ }
+
+ perf_level.sst_tf_support = 0;
+ for_each_set_bit(level, &level_mask, BITS_PER_BYTE) {
+ /*
+ * Read TF support for a level. The macro below stores the
+ * read result in the "support" variable.
+ */
+ _read_tf_level_info("tf_support", support, level, 0, SST_TF_FEATURE_SUPPORTED_START,
+ SST_TF_FEATURE_SUPPORTED_WIDTH, SST_MUL_FACTOR_NONE);
+
+ /* If supported, set the bit for this level */
+ if (support)
+ perf_level.sst_tf_support |= BIT(level);
+ }
if (copy_to_user(argp, &perf_level, sizeof(perf_level)))
return -EFAULT;
@@ -1134,18 +1297,28 @@ static int isst_if_get_tpmi_instance_count(void __user *argp)
if (tpmi_inst.socket_id >= topology_max_packages())
return -EINVAL;
- tpmi_inst.count = isst_common.sst_inst[tpmi_inst.socket_id]->number_of_power_domains;
-
sst_inst = isst_common.sst_inst[tpmi_inst.socket_id];
+
+ tpmi_inst.count = isst_instance_count(sst_inst);
+
tpmi_inst.valid_mask = 0;
- for (i = 0; i < sst_inst->number_of_power_domains; ++i) {
+ for (i = 0; i < tpmi_inst.count; i++) {
struct tpmi_per_power_domain_info *pd_info;
+ u8 part;
+ int pd;
- pd_info = &sst_inst->power_domain_info[i];
+ pd = map_partition_power_domain_id(sst_inst, i, &part);
+ if (pd < 0)
+ continue;
+
+ pd_info = &sst_inst->power_domain_info[part][pd];
if (pd_info->sst_base)
tpmi_inst.valid_mask |= BIT(i);
}
+ if (!tpmi_inst.valid_mask)
+ tpmi_inst.count = 0;
+
if (copy_to_user(argp, &tpmi_inst, sizeof(tpmi_inst)))
return -EFAULT;
@@ -1271,112 +1444,191 @@ static long isst_if_def_ioctl(struct file *file, unsigned int cmd,
int tpmi_sst_dev_add(struct auxiliary_device *auxdev)
{
+ struct tpmi_per_power_domain_info *pd_info;
bool read_blocked = 0, write_blocked = 0;
struct intel_tpmi_plat_info *plat_info;
+ struct device *dev = &auxdev->dev;
struct tpmi_sst_struct *tpmi_sst;
- int i, ret, pkg = 0, inst = 0;
- int num_resources;
+ u8 i, num_resources, io_die_cnt;
+ int ret, pkg = 0, inst = 0;
+ bool first_enum = false;
+ u16 cdie_mask;
+ u8 partition;
ret = tpmi_get_feature_status(auxdev, TPMI_ID_SST, &read_blocked, &write_blocked);
if (ret)
- dev_info(&auxdev->dev, "Can't read feature status: ignoring read/write blocked status\n");
+ dev_info(dev, "Can't read feature status: ignoring read/write blocked status\n");
if (read_blocked) {
- dev_info(&auxdev->dev, "Firmware has blocked reads, exiting\n");
+ dev_info(dev, "Firmware has blocked reads, exiting\n");
return -ENODEV;
}
plat_info = tpmi_get_platform_data(auxdev);
if (!plat_info) {
- dev_err(&auxdev->dev, "No platform info\n");
+ dev_err(dev, "No platform info\n");
return -EINVAL;
}
pkg = plat_info->package_id;
if (pkg >= topology_max_packages()) {
- dev_err(&auxdev->dev, "Invalid package id :%x\n", pkg);
+ dev_err(dev, "Invalid package id :%x\n", pkg);
return -EINVAL;
}
- if (isst_common.sst_inst[pkg])
- return -EEXIST;
+ partition = plat_info->partition;
+ if (partition >= SST_MAX_PARTITIONS) {
+ dev_err(dev, "Invalid partition :%x\n", partition);
+ return -EINVAL;
+ }
num_resources = tpmi_get_resource_count(auxdev);
if (!num_resources)
return -EINVAL;
- tpmi_sst = devm_kzalloc(&auxdev->dev, sizeof(*tpmi_sst), GFP_KERNEL);
- if (!tpmi_sst)
- return -ENOMEM;
+ mutex_lock(&isst_tpmi_dev_lock);
- tpmi_sst->power_domain_info = devm_kcalloc(&auxdev->dev, num_resources,
- sizeof(*tpmi_sst->power_domain_info),
- GFP_KERNEL);
- if (!tpmi_sst->power_domain_info)
- return -ENOMEM;
+ if (isst_common.sst_inst[pkg]) {
+ tpmi_sst = isst_common.sst_inst[pkg];
+ } else {
+ /*
+ * A tpmi_sst instance is per package, so it needs to be
+ * allocated only once for both partitions. We can't use
+ * devm_* allocation here because each partition is a
+ * different device, which can be unbound independently.
+ */
+ tpmi_sst = kzalloc(sizeof(*tpmi_sst), GFP_KERNEL);
+ if (!tpmi_sst) {
+ ret = -ENOMEM;
+ goto unlock_exit;
+ }
+ first_enum = true;
+ }
- tpmi_sst->number_of_power_domains = num_resources;
+ ret = 0;
+
+ pd_info = devm_kcalloc(dev, num_resources, sizeof(*pd_info), GFP_KERNEL);
+ if (!pd_info) {
+ ret = -ENOMEM;
+ goto unlock_free;
+ }
+
+ /* Get the IO die count, if cdie_mask is present */
+ if (plat_info->cdie_mask) {
+ u8 cdie_range;
+
+ cdie_mask = plat_info->cdie_mask;
+ cdie_range = fls(cdie_mask) - ffs(cdie_mask) + 1;
+ io_die_cnt = num_resources - cdie_range;
+ } else {
+ /*
+ * This is a synthetic mask; be careful when assuming
+ * that these are compute dies only.
+ */
+ cdie_mask = (1 << num_resources) - 1;
+ io_die_cnt = 0;
+ }
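Worked numbers for the branch above, using the cdie_mask of one partition from the mapping example earlier (the values are illustrative):

	/*
	 * Example: cdie_mask = 0xc, num_resources = 3
	 *   cdie_range = fls(0xc) - ffs(0xc) + 1 = 4 - 3 + 1 = 2
	 *   io_die_cnt = 3 - 2 = 1
	 */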
for (i = 0; i < num_resources; ++i) {
struct resource *res;
res = tpmi_get_resource_at_index(auxdev, i);
if (!res) {
- tpmi_sst->power_domain_info[i].sst_base = NULL;
+ pd_info[i].sst_base = NULL;
continue;
}
- tpmi_sst->power_domain_info[i].package_id = pkg;
- tpmi_sst->power_domain_info[i].power_domain_id = i;
- tpmi_sst->power_domain_info[i].auxdev = auxdev;
- tpmi_sst->power_domain_info[i].write_blocked = write_blocked;
- tpmi_sst->power_domain_info[i].sst_base = devm_ioremap_resource(&auxdev->dev, res);
- if (IS_ERR(tpmi_sst->power_domain_info[i].sst_base))
- return PTR_ERR(tpmi_sst->power_domain_info[i].sst_base);
-
- ret = sst_main(auxdev, &tpmi_sst->power_domain_info[i]);
- if (ret) {
- devm_iounmap(&auxdev->dev, tpmi_sst->power_domain_info[i].sst_base);
- tpmi_sst->power_domain_info[i].sst_base = NULL;
+ pd_info[i].package_id = pkg;
+ pd_info[i].power_domain_id = i;
+ pd_info[i].auxdev = auxdev;
+ pd_info[i].write_blocked = write_blocked;
+ pd_info[i].sst_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pd_info[i].sst_base)) {
+ ret = PTR_ERR(pd_info[i].sst_base);
+ goto unlock_free;
+ }
+
+ if (sst_main(auxdev, &pd_info[i])) {
+ /*
+ * This entry is not valid: hardware can partially
+ * populate dies, in which case MMIO will read as 0xFFs.
+ * It is also possible that some pre-production hardware
+ * has invalid data. Don't fail; continue to use the
+ * other dies with valid data.
+ */
+ devm_iounmap(dev, pd_info[i].sst_base);
+ pd_info[i].sst_base = NULL;
continue;
}
++inst;
}
- if (!inst)
- return -ENODEV;
+ if (!inst) {
+ ret = -ENODEV;
+ goto unlock_free;
+ }
tpmi_sst->package_id = pkg;
+
+ tpmi_sst->power_domain_info[partition] = pd_info;
+ tpmi_sst->number_of_power_domains[partition] = num_resources;
+ tpmi_sst->cdie_mask[partition] = cdie_mask;
+ tpmi_sst->io_dies[partition] = io_die_cnt;
+ tpmi_sst->partition_mask |= BIT(partition);
+ tpmi_sst->partition_mask_current |= BIT(partition);
+
auxiliary_set_drvdata(auxdev, tpmi_sst);
- mutex_lock(&isst_tpmi_dev_lock);
if (isst_common.max_index < pkg)
isst_common.max_index = pkg;
isst_common.sst_inst[pkg] = tpmi_sst;
+
+unlock_free:
+ if (ret && first_enum)
+ kfree(tpmi_sst);
+unlock_exit:
mutex_unlock(&isst_tpmi_dev_lock);
- return 0;
+ return ret;
}
-EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_add, INTEL_TPMI_SST);
+EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_add, "INTEL_TPMI_SST");
void tpmi_sst_dev_remove(struct auxiliary_device *auxdev)
{
struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev);
+ struct intel_tpmi_plat_info *plat_info;
+
+ plat_info = tpmi_get_platform_data(auxdev);
+ if (!plat_info)
+ return;
mutex_lock(&isst_tpmi_dev_lock);
- isst_common.sst_inst[tpmi_sst->package_id] = NULL;
+ tpmi_sst->power_domain_info[plat_info->partition] = NULL;
+ tpmi_sst->partition_mask_current &= ~BIT(plat_info->partition);
+ /* Free the package instance when all the partitions are removed */
+ if (!tpmi_sst->partition_mask_current) {
+ isst_common.sst_inst[tpmi_sst->package_id] = NULL;
+ kfree(tpmi_sst);
+ }
mutex_unlock(&isst_tpmi_dev_lock);
}
-EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_remove, INTEL_TPMI_SST);
+EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_remove, "INTEL_TPMI_SST");
void tpmi_sst_dev_suspend(struct auxiliary_device *auxdev)
{
struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev);
- struct tpmi_per_power_domain_info *power_domain_info = tpmi_sst->power_domain_info;
+ struct tpmi_per_power_domain_info *power_domain_info;
+ struct intel_tpmi_plat_info *plat_info;
void __iomem *cp_base;
+ plat_info = tpmi_get_platform_data(auxdev);
+ if (!plat_info)
+ return;
+
+ power_domain_info = tpmi_sst->power_domain_info[plat_info->partition];
+
cp_base = power_domain_info->sst_base + power_domain_info->sst_header.cp_offset;
power_domain_info->saved_sst_cp_control = readq(cp_base + SST_CP_CONTROL_OFFSET);
@@ -1390,14 +1642,21 @@ void tpmi_sst_dev_suspend(struct auxiliary_device *auxdev)
power_domain_info->sst_header.pp_offset +
SST_PP_CONTROL_OFFSET);
}
-EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_suspend, INTEL_TPMI_SST);
+EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_suspend, "INTEL_TPMI_SST");
void tpmi_sst_dev_resume(struct auxiliary_device *auxdev)
{
struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev);
- struct tpmi_per_power_domain_info *power_domain_info = tpmi_sst->power_domain_info;
+ struct tpmi_per_power_domain_info *power_domain_info;
+ struct intel_tpmi_plat_info *plat_info;
void __iomem *cp_base;
+ plat_info = tpmi_get_platform_data(auxdev);
+ if (!plat_info)
+ return;
+
+ power_domain_info = tpmi_sst->power_domain_info[plat_info->partition];
+
cp_base = power_domain_info->sst_base + power_domain_info->sst_header.cp_offset;
writeq(power_domain_info->saved_sst_cp_control, cp_base + SST_CP_CONTROL_OFFSET);
@@ -1410,9 +1669,9 @@ void tpmi_sst_dev_resume(struct auxiliary_device *auxdev)
writeq(power_domain_info->saved_pp_control, power_domain_info->sst_base +
power_domain_info->sst_header.pp_offset + SST_PP_CONTROL_OFFSET);
}
-EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_resume, INTEL_TPMI_SST);
+EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_resume, "INTEL_TPMI_SST");
-#define ISST_TPMI_API_VERSION 0x02
+#define ISST_TPMI_API_VERSION 0x03
int tpmi_sst_init(void)
{
@@ -1450,7 +1709,7 @@ init_done:
mutex_unlock(&isst_tpmi_dev_lock);
return ret;
}
-EXPORT_SYMBOL_NS_GPL(tpmi_sst_init, INTEL_TPMI_SST);
+EXPORT_SYMBOL_NS_GPL(tpmi_sst_init, "INTEL_TPMI_SST");
void tpmi_sst_exit(void)
{
@@ -1464,9 +1723,10 @@ void tpmi_sst_exit(void)
}
mutex_unlock(&isst_tpmi_dev_lock);
}
-EXPORT_SYMBOL_NS_GPL(tpmi_sst_exit, INTEL_TPMI_SST);
+EXPORT_SYMBOL_NS_GPL(tpmi_sst_exit, "INTEL_TPMI_SST");
-MODULE_IMPORT_NS(INTEL_TPMI);
-MODULE_IMPORT_NS(INTEL_TPMI_POWER_DOMAIN);
+MODULE_IMPORT_NS("INTEL_TPMI");
+MODULE_IMPORT_NS("INTEL_TPMI_POWER_DOMAIN");
+MODULE_DESCRIPTION("ISST TPMI interface module");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel/telemetry/debugfs.c b/drivers/platform/x86/intel/telemetry/debugfs.c
index 1d4d0fbfd63c..70e5736c44c7 100644
--- a/drivers/platform/x86/intel/telemetry/debugfs.c
+++ b/drivers/platform/x86/intel/telemetry/debugfs.c
@@ -308,8 +308,8 @@ static struct telemetry_debugfs_conf telem_apl_debugfs_conf = {
};
static const struct x86_cpu_id telemetry_debugfs_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &telem_apl_debugfs_conf),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &telem_apl_debugfs_conf),
+ X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &telem_apl_debugfs_conf),
+ X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_PLUS, &telem_apl_debugfs_conf),
{}
};
MODULE_DEVICE_TABLE(x86cpu, telemetry_debugfs_cpu_ids);
diff --git a/drivers/platform/x86/intel/telemetry/pltdrv.c b/drivers/platform/x86/intel/telemetry/pltdrv.c
index 06311d0e9451..9a499efa1e4d 100644
--- a/drivers/platform/x86/intel/telemetry/pltdrv.c
+++ b/drivers/platform/x86/intel/telemetry/pltdrv.c
@@ -177,8 +177,8 @@ static struct telemetry_plt_config telem_glk_config = {
};
static const struct x86_cpu_id telemetry_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &telem_apl_config),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &telem_glk_config),
+ X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &telem_apl_config),
+ X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_PLUS, &telem_glk_config),
{}
};
@@ -1163,7 +1163,7 @@ static void telemetry_pltdrv_remove(struct platform_device *pdev)
static struct platform_driver telemetry_soc_driver = {
.probe = telemetry_pltdrv_probe,
- .remove_new = telemetry_pltdrv_remove,
+ .remove = telemetry_pltdrv_remove,
.driver = {
.name = DRIVER_NAME,
},
diff --git a/drivers/platform/x86/intel/tpmi_power_domains.c b/drivers/platform/x86/intel/tpmi_power_domains.c
new file mode 100644
index 000000000000..2f01cd22a6ee
--- /dev/null
+++ b/drivers/platform/x86/intel/tpmi_power_domains.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Mapping of TPMI power domains to CPUs
+ *
+ * Copyright (c) 2024, Intel Corporation.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/cpuhotplug.h>
+#include <linux/cpumask.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/hashtable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/overflow.h>
+#include <linux/slab.h>
+#include <linux/topology.h>
+#include <linux/types.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+#include <asm/msr.h>
+
+#include "tpmi_power_domains.h"
+
+#define MSR_PM_LOGICAL_ID 0x54
+
+/*
+ * Layout of MSR 0x54
+ * [15:11] PM_DOMAIN_ID
+ * [10:3] MODULE_ID (aka IDI_AGENT_ID)
+ * [2:0] LP_ID
+ * For Atom:
+ * [2] Always 0
+ * [1:0] core ID within module
+ * For Core:
+ * [2:1] Always 0
+ * [0] thread ID
+ */
+
+#define LP_ID_MASK GENMASK_ULL(2, 0)
+#define MODULE_ID_MASK GENMASK_ULL(10, 3)
+#define PM_DOMAIN_ID_MASK GENMASK_ULL(15, 11)
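To make the field layout concrete, here is a hand decode of a hypothetical raw value (0x849 is invented for this example); tpmi_get_logical_id() below does the same thing with FIELD_GET():

/*
 * data = 0x849 (hypothetical):
 *   PM_DOMAIN_ID = (0x849 >> 11) & 0x1f = 1   -> power domain 1
 *   MODULE_ID    = (0x849 >> 3)  & 0xff = 9   -> punit core/module 9
 *   LP_ID        =  0x849        & 0x7  = 1   -> thread 1 within the core
 */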
+
+/**
+ * struct tpmi_cpu_info - Mapping information for a CPU
+ * @hnode: Used to add mapping information to hash list
+ * @linux_cpu: Linux CPU number
+ * @pkg_id: Package ID of this CPU
+ * @punit_thread_id: Punit thread ID of this CPU
+ * @punit_core_id: Punit core ID
+ * @punit_domain_id: Power domain ID from Punit
+ *
+ * Structure to store mapping information for a Linux CPU
+ * to a Punit core, thread and power domain.
+ */
+struct tpmi_cpu_info {
+ struct hlist_node hnode;
+ int linux_cpu;
+ u8 pkg_id;
+ u8 punit_thread_id;
+ u8 punit_core_id;
+ u8 punit_domain_id;
+};
+
+static DEFINE_PER_CPU(struct tpmi_cpu_info, tpmi_cpu_info);
+
+/* The dynamically assigned cpu hotplug state to free later */
+static enum cpuhp_state tpmi_hp_state __read_mostly;
+
+#define MAX_POWER_DOMAINS 8
+
+static cpumask_t *tpmi_power_domain_mask;
+
+/* Lock to protect tpmi_power_domain_mask and tpmi_cpu_hash */
+static DEFINE_MUTEX(tpmi_lock);
+
+static const struct x86_cpu_id tpmi_cpu_ids[] = {
+ X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, NULL),
+ X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, NULL),
+ X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, NULL),
+ X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X, NULL),
+ X86_MATCH_VFM(INTEL_GRANITERAPIDS_D, NULL),
+ X86_MATCH_VFM(INTEL_PANTHERCOVE_X, NULL),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, tpmi_cpu_ids);
+
+static DECLARE_HASHTABLE(tpmi_cpu_hash, 8);
+
+static bool tpmi_domain_is_valid(struct tpmi_cpu_info *info)
+{
+ return info->pkg_id < topology_max_packages() &&
+ info->punit_domain_id < MAX_POWER_DOMAINS;
+}
+
+int tpmi_get_linux_cpu_number(int package_id, int domain_id, int punit_core_id)
+{
+ struct tpmi_cpu_info *info;
+ int ret = -EINVAL;
+
+ guard(mutex)(&tpmi_lock);
+ hash_for_each_possible(tpmi_cpu_hash, info, hnode, punit_core_id) {
+ if (info->punit_domain_id == domain_id && info->pkg_id == package_id) {
+ ret = info->linux_cpu;
+ break;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(tpmi_get_linux_cpu_number, "INTEL_TPMI_POWER_DOMAIN");
+
+int tpmi_get_punit_core_number(int cpu_no)
+{
+ if (cpu_no >= num_possible_cpus())
+ return -EINVAL;
+
+ return per_cpu(tpmi_cpu_info, cpu_no).punit_core_id;
+}
+EXPORT_SYMBOL_NS_GPL(tpmi_get_punit_core_number, "INTEL_TPMI_POWER_DOMAIN");
+
+int tpmi_get_power_domain_id(int cpu_no)
+{
+ if (cpu_no >= num_possible_cpus())
+ return -EINVAL;
+
+ return per_cpu(tpmi_cpu_info, cpu_no).punit_domain_id;
+}
+EXPORT_SYMBOL_NS_GPL(tpmi_get_power_domain_id, "INTEL_TPMI_POWER_DOMAIN");
+
+cpumask_t *tpmi_get_power_domain_mask(int cpu_no)
+{
+ struct tpmi_cpu_info *info;
+ cpumask_t *mask;
+ int index;
+
+ if (cpu_no >= num_possible_cpus())
+ return NULL;
+
+ info = &per_cpu(tpmi_cpu_info, cpu_no);
+ if (!tpmi_domain_is_valid(info))
+ return NULL;
+
+ index = info->pkg_id * MAX_POWER_DOMAINS + info->punit_domain_id;
+ guard(mutex)(&tpmi_lock);
+ mask = &tpmi_power_domain_mask[index];
+
+ return mask;
+}
+EXPORT_SYMBOL_NS_GPL(tpmi_get_power_domain_mask, "INTEL_TPMI_POWER_DOMAIN");
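These exports live in the INTEL_TPMI_POWER_DOMAIN namespace; a minimal sketch of a consumer module is shown below. The module itself and the choice of CPU 0 are illustrative assumptions, not an in-tree user:

// SPDX-License-Identifier: GPL-2.0-only
/* Hypothetical consumer sketch, not an in-tree module. */
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/printk.h>

#include "tpmi_power_domains.h"

static int __init pd_consumer_init(void)
{
	cpumask_t *mask = tpmi_get_power_domain_mask(0);
	int domain = tpmi_get_power_domain_id(0);

	if (!mask || domain < 0)
		return -ENODEV;

	/* Report CPU0's power domain and the CPUs sharing it */
	pr_info("cpu0: power domain %d, siblings %*pbl\n",
		domain, cpumask_pr_args(mask));

	return 0;
}
module_init(pd_consumer_init);

MODULE_IMPORT_NS("INTEL_TPMI_POWER_DOMAIN");
MODULE_DESCRIPTION("Illustrative TPMI power domains consumer");
MODULE_LICENSE("GPL");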
+
+static int tpmi_get_logical_id(unsigned int cpu, struct tpmi_cpu_info *info)
+{
+ u64 data;
+ int ret;
+
+ ret = rdmsrl_safe(MSR_PM_LOGICAL_ID, &data);
+ if (ret)
+ return ret;
+
+ info->punit_domain_id = FIELD_GET(PM_DOMAIN_ID_MASK, data);
+ if (info->punit_domain_id >= MAX_POWER_DOMAINS)
+ return -EINVAL;
+
+ info->punit_thread_id = FIELD_GET(LP_ID_MASK, data);
+ info->punit_core_id = FIELD_GET(MODULE_ID_MASK, data);
+ info->pkg_id = topology_physical_package_id(cpu);
+ info->linux_cpu = cpu;
+
+ return 0;
+}
+
+static int tpmi_cpu_online(unsigned int cpu)
+{
+ struct tpmi_cpu_info *info = &per_cpu(tpmi_cpu_info, cpu);
+ int ret, index;
+
+ /* Don't fail CPU online because of a bad CPU mapping */
+ ret = tpmi_get_logical_id(cpu, info);
+ if (ret)
+ return 0;
+
+ index = info->pkg_id * MAX_POWER_DOMAINS + info->punit_domain_id;
+
+ guard(mutex)(&tpmi_lock);
+ cpumask_set_cpu(cpu, &tpmi_power_domain_mask[index]);
+ hash_add(tpmi_cpu_hash, &info->hnode, info->punit_core_id);
+
+ return 0;
+}
+
+static int __init tpmi_init(void)
+{
+ const struct x86_cpu_id *id;
+ u64 data;
+ int ret;
+
+ id = x86_match_cpu(tpmi_cpu_ids);
+ if (!id)
+ return -ENODEV;
+
+ /* Check for MSR 0x54 presence */
+ ret = rdmsrl_safe(MSR_PM_LOGICAL_ID, &data);
+ if (ret)
+ return ret;
+
+ tpmi_power_domain_mask = kcalloc(size_mul(topology_max_packages(), MAX_POWER_DOMAINS),
+ sizeof(*tpmi_power_domain_mask), GFP_KERNEL);
+ if (!tpmi_power_domain_mask)
+ return -ENOMEM;
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "platform/x86/tpmi_power_domains:online",
+ tpmi_cpu_online, NULL);
+ if (ret < 0) {
+ kfree(tpmi_power_domain_mask);
+ return ret;
+ }
+
+ tpmi_hp_state = ret;
+
+ return 0;
+}
+module_init(tpmi_init)
+
+static void __exit tpmi_exit(void)
+{
+ cpuhp_remove_state(tpmi_hp_state);
+ kfree(tpmi_power_domain_mask);
+}
+module_exit(tpmi_exit)
+
+MODULE_DESCRIPTION("TPMI Power Domains Mapping");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel/tpmi_power_domains.h b/drivers/platform/x86/intel/tpmi_power_domains.h
new file mode 100644
index 000000000000..e35750dd9273
--- /dev/null
+++ b/drivers/platform/x86/intel/tpmi_power_domains.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Mapping of TPMI power domains and CPUs
+ *
+ * Copyright (c) 2024, Intel Corporation.
+ */
+
+#ifndef _TPMI_POWER_DOMAINS_H_
+#define _TPMI_POWER_DOMAINS_H_
+
+#include <linux/cpumask.h>
+
+int tpmi_get_linux_cpu_number(int package_id, int domain_id, int punit_core_id);
+int tpmi_get_punit_core_number(int cpu_no);
+int tpmi_get_power_domain_id(int cpu_no);
+cpumask_t *tpmi_get_power_domain_mask(int cpu_no);
+
+#endif
diff --git a/drivers/platform/x86/intel/turbo_max_3.c b/drivers/platform/x86/intel/turbo_max_3.c
index 892140b62898..79a0bcdeffb8 100644
--- a/drivers/platform/x86/intel/turbo_max_3.c
+++ b/drivers/platform/x86/intel/turbo_max_3.c
@@ -114,8 +114,8 @@ static int itmt_legacy_cpu_online(unsigned int cpu)
}
static const struct x86_cpu_id itmt_legacy_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
+ X86_MATCH_VFM(INTEL_BROADWELL_X, NULL),
+ X86_MATCH_VFM(INTEL_SKYLAKE_X, NULL),
{}
};
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
index 33bb58dc3f78..4e2c6a2d7e6e 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
@@ -19,9 +19,8 @@ static int uncore_instance_count;
static DEFINE_IDA(intel_uncore_ida);
/* callbacks for actual HW read/write */
-static int (*uncore_read)(struct uncore_data *data, unsigned int *min, unsigned int *max);
-static int (*uncore_write)(struct uncore_data *data, unsigned int input, unsigned int min_max);
-static int (*uncore_read_freq)(struct uncore_data *data, unsigned int *freq);
+static int (*uncore_read)(struct uncore_data *data, unsigned int *value, enum uncore_index index);
+static int (*uncore_write)(struct uncore_data *data, unsigned int input, enum uncore_index index);
static ssize_t show_domain_id(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
@@ -44,36 +43,36 @@ static ssize_t show_package_id(struct kobject *kobj, struct kobj_attribute *attr
return sprintf(buf, "%u\n", data->package_id);
}
-static ssize_t show_min_max_freq_khz(struct uncore_data *data,
- char *buf, int min_max)
+static ssize_t show_attr(struct uncore_data *data, char *buf, enum uncore_index index)
{
- unsigned int min, max;
+ unsigned int value;
int ret;
mutex_lock(&uncore_lock);
- ret = uncore_read(data, &min, &max);
+ ret = uncore_read(data, &value, index);
mutex_unlock(&uncore_lock);
if (ret)
return ret;
- if (min_max)
- return sprintf(buf, "%u\n", max);
-
- return sprintf(buf, "%u\n", min);
+ return sprintf(buf, "%u\n", value);
}
-static ssize_t store_min_max_freq_khz(struct uncore_data *data,
- const char *buf, ssize_t count,
- int min_max)
+static ssize_t store_attr(struct uncore_data *data, const char *buf, ssize_t count,
+ enum uncore_index index)
{
- unsigned int input;
+ unsigned int input = 0;
int ret;
- if (kstrtouint(buf, 10, &input))
- return -EINVAL;
+ if (index == UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE) {
+ if (kstrtobool(buf, (bool *)&input))
+ return -EINVAL;
+ } else {
+ if (kstrtouint(buf, 10, &input))
+ return -EINVAL;
+ }
mutex_lock(&uncore_lock);
- ret = uncore_write(data, input, min_max);
+ ret = uncore_write(data, input, index);
mutex_unlock(&uncore_lock);
if (ret)
@@ -82,56 +81,44 @@ static ssize_t store_min_max_freq_khz(struct uncore_data *data,
return count;
}
-static ssize_t show_perf_status_freq_khz(struct uncore_data *data, char *buf)
-{
- unsigned int freq;
- int ret;
-
- mutex_lock(&uncore_lock);
- ret = uncore_read_freq(data, &freq);
- mutex_unlock(&uncore_lock);
- if (ret)
- return ret;
-
- return sprintf(buf, "%u\n", freq);
-}
-
-#define store_uncore_min_max(name, min_max) \
+#define store_uncore_attr(name, index) \
static ssize_t store_##name(struct kobject *kobj, \
struct kobj_attribute *attr, \
const char *buf, size_t count) \
{ \
struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
\
- return store_min_max_freq_khz(data, buf, count, \
- min_max); \
+ return store_attr(data, buf, count, index); \
}
-#define show_uncore_min_max(name, min_max) \
+#define show_uncore_attr(name, index) \
static ssize_t show_##name(struct kobject *kobj, \
struct kobj_attribute *attr, char *buf)\
{ \
struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
\
- return show_min_max_freq_khz(data, buf, min_max); \
+ return show_attr(data, buf, index); \
}
-#define show_uncore_perf_status(name) \
- static ssize_t show_##name(struct kobject *kobj, \
- struct kobj_attribute *attr, char *buf)\
- { \
- struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
- \
- return show_perf_status_freq_khz(data, buf); \
- }
+store_uncore_attr(min_freq_khz, UNCORE_INDEX_MIN_FREQ);
+store_uncore_attr(max_freq_khz, UNCORE_INDEX_MAX_FREQ);
+
+show_uncore_attr(min_freq_khz, UNCORE_INDEX_MIN_FREQ);
+show_uncore_attr(max_freq_khz, UNCORE_INDEX_MAX_FREQ);
-store_uncore_min_max(min_freq_khz, 0);
-store_uncore_min_max(max_freq_khz, 1);
+show_uncore_attr(current_freq_khz, UNCORE_INDEX_CURRENT_FREQ);
-show_uncore_min_max(min_freq_khz, 0);
-show_uncore_min_max(max_freq_khz, 1);
+store_uncore_attr(elc_low_threshold_percent, UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD);
+store_uncore_attr(elc_high_threshold_percent, UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD);
+store_uncore_attr(elc_high_threshold_enable,
+ UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE);
+store_uncore_attr(elc_floor_freq_khz, UNCORE_INDEX_EFF_LAT_CTRL_FREQ);
-show_uncore_perf_status(current_freq_khz);
+show_uncore_attr(elc_low_threshold_percent, UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD);
+show_uncore_attr(elc_high_threshold_percent, UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD);
+show_uncore_attr(elc_high_threshold_enable,
+ UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE);
+show_uncore_attr(elc_floor_freq_khz, UNCORE_INDEX_EFF_LAT_CTRL_FREQ);
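For reference, a hand-expanded sketch of what one of these instantiations becomes after preprocessing (approximate, whitespace aside):

/* show_uncore_attr(min_freq_khz, UNCORE_INDEX_MIN_FREQ) expands roughly to: */
static ssize_t show_min_freq_khz(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	struct uncore_data *data = container_of(attr, struct uncore_data,
						min_freq_khz_kobj_attr);

	return show_attr(data, buf, UNCORE_INDEX_MIN_FREQ);
}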
#define show_uncore_data(member_name) \
static ssize_t show_##member_name(struct kobject *kobj, \
@@ -176,7 +163,8 @@ show_uncore_data(initial_max_freq_khz);
static int create_attr_group(struct uncore_data *data, char *name)
{
- int ret, freq, index = 0;
+ int ret, index = 0;
+ unsigned int val;
init_attribute_rw(max_freq_khz);
init_attribute_rw(min_freq_khz);
@@ -198,10 +186,24 @@ static int create_attr_group(struct uncore_data *data, char *name)
data->uncore_attrs[index++] = &data->initial_min_freq_khz_kobj_attr.attr;
data->uncore_attrs[index++] = &data->initial_max_freq_khz_kobj_attr.attr;
- ret = uncore_read_freq(data, &freq);
+ ret = uncore_read(data, &val, UNCORE_INDEX_CURRENT_FREQ);
if (!ret)
data->uncore_attrs[index++] = &data->current_freq_khz_kobj_attr.attr;
+ ret = uncore_read(data, &val, UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD);
+ if (!ret) {
+ init_attribute_rw(elc_low_threshold_percent);
+ init_attribute_rw(elc_high_threshold_percent);
+ init_attribute_rw(elc_high_threshold_enable);
+ init_attribute_rw(elc_floor_freq_khz);
+
+ data->uncore_attrs[index++] = &data->elc_low_threshold_percent_kobj_attr.attr;
+ data->uncore_attrs[index++] = &data->elc_high_threshold_percent_kobj_attr.attr;
+ data->uncore_attrs[index++] =
+ &data->elc_high_threshold_enable_kobj_attr.attr;
+ data->uncore_attrs[index++] = &data->elc_floor_freq_khz_kobj_attr.attr;
+ }
+
data->uncore_attrs[index] = NULL;
data->uncore_attr_group.name = name;
@@ -238,7 +240,8 @@ int uncore_freq_add_entry(struct uncore_data *data, int cpu)
sprintf(data->name, "package_%02d_die_%02d", data->package_id, data->die_id);
}
- uncore_read(data, &data->initial_min_freq_khz, &data->initial_max_freq_khz);
+ uncore_read(data, &data->initial_min_freq_khz, UNCORE_INDEX_MIN_FREQ);
+ uncore_read(data, &data->initial_max_freq_khz, UNCORE_INDEX_MAX_FREQ);
ret = create_attr_group(data, data->name);
if (ret) {
@@ -254,7 +257,7 @@ uncore_unlock:
return ret;
}
-EXPORT_SYMBOL_NS_GPL(uncore_freq_add_entry, INTEL_UNCORE_FREQUENCY);
+EXPORT_SYMBOL_NS_GPL(uncore_freq_add_entry, "INTEL_UNCORE_FREQUENCY");
void uncore_freq_remove_die_entry(struct uncore_data *data)
{
@@ -267,17 +270,17 @@ void uncore_freq_remove_die_entry(struct uncore_data *data)
mutex_unlock(&uncore_lock);
}
-EXPORT_SYMBOL_NS_GPL(uncore_freq_remove_die_entry, INTEL_UNCORE_FREQUENCY);
+EXPORT_SYMBOL_NS_GPL(uncore_freq_remove_die_entry, "INTEL_UNCORE_FREQUENCY");
-int uncore_freq_common_init(int (*read_control_freq)(struct uncore_data *data, unsigned int *min, unsigned int *max),
- int (*write_control_freq)(struct uncore_data *data, unsigned int input, unsigned int set_max),
- int (*read_freq)(struct uncore_data *data, unsigned int *freq))
+int uncore_freq_common_init(int (*read)(struct uncore_data *data, unsigned int *value,
+ enum uncore_index index),
+ int (*write)(struct uncore_data *data, unsigned int input,
+ enum uncore_index index))
{
mutex_lock(&uncore_lock);
- uncore_read = read_control_freq;
- uncore_write = write_control_freq;
- uncore_read_freq = read_freq;
+ uncore_read = read;
+ uncore_write = write;
if (!uncore_root_kobj) {
struct device *dev_root = bus_get_dev_root(&cpu_subsys);
@@ -294,7 +297,7 @@ int uncore_freq_common_init(int (*read_control_freq)(struct uncore_data *data, u
return uncore_root_kobj ? 0 : -ENOMEM;
}
-EXPORT_SYMBOL_NS_GPL(uncore_freq_common_init, INTEL_UNCORE_FREQUENCY);
+EXPORT_SYMBOL_NS_GPL(uncore_freq_common_init, "INTEL_UNCORE_FREQUENCY");
void uncore_freq_common_exit(void)
{
@@ -306,7 +309,7 @@ void uncore_freq_common_exit(void)
}
mutex_unlock(&uncore_lock);
}
-EXPORT_SYMBOL_NS_GPL(uncore_freq_common_exit, INTEL_UNCORE_FREQUENCY);
+EXPORT_SYMBOL_NS_GPL(uncore_freq_common_exit, "INTEL_UNCORE_FREQUENCY");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
index 0e5bf507e555..26c854cd5d97 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
@@ -34,6 +34,13 @@
* @domain_id_kobj_attr: Storage for kobject attribute domain_id
* @fabric_cluster_id_kobj_attr: Storage for kobject attribute fabric_cluster_id
* @package_id_kobj_attr: Storage for kobject attribute package_id
+ * @elc_low_threshold_percent_kobj_attr:
+ *	Storage for kobject attribute elc_low_threshold_percent
+ * @elc_high_threshold_percent_kobj_attr:
+ *	Storage for kobject attribute elc_high_threshold_percent
+ * @elc_high_threshold_enable_kobj_attr:
+ *	Storage for kobject attribute elc_high_threshold_enable
+ * @elc_floor_freq_khz_kobj_attr: Storage for kobject attribute elc_floor_freq_khz
* @uncore_attrs: Attribute storage for group creation
*
* This structure is used to encapsulate all data related to uncore sysfs
@@ -61,14 +68,29 @@ struct uncore_data {
struct kobj_attribute domain_id_kobj_attr;
struct kobj_attribute fabric_cluster_id_kobj_attr;
struct kobj_attribute package_id_kobj_attr;
- struct attribute *uncore_attrs[9];
+ struct kobj_attribute elc_low_threshold_percent_kobj_attr;
+ struct kobj_attribute elc_high_threshold_percent_kobj_attr;
+ struct kobj_attribute elc_high_threshold_enable_kobj_attr;
+ struct kobj_attribute elc_floor_freq_khz_kobj_attr;
+ struct attribute *uncore_attrs[13];
};
#define UNCORE_DOMAIN_ID_INVALID -1
-int uncore_freq_common_init(int (*read_control_freq)(struct uncore_data *data, unsigned int *min, unsigned int *max),
- int (*write_control_freq)(struct uncore_data *data, unsigned int input, unsigned int min_max),
- int (*uncore_read_freq)(struct uncore_data *data, unsigned int *freq));
+enum uncore_index {
+ UNCORE_INDEX_MIN_FREQ,
+ UNCORE_INDEX_MAX_FREQ,
+ UNCORE_INDEX_CURRENT_FREQ,
+ UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD,
+ UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD,
+ UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE,
+ UNCORE_INDEX_EFF_LAT_CTRL_FREQ,
+};
+
+int uncore_freq_common_init(int (*read)(struct uncore_data *data, unsigned int *value,
+ enum uncore_index index),
+ int (*write)(struct uncore_data *data, unsigned int input,
+ enum uncore_index index));
void uncore_freq_common_exit(void);
int uncore_freq_add_entry(struct uncore_data *data, int cpu);
void uncore_freq_remove_die_entry(struct uncore_data *data);
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
index bd75d61ff8a6..4aa6c227ec82 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
@@ -29,7 +29,8 @@
#include "uncore-frequency-common.h"
#define UNCORE_MAJOR_VERSION 0
-#define UNCORE_MINOR_VERSION 1
+#define UNCORE_MINOR_VERSION 2
+#define UNCORE_ELC_SUPPORTED_VERSION 2
#define UNCORE_HEADER_INDEX 0
#define UNCORE_FABRIC_CLUSTER_OFFSET 8
@@ -46,6 +47,7 @@ struct tpmi_uncore_struct;
/* Information for each cluster */
struct tpmi_uncore_cluster_info {
bool root_domain;
+ bool elc_supported;
u8 __iomem *cluster_base;
struct uncore_data uncore_data;
struct tpmi_uncore_struct *uncore_root;
@@ -69,26 +71,77 @@ struct tpmi_uncore_struct {
bool write_blocked;
};
-#define UNCORE_GENMASK_MIN_RATIO GENMASK_ULL(21, 15)
-#define UNCORE_GENMASK_MAX_RATIO GENMASK_ULL(14, 8)
-#define UNCORE_GENMASK_CURRENT_RATIO GENMASK_ULL(6, 0)
+/* Bit definitions for STATUS register */
+#define UNCORE_CURRENT_RATIO_MASK GENMASK_ULL(6, 0)
+
+/* Bit definitions for CONTROL register */
+#define UNCORE_MAX_RATIO_MASK GENMASK_ULL(14, 8)
+#define UNCORE_MIN_RATIO_MASK GENMASK_ULL(21, 15)
+#define UNCORE_EFF_LAT_CTRL_RATIO_MASK GENMASK_ULL(28, 22)
+#define UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK GENMASK_ULL(38, 32)
+#define UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE BIT(39)
+#define UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK GENMASK_ULL(46, 40)
/* Helper function to read MMIO offset for max/min control frequency */
static void read_control_freq(struct tpmi_uncore_cluster_info *cluster_info,
- unsigned int *min, unsigned int *max)
+ unsigned int *value, enum uncore_index index)
{
u64 control;
control = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX);
- *max = FIELD_GET(UNCORE_GENMASK_MAX_RATIO, control) * UNCORE_FREQ_KHZ_MULTIPLIER;
- *min = FIELD_GET(UNCORE_GENMASK_MIN_RATIO, control) * UNCORE_FREQ_KHZ_MULTIPLIER;
+ if (index == UNCORE_INDEX_MAX_FREQ)
+ *value = FIELD_GET(UNCORE_MAX_RATIO_MASK, control) * UNCORE_FREQ_KHZ_MULTIPLIER;
+ else
+ *value = FIELD_GET(UNCORE_MIN_RATIO_MASK, control) * UNCORE_FREQ_KHZ_MULTIPLIER;
+}
+
+/* Helper function to read efficiency latency control values over MMIO */
+static int read_eff_lat_ctrl(struct uncore_data *data, unsigned int *val, enum uncore_index index)
+{
+ struct tpmi_uncore_cluster_info *cluster_info;
+ u64 ctrl;
+
+ cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data);
+ if (cluster_info->root_domain)
+ return -ENODATA;
+
+ if (!cluster_info->elc_supported)
+ return -EOPNOTSUPP;
+
+ ctrl = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX);
+
+ switch (index) {
+ case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD:
+ *val = FIELD_GET(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK, ctrl);
+ *val *= 100;
+ *val = DIV_ROUND_UP(*val, FIELD_MAX(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK));
+ break;
+
+ case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD:
+ *val = FIELD_GET(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK, ctrl);
+ *val *= 100;
+ *val = DIV_ROUND_UP(*val, FIELD_MAX(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK));
+ break;
+
+ case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE:
+ *val = FIELD_GET(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE, ctrl);
+ break;
+ case UNCORE_INDEX_EFF_LAT_CTRL_FREQ:
+ *val = FIELD_GET(UNCORE_EFF_LAT_CTRL_RATIO_MASK, ctrl) * UNCORE_FREQ_KHZ_MULTIPLIER;
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
}
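The threshold fields are 7-bit hardware values scaled to a 0-100 percent range; a worked round trip for the low threshold, with an illustrative 80 percent input:

/*
 * Illustrative numbers only:
 *   write path (below): 80 * FIELD_MAX(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK) / 100
 *                       = 80 * 127 / 100 = 101 (raw field value)
 *   read path (above):  DIV_ROUND_UP(101 * 100, 127) = 80 (percent)
 */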
-#define UNCORE_MAX_RATIO FIELD_MAX(UNCORE_GENMASK_MAX_RATIO)
+#define UNCORE_MAX_RATIO FIELD_MAX(UNCORE_MAX_RATIO_MASK)
-/* Callback for sysfs read for max/min frequencies. Called under mutex locks */
-static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min,
- unsigned int *max)
+/* Helper for sysfs read for max/min frequencies. Called under mutex locks */
+static int uncore_read_control_freq(struct uncore_data *data, unsigned int *value,
+ enum uncore_index index)
{
struct tpmi_uncore_cluster_info *cluster_info;
@@ -96,10 +149,11 @@ static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min,
if (cluster_info->root_domain) {
struct tpmi_uncore_struct *uncore_root = cluster_info->uncore_root;
- int i, _min = 0, _max = 0;
+ unsigned int min, max, v;
+ int i;
- *min = UNCORE_MAX_RATIO * UNCORE_FREQ_KHZ_MULTIPLIER;
- *max = 0;
+ min = UNCORE_MAX_RATIO * UNCORE_FREQ_KHZ_MULTIPLIER;
+ max = 0;
/*
* Get the max/min by looking at each cluster. Get the lowest
@@ -110,43 +164,125 @@ static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min,
for (j = 0; j < uncore_root->pd_info[i].cluster_count; ++j) {
read_control_freq(&uncore_root->pd_info[i].cluster_infos[j],
- &_min, &_max);
- if (*min > _min)
- *min = _min;
- if (*max < _max)
- *max = _max;
+ &v, index);
+ if (v < min)
+ min = v;
+ if (v > max)
+ max = v;
}
}
+
+ if (index == UNCORE_INDEX_MIN_FREQ)
+ *value = min;
+ else
+ *value = max;
+
return 0;
}
- read_control_freq(cluster_info, min, max);
+ read_control_freq(cluster_info, value, index);
+
+ return 0;
+}
+
+/* Helper function for writing efficiency latency control values over MMIO */
+static int write_eff_lat_ctrl(struct uncore_data *data, unsigned int val, enum uncore_index index)
+{
+ struct tpmi_uncore_cluster_info *cluster_info;
+ u64 control;
+
+ cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data);
+
+ if (cluster_info->root_domain)
+ return -ENODATA;
+
+ if (!cluster_info->elc_supported)
+ return -EOPNOTSUPP;
+
+ switch (index) {
+ case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD:
+ if (val > 100)
+ return -EINVAL;
+ break;
+
+ case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD:
+ if (val > 100)
+ return -EINVAL;
+ break;
+
+ case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE:
+ if (val > 1)
+ return -EINVAL;
+ break;
+
+ case UNCORE_INDEX_EFF_LAT_CTRL_FREQ:
+ val /= UNCORE_FREQ_KHZ_MULTIPLIER;
+ if (val > FIELD_MAX(UNCORE_EFF_LAT_CTRL_RATIO_MASK))
+ return -EINVAL;
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ control = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX);
+
+ switch (index) {
+ case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD:
+ val *= FIELD_MAX(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK);
+ val /= 100;
+ control &= ~UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK;
+ control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK, val);
+ break;
+
+ case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD:
+ val *= FIELD_MAX(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK);
+ val /= 100;
+ control &= ~UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK;
+ control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK, val);
+ break;
+
+ case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE:
+ control &= ~UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE;
+ control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE, val);
+ break;
+
+ case UNCORE_INDEX_EFF_LAT_CTRL_FREQ:
+ control &= ~UNCORE_EFF_LAT_CTRL_RATIO_MASK;
+ control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_RATIO_MASK, val);
+ break;
+
+ default:
+ break;
+ }
+
+ writeq(control, cluster_info->cluster_base + UNCORE_CONTROL_INDEX);
return 0;
}
/* Helper function to write MMIO offset for max/min control frequency */
static void write_control_freq(struct tpmi_uncore_cluster_info *cluster_info, unsigned int input,
- unsigned int min_max)
+ unsigned int index)
{
u64 control;
control = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX);
- if (min_max) {
- control &= ~UNCORE_GENMASK_MAX_RATIO;
- control |= FIELD_PREP(UNCORE_GENMASK_MAX_RATIO, input);
+ if (index == UNCORE_INDEX_MAX_FREQ) {
+ control &= ~UNCORE_MAX_RATIO_MASK;
+ control |= FIELD_PREP(UNCORE_MAX_RATIO_MASK, input);
} else {
- control &= ~UNCORE_GENMASK_MIN_RATIO;
- control |= FIELD_PREP(UNCORE_GENMASK_MIN_RATIO, input);
+ control &= ~UNCORE_MIN_RATIO_MASK;
+ control |= FIELD_PREP(UNCORE_MIN_RATIO_MASK, input);
}
writeq(control, (cluster_info->cluster_base + UNCORE_CONTROL_INDEX));
}
-/* Callback for sysfs write for max/min frequencies. Called under mutex locks */
+/* Helper for sysfs write for max/min frequencies. Called under mutex locks */
static int uncore_write_control_freq(struct uncore_data *data, unsigned int input,
- unsigned int min_max)
+ enum uncore_index index)
{
struct tpmi_uncore_cluster_info *cluster_info;
struct tpmi_uncore_struct *uncore_root;
@@ -171,10 +307,10 @@ static int uncore_write_control_freq(struct uncore_data *data, unsigned int inpu
for (j = 0; j < uncore_root->pd_info[i].cluster_count; ++j)
write_control_freq(&uncore_root->pd_info[i].cluster_infos[j],
- input, min_max);
+ input, index);
}
- if (min_max)
+ if (index == UNCORE_INDEX_MAX_FREQ)
uncore_root->max_ratio = input;
else
uncore_root->min_ratio = input;
@@ -182,18 +318,20 @@ static int uncore_write_control_freq(struct uncore_data *data, unsigned int inpu
return 0;
}
- if (min_max && uncore_root->max_ratio && uncore_root->max_ratio < input)
+ if (index == UNCORE_INDEX_MAX_FREQ && uncore_root->max_ratio &&
+ uncore_root->max_ratio < input)
return -EINVAL;
- if (!min_max && uncore_root->min_ratio && uncore_root->min_ratio > input)
+ if (index == UNCORE_INDEX_MIN_FREQ && uncore_root->min_ratio &&
+ uncore_root->min_ratio > input)
return -EINVAL;
- write_control_freq(cluster_info, input, min_max);
+ write_control_freq(cluster_info, input, index);
return 0;
}
-/* Callback for sysfs read for the current uncore frequency. Called under mutex locks */
+/* Helper for sysfs read for the current uncore frequency. Called under mutex locks */
static int uncore_read_freq(struct uncore_data *data, unsigned int *freq)
{
struct tpmi_uncore_cluster_info *cluster_info;
@@ -204,11 +342,56 @@ static int uncore_read_freq(struct uncore_data *data, unsigned int *freq)
return -ENODATA;
status = readq((u8 __iomem *)cluster_info->cluster_base + UNCORE_STATUS_INDEX);
- *freq = FIELD_GET(UNCORE_GENMASK_CURRENT_RATIO, status) * UNCORE_FREQ_KHZ_MULTIPLIER;
+ *freq = FIELD_GET(UNCORE_CURRENT_RATIO_MASK, status) * UNCORE_FREQ_KHZ_MULTIPLIER;
return 0;
}
+/* Callback for sysfs read for TPMI uncore values. Called under mutex locks. */
+static int uncore_read(struct uncore_data *data, unsigned int *value, enum uncore_index index)
+{
+ switch (index) {
+ case UNCORE_INDEX_MIN_FREQ:
+ case UNCORE_INDEX_MAX_FREQ:
+ return uncore_read_control_freq(data, value, index);
+
+ case UNCORE_INDEX_CURRENT_FREQ:
+ return uncore_read_freq(data, value);
+
+ case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD:
+ case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD:
+ case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE:
+ case UNCORE_INDEX_EFF_LAT_CTRL_FREQ:
+ return read_eff_lat_ctrl(data, value, index);
+
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+/* Callback for sysfs write for TPMI uncore data. Called under mutex locks. */
+static int uncore_write(struct uncore_data *data, unsigned int value, enum uncore_index index)
+{
+ switch (index) {
+ case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD:
+ case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD:
+ case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE:
+ case UNCORE_INDEX_EFF_LAT_CTRL_FREQ:
+ return write_eff_lat_ctrl(data, value, index);
+
+ case UNCORE_INDEX_MIN_FREQ:
+ case UNCORE_INDEX_MAX_FREQ:
+ return uncore_write_control_freq(data, value, index);
+
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
static void remove_cluster_entries(struct tpmi_uncore_struct *tpmi_uncore)
{
int i;
@@ -240,6 +423,7 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_
bool read_blocked = 0, write_blocked = 0;
struct intel_tpmi_plat_info *plat_info;
struct tpmi_uncore_struct *tpmi_uncore;
+ bool uncore_sysfs_added = false;
int ret, i, pkg = 0;
int num_resources;
@@ -258,8 +442,7 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_
return -EINVAL;
/* Register callbacks to uncore core */
- ret = uncore_freq_common_init(uncore_read_control_freq, uncore_write_control_freq,
- uncore_read_freq);
+ ret = uncore_freq_common_init(uncore_read, uncore_write);
if (ret)
return ret;
@@ -329,7 +512,7 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_
goto remove_clusters;
}
- if (TPMI_MINOR_VERSION(pd_info->ufs_header_ver) != UNCORE_MINOR_VERSION)
+ if (TPMI_MINOR_VERSION(pd_info->ufs_header_ver) > UNCORE_MINOR_VERSION)
dev_info(&auxdev->dev, "Uncore: Ignore: Unsupported minor version:%lx\n",
TPMI_MINOR_VERSION(pd_info->ufs_header_ver));
@@ -377,6 +560,9 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_
cluster_info->uncore_root = tpmi_uncore;
+ if (TPMI_MINOR_VERSION(pd_info->ufs_header_ver) >= UNCORE_ELC_SUPPORTED_VERSION)
+ cluster_info->elc_supported = true;
+
ret = uncore_freq_add_entry(&cluster_info->uncore_data, 0);
if (ret) {
cluster_info->cluster_base = NULL;
@@ -384,11 +570,20 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_
}
/* Point to next cluster offset */
cluster_offset >>= UNCORE_MAX_CLUSTER_PER_DOMAIN;
+ uncore_sysfs_added = true;
}
}
+ if (!uncore_sysfs_added) {
+ ret = -ENODEV;
+ goto remove_clusters;
+ }
+
auxiliary_set_drvdata(auxdev, tpmi_uncore);
+ if (topology_max_dies_per_package() > 1)
+ return 0;
+
tpmi_uncore->root_cluster.root_domain = true;
tpmi_uncore->root_cluster.uncore_root = tpmi_uncore;
@@ -412,7 +607,9 @@ static void uncore_remove(struct auxiliary_device *auxdev)
{
struct tpmi_uncore_struct *tpmi_uncore = auxiliary_get_drvdata(auxdev);
- uncore_freq_remove_die_entry(&tpmi_uncore->root_cluster.uncore_data);
+ if (tpmi_uncore->root_cluster.root_domain)
+ uncore_freq_remove_die_entry(&tpmi_uncore->root_cluster.uncore_data);
+
remove_cluster_entries(tpmi_uncore);
uncore_freq_common_exit();
@@ -432,7 +629,7 @@ static struct auxiliary_driver intel_uncore_aux_driver = {
module_auxiliary_driver(intel_uncore_aux_driver);
-MODULE_IMPORT_NS(INTEL_TPMI);
-MODULE_IMPORT_NS(INTEL_UNCORE_FREQUENCY);
+MODULE_IMPORT_NS("INTEL_TPMI");
+MODULE_IMPORT_NS("INTEL_UNCORE_FREQUENCY");
MODULE_DESCRIPTION("Intel TPMI UFS Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
index a5e0f5c22179..40bbf8e45fa4 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
@@ -14,6 +14,7 @@
* Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
*/
+#include <linux/bitfield.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -36,8 +37,13 @@ static enum cpuhp_state uncore_hp_state __read_mostly;
#define MSR_UNCORE_PERF_STATUS 0x621
#define UNCORE_FREQ_KHZ_MULTIPLIER 100000
-static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min,
- unsigned int *max)
+#define UNCORE_MAX_RATIO_MASK GENMASK_ULL(6, 0)
+#define UNCORE_MIN_RATIO_MASK GENMASK_ULL(14, 8)
+
+#define UNCORE_CURRENT_RATIO_MASK GENMASK_ULL(6, 0)
+
+static int uncore_read_control_freq(struct uncore_data *data, unsigned int *value,
+ enum uncore_index index)
{
u64 cap;
int ret;
@@ -49,20 +55,22 @@ static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min,
if (ret)
return ret;
- *max = (cap & 0x7F) * UNCORE_FREQ_KHZ_MULTIPLIER;
- *min = ((cap & GENMASK(14, 8)) >> 8) * UNCORE_FREQ_KHZ_MULTIPLIER;
+ if (index == UNCORE_INDEX_MAX_FREQ)
+ *value = FIELD_GET(UNCORE_MAX_RATIO_MASK, cap) * UNCORE_FREQ_KHZ_MULTIPLIER;
+ else
+ *value = FIELD_GET(UNCORE_MIN_RATIO_MASK, cap) * UNCORE_FREQ_KHZ_MULTIPLIER;
return 0;
}
static int uncore_write_control_freq(struct uncore_data *data, unsigned int input,
- unsigned int min_max)
+ enum uncore_index index)
{
int ret;
u64 cap;
input /= UNCORE_FREQ_KHZ_MULTIPLIER;
- if (!input || input > 0x7F)
+ if (!input || input > FIELD_MAX(UNCORE_MAX_RATIO_MASK))
return -EINVAL;
if (data->control_cpu < 0)
@@ -72,12 +80,12 @@ static int uncore_write_control_freq(struct uncore_data *data, unsigned int inpu
if (ret)
return ret;
- if (min_max) {
- cap &= ~0x7F;
- cap |= input;
+ if (index == UNCORE_INDEX_MAX_FREQ) {
+ cap &= ~UNCORE_MAX_RATIO_MASK;
+ cap |= FIELD_PREP(UNCORE_MAX_RATIO_MASK, input);
} else {
- cap &= ~GENMASK(14, 8);
- cap |= (input << 8);
+ cap &= ~UNCORE_MIN_RATIO_MASK;
+ cap |= FIELD_PREP(UNCORE_MIN_RATIO_MASK, input);
}
ret = wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, cap);
@@ -101,11 +109,28 @@ static int uncore_read_freq(struct uncore_data *data, unsigned int *freq)
if (ret)
return ret;
- *freq = (ratio & 0x7F) * UNCORE_FREQ_KHZ_MULTIPLIER;
+ *freq = FIELD_GET(UNCORE_CURRENT_RATIO_MASK, ratio) * UNCORE_FREQ_KHZ_MULTIPLIER;
return 0;
}
+static int uncore_read(struct uncore_data *data, unsigned int *value, enum uncore_index index)
+{
+ switch (index) {
+ case UNCORE_INDEX_MIN_FREQ:
+ case UNCORE_INDEX_MAX_FREQ:
+ return uncore_read_control_freq(data, value, index);
+
+ case UNCORE_INDEX_CURRENT_FREQ:
+ return uncore_read_freq(data, value);
+
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
/* Caller provides protection */
static struct uncore_data *uncore_get_instance(unsigned int cpu)
{
@@ -197,34 +222,34 @@ static struct notifier_block uncore_pm_nb = {
};
static const struct x86_cpu_id intel_uncore_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ARROWLAKE, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ARROWLAKE_H, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(LUNARLAKE_M, NULL),
+ X86_MATCH_VFM(INTEL_BROADWELL_G, NULL),
+ X86_MATCH_VFM(INTEL_BROADWELL_X, NULL),
+ X86_MATCH_VFM(INTEL_BROADWELL_D, NULL),
+ X86_MATCH_VFM(INTEL_SKYLAKE_X, NULL),
+ X86_MATCH_VFM(INTEL_ICELAKE_X, NULL),
+ X86_MATCH_VFM(INTEL_ICELAKE_D, NULL),
+ X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, NULL),
+ X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, NULL),
+ X86_MATCH_VFM(INTEL_KABYLAKE, NULL),
+ X86_MATCH_VFM(INTEL_KABYLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_COMETLAKE, NULL),
+ X86_MATCH_VFM(INTEL_COMETLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_CANNONLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_ICELAKE, NULL),
+ X86_MATCH_VFM(INTEL_ICELAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_ROCKETLAKE, NULL),
+ X86_MATCH_VFM(INTEL_TIGERLAKE, NULL),
+ X86_MATCH_VFM(INTEL_TIGERLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_ALDERLAKE, NULL),
+ X86_MATCH_VFM(INTEL_ALDERLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_P, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_S, NULL),
+ X86_MATCH_VFM(INTEL_METEORLAKE, NULL),
+ X86_MATCH_VFM(INTEL_METEORLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_ARROWLAKE, NULL),
+ X86_MATCH_VFM(INTEL_ARROWLAKE_H, NULL),
+ X86_MATCH_VFM(INTEL_LUNARLAKE_M, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_uncore_cpu_ids);
@@ -242,14 +267,13 @@ static int __init intel_uncore_init(void)
return -ENODEV;
uncore_max_entries = topology_max_packages() *
- topology_max_die_per_package();
+ topology_max_dies_per_package();
uncore_instances = kcalloc(uncore_max_entries,
sizeof(*uncore_instances), GFP_KERNEL);
if (!uncore_instances)
return -ENOMEM;
- ret = uncore_freq_common_init(uncore_read_control_freq, uncore_write_control_freq,
- uncore_read_freq);
+ ret = uncore_freq_common_init(uncore_read, uncore_write_control_freq);
if (ret)
goto err_free;
@@ -292,6 +316,6 @@ static void __exit intel_uncore_exit(void)
}
module_exit(intel_uncore_exit)
-MODULE_IMPORT_NS(INTEL_UNCORE_FREQUENCY);
+MODULE_IMPORT_NS("INTEL_UNCORE_FREQUENCY");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel Uncore Frequency Limits Driver");
diff --git a/drivers/platform/x86/intel/vbtn.c b/drivers/platform/x86/intel/vbtn.c
index 084c355c86f5..232cd12e3c9f 100644
--- a/drivers/platform/x86/intel/vbtn.c
+++ b/drivers/platform/x86/intel/vbtn.c
@@ -7,11 +7,13 @@
*/
#include <linux/acpi.h>
+#include <linux/cleanup.h>
#include <linux/dmi.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/suspend.h>
#include "../dual_accel_detect.h"
@@ -24,6 +26,7 @@
#define VGBS_TABLET_MODE_FLAGS (VGBS_TABLET_MODE_FLAG | VGBS_TABLET_MODE_FLAG_ALT)
+MODULE_DESCRIPTION("Intel Virtual Button driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("AceLan Kao");
@@ -65,6 +68,7 @@ static const struct key_entry intel_vbtn_switchmap[] = {
};
struct intel_vbtn_priv {
+ struct mutex mutex; /* Avoid notify_handler() racing with itself */
struct input_dev *buttons_dev;
struct input_dev *switches_dev;
bool dual_accel;
@@ -136,8 +140,6 @@ static int intel_vbtn_input_setup(struct platform_device *device)
priv->switches_dev->id.bustype = BUS_HOST;
if (priv->has_switches) {
- detect_tablet_mode(&device->dev);
-
ret = input_register_device(priv->switches_dev);
if (ret)
return ret;
@@ -156,9 +158,12 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
bool autorelease;
int ret;
+ guard(mutex)(&priv->mutex);
+
if ((ke = sparse_keymap_entry_from_scancode(priv->buttons_dev, event))) {
if (!priv->has_buttons) {
- dev_warn(&device->dev, "Warning: received a button event on a device without buttons, please report this.\n");
+ dev_warn(&device->dev, "Warning: received 0x%02x button event on a device without buttons, please report this.\n",
+ event);
return;
}
input_dev = priv->buttons_dev;
@@ -258,9 +263,6 @@ static const struct dmi_system_id dmi_switches_allow_list[] = {
static bool intel_vbtn_has_switches(acpi_handle handle, bool dual_accel)
{
- unsigned long long vgbs;
- acpi_status status;
-
/* See dual_accel_detect.h for more info */
if (dual_accel)
return false;
@@ -268,8 +270,7 @@ static bool intel_vbtn_has_switches(acpi_handle handle, bool dual_accel)
if (!dmi_check_system(dmi_switches_allow_list))
return false;
- status = acpi_evaluate_integer(handle, "VGBS", NULL, &vgbs);
- return ACPI_SUCCESS(status);
+ return acpi_has_method(handle, "VGBS");
}
static int intel_vbtn_probe(struct platform_device *device)
@@ -294,6 +295,10 @@ static int intel_vbtn_probe(struct platform_device *device)
return -ENOMEM;
dev_set_drvdata(&device->dev, priv);
+ err = devm_mutex_init(&device->dev, &priv->mutex);
+ if (err)
+ return err;
+
priv->dual_accel = dual_accel;
priv->has_buttons = has_buttons;
priv->has_switches = has_switches;
@@ -316,6 +321,9 @@ static int intel_vbtn_probe(struct platform_device *device)
if (ACPI_FAILURE(status))
dev_err(&device->dev, "Error VBDL failed with ACPI status %d\n", status);
}
+ // Check switches after buttons since VBDL may have side effects.
+ if (has_switches)
+ detect_tablet_mode(&device->dev);
device_init_wakeup(&device->dev, true);
/*
@@ -379,7 +387,7 @@ static struct platform_driver intel_vbtn_pl_driver = {
.pm = &intel_vbtn_pm_ops,
},
.probe = intel_vbtn_probe,
- .remove_new = intel_vbtn_remove,
+ .remove = intel_vbtn_remove,
};
static acpi_status __init
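The notify_handler() changes above rely on the scope-based lock guard from <linux/cleanup.h> together with a devm-managed mutex. A minimal sketch of that locking pattern on its own, with illustrative names:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);

static void example_handler(void)
{
	/* Acquired here, released automatically when the scope is left. */
	guard(mutex)(&example_lock);

	/* ... body runs with example_lock held, on every return path ... */
}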
diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c
index 778eb0aa3479..db3c031d1757 100644
--- a/drivers/platform/x86/intel/vsec.c
+++ b/drivers/platform/x86/intel/vsec.c
@@ -17,14 +17,13 @@
#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/delay.h>
-#include <linux/kernel.h>
#include <linux/idr.h>
+#include <linux/intel_vsec.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/types.h>
-#include "vsec.h"
-
#define PMT_XA_START 0
#define PMT_XA_MAX INT_MAX
#define PMT_XA_LIMIT XA_LIMIT(PMT_XA_START, PMT_XA_MAX)
@@ -80,17 +79,13 @@ static void intel_vsec_remove_aux(void *data)
auxiliary_device_uninit(data);
}
-static DEFINE_MUTEX(vsec_ida_lock);
-
static void intel_vsec_dev_release(struct device *dev)
{
struct intel_vsec_device *intel_vsec_dev = dev_to_ivdev(dev);
xa_erase(&auxdev_array, intel_vsec_dev->id);
- mutex_lock(&vsec_ida_lock);
ida_free(intel_vsec_dev->ida, intel_vsec_dev->auxdev.id);
- mutex_unlock(&vsec_ida_lock);
kfree(intel_vsec_dev->resource);
kfree(intel_vsec_dev);
@@ -114,9 +109,7 @@ int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent,
return ret;
}
- mutex_lock(&vsec_ida_lock);
id = ida_alloc(intel_vsec_dev->ida, GFP_KERNEL);
- mutex_unlock(&vsec_ida_lock);
if (id < 0) {
xa_erase(&auxdev_array, intel_vsec_dev->id);
kfree(intel_vsec_dev->resource);
@@ -144,7 +137,7 @@ int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent,
return devm_add_action_or_reset(parent, intel_vsec_remove_aux,
auxdev);
}
-EXPORT_SYMBOL_NS_GPL(intel_vsec_add_aux, INTEL_VSEC);
+EXPORT_SYMBOL_NS_GPL(intel_vsec_add_aux, "INTEL_VSEC");
static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *header,
struct intel_vsec_platform_info *info)
@@ -213,6 +206,7 @@ static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *he
intel_vsec_dev->num_resources = header->num_entries;
intel_vsec_dev->quirks = info->quirks;
intel_vsec_dev->base_addr = info->base_addr;
+ intel_vsec_dev->priv_data = info->priv_data;
if (header->id == VSEC_ID_SDSI)
intel_vsec_dev->ida = &intel_vsec_sdsi_ida;
@@ -236,10 +230,7 @@ static bool intel_vsec_walk_header(struct pci_dev *pdev,
for ( ; *header; header++) {
ret = intel_vsec_add_dev(pdev, *header, info);
- if (ret)
- dev_info(&pdev->dev, "Could not add device for VSEC id %d\n",
- (*header)->id);
- else
+ if (!ret)
have_devices = true;
}
@@ -344,12 +335,12 @@ static bool intel_vsec_walk_vsec(struct pci_dev *pdev,
void intel_vsec_register(struct pci_dev *pdev,
struct intel_vsec_platform_info *info)
{
- if (!pdev || !info)
+ if (!pdev || !info || !info->headers)
return;
intel_vsec_walk_header(pdev, info);
}
-EXPORT_SYMBOL_NS_GPL(intel_vsec_register, INTEL_VSEC);
+EXPORT_SYMBOL_NS_GPL(intel_vsec_register, "INTEL_VSEC");
static int intel_vsec_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
@@ -413,6 +404,11 @@ static const struct intel_vsec_platform_info oobmsm_info = {
.caps = VSEC_CAP_TELEMETRY | VSEC_CAP_SDSI | VSEC_CAP_TPMI,
};
+/* DMR OOBMSM info */
+static const struct intel_vsec_platform_info dmr_oobmsm_info = {
+ .caps = VSEC_CAP_TELEMETRY | VSEC_CAP_TPMI,
+};
+
/* TGL info */
static const struct intel_vsec_platform_info tgl_info = {
.caps = VSEC_CAP_TELEMETRY,
@@ -429,18 +425,22 @@ static const struct intel_vsec_platform_info lnl_info = {
#define PCI_DEVICE_ID_INTEL_VSEC_MTL_M 0x7d0d
#define PCI_DEVICE_ID_INTEL_VSEC_MTL_S 0xad0d
#define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM 0x09a7
+#define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM_DMR 0x09a1
#define PCI_DEVICE_ID_INTEL_VSEC_RPL 0xa77d
#define PCI_DEVICE_ID_INTEL_VSEC_TGL 0x9a0d
#define PCI_DEVICE_ID_INTEL_VSEC_LNL_M 0x647d
+#define PCI_DEVICE_ID_INTEL_VSEC_PTL 0xb07d
static const struct pci_device_id intel_vsec_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, VSEC_ADL, &tgl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_DG1, &dg1_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_MTL_M, &mtl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_MTL_S, &mtl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM, &oobmsm_info) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM_DMR, &dmr_oobmsm_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_RPL, &tgl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_TGL, &tgl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_LNL_M, &lnl_info) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_PTL, &mtl_info) },
{ }
};
MODULE_DEVICE_TABLE(pci, intel_vsec_pci_ids);
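intel_vsec_register() now also bails out when the caller omits the header list. A rough sketch of a registration from outside this series, assuming the IDs and struct layout carried over unchanged to <linux/intel_vsec.h> (all values illustrative):

#include <linux/intel_vsec.h>
#include <linux/pci.h>

static struct intel_vsec_header example_hdr = {
	.id = VSEC_ID_TELEMETRY,
	.rev = 1,
	.num_entries = 1,
	.entry_size = 4,
	.tbir = 0,
	.offset = 0x1000,
};

static struct intel_vsec_header *example_headers[] = { &example_hdr, NULL };

static struct intel_vsec_platform_info example_info = {
	.caps = VSEC_CAP_TELEMETRY,
	.headers = example_headers,
};

/* From some other PCI driver's probe(): intel_vsec_register(pdev, &example_info); */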
diff --git a/drivers/platform/x86/intel/vsec.h b/drivers/platform/x86/intel/vsec.h
deleted file mode 100644
index e23e76129691..000000000000
--- a/drivers/platform/x86/intel/vsec.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _VSEC_H
-#define _VSEC_H
-
-#include <linux/auxiliary_bus.h>
-#include <linux/bits.h>
-
-#define VSEC_CAP_TELEMETRY BIT(0)
-#define VSEC_CAP_WATCHER BIT(1)
-#define VSEC_CAP_CRASHLOG BIT(2)
-#define VSEC_CAP_SDSI BIT(3)
-#define VSEC_CAP_TPMI BIT(4)
-
-/* Intel DVSEC offsets */
-#define INTEL_DVSEC_ENTRIES 0xA
-#define INTEL_DVSEC_SIZE 0xB
-#define INTEL_DVSEC_TABLE 0xC
-#define INTEL_DVSEC_TABLE_BAR(x) ((x) & GENMASK(2, 0))
-#define INTEL_DVSEC_TABLE_OFFSET(x) ((x) & GENMASK(31, 3))
-#define TABLE_OFFSET_SHIFT 3
-
-struct pci_dev;
-struct resource;
-
-enum intel_vsec_id {
- VSEC_ID_TELEMETRY = 2,
- VSEC_ID_WATCHER = 3,
- VSEC_ID_CRASHLOG = 4,
- VSEC_ID_SDSI = 65,
- VSEC_ID_TPMI = 66,
-};
-
-/**
- * struct intel_vsec_header - Common fields of Intel VSEC and DVSEC registers.
- * @rev: Revision ID of the VSEC/DVSEC register space
- * @length: Length of the VSEC/DVSEC register space
- * @id: ID of the feature
- * @num_entries: Number of instances of the feature
- * @entry_size: Size of the discovery table for each feature
- * @tbir: BAR containing the discovery tables
- * @offset: BAR offset of start of the first discovery table
- */
-struct intel_vsec_header {
- u8 rev;
- u16 length;
- u16 id;
- u8 num_entries;
- u8 entry_size;
- u8 tbir;
- u32 offset;
-};
-
-enum intel_vsec_quirks {
- /* Watcher feature not supported */
- VSEC_QUIRK_NO_WATCHER = BIT(0),
-
- /* Crashlog feature not supported */
- VSEC_QUIRK_NO_CRASHLOG = BIT(1),
-
- /* Use shift instead of mask to read discovery table offset */
- VSEC_QUIRK_TABLE_SHIFT = BIT(2),
-
- /* DVSEC not present (provided in driver data) */
- VSEC_QUIRK_NO_DVSEC = BIT(3),
-
- /* Platforms requiring quirk in the auxiliary driver */
- VSEC_QUIRK_EARLY_HW = BIT(4),
-};
-
-/* Platform specific data */
-struct intel_vsec_platform_info {
- struct device *parent;
- struct intel_vsec_header **headers;
- unsigned long caps;
- unsigned long quirks;
- u64 base_addr;
-};
-
-struct intel_vsec_device {
- struct auxiliary_device auxdev;
- struct pci_dev *pcidev;
- struct resource *resource;
- struct ida *ida;
- int num_resources;
- int id; /* xa */
- void *priv_data;
- size_t priv_data_size;
- unsigned long quirks;
- u64 base_addr;
-};
-
-int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent,
- struct intel_vsec_device *intel_vsec_dev,
- const char *name);
-
-static inline struct intel_vsec_device *dev_to_ivdev(struct device *dev)
-{
- return container_of(dev, struct intel_vsec_device, auxdev.dev);
-}
-
-static inline struct intel_vsec_device *auxdev_to_ivdev(struct auxiliary_device *auxdev)
-{
- return container_of(auxdev, struct intel_vsec_device, auxdev);
-}
-
-void intel_vsec_register(struct pci_dev *pdev,
- struct intel_vsec_platform_info *info);
-#endif
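With vsec.h removed, the same definitions are consumed from <linux/intel_vsec.h> (see the includes added in vsec.c and vsec_tpmi.c). A minimal sketch, using hypothetical names, of an auxiliary driver reaching its parent's intel_vsec_device after the move:

#include <linux/auxiliary_bus.h>
#include <linux/errno.h>
#include <linux/intel_vsec.h>

static int example_aux_probe(struct auxiliary_device *auxdev,
			     const struct auxiliary_device_id *id)
{
	struct intel_vsec_device *ivdev = auxdev_to_ivdev(auxdev);

	/* The VSEC core filled one resource per discovery-table entry. */
	if (!ivdev->num_resources)
		return -ENODEV;

	return 0;
}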
diff --git a/drivers/platform/x86/intel/tpmi.c b/drivers/platform/x86/intel/vsec_tpmi.c
index e73cdea67fff..5c383a27bbe8 100644
--- a/drivers/platform/x86/intel/tpmi.c
+++ b/drivers/platform/x86/intel/vsec_tpmi.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * intel-tpmi : Driver to enumerate TPMI features and create devices
+ * Driver to enumerate TPMI features and create devices
*
* Copyright (c) 2023, Intel Corporation.
* All Rights Reserved.
@@ -51,6 +51,7 @@
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/intel_tpmi.h>
+#include <linux/intel_vsec.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
@@ -59,8 +60,6 @@
#include <linux/sizes.h>
#include <linux/string_helpers.h>
-#include "vsec.h"
-
/**
* struct intel_tpmi_pfs_entry - TPMI PM Feature Structure (PFS) entry
* @tpmi_id: TPMI feature identifier (what the feature is and its data format).
@@ -96,7 +95,7 @@ struct intel_tpmi_pfs_entry {
*/
struct intel_tpmi_pm_feature {
struct intel_tpmi_pfs_entry pfs_header;
- unsigned int vsec_offset;
+ u64 vsec_offset;
struct intel_vsec_device *vsec_dev;
};
@@ -128,6 +127,9 @@ struct intel_tpmi_info {
* @dev: PCI device number
* @bus: PCI bus number
* @pkg: CPU Package id
+ * @segment: PCI segment id
+ * @partition: Package Partition id
+ * @cdie_mask: Bitmap of compute dies in the current partition
* @reserved: Reserved for future use
* @lock: When set to 1 the register is locked and becomes read-only
* until next reset. Not for use by the OS driver.
@@ -139,7 +141,10 @@ struct tpmi_info_header {
u64 dev:5;
u64 bus:8;
u64 pkg:8;
- u64 reserved:39;
+ u64 segment:8;
+ u64 partition:2;
+ u64 cdie_mask:16;
+ u64 reserved:13;
u64 lock:1;
} __packed;
@@ -188,7 +193,7 @@ struct intel_tpmi_plat_info *tpmi_get_platform_data(struct auxiliary_device *aux
return vsec_dev->priv_data;
}
-EXPORT_SYMBOL_NS_GPL(tpmi_get_platform_data, INTEL_TPMI);
+EXPORT_SYMBOL_NS_GPL(tpmi_get_platform_data, "INTEL_TPMI");
int tpmi_get_resource_count(struct auxiliary_device *auxdev)
{
@@ -199,7 +204,7 @@ int tpmi_get_resource_count(struct auxiliary_device *auxdev)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(tpmi_get_resource_count, INTEL_TPMI);
+EXPORT_SYMBOL_NS_GPL(tpmi_get_resource_count, "INTEL_TPMI");
struct resource *tpmi_get_resource_at_index(struct auxiliary_device *auxdev, int index)
{
@@ -210,7 +215,7 @@ struct resource *tpmi_get_resource_at_index(struct auxiliary_device *auxdev, int
return NULL;
}
-EXPORT_SYMBOL_NS_GPL(tpmi_get_resource_at_index, INTEL_TPMI);
+EXPORT_SYMBOL_NS_GPL(tpmi_get_resource_at_index, "INTEL_TPMI");
/* TPMI Control Interface */
@@ -349,7 +354,16 @@ int tpmi_get_feature_status(struct auxiliary_device *auxdev,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(tpmi_get_feature_status, INTEL_TPMI);
+EXPORT_SYMBOL_NS_GPL(tpmi_get_feature_status, "INTEL_TPMI");
+
+struct dentry *tpmi_get_debugfs_dir(struct auxiliary_device *auxdev)
+{
+ struct intel_vsec_device *intel_vsec_dev = dev_to_ivdev(auxdev->dev.parent);
+ struct intel_tpmi_info *tpmi_info = auxiliary_get_drvdata(&intel_vsec_dev->auxdev);
+
+ return tpmi_info->dbgfs_dir;
+}
+EXPORT_SYMBOL_NS_GPL(tpmi_get_debugfs_dir, "INTEL_TPMI");
static int tpmi_pfs_dbg_show(struct seq_file *s, void *unused)
{
@@ -376,7 +390,7 @@ static int tpmi_pfs_dbg_show(struct seq_file *s, void *unused)
read_blocked = feature_state.read_blocked ? 'Y' : 'N';
write_blocked = feature_state.write_blocked ? 'Y' : 'N';
}
- seq_printf(s, "0x%02x\t\t0x%02x\t\t0x%04x\t\t0x%04x\t\t0x%02x\t\t0x%08x\t%c\t%c\t\t%c\t\t%c\n",
+ seq_printf(s, "0x%02x\t\t0x%02x\t\t0x%04x\t\t0x%04x\t\t0x%02x\t\t0x%016llx\t%c\t%c\t\t%c\t\t%c\n",
pfs->pfs_header.tpmi_id, pfs->pfs_header.num_entries,
pfs->pfs_header.entry_size, pfs->pfs_header.cap_offset,
pfs->pfs_header.attribute, pfs->vsec_offset, locked, disabled,
@@ -395,7 +409,8 @@ static int tpmi_mem_dump_show(struct seq_file *s, void *unused)
struct intel_tpmi_pm_feature *pfs = s->private;
int count, ret = 0;
void __iomem *mem;
- u32 off, size;
+ u32 size;
+ u64 off;
u8 *buffer;
size = TPMI_GET_SINGLE_ENTRY_SIZE(pfs);
@@ -411,7 +426,7 @@ static int tpmi_mem_dump_show(struct seq_file *s, void *unused)
mutex_lock(&tpmi_dev_lock);
for (count = 0; count < pfs->pfs_header.num_entries; ++count) {
- seq_printf(s, "TPMI Instance:%d offset:0x%x\n", count, off);
+ seq_printf(s, "TPMI Instance:%d offset:0x%llx\n", count, off);
mem = ioremap(off, size);
if (!mem) {
@@ -570,6 +585,8 @@ static const char *intel_tpmi_name(enum intel_tpmi_id id)
return "uncore";
case TPMI_ID_SST:
return "sst";
+ case TPMI_ID_PLR:
+ return "plr";
default:
return NULL;
}
@@ -665,28 +682,44 @@ static int tpmi_create_devices(struct intel_tpmi_info *tpmi_info)
}
#define TPMI_INFO_BUS_INFO_OFFSET 0x08
+#define TPMI_INFO_MAJOR_VERSION 0x00
+#define TPMI_INFO_MINOR_VERSION 0x02
static int tpmi_process_info(struct intel_tpmi_info *tpmi_info,
struct intel_tpmi_pm_feature *pfs)
{
struct tpmi_info_header header;
void __iomem *info_mem;
+ u64 feature_header;
+ int ret = 0;
- info_mem = ioremap(pfs->vsec_offset + TPMI_INFO_BUS_INFO_OFFSET,
- pfs->pfs_header.entry_size * sizeof(u32) - TPMI_INFO_BUS_INFO_OFFSET);
+ info_mem = ioremap(pfs->vsec_offset, pfs->pfs_header.entry_size * sizeof(u32));
if (!info_mem)
return -ENOMEM;
- memcpy_fromio(&header, info_mem, sizeof(header));
+ feature_header = readq(info_mem);
+ if (TPMI_MAJOR_VERSION(feature_header) != TPMI_INFO_MAJOR_VERSION) {
+ ret = -ENODEV;
+ goto error_info_header;
+ }
+
+ memcpy_fromio(&header, info_mem + TPMI_INFO_BUS_INFO_OFFSET, sizeof(header));
tpmi_info->plat_info.package_id = header.pkg;
tpmi_info->plat_info.bus_number = header.bus;
tpmi_info->plat_info.device_number = header.dev;
tpmi_info->plat_info.function_number = header.fn;
+ if (TPMI_MINOR_VERSION(feature_header) >= TPMI_INFO_MINOR_VERSION) {
+ tpmi_info->plat_info.cdie_mask = header.cdie_mask;
+ tpmi_info->plat_info.partition = header.partition;
+ tpmi_info->plat_info.segment = header.segment;
+ }
+
+error_info_header:
iounmap(info_mem);
- return 0;
+ return ret;
}
static int tpmi_fetch_pfs_header(struct intel_tpmi_pm_feature *pfs, u64 start, int size)
@@ -762,8 +795,11 @@ static int intel_vsec_tpmi_init(struct auxiliary_device *auxdev)
* when actual device nodes created outside this
* loop via tpmi_create_devices().
*/
- if (pfs->pfs_header.tpmi_id == TPMI_INFO_ID)
- tpmi_process_info(tpmi_info, pfs);
+ if (pfs->pfs_header.tpmi_id == TPMI_INFO_ID) {
+ ret = tpmi_process_info(tpmi_info, pfs);
+ if (ret)
+ return ret;
+ }
if (pfs->pfs_header.tpmi_id == TPMI_CONTROL_ID)
tpmi_set_control_base(auxdev, tpmi_info, pfs);
@@ -816,6 +852,6 @@ static struct auxiliary_driver tpmi_aux_driver = {
module_auxiliary_driver(tpmi_aux_driver);
-MODULE_IMPORT_NS(INTEL_VSEC);
+MODULE_IMPORT_NS("INTEL_VSEC");
MODULE_DESCRIPTION("Intel TPMI enumeration module");
MODULE_LICENSE("GPL");
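The new tpmi_get_debugfs_dir() export lets a TPMI feature driver (such as the plr entry added above) place its own files under the parent TPMI debugfs directory. A rough sketch, assuming the prototype is exposed via <linux/intel_tpmi.h> and that priv and example_fops belong to the feature driver:

#include <linux/auxiliary_bus.h>
#include <linux/debugfs.h>
#include <linux/intel_tpmi.h>

static void example_add_debugfs(struct auxiliary_device *auxdev, void *priv,
				const struct file_operations *example_fops)
{
	struct dentry *dir = tpmi_get_debugfs_dir(auxdev);

	/* Skip quietly if the parent has no debugfs directory. */
	if (dir)
		debugfs_create_file("status", 0444, dir, priv, example_fops);
}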
diff --git a/drivers/platform/x86/intel/wmi/sbl-fw-update.c b/drivers/platform/x86/intel/wmi/sbl-fw-update.c
index 040153ad67c1..75c82c08117f 100644
--- a/drivers/platform/x86/intel/wmi/sbl-fw-update.c
+++ b/drivers/platform/x86/intel/wmi/sbl-fw-update.c
@@ -131,6 +131,7 @@ static struct wmi_driver intel_wmi_sbl_fw_update_driver = {
.probe = intel_wmi_sbl_fw_update_probe,
.remove = intel_wmi_sbl_fw_update_remove,
.id_table = intel_wmi_sbl_id_table,
+ .no_singleton = true,
};
module_wmi_driver(intel_wmi_sbl_fw_update_driver);
diff --git a/drivers/platform/x86/intel/wmi/thunderbolt.c b/drivers/platform/x86/intel/wmi/thunderbolt.c
index e2ad3f46f356..08df560a2c7a 100644
--- a/drivers/platform/x86/intel/wmi/thunderbolt.c
+++ b/drivers/platform/x86/intel/wmi/thunderbolt.c
@@ -63,6 +63,7 @@ static struct wmi_driver intel_wmi_thunderbolt_driver = {
.dev_groups = tbt_groups,
},
.id_table = intel_wmi_thunderbolt_id_table,
+ .no_singleton = true,
};
module_wmi_driver(intel_wmi_thunderbolt_driver);